code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from typing import Literal, Tuple, Any, Optional
import hydra
import omegaconf
import pytorch_lightning as pl
import rul_datasets
from rul_adapt.approach import LatentAlignApproach
def get_latent_align(
    dataset: Literal["cmapss", "xjtu-sy"],
    source_fd: int,
    target_fd: int,
    xjtu_sy_subtask: Optional[int] = None,
    **trainer_kwargs: Any,
) -> Tuple[rul_datasets.LatentAlignDataModule, LatentAlignApproach, pl.Trainer]:
    """
    Construct a Latent Alignment approach for the selected dataset using the
    original hyperparameters.

    For the XJTU-SY task only FD001 and FD002 are available. The subtask selects
    whether the bearing with id 1 or 2 serves as the target data.

    Examples:
        ```pycon
        >>> import rul_adapt
        >>> dm, latent, trainer = rul_adapt.construct.get_latent_align("cmapss", 3, 1)
        >>> trainer.fit(latent, dm)
        >>> trainer.test(latent, dm)
        ```

    Args:
        dataset: The dataset to use.
        source_fd: The source FD.
        target_fd: The target FD.
        xjtu_sy_subtask: The subtask for the XJTU-SY (either 1 or 2).
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of the sub-datasets.
        latent_align: The Latent Alignment approach with feature extractor and
            regressor.
        trainer: The trainer object.
    """
    # Build the hydra config first, then instantiate everything from it.
    config = get_latent_align_config(dataset, source_fd, target_fd, xjtu_sy_subtask)
    return latent_align_from_config(config, **trainer_kwargs)
def get_latent_align_config(
    dataset: Literal["cmapss", "xjtu-sy"],
    source_fd: int,
    target_fd: int,
    xjtu_sy_subtask: Optional[int] = None,
) -> omegaconf.DictConfig:
    """
    Get a configuration for the Latent Alignment approach.

    For the XJTU-SY task only FD001 and FD002 are available. The subtask selects
    whether the bearing with id 1 or 2 serves as the target data. The returned
    configuration can be modified and fed to [latent_align_from_config]
    [rul_adapt.construct.latent_align.latent_align_from_config] to create the
    approach.

    Args:
        dataset: The dataset to use.
        source_fd: The source FD.
        target_fd: The target FD.
        xjtu_sy_subtask: The subtask for the XJTU-SY (either 1 or 2).
    Returns:
        The Latent Alignment configuration.
    """
    _validate(dataset, source_fd, target_fd, xjtu_sy_subtask)
    # Common overrides for both datasets.
    overrides = [
        f"+dataset={dataset}",
        f"dm.source.fd={source_fd}",
        f"dm.target.fd={target_fd}",
    ]
    # Dataset-specific override: XJTU-SY selects a subtask, CMAPSS split steps.
    if dataset == "xjtu-sy":
        extra_override = f"+subtask=SUB{target_fd}{xjtu_sy_subtask}"
    else:
        extra_override = f"+split_steps=FD00{target_fd}"
    overrides.append(extra_override)
    with hydra.initialize("config", version_base="1.1"):
        return hydra.compose("base", overrides)
def latent_align_from_config(
    config: omegaconf.DictConfig, **trainer_kwargs: Any
) -> Tuple[rul_datasets.LatentAlignDataModule, LatentAlignApproach, pl.Trainer]:
    """
    Construct a Latent Alignment approach from a configuration.

    The configuration can be created by calling [get_latent_align_config]
    [rul_adapt.construct.latent_align.get_latent_align_config].

    Args:
        config: The Latent Alignment configuration.
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of the sub-datasets.
        latent_align: The Latent Alignment approach with feature extractor and
            regressor.
        trainer: The trainer object.
    """
    instantiate = hydra.utils.instantiate
    # Build source and target readers and wrap them in a paired data module.
    source_reader = instantiate(config.dm.source)
    target_reader = instantiate(config.dm.target)
    dm_kwargs = instantiate(config.dm.kwargs)
    dm = rul_datasets.LatentAlignDataModule(
        rul_datasets.RulDataModule(source_reader, **dm_kwargs),
        rul_datasets.RulDataModule(target_reader, **dm_kwargs),
        **config.dm.adaption_kwargs,
    )
    # Assemble the approach from its feature extractor and regressor networks.
    approach = instantiate(config.latent_align)
    approach.set_model(
        instantiate(config.feature_extractor), instantiate(config.regressor)
    )
    trainer = instantiate(config.trainer, **trainer_kwargs)
    return dm, approach, trainer
def _validate(
dataset: Literal["cmapss", "xjtu-sy"],
source_fd: int,
target_fd: int,
xjtu_sy_subtask: Optional[int],
) -> None:
if dataset not in ["cmapss", "xjtu-sy"]:
raise ValueError(f"No configuration for '{dataset}'.")
elif source_fd == target_fd:
raise ValueError(
f"No configuration for adapting from FD{source_fd:03} to itself."
)
elif dataset == "cmapss":
_validate_cmapss(source_fd, target_fd)
elif dataset == "xjtu-sy":
_validate_xjtu_sy(source_fd, target_fd, xjtu_sy_subtask)
def _validate_cmapss(source_fd: int, target_fd: int):
if 1 > source_fd or source_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{source_fd:03}")
elif 1 > target_fd or target_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{target_fd:03}")
def _validate_xjtu_sy(source_fd: int, target_fd: int, subtask: Optional[int]):
if 1 > source_fd or source_fd > 2:
raise ValueError(
"Only FD001 and FD002 of XJTU-SY are used in "
f"this approach but not FD{source_fd:03}."
)
elif 1 > target_fd or target_fd > 2:
raise ValueError(
"Only FD001 and FD002 of XJTU-SY are used in "
f"this approach but not FD{target_fd:03}."
)
elif subtask is None:
raise ValueError("XJTU-SY requires a subtask of 1 or 2.")
elif 1 > subtask or subtask > 2:
raise ValueError(
f"XJTU-SY has only subtasks 1 and 2 but not subtask {subtask}."
) | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/construct/latent_align/functional.py | 0.905128 | 0.760406 | functional.py | pypi |
from typing import List, Type, Optional
import torch
from torch import nn
from rul_adapt import utils
from rul_adapt.utils import pairwise
class FullyConnectedHead(nn.Module):
    """A fully connected (FC) network usable as a RUL regressor or a domain
    discriminator.

    The network is a stack of fully connected layers with ReLU activation
    functions by default. The activation function is customizable via the
    `act_func` parameter, and `act_func_on_last_layer=False` omits the
    activation on the final layer.

    The data flow is as follows: `Inputs --> FC x n --> Outputs`

    The expected input shape is `[batch_size, num_features]`.

    Examples:
        >>> import torch
        >>> from rul_adapt.model import FullyConnectedHead
        >>> regressor = FullyConnectedHead(32, [16, 1])
        >>> outputs = regressor(torch.randn(10, 32))
        >>> outputs.shape
        torch.Size([10, 1])
        >>> type(outputs.grad_fn)
        <class 'ReluBackward0'>
    """

    def __init__(
        self,
        input_channels: int,
        units: List[int],
        dropout: float = 0.0,
        act_func: Type[nn.Module] = nn.ReLU,
        act_func_on_last_layer: bool = True,
    ) -> None:
        """
        Create a new fully connected head network.

        The `units` are the number of output units for each FC layer; the number
        of output features is `units[-1]`. If dropout is used, it is applied in
        *each* layer, including the input layer.

        Args:
            input_channels: The number of input channels.
            units: The number of output units for the FC layers.
            dropout: The dropout probability before each layer. Set to zero to
                     deactivate.
            act_func: The activation function for each layer.
            act_func_on_last_layer: Whether to add the activation function to the
                                    last layer.
        """
        super().__init__()
        self.input_channels = input_channels
        self.units = units
        self.dropout = dropout
        # Allow act_func to be given as a string like "torch.nn.ReLU".
        self.act_func: Type[nn.Module] = utils.str2callable(  # type: ignore[assignment]
            act_func, restriction="torch.nn"
        )
        self.act_func_on_last_layer = act_func_on_last_layer
        if not self.units:
            raise ValueError("Cannot build head network with no layers.")
        self._layers = self._get_layers()

    def _get_layers(self) -> nn.Module:
        # Chain the input width with all layer widths to get (in, out) pairs.
        layer_sizes = [self.input_channels] + self.units
        last_idx = len(self.units) - 1
        network = nn.Sequential()
        for idx, (fan_in, fan_out) in enumerate(pairwise(layer_sizes)):
            # Drop the activation on the final layer if configured to do so.
            with_act = self.act_func_on_last_layer or idx != last_idx
            act = self.act_func if with_act else None
            network.append(self._get_fc_layer(fan_in, fan_out, act))
        return network

    def _get_fc_layer(
        self, in_units: int, out_units: int, act_func: Optional[Type[nn.Module]]
    ) -> nn.Module:
        # Assemble [Dropout?] -> Linear -> [act?] as a small sub-network.
        modules: List[nn.Module] = []
        if self.dropout > 0:
            modules.append(nn.Dropout(self.dropout))
        modules.append(nn.Linear(in_units, out_units))
        if act_func is not None:
            modules.append(act_func())
        return nn.Sequential(*modules)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        return self._layers(inputs)
from typing import List, Optional, Union, Type
import torch
from torch import nn
from rul_adapt import utils
from rul_adapt.utils import pairwise
class CnnExtractor(nn.Module):
    """A Convolutional Neural Network (CNN) based network that extracts a feature
    vector from same-length time windows.
    This feature extractor consists of multiple CNN layers and an optional fully
    connected (FC) layer. Each CNN layer can be configured with a number of filters
    and a kernel size. Additionally, batch normalization, same-padding and dropout
    can be applied. The fully connected layer can have a separate dropout
    probability.
    Both CNN and FC layers use ReLU activation functions by default. Custom
    activation functions can be set for each layer type.
    The data flow is as follows: `Input --> CNN x n --> [FC] --> Output`
    The expected input shape is `[batch_size, num_features, window_size]`. The output
    of this network is always flattened to `[batch_size, num_extracted_features]`.
    Examples:
        Without FC
        >>> import torch
        >>> from rul_adapt.model import CnnExtractor
        >>> cnn = CnnExtractor(14,units=[16, 1],seq_len=30)
        >>> cnn(torch.randn(10, 14, 30)).shape
        torch.Size([10, 26])
        With FC
        >>> import torch
        >>> from rul_adapt.model import CnnExtractor
        >>> cnn = CnnExtractor(14,units=[16, 1],seq_len=30,fc_units=16)
        >>> cnn(torch.randn(10, 14, 30)).shape
        torch.Size([10, 16])
    """
    def __init__(
        self,
        input_channels: int,
        units: List[int],
        seq_len: int,
        kernel_size: Union[int, List[int]] = 3,
        dilation: int = 1,
        stride: int = 1,
        padding: bool = False,
        fc_units: Optional[int] = None,
        dropout: float = 0.0,
        fc_dropout: float = 0.0,
        batch_norm: bool = False,
        act_func: Type[nn.Module] = nn.ReLU,
        fc_act_func: Type[nn.Module] = nn.ReLU,
    ):
        """
        Create a new CNN-based feature extractor.
        The `units` are the number of output filters for each CNN layer. The
        `seq_len` is needed to calculate the input units for the FC layer. The kernel
        size of each CNN layer can be set by passing a list to `kernel_size`. If an
        integer is passed, each layer has the same kernel size. If `padding` is true,
        same-padding is applied before each CNN layer, which keeps the window_size
        the same. If `batch_norm` is set, batch normalization is applied for each CNN
        layer. If `fc_units` is set, a fully connected layer is appended.
        Dropout can be applied to each CNN layer by setting `dropout` to a
        number greater than zero. The same is valid for the fully connected layer and
        `fc_dropout`. Dropout will never be applied to the input layer.
        The whole network uses ReLU activation functions. This can be customized by
        setting either `act_func` or `fc_act_func`.
        Args:
            input_channels: The number of input channels.
            units: The list of output filters for the CNN layers.
            seq_len: The window_size of the input data.
            kernel_size: The kernel size for the CNN layers. Passing an integer uses
                         the same kernel size for each layer.
            dilation: The dilation for the CNN layers.
            stride: The stride for the CNN layers.
            padding: Whether to apply same-padding before each CNN layer.
            fc_units: Number of output units for the fully connected layer.
            dropout: The dropout probability for the CNN layers.
            fc_dropout: The dropout probability for the fully connected layer.
            batch_norm: Whether to use batch normalization on the CNN layers.
            act_func: The activation function for the CNN layers.
            fc_act_func: The activation function for the fully connected layer.
        """
        super().__init__()
        self.input_channels = input_channels
        self.units = units
        self.seq_len = seq_len
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.stride = stride
        self.padding = padding
        self.fc_units = fc_units
        self.dropout = dropout
        self.fc_dropout = fc_dropout
        self.batch_norm = batch_norm
        # str2callable allows act funcs to be given as strings like "torch.nn.ReLU".
        self.act_func = utils.str2callable(act_func, restriction="torch.nn")
        self.fc_act_func = utils.str2callable(fc_act_func, restriction="torch.nn")
        # Broadcast a scalar kernel size to one entry per CNN layer.
        self._kernel_sizes = (
            [self.kernel_size] * len(self.units)
            if isinstance(self.kernel_size, int)
            else self.kernel_size
        )
        self._layers = self._get_layers()
    def _get_layers(self) -> nn.Module:
        layers = nn.Sequential()
        filter_iter = pairwise([self.input_channels] + self.units)
        layer_iter = zip(filter_iter, self._kernel_sizes)
        for i, ((in_ch, out_ch), ks) in enumerate(layer_iter):
            layers.add_module(f"conv_{i}", self._get_conv_layer(in_ch, out_ch, ks, i))
        # Flatten the remaining [channels, time] dims before the optional FC layer.
        layers.append(nn.Flatten())
        if self.fc_units is not None:
            flat_dim = self._get_flat_dim()
            layers.add_module("fc", self._get_fc_layer(flat_dim, self.fc_units))
        return layers
    def _get_conv_layer(
        self,
        input_channels: int,
        output_channels: int,
        kernel_size: int,
        num_layer: int,
    ) -> nn.Module:
        conv_layer = nn.Sequential()
        # num_layer > 0 skips dropout for the first layer, i.e. on the raw input.
        if num_layer > 0 and self.dropout > 0:
            conv_layer.append(nn.Dropout1d(self.dropout))
        conv_layer.append(
            nn.Conv1d(
                input_channels,
                output_channels,
                kernel_size,
                dilation=self.dilation,
                stride=self.stride,
                # The bias is redundant when batch norm follows the convolution.
                bias=not self.batch_norm,
                padding=self._get_padding(),
            )
        )
        if self.batch_norm:
            conv_layer.append(nn.BatchNorm1d(output_channels))
        conv_layer.append(self.act_func())
        return conv_layer
    def _get_fc_layer(self, input_units: int, output_units: int) -> nn.Module:
        fc_layer = nn.Sequential()
        if self.fc_dropout > 0:
            fc_layer.append(nn.Dropout(self.fc_dropout))
        fc_layer.append(nn.Linear(input_units, output_units))
        fc_layer.append(self.fc_act_func())
        return fc_layer
    def _get_padding(self) -> str:
        return "same" if self.padding else "valid"
    def _get_flat_dim(self) -> int:
        # Replays the Conv1d output-length formula for each layer:
        # L_out = (L_in - dilation * (kernel - 1) - 1) // stride + 1,
        # where the dilation term vanishes under same-padding.
        after_conv = self.seq_len
        for kernel_size in self._kernel_sizes:
            cut_off = 0 if self.padding else self.dilation * (kernel_size - 1)
            after_conv = (after_conv - cut_off - 1) // self.stride + 1
        flat_dim = after_conv * self.units[-1]
        return flat_dim
    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        return self._layers(inputs)
from copy import deepcopy
from typing import Dict, List, Optional, Tuple, Any, Callable
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset, get_worker_info
from rul_datasets import utils
from rul_datasets.reader import AbstractReader
class RulDataModule(pl.LightningDataModule):
    """
    A [data module][pytorch_lightning.core.LightningDataModule] to provide windowed
    time series features with RUL targets. It exposes the splits of the underlying
    dataset for easy usage with PyTorch and PyTorch Lightning.
    The data module implements the `hparams` property used by PyTorch Lightning to
    save hyperparameters to checkpoints. It retrieves the hyperparameters of its
    underlying reader and adds the batch size to them.
    If you want to extract features from the windows, you can pass the
    `feature_extractor` and `window_size` arguments to the constructor. The
    `feature_extractor` is a callable that takes a windowed time series as a numpy
    array with the shape `[num_windows, window_size, num_features]` and returns
    another numpy array. Depending on `window_size`, the expected output shapes for
    the `feature_extractor` are:
    * `window_size is None`: `[num_new_windows, new_window_size, features]`
    * `window_size is not None`: `[num_windows, features]`
    If `window_size` is set, the extracted features are re-windowed.
    Examples:
        Default
        >>> import rul_datasets
        >>> cmapss = rul_datasets.reader.CmapssReader(fd=1)
        >>> dm = rul_datasets.RulDataModule(cmapss, batch_size=32)
        With Feature Extractor
        >>> import rul_datasets
        >>> import numpy as np
        >>> cmapss = rul_datasets.reader.CmapssReader(fd=1)
        >>> dm = rul_datasets.RulDataModule(
        ...     cmapss,
        ...     batch_size=32,
        ...     feature_extractor=lambda x: np.mean(x, axis=1),
        ...     window_size=10
        ... )
    """
    # Split name ("dev"/"val"/"test") -> (features, targets) tensors; set by setup().
    _data: Dict[str, Tuple[torch.Tensor, torch.Tensor]]
    def __init__(
        self,
        reader: AbstractReader,
        batch_size: int,
        feature_extractor: Optional[Callable] = None,
        window_size: Optional[int] = None,
    ):
        """
        Create a new RUL data module from a reader.
        This data module exposes a training, validation and test data loader for the
        underlying dataset. First, `prepare_data` is called to download and
        pre-process the dataset. Afterwards, `setup_data` is called to load all
        splits into memory.
        If a `feature_extractor` is supplied, the data module extracts new features
        from each window of the time series. If `window_size` is `None`,
        it is assumed that the extracted features form a new windows themselves. If
        `window_size` is an int, it is assumed that the extracted features are a
        single feature vectors and should be re-windowed. The expected output shapes
        for the `feature_extractor` are:
        * `window_size is None`: `[num_new_windows, new_window_size, features]`
        * `window_size is not None`: `[num_windows, features]`
        The expected input shape for the `feature_extractor` is always
        `[num_windows, window_size, features]`.
        Args:
            reader: The dataset reader for the desired dataset, e.g. CmapssLoader.
            batch_size: The size of the batches build by the data loaders.
            feature_extractor: A feature extractor that extracts feature vectors from
                               windows.
            window_size: The new window size to apply after the feature extractor.
        """
        super().__init__()
        self._reader: AbstractReader = reader
        self.batch_size = batch_size
        self.feature_extractor = feature_extractor
        self.window_size = window_size
        # Re-windowing without an extractor would be a no-op, so reject it early.
        if (self.feature_extractor is None) and (self.window_size is not None):
            raise ValueError(
                "A feature extractor has to be supplied "
                "to set a window size for re-windowing."
            )
        # Augment the reader's hparams so they end up in Lightning checkpoints.
        hparams = deepcopy(self.reader.hparams)
        hparams["batch_size"] = self.batch_size
        hparams["feature_extractor"] = (
            str(self.feature_extractor) if self.feature_extractor else None
        )
        hparams["window_size"] = self.window_size or hparams["window_size"]
        self.save_hyperparameters(hparams)
    @property
    def data(self) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
        """
        A dictionary of the training, validation and test splits.
        Each split is a tuple of feature and target tensors.
        The keys are `dev` (training split), `val` (validation split) and `test`
        (test split).
        """
        return self._data
    @property
    def reader(self) -> AbstractReader:
        """The underlying dataset reader."""
        return self._reader
    @property
    def fds(self):
        """Index list of the available subsets of the underlying dataset, i.e.
        `[1, 2, 3, 4]` for `CMAPSS`."""
        return self._reader.fds
    def check_compatibility(self, other: "RulDataModule") -> None:
        """
        Check if another RulDataModule is compatible to be used together with this one.
        RulDataModules can be used together in higher-order data modules,
        e.g. AdaptionDataModule. This function checks if `other` is compatible to
        this data module to do so. It checks the underlying dataset readers and
        matching batch size and window size. If anything is incompatible,
        this function will raise a ValueError.
        Args:
            other: The RulDataModule to check compatibility with.
        """
        # NOTE(review): the feature_extractor is *not* compared here, so two
        # modules with different extractors pass this check — confirm intended.
        try:
            self.reader.check_compatibility(other.reader)
        except ValueError:
            raise ValueError("RulDataModules incompatible on reader level.")
        if not self.batch_size == other.batch_size:
            raise ValueError(
                f"The batch size of both data modules has to be the same, "
                f"{self.batch_size} vs. {other.batch_size}."
            )
        if not self.window_size == other.window_size:
            raise ValueError(
                f"The window size of both data modules has to be the same, "
                f"{self.window_size} vs. {other.window_size}."
            )
    def is_mutually_exclusive(self, other: "RulDataModule") -> bool:
        """
        Check if the other data module is mutually exclusive to this one. See
        [AbstractReader.is_mutually_exclusive]
        [rul_datasets.reader.abstract.AbstractReader.is_mutually_exclusive].
        Args:
            other: Data module to check exclusivity against.
        Returns:
            Whether both data modules are mutually exclusive.
        """
        return self.reader.is_mutually_exclusive(other.reader)
    def prepare_data(self, *args: Any, **kwargs: Any) -> None:
        """
        Download and pre-process the underlying data.
        This calls the `prepare_data` function of the underlying reader. All
        previously completed preparation steps are skipped. It is called
        automatically by `pytorch_lightning` and executed on the first GPU in
        distributed mode.
        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        """
        self.reader.prepare_data()
    def setup(self, stage: Optional[str] = None) -> None:
        """
        Load all splits as tensors into memory and optionally apply feature extractor.
        The splits are placed inside the [data][rul_datasets.core.RulDataModule.data]
        property. If a split is empty, a tuple of empty tensors with the correct
        number of dimensions is created as a placeholder. This ensures compatibility
        with higher-order data modules.
        If the data module was constructed with a `feature_extractor` argument,
        the feature windows are passed to the feature extractor. The resulting,
        new features may be re-windowed.
        Args:
            stage: Ignored. Only for adhering to parent class interface.
        """
        self._data = {
            "dev": self._setup_split("dev"),
            "val": self._setup_split("val"),
            "test": self._setup_split("test"),
        }
    def load_split(
        self, split: str, alias: Optional[str] = None
    ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
        """
        Load a split from the underlying reader and apply the feature extractor.
        By setting alias, it is possible to load a split aliased as another split,
        e.g. load the test split and treat it as the dev split. The data of the split is
        loaded but all pre-processing steps of alias are carried out.
        Args:
            split: The desired split to load.
            alias: The split as which the loaded data should be treated.
        Returns:
            The feature and target tensors of the split's runs.
        """
        features, targets = self.reader.load_split(split, alias)
        features, targets = self._apply_feature_extractor_per_run(features, targets)
        tensor_features, tensor_targets = utils.to_tensor(features, targets)
        return tensor_features, tensor_targets
    def _setup_split(
        self, split: str, alias: Optional[str] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Concatenate all runs of a split into single feature/target tensors.
        features, targets = self.load_split(split, alias)
        if features:
            cat_features, cat_targets = torch.cat(features), torch.cat(targets)
        else:
            # Empty split: placeholders with the correct number of dimensions.
            cat_features, cat_targets = torch.empty(0, 0, 0), torch.empty(0)
        return cat_features, cat_targets
    def _apply_feature_extractor_per_run(
        self, features: List[np.ndarray], targets: List[np.ndarray]
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        extracted = [self._extract_and_window(f, t) for f, t in zip(features, targets)]
        features, targets = zip(*extracted) if extracted else ((), ())
        return list(features), list(targets)
    def _extract_and_window(
        self, features: np.ndarray, targets: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        # NOTE(review): the extractor is called with (features, targets) and is
        # expected to return both, which differs from the class docstring's
        # description of a features-only callable — confirm the intended contract.
        if self.feature_extractor is not None:
            features, targets = self.feature_extractor(features, targets)
        if self.window_size is not None:
            # Re-windowing shortens the run; drop targets without a full window.
            cutoff = self.window_size - 1
            features = utils.extract_windows(features, self.window_size)
            targets = targets[cutoff:]
        return features, targets
    def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """
        Create a [data loader][torch.utils.data.DataLoader] for the training split.
        The data loader is configured to shuffle the data. The `pin_memory` option is
        activated to achieve maximum transfer speed to the GPU. The data loader is also
        configured to drop the last batch of the data if it would only contain one
        sample.
        The whole split is held in memory. Therefore, the `num_workers` are set to
        zero which uses the main process for creating batches.
        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The training data loader
        """
        dataset = self.to_dataset("dev")
        # Drop the last batch only if it would contain a single sample.
        drop_last = len(dataset) % self.batch_size == 1
        loader = DataLoader(
            dataset,
            batch_size=self.batch_size,
            shuffle=True,
            drop_last=drop_last,
            pin_memory=True,
        )
        return loader
    def val_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """
        Create a [data loader][torch.utils.data.DataLoader] for the validation split.
        The data loader is configured to leave the data unshuffled. The `pin_memory`
        option is activated to achieve maximum transfer speed to the GPU.
        The whole split is held in memory. Therefore, the `num_workers` are set to
        zero which uses the main process for creating batches.
        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The validation data loader
        """
        return DataLoader(
            self.to_dataset("val"),
            batch_size=self.batch_size,
            shuffle=False,
            pin_memory=True,
        )
    def test_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """
        Create a [data loader][torch.utils.data.DataLoader] for the test split.
        The data loader is configured to leave the data unshuffled. The `pin_memory`
        option is activated to achieve maximum transfer speed to the GPU.
        The whole split is held in memory. Therefore, the `num_workers` are set to
        zero which uses the main process for creating batches.
        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The test data loader
        """
        return DataLoader(
            self.to_dataset("test"),
            batch_size=self.batch_size,
            shuffle=False,
            pin_memory=True,
        )
    def to_dataset(self, split: str, alias: Optional[str] = None) -> TensorDataset:
        """
        Create a dataset of a split.
        This convenience function creates a plain [tensor dataset]
        [torch.utils.data.TensorDataset] to use outside the `rul_datasets` library.
        The data placed inside the dataset will be from the specified `split`. If
        `alias` is set, the loaded data will be treated as if from the `alias` split.
        For example, one could load the test data and treat them as if it was the
        training data. This may be useful for inductive domain adaption.
        Args:
            split: The split to place inside the dataset.
            alias: The split the loaded data should be treated as.
        Returns:
            A dataset containing the requested split.
        """
        if (alias is None) or (split == alias):
            # Fast path: reuse the tensors already loaded by setup().
            features, targets = self._data[split]
        else:
            features, targets = self._setup_split(split, alias)
        split_dataset = TensorDataset(features, targets)
        return split_dataset
class PairedRulDataset(IterableDataset):
    """A dataset of sample pairs drawn from the same time series.

    Each sample is a tuple of `(anchor, query, distance, domain_label)` where the
    anchor and query windows come from the same run and the distance between them
    is normalized by the maximum RUL. How the pair indices are drawn depends on
    `mode`: `"linear"`, `"piecewise"` or `"labeled"`.
    """
    def __init__(
        self,
        readers: List[AbstractReader],
        split: str,
        num_samples: int,
        min_distance: int,
        deterministic: bool = False,
        mode: str = "linear",
    ):
        """
        Create a new paired dataset over the given readers.

        Args:
            readers: The readers to draw runs from; all must be compatible.
            split: The split to load from each reader.
            num_samples: The number of pairs produced per iteration pass.
            min_distance: The minimum index distance between anchor and query.
            deterministic: Whether to reseed the RNG on each iteration pass.
            mode: The pair sampling strategy, one of `"linear"`, `"piecewise"`
                  or `"labeled"`.

        Raises:
            ValueError: If `mode` is unknown or a reader has no `max_rul` set.
        """
        super().__init__()
        self.readers = readers
        self.split = split
        self.min_distance = min_distance
        self.num_samples = num_samples
        self.deterministic = deterministic
        self.mode = mode
        # All readers must be pairwise compatible; comparing against the first
        # suffices because compatibility is transitive on reader level.
        for reader in self.readers:
            reader.check_compatibility(self.readers[0])
        self._run_domain_idx: np.ndarray
        self._features: List[np.ndarray]
        self._labels: List[np.ndarray]
        self._prepare_datasets()
        self._max_rul = self._get_max_rul()
        self._curr_iter = 0
        self._rng = self._reset_rng()
        if mode == "linear":
            self._get_pair_func = self._get_pair_idx
        elif mode == "piecewise":
            self._get_pair_func = self._get_pair_idx_piecewise
        elif mode == "labeled":
            self._get_pair_func = self._get_labeled_pair_idx
        else:
            # Fail fast: previously an unknown mode left _get_pair_func unset,
            # deferring the failure to an opaque AttributeError during iteration.
            raise ValueError(
                f"Unknown sampling mode '{mode}'. "
                "Expected one of 'linear', 'piecewise' or 'labeled'."
            )
    def _get_max_rul(self):
        """Return the largest max_rul of all readers; all of them must be set."""
        max_ruls = [reader.max_rul for reader in self.readers]
        if any(m is None for m in max_ruls):
            raise ValueError(
                "PairedRulDataset needs a set max_rul for all readers "
                "but at least one of them is None."
            )
        max_rul = max(max_ruls)
        return max_rul
    def _prepare_datasets(self):
        """Load all runs, keeping only those long enough to draw a pair from."""
        run_domain_idx = []
        features = []
        labels = []
        for domain_idx, reader in enumerate(self.readers):
            run_features, run_labels = reader.load_split(self.split)
            for feat, lab in zip(run_features, run_labels):
                # Runs shorter than min_distance cannot yield a valid pair.
                if len(feat) > self.min_distance:
                    run_domain_idx.append(domain_idx)
                    features.append(feat)
                    labels.append(lab)
        self._run_domain_idx = np.array(run_domain_idx)
        self._features = features
        self._labels = labels
    def _reset_rng(self, seed=42) -> np.random.Generator:
        return np.random.default_rng(seed=seed)
    def __len__(self) -> int:
        return self.num_samples
    def __iter__(self):
        """Reset the iteration counter and reseed the RNG depending on mode."""
        self._curr_iter = 0
        worker_info = get_worker_info()
        if self.deterministic and worker_info is not None:
            raise RuntimeError(
                "PairedDataset cannot run deterministic in multiprocessing"
            )
        elif self.deterministic:
            self._rng = self._reset_rng()
        elif worker_info is not None:
            # Use the worker seed so each data loader worker draws different pairs.
            self._rng = self._reset_rng(worker_info.seed)
        return self
    def __next__(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        if self._curr_iter < self.num_samples:
            run_idx, anchor_idx, query_idx, dist, domain_label = self._get_pair_func()
            self._curr_iter += 1
            run = self._features[run_idx]
            return self._build_pair(run, anchor_idx, query_idx, dist, domain_label)
        else:
            raise StopIteration
    def _get_pair_idx(self) -> Tuple[int, int, int, int, int]:
        """Draw a pair at most max_rul steps apart from a random run."""
        chosen_run_idx = self._rng.integers(0, len(self._features))
        domain_label = self._run_domain_idx[chosen_run_idx]
        chosen_run = self._features[chosen_run_idx]
        run_length = chosen_run.shape[0]
        anchor_idx = self._rng.integers(
            low=0,
            high=run_length - self.min_distance,
        )
        end_idx = min(run_length, anchor_idx + self._max_rul)
        query_idx = self._rng.integers(
            low=anchor_idx + self.min_distance,
            high=end_idx,
        )
        distance = query_idx - anchor_idx
        return chosen_run_idx, anchor_idx, query_idx, distance, domain_label
    def _get_pair_idx_piecewise(self) -> Tuple[int, int, int, int, int]:
        """Draw a pair where distances in the first half of a run count as zero."""
        chosen_run_idx = self._rng.integers(0, len(self._features))
        domain_label = self._run_domain_idx[chosen_run_idx]
        chosen_run = self._features[chosen_run_idx]
        run_length = chosen_run.shape[0]
        middle_idx = run_length // 2
        anchor_idx = self._rng.integers(
            low=0,
            high=run_length - self.min_distance,
        )
        end_idx = (
            middle_idx if anchor_idx < (middle_idx - self.min_distance) else run_length
        )
        query_idx = self._rng.integers(
            low=anchor_idx + self.min_distance,
            high=end_idx,
        )
        distance = query_idx - anchor_idx if anchor_idx > middle_idx else 0
        return chosen_run_idx, anchor_idx, query_idx, distance, domain_label
    def _get_labeled_pair_idx(self) -> Tuple[int, int, int, int, int]:
        """Draw a pair whose distance comes from the RUL labels themselves."""
        chosen_run_idx = self._rng.integers(0, len(self._features))
        domain_label = self._run_domain_idx[chosen_run_idx]
        chosen_run = self._features[chosen_run_idx]
        chosen_labels = self._labels[chosen_run_idx]
        run_length = chosen_run.shape[0]
        anchor_idx = self._rng.integers(
            low=0,
            high=run_length - self.min_distance,
        )
        query_idx = self._rng.integers(
            low=anchor_idx + self.min_distance,
            high=run_length,
        )
        # RUL label difference is negative time step difference
        distance = int(chosen_labels[anchor_idx] - chosen_labels[query_idx])
        return chosen_run_idx, anchor_idx, query_idx, distance, domain_label
    def _build_pair(
        self,
        run: np.ndarray,
        anchor_idx: int,
        query_idx: int,
        distance: int,
        domain_label: int,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Convert a drawn index pair into tensors with a normalized distance."""
        anchors = utils.feature_to_tensor(run[anchor_idx], torch.float)
        queries = utils.feature_to_tensor(run[query_idx], torch.float)
        domain_tensor = torch.tensor(domain_label, dtype=torch.float)
        distances = torch.tensor(distance, dtype=torch.float) / self._max_rul
        distances = torch.clamp_max(distances, max=1)  # max distance is max_rul
        return anchors, queries, distances, domain_tensor
import warnings
from typing import Any, Optional
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from rul_datasets.adaption import AdaptionDataset
from rul_datasets.core import RulDataModule
class SemiSupervisedDataModule(pl.LightningDataModule):
    """
    A higher-order [data module][pytorch_lightning.core.LightningDataModule] used for
    semi-supervised learning with a labeled data module and an unlabeled one. It
    makes sure that both data modules come from the same sub-dataset.

    Examples:
        >>> import rul_datasets
        >>> fd1 = rul_datasets.CmapssReader(fd=1, window_size=20, percent_fail_runs=0.5)
        >>> fd1_complement = fd1.get_complement(percent_broken=0.8)
        >>> labeled = rul_datasets.RulDataModule(fd1, 32)
        >>> unlabeled = rul_datasets.RulDataModule(fd1_complement, 32)
        >>> dm = rul_datasets.SemiSupervisedDataModule(labeled, unlabeled)
        >>> train_ssl = dm.train_dataloader()
        >>> val = dm.val_dataloader()
        >>> test = dm.test_dataloader()
    """

    def __init__(self, labeled: RulDataModule, unlabeled: RulDataModule) -> None:
        """
        Create a new semi-supervised data module from a labeled and unlabeled
        [RulDataModule][rul_datasets.RulDataModule].

        Both data modules are checked for compatibility (see [RulDataModule]
        [rul_datasets.core.RulDataModule.check_compatibility]). These
        checks include that the `fd` match between them.

        Args:
            labeled: The data module of the labeled dataset.
            unlabeled: The data module of the unlabeled dataset.
        """
        super().__init__()

        self.labeled = labeled
        self.unlabeled = unlabeled
        self.batch_size = labeled.batch_size

        self._check_compatibility()

        self.save_hyperparameters(
            {
                "fd": self.labeled.reader.fd,
                "batch_size": self.batch_size,
                "window_size": self.labeled.reader.window_size,
                "max_rul": self.labeled.reader.max_rul,
                "percent_broken_unlabeled": self.unlabeled.reader.percent_broken,
                "percent_fail_runs_labeled": self.labeled.reader.percent_fail_runs,
            }
        )

    def _check_compatibility(self) -> None:
        """Check that both data modules fit together and warn about
        configurations that may produce misleading results."""
        self.labeled.check_compatibility(self.unlabeled)
        if not self.labeled.reader.fd == self.unlabeled.reader.fd:
            raise ValueError(
                "FD of source and target has to be the same for "
                "semi-supervised learning, but they are "
                f"{self.labeled.reader.fd} and {self.unlabeled.reader.fd}."
            )
        if self.unlabeled.reader.percent_broken is None:
            # fix: separate the sentences with spaces so the emitted warning
            # is readable
            warnings.warn(
                "The unlabeled data is not truncated by 'percent_broken'. "
                "This may lead to unrealistically good results. "
                "If this was intentional, please set `percent_broken` "
                "to 1.0 to silence this warning."
            )
        if not self.labeled.is_mutually_exclusive(self.unlabeled):
            warnings.warn(
                "The data modules are not mutually exclusive. "
                "This means there is an overlap between labeled and "
                "unlabeled data, which should not be that case for "
                "semi-supervised learning. You can check this by calling "
                "'is_mutually_exclusive' on a reader or RulDataModule."
            )

    def prepare_data(self, *args: Any, **kwargs: Any) -> None:
        """
        Download and pre-process the underlying data.

        This calls the `prepare_data` function for source and target domain. All
        previously completed preparation steps are skipped. It is called
        automatically by `pytorch_lightning` and executed on the first GPU in
        distributed mode.

        Args:
            *args: Passed down to each data module's `prepare_data` function.
            **kwargs: Passed down to each data module's `prepare_data` function.
        """
        self.labeled.prepare_data(*args, **kwargs)
        self.unlabeled.prepare_data(*args, **kwargs)

    def setup(self, stage: Optional[str] = None) -> None:
        """
        Load labeled and unlabeled data into memory.

        Args:
            stage: Passed down to each data module's `setup` function.
        """
        self.labeled.setup(stage)
        self.unlabeled.setup(stage)

    def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """
        Create a data loader of an [AdaptionDataset]
        [rul_datasets.adaption.AdaptionDataset] using labeled and unlabeled.

        The data loader is configured to shuffle the data. The `pin_memory` option is
        activated to achieve maximum transfer speed to the GPU.

        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The training data loader
        """
        return DataLoader(
            self._to_dataset("dev"),
            batch_size=self.batch_size,
            shuffle=True,
            pin_memory=True,
        )

    def val_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """
        Create a data loader of the labeled validation data.

        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The labeled validation data loader.
        """
        return self.labeled.val_dataloader()

    def test_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """
        Create a data loader of the labeled test data.

        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The labeled test data loader.
        """
        return self.labeled.test_dataloader()

    def _to_dataset(self, split: str) -> "AdaptionDataset":
        """Combine the labeled and unlabeled split into one adaption dataset."""
        labeled = self.labeled.to_dataset(split)
        unlabeled = self.unlabeled.to_dataset(split)
        dataset = AdaptionDataset(labeled, unlabeled)

        return dataset
import warnings
from copy import deepcopy
from typing import List, Optional, Any, Tuple, Callable, Sequence, Union, cast
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import ConcatDataset, TensorDataset
from rul_datasets import utils
from rul_datasets.core import PairedRulDataset, RulDataModule
class DomainAdaptionDataModule(pl.LightningDataModule):
    """
    A higher-order [data module][pytorch_lightning.core.LightningDataModule] used for
    unsupervised domain adaption of a labeled source to an unlabeled target domain.
    The training data of both domains is wrapped in a [AdaptionDataset]
    [rul_datasets.adaption.AdaptionDataset] which provides a random sample of the
    target domain with each sample of the source domain. It provides the validation and
    test splits of both domains, and optionally a [paired dataset]
    [rul_datasets.core.PairedRulDataset] for both.

    Examples:
        >>> import rul_datasets
        >>> fd1 = rul_datasets.CmapssReader(fd=1, window_size=20)
        >>> fd2 = rul_datasets.CmapssReader(fd=2, percent_broken=0.8)
        >>> source = rul_datasets.RulDataModule(fd1, 32)
        >>> target = rul_datasets.RulDataModule(fd2, 32)
        >>> dm = rul_datasets.DomainAdaptionDataModule(source, target)
        >>> train_1_2 = dm.train_dataloader()
        >>> val_1, val_2 = dm.val_dataloader()
        >>> test_1, test_2 = dm.test_dataloader()
    """

    def __init__(
        self,
        source: RulDataModule,
        target: RulDataModule,
        paired_val: bool = False,
        inductive: bool = False,
    ) -> None:
        """
        Create a new domain adaption data module from a source and target
        [RulDataModule][rul_datasets.RulDataModule]. The source domain is considered
        labeled and the target domain unlabeled.

        The source and target data modules are checked for compatibility (see
        [RulDataModule][rul_datasets.core.RulDataModule.check_compatibility]). These
        checks include that the `fd` differs between them, as they come from the same
        domain otherwise.

        Args:
            source: The data module of the labeled source domain.
            target: The data module of the unlabeled target domain.
            paired_val: Whether to include paired data in validation.
            inductive: Whether to train on the target domain's test split
                instead of its dev split (inductive domain adaption).
        """
        super().__init__()

        self.source = source
        self.target = target
        self.paired_val = paired_val
        self.batch_size = source.batch_size
        self.inductive = inductive

        # truncated copy of the target reader, used for paired validation data
        self.target_truncated = deepcopy(self.target.reader)
        self.target_truncated.truncate_val = True

        self._check_compatibility()

        self.save_hyperparameters(
            {
                "fd_source": self.source.reader.fd,
                "fd_target": self.target.reader.fd,
                "batch_size": self.batch_size,
                "window_size": self.source.reader.window_size,
                "max_rul": self.source.reader.max_rul,
                "percent_broken": self.target.reader.percent_broken,
                "percent_fail_runs": self.target.reader.percent_fail_runs,
            }
        )

    def _check_compatibility(self) -> None:
        """Check that source and target fit together and warn about
        configurations that may produce misleading results."""
        self.source.check_compatibility(self.target)
        self.target.reader.check_compatibility(self.target_truncated)
        if self.source.reader.fd == self.target.reader.fd:
            raise ValueError(
                f"FD of source and target has to be different for "
                f"domain adaption, but is {self.source.reader.fd} both times."
            )
        if self.target.reader.percent_broken is None:
            # fix: separate the sentences with spaces so the emitted warning
            # is readable
            warnings.warn(
                "The target domain is not truncated by 'percent_broken'. "
                "This may lead to unrealistically good results. "
                "If this was intentional, please set `percent_broken` "
                "to 1.0 to silence this warning."
            )

    def prepare_data(self, *args: Any, **kwargs: Any) -> None:
        """
        Download and pre-process the underlying data.

        This calls the `prepare_data` function for source and target domain. All
        previously completed preparation steps are skipped. It is called
        automatically by `pytorch_lightning` and executed on the first GPU in
        distributed mode.

        Args:
            *args: Passed down to each data module's `prepare_data` function.
            **kwargs: Passed down to each data module's `prepare_data` function.
        """
        self.source.prepare_data(*args, **kwargs)
        self.target.prepare_data(*args, **kwargs)

    def setup(self, stage: Optional[str] = None) -> None:
        """
        Load source and target domain into memory.

        Args:
            stage: Passed down to each data module's `setup` function.
        """
        self.source.setup(stage)
        self.target.setup(stage)

    def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """
        Create a data loader of an [AdaptionDataset]
        [rul_datasets.adaption.AdaptionDataset] using source and target domain.

        The data loader is configured to shuffle the data. The `pin_memory` option is
        activated to achieve maximum transfer speed to the GPU.

        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The training data loader
        """
        return DataLoader(
            self._get_training_dataset(),
            batch_size=self.batch_size,
            shuffle=True,
            pin_memory=True,
        )

    def val_dataloader(self, *args: Any, **kwargs: Any) -> List[DataLoader]:
        """
        Create a data loader of the source, target and paired validation data.

        By default, two data loaders are returned, which correspond to the source
        and the target validation data loader. An optional third is a data loader of a
        [PairedRulDataset][rul_datasets.core.PairedRulDataset] using both source and
        target is returned if `paired_val` was set to `True` in the constructor.

        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The source, target and an optional paired validation data loader.
        """
        loaders = [
            self.source.val_dataloader(*args, **kwargs),
            self.target.val_dataloader(*args, **kwargs),
        ]
        if self.paired_val:
            loaders.append(
                DataLoader(
                    self._get_paired_dataset(),
                    batch_size=self.batch_size,
                    pin_memory=True,
                )
            )

        return loaders

    def test_dataloader(self, *args: Any, **kwargs: Any) -> List[DataLoader]:
        """
        Create a data loader of the source and target test data.

        The data loaders are the return values of `source.test_dataloader`
        and `target.test_dataloader`.

        Args:
            *args: Ignored. Only for adhering to parent class interface.
            **kwargs: Ignored. Only for adhering to parent class interface.
        Returns:
            The source and target test data loader.
        """
        return [
            self.source.test_dataloader(*args, **kwargs),
            self.target.test_dataloader(*args, **kwargs),
        ]

    def _get_training_dataset(self) -> "AdaptionDataset":
        """Combine the source dev split with the (unlabeled) target split."""
        source = self.source.to_dataset("dev")
        target = self.target.to_dataset(
            "test" if self.inductive else "dev", alias="dev"
        )
        dataset = AdaptionDataset(source, target)

        return dataset

    def _get_paired_dataset(self) -> PairedRulDataset:
        """Build a deterministic paired dataset from the truncated target reader."""
        paired = PairedRulDataset(
            [self.target_truncated],
            "val",
            num_samples=25000,
            min_distance=1,
            deterministic=True,
        )

        return paired
class LatentAlignDataModule(DomainAdaptionDataModule):
    """
    A higher-order [data module][pytorch_lightning.core.LightningDataModule] based on
    [DomainAdaptionDataModule][rul_datasets.adaption.DomainAdaptionDataModule].

    It is specifically made to work with the latent space alignment approach by Zhang
    et al. The training data of both domains is wrapped in a [AdaptionDataset]
    [rul_datasets.adaption.AdaptionDataset] which splits the data into healthy and
    degrading. For each sample of degrading source data, a random sample of degrading
    target data and healthy sample of either source or target data is drawn. The
    number of steps in degradation are supplied for each degrading sample, as well.
    The data module also provides the validation and test splits of both domains, and
    optionally a [paired dataset][rul_datasets.core.PairedRulDataset] for both.

    Examples:
        >>> import rul_datasets
        >>> fd1 = rul_datasets.CmapssReader(fd=1, window_size=20)
        >>> fd2 = rul_datasets.CmapssReader(fd=2, percent_broken=0.8)
        >>> source = rul_datasets.RulDataModule(fd1, 32)
        >>> target = rul_datasets.RulDataModule(fd2, 32)
        >>> dm = rul_datasets.LatentAlignDataModule(source, target)
        >>> train_1_2 = dm.train_dataloader()
        >>> val_1, val_2 = dm.val_dataloader()
        >>> test_1, test_2 = dm.test_dataloader()
    """

    def __init__(
        self,
        source: RulDataModule,
        target: RulDataModule,
        paired_val: bool = False,
        inductive: bool = False,
        split_by_max_rul: bool = False,
        split_by_steps: Optional[int] = None,
    ) -> None:
        """
        Create a new latent align data module from a source and target
        [RulDataModule][rul_datasets.RulDataModule]. The source domain is considered
        labeled and the target domain unlabeled.

        The source and target data modules are checked for compatibility (see
        [RulDataModule][rul_datasets.core.RulDataModule.check_compatibility]). These
        checks include that the `fd` differs between them, as they come from the same
        domain otherwise.

        The healthy and degrading data can be split by either maximum RUL value or
        the number of time steps. See [split_healthy]
        [rul_datasets.adaption.split_healthy] for more information.

        Args:
            source: The data module of the labeled source domain.
            target: The data module of the unlabeled target domain.
            paired_val: Whether to include paired data in validation.
            inductive: Whether to train on the target domain's test split
                instead of its dev split (inductive domain adaption).
            split_by_max_rul: Whether to split healthy and degrading by max RUL value.
            split_by_steps: Split the healthy and degrading data after this number of
                time steps.
        """
        super().__init__(source, target, paired_val, inductive)

        if not split_by_max_rul and (split_by_steps is None):
            raise ValueError(
                "Either 'split_by_max_rul' or 'split_by_steps' need to be set."
            )

        self.split_by_max_rul = split_by_max_rul
        self.split_by_steps = split_by_steps

    def _get_training_dataset(self) -> "AdaptionDataset":
        """Split both domains into healthy and degrading parts and combine them
        into one adaption dataset of (source degraded, target degraded, healthy)."""
        # the labeled source data is always split by its max RUL value
        source_healthy, source_degraded = split_healthy(
            *self.source.load_split("dev"), by_max_rul=True
        )
        target_healthy, target_degraded = split_healthy(
            *self.target.load_split("test" if self.inductive else "dev", alias="dev"),
            self.split_by_max_rul,
            self.split_by_steps,
        )
        # healthy samples may come from either domain
        healthy: Dataset = ConcatDataset([source_healthy, target_healthy])

        dataset = AdaptionDataset(source_degraded, target_degraded, healthy)

        return dataset
def split_healthy(
    features: Union[List[np.ndarray], List[torch.Tensor]],
    targets: Union[List[np.ndarray], List[torch.Tensor]],
    by_max_rul: bool = False,
    by_steps: Optional[int] = None,
) -> Tuple[TensorDataset, TensorDataset]:
    """
    Split the feature and target time series into healthy and degrading parts and
    return a dataset of each.

    If `by_max_rul` is set to `True` the time steps with the maximum RUL value in
    each time series is considered healthy. This option is intended for labeled data
    with piece-wise linear RUL functions. If `by_steps` is set to an integer,
    the first `by_steps` time steps of each series are considered healthy. This
    option is intended for unlabeled data or data with a linear RUL function.

    One option has to be set and both are mutually exclusive.

    Args:
        features: List of feature time series.
        targets: List of target time series.
        by_max_rul: Whether to split healthy and degrading data by max RUL value.
        by_steps: Split healthy and degrading data after this number of time steps.
    Returns:
        healthy: Dataset of healthy data.
        degrading: Dataset of degrading data.
    """
    if not by_max_rul and (by_steps is None):
        raise ValueError("Either 'by_max_rul' or 'by_steps' need to be set.")
    # normalize inputs to tensors; the casts have no runtime effect and only
    # narrow the types for mypy
    if isinstance(features[0], np.ndarray):
        features, targets = cast(Tuple[List[np.ndarray], ...], (features, targets))
        _features, _targets = utils.to_tensor(features, targets)
    else:
        _features, _targets = cast(Tuple[List[torch.Tensor], ...], (features, targets))
    healthy = []
    degraded = []
    for feature, target in zip(_features, _targets):
        sections = _get_sections(by_max_rul, by_steps, target)
        healthy_feat, degraded_feat = torch.split(feature, sections)
        healthy_target, degraded_target = torch.split(target, sections)
        # degradation steps count from one over the degrading part only
        degradation_steps = torch.arange(1, len(degraded_target) + 1)
        healthy.append((healthy_feat, healthy_target))
        degraded.append((degraded_feat, degradation_steps, degraded_target))

    healthy_dataset = _to_dataset(healthy)
    degraded_dataset = _to_dataset(degraded)

    return healthy_dataset, degraded_dataset
def _get_sections(
by_max_rul: bool, by_steps: Optional[int], target: torch.Tensor
) -> List[int]:
# cast is needed for mypy and has no runtime effect
if by_max_rul:
split_idx = cast(int, target.flip(0).argmax().item())
sections = [len(target) - split_idx, split_idx]
else:
by_steps = min(cast(int, by_steps), len(target))
sections = [by_steps, len(target) - by_steps]
return sections
def _to_dataset(data: Sequence[Tuple[torch.Tensor, ...]]) -> TensorDataset:
tensor_data = [torch.cat(h) for h in zip(*data)]
dataset = TensorDataset(*tensor_data)
return dataset
class AdaptionDataset(Dataset):
    """
    A torch [dataset][torch.utils.data.Dataset] for unsupervised domain adaption. The
    dataset takes a labeled source and one or multiple unlabeled target [dataset]
    [torch.utils.data.Dataset] and combines them.

    For each label/features pair from the source dataset, a random sample of features
    is drawn from each target dataset. The datasets are supposed to provide a sample
    as a tuple of tensors. The target datasets' labels are assumed to be the last
    element of the tuple and are omitted. The datasets length is determined by the
    source dataset. This setup can be used to train with common unsupervised domain
    adaption methods like DAN, DANN or JAN.

    Examples:
        >>> import torch
        >>> import rul_datasets
        >>> source = torch.utils.data.TensorDataset(torch.randn(10), torch.randn(10))
        >>> target = torch.utils.data.TensorDataset(torch.randn(10), torch.randn(10))
        >>> dataset = rul_datasets.adaption.AdaptionDataset(source, target)
        >>> source_features, source_label, target_features = dataset[0]
    """

    _unlabeled_idx: np.ndarray
    _get_unlabeled_idx: Callable

    def __init__(
        self, labeled: Dataset, *unlabeled: Dataset, deterministic: bool = False
    ) -> None:
        """
        Create a new adaption data set from a labeled source and one or multiple
        unlabeled target dataset.

        By default, a random sample is drawn from each target dataset when a source
        sample is accessed. This is the recommended setting for training. To
        deactivate this behavior and fix the pairing of source and target samples,
        set `deterministic` to `True`. This is the recommended setting for evaluation.

        Args:
            labeled: The dataset from the labeled domain.
            *unlabeled: The dataset(s) from the unlabeled domain(s).
            deterministic: Return the same target sample for each source sample.
        """
        self.labeled = labeled
        self.unlabeled = unlabeled
        self.deterministic = deterministic
        self._unlabeled_len = [len(dataset) for dataset in self.unlabeled]  # type: ignore

        if not self.deterministic:
            # draw a fresh target index on every access
            self._rng = np.random.default_rng()
            self._get_unlabeled_idx = self._get_random_unlabeled_idx
        else:
            # pre-draw all target indices once with a fixed seed
            self._rng = np.random.default_rng(seed=42)
            idx_shape = (len(self), len(self._unlabeled_len))
            self._unlabeled_idx = self._rng.integers(0, self._unlabeled_len, idx_shape)
            self._get_unlabeled_idx = self._get_deterministic_unlabeled_idx

    def _get_random_unlabeled_idx(self, _: int) -> np.ndarray:
        return self._rng.integers(0, self._unlabeled_len)

    def _get_deterministic_unlabeled_idx(self, idx: int) -> np.ndarray:
        return self._unlabeled_idx[idx]

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, ...]:
        combined = self.labeled[idx]
        unlabeled_indices = self._get_unlabeled_idx(idx)
        for dataset, unlabeled_idx in zip(self.unlabeled, unlabeled_indices):
            combined = combined + dataset[unlabeled_idx][:-1]  # omit the label

        return combined

    def __len__(self) -> int:
        return len(self.labeled)  # type: ignore
class PretrainingAdaptionDataModule(pl.LightningDataModule):
    """
    A higher-order [data module][pytorch_lightning.core.LightningDataModule] that
    provides paired samples of a labeled source and an unlabeled, truncated target
    domain for pre-training.

    Training serves a [PairedRulDataset][rul_datasets.core.PairedRulDataset] built
    from both domains. Validation additionally provides the plain validation data
    loaders of each domain.
    """

    def __init__(
        self,
        source: RulDataModule,
        target: RulDataModule,
        num_samples: int,
        min_distance: int = 1,
        distance_mode: str = "linear",
    ):
        """
        Create a new pre-training data module from a labeled source and an
        unlabeled target [RulDataModule][rul_datasets.RulDataModule].

        Both data modules are checked for compatibility. The `fd` has to differ
        between them. The target reader needs a `percent_broken` smaller than one,
        while the source reader must not be truncated this way.

        Args:
            source: The data module of the labeled source domain.
            target: The data module of the unlabeled target domain.
            num_samples: Number of sample pairs drawn for training.
            min_distance: Minimum number of time steps between paired samples.
            distance_mode: Mode for calculating the distance between paired samples.
        """
        super().__init__()
        self.source = source
        self.target = target
        self.num_samples = num_samples
        self.batch_size = source.batch_size
        self.min_distance = min_distance
        self.distance_mode = distance_mode

        self.target_loader = self.target.reader
        self.source_loader = self.source.reader

        self._check_compatibility()

        self.save_hyperparameters(
            {
                "fd_source": self.source_loader.fd,
                "fd_target": self.target_loader.fd,
                "num_samples": self.num_samples,
                "batch_size": self.batch_size,
                "window_size": self.source_loader.window_size,
                "max_rul": self.source_loader.max_rul,
                "min_distance": self.min_distance,
                "percent_broken": self.target_loader.percent_broken,
                "percent_fail_runs": self.target_loader.percent_fail_runs,
                "truncate_target_val": self.target_loader.truncate_val,
                "distance_mode": self.distance_mode,
            }
        )

    def _check_compatibility(self) -> None:
        """Validate that source and target readers are suitable for pre-training."""
        self.source.check_compatibility(self.target)
        if self.source_loader.fd == self.target_loader.fd:
            # fix: error message previously read "bot times"
            raise ValueError(
                f"FD of source and target has to be different for "
                f"domain adaption, but is {self.source_loader.fd} both times."
            )
        if (
            self.target_loader.percent_broken is None
            or self.target_loader.percent_broken == 1.0
        ):
            raise ValueError(
                "Target data needs a percent_broken smaller than 1 for pre-training."
            )
        if (
            self.source_loader.percent_broken is not None
            and self.source_loader.percent_broken < 1.0
        ):
            raise ValueError(
                "Source data cannot have a percent_broken smaller than 1, "
                "otherwise it would not be failed, labeled data."
            )
        if not self.target_loader.truncate_val:
            warnings.warn(
                "Validation data of unfailed runs is not truncated. "
                "The validation metrics will not be valid."
            )

    def prepare_data(self, *args, **kwargs):
        """Download and pre-process source and target data."""
        self.source_loader.prepare_data()
        self.target_loader.prepare_data()

    def setup(self, stage: Optional[str] = None):
        """Load source and target domain into memory."""
        self.source.setup(stage)
        self.target.setup(stage)

    def train_dataloader(self, *args, **kwargs) -> DataLoader:
        """Create a data loader of paired training samples from both domains."""
        return DataLoader(
            self._get_paired_dataset("dev"), batch_size=self.batch_size, pin_memory=True
        )

    def val_dataloader(self, *args, **kwargs) -> List[DataLoader]:
        """Create data loaders of the paired, source and target validation data."""
        combined_loader = DataLoader(
            self._get_paired_dataset("val"), batch_size=self.batch_size, pin_memory=True
        )
        source_loader = self.source.val_dataloader()
        target_loader = self.target.val_dataloader()

        return [combined_loader, source_loader, target_loader]

    def _get_paired_dataset(self, split: str) -> PairedRulDataset:
        """Build a paired dataset; validation pairs are deterministic."""
        deterministic = split == "val"
        min_distance = 1 if split == "val" else self.min_distance
        num_samples = 50000 if split == "val" else self.num_samples
        paired = PairedRulDataset(
            [self.source_loader, self.target_loader],
            split,
            num_samples,
            min_distance,
            deterministic,
            mode=self.distance_mode,
        )

        return paired
import warnings
from copy import deepcopy
from typing import List, Optional, Any
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from rul_datasets.core import PairedRulDataset, RulDataModule
class BaselineDataModule(pl.LightningDataModule):
    """
    A higher-order [data module][pytorch_lightning.core.LightningDataModule] that
    takes a [RulDataModule][rul_datasets.core.RulDataModule]. It provides the
    training and validation splits of the sub-dataset selected in the underlying data
    module but provides the test splits of all available subsets of the dataset. This
    makes it easy to evaluate the generalization of a supervised model on all
    sub-datasets.

    Examples:
        >>> import rul_datasets
        >>> cmapss = rul_datasets.reader.CmapssReader(fd=1)
        >>> dm = rul_datasets.RulDataModule(cmapss, batch_size=32)
        >>> baseline_dm = rul_datasets.BaselineDataModule(dm)
        >>> train_fd1 = baseline_dm.train_dataloader()
        >>> val_fd1 = baseline_dm.val_dataloader()
        >>> test_fd1, test_fd2, test_fd3, test_fd4 = baseline_dm.test_dataloader()
    """

    def __init__(self, data_module: RulDataModule) -> None:
        """
        Create a new baseline data module from a [RulDataModule]
        [rul_datasets.RulDataModule].

        It will provide a data loader of the underlying data module's training and
        validation splits. Additionally, it provides a data loader of the test split
        of all sub-datasets.

        The data module keeps the configuration made in the underlying data module.
        The same configuration is then passed on to create RulDataModules for all
        sub-datasets, beside `percent_fail_runs` and `percent_broken`.

        Args:
            data_module: the underlying RulDataModule
        """
        super().__init__()
        self.data_module = data_module

        hparams = self.data_module.hparams
        self.save_hyperparameters(hparams)

        # one data module per sub-dataset, keyed by its FD number
        self.subsets = {}
        for fd in self.data_module.fds:
            self.subsets[fd] = self._get_fd(fd)

    def _get_fd(self, fd: int) -> RulDataModule:
        """Return the wrapped data module for its own FD, or an untruncated copy
        of it configured for another FD."""
        if fd == self.hparams["fd"]:
            dm = self.data_module
        else:
            loader = deepcopy(self.data_module.reader)
            loader.fd = fd
            # other FDs are evaluation-only, so they are never truncated
            loader.percent_fail_runs = None
            loader.percent_broken = None
            dm = RulDataModule(loader, self.data_module.batch_size)

        return dm

    def prepare_data(self, *args: Any, **kwargs: Any) -> None:
        """
        Download and pre-process the underlying data.

        This calls the `prepare_data` function for all sub-datasets. All
        previously completed preparation steps are skipped. It is called
        automatically by `pytorch_lightning` and executed on the first GPU in
        distributed mode.

        Args:
            *args: Passed down to each data module's `prepare_data` function.
            **kwargs: Passed down to each data module's `prepare_data` function..
        """
        for dm in self.subsets.values():
            dm.prepare_data(*args, **kwargs)

    def setup(self, stage: Optional[str] = None) -> None:
        """
        Load all splits as tensors into memory.

        Args:
            stage: Passed down to each data module's `setup` function.
        """
        for dm in self.subsets.values():
            dm.setup(stage)

    def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """See [rul_datasets.core.RulDataModule.train_dataloader][]."""
        return self.data_module.train_dataloader(*args, **kwargs)

    def val_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
        """See [rul_datasets.core.RulDataModule.val_dataloader][]."""
        return self.data_module.val_dataloader(*args, **kwargs)

    def test_dataloader(self, *args: Any, **kwargs: Any) -> List[DataLoader]:
        """
        Return data loaders for all sub-datasets.

        Args:
            *args: Passed down to each data module.
            **kwargs: Passed down to each data module.
        Returns:
            The test dataloaders of all sub-datasets.
        """
        test_dataloaders = []
        for fd_target in self.data_module.fds:
            target_dl = self.subsets[fd_target].test_dataloader(*args, **kwargs)
            test_dataloaders.append(target_dl)

        return test_dataloaders
class PretrainingBaselineDataModule(pl.LightningDataModule):
    """
    A higher-order [data module][pytorch_lightning.core.LightningDataModule] that
    provides paired samples from a failed and an unfailed, truncated portion of the
    same sub-dataset for pre-training.

    Training serves a [PairedRulDataset][rul_datasets.core.PairedRulDataset] built
    from both readers. Validation additionally provides the plain validation data
    loader of the unfailed data.
    """

    def __init__(
        self,
        failed_data_module: RulDataModule,
        unfailed_data_module: RulDataModule,
        num_samples: int,
        min_distance: int = 1,
        distance_mode: str = "linear",
    ):
        """
        Create a new pre-training data module from a failed and an unfailed
        [RulDataModule][rul_datasets.RulDataModule].

        Both data modules are checked for compatibility. They need to use the same
        `fd`, mutually exclusive lists of runs, and only the unfailed reader may be
        truncated by `percent_broken`.

        Args:
            failed_data_module: The data module of the failed, labeled runs.
            unfailed_data_module: The data module of the unfailed, truncated runs.
            num_samples: Number of sample pairs drawn for training.
            min_distance: Minimum number of time steps between paired samples.
            distance_mode: Mode for calculating the distance between paired samples.
        """
        super().__init__()
        self.failed_loader = failed_data_module.reader
        self.unfailed_loader = unfailed_data_module.reader
        self.num_samples = num_samples
        self.batch_size = failed_data_module.batch_size
        self.min_distance = min_distance
        self.distance_mode = distance_mode
        self.window_size = self.unfailed_loader.window_size
        self.source = unfailed_data_module

        self._check_loaders()

        self.save_hyperparameters(
            {
                "fd_source": self.unfailed_loader.fd,
                "num_samples": self.num_samples,
                "batch_size": self.batch_size,
                "window_size": self.window_size,
                "max_rul": self.unfailed_loader.max_rul,
                "min_distance": self.min_distance,
                "percent_broken": self.unfailed_loader.percent_broken,
                "percent_fail_runs": self.failed_loader.percent_fail_runs,
                "truncate_val": self.unfailed_loader.truncate_val,
                "distance_mode": self.distance_mode,
            }
        )

    def _check_loaders(self) -> None:
        """Validate that the failed and unfailed readers fit together."""
        self.failed_loader.check_compatibility(self.unfailed_loader)
        if not self.failed_loader.fd == self.unfailed_loader.fd:
            raise ValueError("Failed and unfailed data need to come from the same FD.")
        if self.failed_loader.percent_fail_runs is None or isinstance(
            self.failed_loader.percent_fail_runs, float
        ):
            raise ValueError(
                "Failed data needs list of failed runs "
                "for pre-training but uses a float or is None."
            )
        if self.unfailed_loader.percent_fail_runs is None or isinstance(
            self.unfailed_loader.percent_fail_runs, float
        ):
            raise ValueError(
                "Unfailed data needs list of failed runs "
                "for pre-training but uses a float or is None."
            )
        if set(self.failed_loader.percent_fail_runs).intersection(
            self.unfailed_loader.percent_fail_runs
        ):
            # fix: error message previously read "mututally"
            raise ValueError(
                "Runs of failed and unfailed data overlap. "
                "Please use mutually exclusive sets of runs."
            )
        if (
            self.unfailed_loader.percent_broken is None
            or self.unfailed_loader.percent_broken == 1.0
        ):
            raise ValueError(
                "Unfailed data needs a percent_broken smaller than 1 for pre-training."
            )
        if (
            self.failed_loader.percent_broken is not None
            and self.failed_loader.percent_broken < 1.0
        ):
            raise ValueError(
                "Failed data cannot have a percent_broken smaller than 1, "
                "otherwise it would not be failed data."
            )
        if not self.unfailed_loader.truncate_val:
            warnings.warn(
                "Validation data of unfailed runs is not truncated. "
                "The validation metrics will not be valid."
            )

    def prepare_data(self, *args, **kwargs):
        """Download and pre-process the underlying data."""
        self.unfailed_loader.prepare_data()

    def setup(self, stage: Optional[str] = None):
        """Load the unfailed data module into memory."""
        self.source.setup(stage)

    def train_dataloader(self, *args, **kwargs) -> DataLoader:
        """Create a data loader of paired training samples."""
        return DataLoader(
            self._get_paired_dataset("dev"), batch_size=self.batch_size, pin_memory=True
        )

    def val_dataloader(self, *args, **kwargs) -> List[DataLoader]:
        """Create data loaders of the paired and the unfailed validation data."""
        combined_loader = DataLoader(
            self._get_paired_dataset("val"), batch_size=self.batch_size, pin_memory=True
        )
        source_loader = self.source.val_dataloader()

        return [combined_loader, source_loader]

    def _get_paired_dataset(self, split: str) -> PairedRulDataset:
        """Build a paired dataset; validation pairs are deterministic."""
        deterministic = split == "val"
        min_distance = 1 if split == "val" else self.min_distance
        num_samples = 25000 if split == "val" else self.num_samples
        paired = PairedRulDataset(
            [self.unfailed_loader, self.failed_loader],
            split,
            num_samples,
            min_distance,
            deterministic,
            mode=self.distance_mode,
        )

        return paired
import os
from typing import List, Optional, Callable, Dict, Tuple
import numpy as np
import requests # type: ignore
import torch
from tqdm import tqdm # type: ignore
def get_files_in_path(path: str, condition: Optional[Callable] = None) -> List[str]:
    """
    Return the paths of all files in a path that satisfy a condition in alphabetical
    order.

    If the condition is `None` all files are returned.

    Args:
        path: the path to look into
        condition: the include-condition for files

    Returns:
        all files that satisfy the condition in alphabetical order
    """
    file_names = os.listdir(path)
    if condition is not None:
        # the condition is applied to the bare file name, not the joined path
        file_names = [f for f in file_names if condition(f)]

    return sorted(os.path.join(path, f) for f in file_names)
def get_targets_from_file_paths(
    file_paths: Dict[int, List[str]], timestep_from_file_path: Callable
) -> Dict[int, np.ndarray]:
    """
    Create the RUL targets based on the file paths of the feature files.

    The function extracts the feature file path from each path. The supplied
    conversion function extracts the time step from it. Afterwards the RUL is
    calculated by subtracting each time step from the maximum time step plus 1.

    Args:
        file_paths: runs represented as dict of feature file paths
        timestep_from_file_path: Function to convert a feature file path to a time step

    Returns:
        A list of RUL target arrays for each run
    """
    targets = {}
    for run_idx, run_files in file_paths.items():
        # dtype=float keeps the original float64 output regardless of whether
        # the conversion function returns ints
        time_steps = np.array(
            [timestep_from_file_path(f) for f in run_files], dtype=float
        )
        targets[run_idx] = np.max(time_steps) - time_steps + 1

    return targets
def extract_windows(seq: np.ndarray, window_size: int) -> np.ndarray:
    """
    Extract sliding windows (step size one) from a sequence.

    Produces `len(seq) - window_size + 1` windows; the result has the shape
    [num_windows, window_size, num_channels].

    Args:
        seq: sequence to extract windows from
        window_size: length of the sliding window

    Returns:
        array of sliding windows

    Raises:
        ValueError: if the sequence is shorter than the window.
    """
    if window_size > len(seq):
        raise ValueError(
            f"Cannot extract windows of size {window_size} "
            f"from a sequence of length {len(seq)}."
        )

    num_windows = seq.shape[0] - window_size + 1
    # each window's start offset plus the in-window offsets -> index matrix
    gather_idx = np.arange(num_windows)[:, None] + np.arange(window_size)[None, :]

    return seq[gather_idx]
def download_file(url: str, save_path: str) -> None:
    """Stream a file from `url` to `save_path` with a progress bar.

    Raises:
        RuntimeError: if the server does not answer with status 200.
    """
    # context manager closes the connection even when an error is raised
    with requests.get(url, stream=True) as response:
        if not response.status_code == 200:
            raise RuntimeError(f"Download failed. Server returned {response.status_code}")
        # Content-Length may be absent; fall back to an unknown total for tqdm
        content_len = int(response.headers.get("Content-Length", 0)) // 1024 or None
        with open(save_path, mode="wb") as f:
            for data in tqdm(response.iter_content(chunk_size=1024), total=content_len):
                f.write(data)
def to_tensor(
    features: List[np.ndarray], *targets: List[np.ndarray]
) -> Tuple[List[torch.Tensor], ...]:
    """Convert feature arrays (channels-first) and any number of target lists to
    float32 tensors."""
    dtype = torch.float32
    converted_features = [feature_to_tensor(feat, dtype) for feat in features]
    converted_targets = tuple(
        [torch.tensor(t, dtype=dtype) for t in target_list] for target_list in targets
    )

    return (converted_features, *converted_targets)
def feature_to_tensor(features: np.ndarray, dtype: torch.dtype) -> torch.Tensor:
    """Convert a feature array to a tensor with the last two axes swapped.

    NOTE(review): presumably converts [time, channels] (or [windows, time,
    channels]) to a channels-before-time layout — confirm with callers.
    """
    if len(features.shape) == 2:
        # 2D input: swap to [dim1, dim0]
        return torch.tensor(features, dtype=dtype).permute(1, 0)
    else:
        # 3D input: keep the first axis, swap the remaining two
        return torch.tensor(features, dtype=dtype).permute(0, 2, 1) | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/utils.py | 0.932222 | 0.498108 | utils.py | pypi
from typing import List, Tuple, Iterable, Union, Optional
import numpy as np
def truncate_runs(
    features: List[np.ndarray],
    targets: List[np.ndarray],
    percent_broken: Optional[float] = None,
    included_runs: Optional[Union[float, Iterable[int]]] = None,
    degraded_only: bool = False,
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
    """
    Truncate RUL data according to `percent_broken` and `included_runs`.

    RUL data has two dimensions in which it can be truncated: the number of runs and
    the length of the runs. Truncating the number of runs limits the inter-run
    variety of the data. Truncating the length of the run limits the amount of
    available data near failure.

    For more information about truncation, see the [reader][rul_datasets.reader]
    module page.

    Examples:
        Truncating via `percent_broken`
        >>> import numpy as np
        >>> from rul_datasets.reader.truncating import truncate_runs
        >>> features = [np.random.randn(i*100, 5) for i in range(1, 6)]
        >>> targets = [np.arange(i*100)[::-1] for i in range(1, 6)]
        >>> (features[0].shape, targets[0].shape)
        ((100, 5), (100,))
        >>> features, targets = truncate_runs(features, targets, percent_broken=0.8)
        >>> (features[0].shape, targets[0].shape) # runs are shorter
        ((80, 5), (80,))
        >>> np.min(targets[0]) # runs contain no failures
        20

    Args:
        features: feature arrays, one per run
        targets: RUL target arrays, one per run
        percent_broken: fraction of each run to keep, counted from the start
        included_runs: fraction or index list of runs to keep
        degraded_only: apply `percent_broken` only to the degraded part of a run

    Returns:
        The truncated features and targets.
    """
    # Truncate the number of runs
    if included_runs is not None:
        features, targets = _truncate_included(features, targets, included_runs)

    # Truncate the number of samples per run, starting at failure
    # (percent_broken >= 1 would keep everything, so the work is skipped)
    if percent_broken is not None and percent_broken < 1:
        features, targets = _truncate_broken(
            features, targets, percent_broken, degraded_only
        )

    return features, targets
def _truncate_included(
    features: List[np.ndarray],
    targets: List[np.ndarray],
    included_runs: Union[float, Iterable[int]],
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
    """Dispatch run selection on the type of `included_runs`.

    A float selects the first fraction of runs, an iterable selects runs by index.

    Raises:
        ValueError: if `included_runs` is neither a float nor an iterable.
    """
    if isinstance(included_runs, float):
        features, targets = _truncate_included_by_percentage(
            features, targets, included_runs
        )
    elif isinstance(included_runs, Iterable):
        features, targets = _truncate_included_by_index(
            features, targets, included_runs
        )
    else:
        # previously an unsupported type (e.g. a bare int) was silently ignored
        raise ValueError(
            f"included_runs must be a float or an iterable of ints, "
            f"not {type(included_runs)}."
        )

    return features, targets
def _truncate_broken(
features: List[np.ndarray],
targets: List[np.ndarray],
percent_broken: float,
degraded_only: bool,
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
features = features.copy() # avoid mutating original list
targets = targets.copy() # avoid mutating original list
for i, (run, target) in enumerate(zip(features, targets)):
if degraded_only:
num_healthy = np.sum(target == np.max(target))
num_degraded = len(run) - num_healthy
num_cycles = num_healthy + int(percent_broken * num_degraded)
else:
num_cycles = int(percent_broken * len(run))
features[i] = run[:num_cycles]
targets[i] = target[:num_cycles]
return features, targets
def _truncate_included_by_index(
features: List[np.ndarray], targets: List[np.ndarray], included_idx: Iterable[int]
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
features = [features[i] for i in included_idx]
targets = [targets[i] for i in included_idx]
return features, targets
def _truncate_included_by_percentage(
    features: List[np.ndarray], targets: List[np.ndarray], percent_included: float
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
    """Keep only the first `percent_included` fraction of runs (count is floored)."""
    num_runs = int(percent_included * len(features))
    features = features[:num_runs]
    targets = targets[:num_runs]
    return features, targets | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/reader/truncating.py | 0.927802 | 0.826327 | truncating.py | pypi
import os
import tempfile
import warnings
import zipfile
from typing import Union, List, Tuple, Dict, Optional
import numpy as np
from sklearn import preprocessing as scalers # type: ignore
from rul_datasets.reader import scaling
from rul_datasets.reader.data_root import get_data_root
from rul_datasets.reader.abstract import AbstractReader
from rul_datasets import utils
CMAPSS_URL = "https://kr0k0tsch.de/rul-datasets/CMAPSSData.zip"
class CmapssReader(AbstractReader):
"""
This reader represents the NASA CMAPSS Turbofan Degradation dataset. Each of its
four sub-datasets contain a training and a test split. Upon first usage,
the training split will be further divided into a development and a validation
split. 20% of the original training split are reserved for validation.
The features are provided as sliding windows over each time series in the
dataset. The label of a window is the label of its last time step. The RUL labels
are capped by a maximum value. The original data contains 24 channels per time
step. Following the literature, we omit the constant channels and operation
condition channels by default. Therefore, the default channel indices are 4, 5,
6, 9, 10, 11, 13, 14, 15, 16, 17, 19, 22 and 23.
The features are min-max scaled between -1 and 1. The scaler is fitted on the
development data only.
Examples:
Default channels
>>> import rul_datasets
>>> fd1 = rul_datasets.reader.CmapssReader(fd=1, window_size=30)
>>> fd1.prepare_data()
>>> features, labels = fd1.load_split("dev")
>>> features[0].shape
(163, 30, 14)
Custom channels
>>> import rul_datasets
>>> fd1 = rul_datasets.reader.CmapssReader(fd=1, feature_select=[1, 2, 3])
>>> fd1.prepare_data()
>>> features, labels = fd1.load_split("dev")
>>> features[0].shape
(163, 30, 3)
"""
_FMT: str = (
"%d %d %.4f %.4f %.1f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f "
"%.2f %.2f %.2f %.2f %.2f %.2f %.4f %.2f %d %d %.2f %.2f %.4f"
)
_TRAIN_PERCENTAGE: float = 0.8
_WINDOW_SIZES: Dict[int, int] = {1: 30, 2: 20, 3: 30, 4: 15}
_DEFAULT_CHANNELS: List[int] = [4, 5, 6, 9, 10, 11, 13, 14, 15, 16, 17, 19, 22, 23]
_NUM_TRAIN_RUNS: Dict[int, int] = {1: 80, 2: 208, 3: 80, 4: 199}
_CONDITION_BOUNDARIES: List[Tuple[float, float]] = [
(-0.009, 0.009), # Different from paper to include FD001 and FD003
(9.998, 10.008),
(19.998, 20.008),
(24.998, 25.008),
(34.998, 35.008),
(41.998, 42.008),
]
_CONDITION_COLUMN: int = 0
_CMAPSS_ROOT: str = os.path.join(get_data_root(), "CMAPSS")
    def __init__(
        self,
        fd: int,
        window_size: Optional[int] = None,
        max_rul: Optional[int] = 125,
        percent_broken: Optional[float] = None,
        percent_fail_runs: Optional[Union[float, List[int]]] = None,
        feature_select: Optional[List[int]] = None,
        truncate_val: bool = False,
        operation_condition_aware_scaling: bool = False,
        truncate_degraded_only: bool = False,
    ) -> None:
        """
        Create a new CMAPSS reader for one of the sub-datasets. The maximum RUL value
        is set to 125 by default. The 14 feature channels selected by default can be
        overridden by passing a list of channel indices to `feature_select`. The
        default window size is defined per sub-dataset as the minimum time series
        length in the test set.

        The data can be scaled separately for each operation condition, as done by
        Ragab et al. This only affects FD002 and FD004 due to them having multiple
        operation conditions.

        For more information about using readers refer to the [reader]
        [rul_datasets.reader] module page.

        Args:
            fd: Index of the selected sub-dataset
            window_size: Size of the sliding window. Default defined per sub-dataset.
            max_rul: Maximum RUL value of targets.
            percent_broken: The maximum relative degradation per time series.
            percent_fail_runs: The percentage or index list of available time series.
            feature_select: The index list of selected feature channels.
            truncate_val: Truncate the validation data with `percent_broken`, too.
            operation_condition_aware_scaling: Scale data separately for each
                operation condition.
            truncate_degraded_only: Only truncate the degraded part of the data
                (< max RUL).
        """
        super().__init__(
            fd,
            window_size,
            max_rul,
            percent_broken,
            percent_fail_runs,
            truncate_val,
            truncate_degraded_only,
        )
        # Select features according to https://doi.org/10.1016/j.ress.2017.11.021
        if feature_select is None:
            feature_select = self._DEFAULT_CHANNELS
        self.feature_select = feature_select
        self.operation_condition_aware_scaling = operation_condition_aware_scaling
@property
def fds(self) -> List[int]:
"""Indices of available sub-datasets."""
return list(self._WINDOW_SIZES)
    def default_window_size(self, fd: int) -> int:
        """Return the default window size of sub-dataset `fd` (the minimum test
        series length, per the class docstring)."""
        return self._WINDOW_SIZES[fd]
    def prepare_data(self) -> None:
        """
        Prepare the CMAPSS dataset. This function needs to be called before using the
        dataset for the first time.

        The dataset is downloaded from a custom mirror and extracted into the data
        root directory. The training data is then split into development and
        validation set. Afterwards, a scaler is fit on the development features.
        Previously completed steps are skipped.
        """
        # download only if the extracted dataset is not present yet
        if not os.path.exists(self._CMAPSS_ROOT):
            _download_cmapss(get_data_root())
        # Check if training data was already split
        if not os.path.exists(self._get_feature_path("dev")):
            warnings.warn(
                f"Training data for FD{self.fd:03d} not "
                f"yet split into dev and val. Splitting now."
            )
            self._split_fd_train(self._get_feature_path("train"))
        # fit and persist the scaler once per feature selection / scaling mode
        if not os.path.exists(self._get_scaler_path()):
            self._prepare_scaler()
def _prepare_scaler(self) -> None:
dev_features, ops_cond = self._load_features(self._get_feature_path("dev"))
dev_features, _ = self._split_time_steps_from_features(dev_features)
scaler = self._fit_scaler(dev_features, ops_cond)
scaling.save_scaler(scaler, self._get_scaler_path())
    def _fit_scaler(self, features, operation_conditions):
        """Create a MinMaxScaler (optionally operation-condition aware) fitted on
        `features`.

        `operation_conditions` is only used when
        `self.operation_condition_aware_scaling` is set; each sample then updates
        the sub-scaler selected by the configured condition column.
        """
        scaler = scalers.MinMaxScaler(feature_range=(-1, 1))
        if self.operation_condition_aware_scaling:
            scaler = scaling.OperationConditionAwareScaler(
                scaler, self._CONDITION_BOUNDARIES
            )
            # reduce each condition array to the single configured column
            operation_conditions = [
                c[:, self._CONDITION_COLUMN] for c in operation_conditions
            ]
            scaler = scaling.fit_scaler(features, scaler, operation_conditions)
        else:
            scaler = scaling.fit_scaler(features, scaler)

        return scaler
    def _split_fd_train(self, train_path: str) -> None:
        """Split the raw training file into dev and val files (80/20 by run).

        The split is done on whole runs (identified by the first column), so no
        run is divided between dev and val. Output files are written next to the
        input with the `train_` prefix replaced.
        """
        train_data = np.loadtxt(train_path)

        # Split into runs
        _, samples_per_run = np.unique(train_data[:, 0], return_counts=True)
        split_indices = np.cumsum(samples_per_run)[:-1]
        split_train_data = np.split(train_data, split_indices, axis=0)

        split_idx = int(len(split_train_data) * self._TRAIN_PERCENTAGE)
        dev_data = np.concatenate(split_train_data[:split_idx])
        val_data = np.concatenate(split_train_data[split_idx:])

        data_root, train_file = os.path.split(train_path)
        dev_file = train_file.replace("train_", "dev_")
        dev_file = os.path.join(data_root, dev_file)
        np.savetxt(dev_file, dev_data, fmt=self._FMT)  # type: ignore
        val_file = train_file.replace("train_", "val_")
        val_file = os.path.join(data_root, val_file)
        np.savetxt(val_file, val_data, fmt=self._FMT)  # type: ignore
    def _get_scaler_path(self) -> str:
        # the pickled scaler lives next to the data files in the CMAPSS root
        return os.path.join(self._CMAPSS_ROOT, self._get_scaler_name())
def _get_scaler_name(self) -> str:
ops_aware = "_ops_aware" if self.operation_condition_aware_scaling else ""
name = f"FD{self.fd:03d}_scaler_{self.feature_select}{ops_aware}.pkl"
return name
    def _get_feature_path(self, split: str) -> str:
        # absolute path of the raw data file for `split`
        return os.path.join(self._CMAPSS_ROOT, self._get_feature_name(split))
    def _get_feature_name(self, split: str) -> str:
        # e.g. "dev_FD001.txt" for split="dev" and fd=1
        return f"{split}_FD{self.fd:03d}.txt"
    def load_complete_split(
        self, split: str, alias: str
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """Load, scale, label, pad and window/crop one split of the dataset.

        Targets for dev/val are generated from the time-step column; test targets
        come from the RUL file. Under a dev/val alias the runs are windowed;
        under a test alias each run is cropped to its last window.

        Args:
            split: name of the split file to load
            alias: split whose pre-processing should be applied

        Returns:
            The processed features and targets of the split.

        Raises:
            ValueError: for an unknown split or alias.
        """
        file_path = self._get_feature_path(split)
        features, operation_conditions = self._load_features(file_path)
        features, time_steps = self._split_time_steps_from_features(features)
        features = self._scale_features(features, operation_conditions)
        if split in ["dev", "val"]:
            targets = self._generate_targets(time_steps)
        elif split == "test":
            targets = self._load_targets(features)
        else:
            raise ValueError(f"Unknown split {split}.")
        # short runs are zero-padded before windowing/cropping
        features, targets = self._pad_data(features, targets)
        if alias in ["dev", "val"]:
            features, targets = self._window_data(features, targets)
        elif alias == "test":
            features, targets = self._crop_data(features, targets)
        else:
            raise ValueError(f"Unknown alias {alias}.")

        return features, targets
    def _load_features(
        self, file_path: str
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """Read a raw data file and split it into per-run feature and operation
        condition arrays.

        Column 0 is the run id, column 1 the time step; columns 2-4 are taken as
        the operation conditions.
        """
        raw_features = np.loadtxt(file_path)

        # keep run id, time step and the selected channels (offset past id/time)
        feature_idx = [0, 1] + [idx + 2 for idx in self.feature_select]
        operation_conditions = raw_features[:, [2, 3, 4]]
        raw_features = raw_features[:, feature_idx]

        # Split into runs
        _, samples_per_run = np.unique(raw_features[:, 0], return_counts=True)
        split_idx = np.cumsum(samples_per_run)[:-1]
        features = np.split(raw_features, split_idx, axis=0)
        cond_per_run = np.split(operation_conditions, split_idx, axis=0)

        return features, cond_per_run
    def _scale_features(
        self, features: List[np.ndarray], operation_conditions: List[np.ndarray]
    ) -> List[np.ndarray]:
        """Scale each run with the previously fitted scaler loaded from disk.

        Raises:
            RuntimeError: if no fitted scaler exists for this configuration.
        """
        scaler_path = self._get_scaler_path()
        if not os.path.exists(scaler_path):
            raise RuntimeError(
                f"Scaler for FD{self.fd:03d} with features "
                f"{self.feature_select} does not exist. "
                f"Did you call prepare_data yet?"
            )
        scaler = scaling.load_scaler(scaler_path)
        if self.operation_condition_aware_scaling:
            # condition-aware scalers only need the configured condition column
            operation_conditions = [
                c[:, self._CONDITION_COLUMN] for c in operation_conditions
            ]
            features = scaling.scale_features(features, scaler, operation_conditions)
        else:
            features = scaling.scale_features(features, scaler)

        return features
@staticmethod
def _split_time_steps_from_features(
features: List[np.ndarray],
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
time_steps = []
for i, seq in enumerate(features):
time_steps.append(seq[:, 1])
seq = seq[:, 2:]
features[i] = seq
return features, time_steps
    def _generate_targets(self, time_steps: List[np.ndarray]) -> List[np.ndarray]:
        """Generate RUL targets from time steps."""
        max_rul = self.max_rul or np.inf  # no capping if max_rul is None
        # reversing the capped time steps yields the piece-wise linear RUL curve
        targets = [np.minimum(max_rul, steps)[::-1].copy() for steps in time_steps]

        return targets
    def _load_targets(self, features: List[np.ndarray]) -> List[np.ndarray]:
        """Load target file."""
        file_name = f"RUL_FD{self.fd:03d}.txt"
        file_path = os.path.join(self._CMAPSS_ROOT, file_name)
        raw_targets = np.loadtxt(file_path)

        targets = np.split(raw_targets, len(raw_targets))
        # the file holds one RUL value per run's end; extrapolate linearly backwards
        targets = [np.arange(len(f), 0, -1) + t - 1 for f, t in zip(features, targets)]
        max_rul = self.max_rul or np.inf  # no capping if max_rul is None
        targets = [np.minimum(max_rul, t) for t in targets]

        return targets
    def _pad_data(
        self, features: List[np.ndarray], targets: List[np.ndarray]
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """Left-pad runs shorter than the window size with zeros (and warn)."""
        padded_features = []
        padded_targets = []
        for i, (feat, target) in enumerate(zip(features, targets)):
            if feat.shape[0] < self.window_size:
                warnings.warn(
                    f"Run {i} of CMAPSS FD{self.fd:03d} is shorter than window "
                    f"size {self.window_size} and will be zero-padded."
                )
                missing = self.window_size - feat.shape[0]
                feat_pad = (missing, feat.shape[1])
                # prepend zeros so the run's end (near failure) stays at the end
                feat = np.concatenate([np.zeros(feat_pad), feat])
                target = np.concatenate([np.zeros(missing), target])
            padded_features.append(feat)
            padded_targets.append(target)

        return padded_features, padded_targets
def _window_data(
self, features: List[np.ndarray], targets: List[np.ndarray]
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""Window features with specified window size."""
windowed_features = []
windowed_targets = []
for seq, target in zip(features, targets):
windows = utils.extract_windows(seq, self.window_size)
target = target[self.window_size - 1 :]
windowed_features.append(windows)
windowed_targets.append(target)
return windowed_features, windowed_targets
def _crop_data(
self, features: List[np.ndarray], targets: List[np.ndarray]
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""Crop length of data to specified window size."""
cropped_features = []
cropped_targets = []
for seq, target in zip(features, targets):
cropped_features.append(seq[None, -self.window_size :])
cropped_targets.append(target[-1, None])
return cropped_features, cropped_targets
def _download_cmapss(data_root: str) -> None:
    """Download the CMAPSS archive to a temporary directory and extract it into
    `data_root`."""
    with tempfile.TemporaryDirectory() as tmp_path:
        print("Download CMAPSS dataset")
        download_path = os.path.join(tmp_path, "CMAPSSData.zip")
        utils.download_file(CMAPSS_URL, download_path)
        print("Extract CMAPSS dataset")
        with zipfile.ZipFile(download_path, mode="r") as f:
            f.extractall(data_root) | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/reader/cmapss.py | 0.880129 | 0.684521 | cmapss.py | pypi
that want to extend this package with their own dataset. """
import abc
from copy import deepcopy
from typing import Optional, Union, List, Dict, Any, Iterable, Tuple, Literal
import numpy as np
from rul_datasets.reader import truncating
class AbstractReader(metaclass=abc.ABCMeta):
"""
This reader is the abstract base class of all readers.
In case you want to extend this library with a dataset of your own, you should
create a subclass of `AbstractReader`. It defines the public interface that all
data modules in this library use. Just inherit from this class implement the
abstract functions, and you should be good to go.
Please consider contributing your work afterward to help the community.
Examples:
>>> import rul_datasets
>>> class MyReader(rul_datasets.reader.AbstractReader):
... def fds(self):
... return [1]
...
... def prepare_data(self):
... pass
...
... def default_window_size(self, fd):
... return 30
...
... def load_complete_split(self, split, alias):
... features = [np.random.randn(100, 2, 30) for _ in range(10)]
... targets = [np.arange(100, 0, -1) for _ in range(10)]
...
... return features, targets
...
>>> my_reader = MyReader(fd=1)
>>> features, targets = my_reader.load_split("dev")
>>> features[0].shape
torch.Size([100, 2, 30])
"""
fd: int
window_size: int
max_rul: Optional[int]
percent_broken: Optional[float]
percent_fail_runs: Optional[Union[float, List[int], None]]
truncate_val: bool
truncate_degraded_only: bool
_NUM_TRAIN_RUNS: Dict[int, int]
    def __init__(
        self,
        fd: int,
        window_size: Optional[int] = None,
        max_rul: Optional[int] = None,
        percent_broken: Optional[float] = None,
        percent_fail_runs: Optional[Union[float, List[int]]] = None,
        truncate_val: bool = False,
        truncate_degraded_only: bool = False,
    ) -> None:
        """
        Create a new reader. If your reader needs additional input arguments,
        create your own `__init__` function and call this one from within as `super(
        ).__init__(...)`.

        For more information about using readers refer to the [reader]
        [rul_datasets.reader] module page.

        Args:
            fd: Index of the selected sub-dataset
            window_size: Size of the sliding window. Defaults to the sub-dataset's
                default window size.
            max_rul: Maximum RUL value of targets.
            percent_broken: The maximum relative degradation per time series.
            percent_fail_runs: The percentage or index list of available time series.
            truncate_val: Truncate the validation data with `percent_broken`, too.
            truncate_degraded_only: Only truncate the degraded part of the data
                (< max RUL).
        """
        self.fd = fd
        # fall back to the sub-dataset's default when no window size is given
        self.window_size = window_size or self.default_window_size(self.fd)
        self.max_rul = max_rul
        self.truncate_val = truncate_val
        self.percent_broken = percent_broken
        self.percent_fail_runs = percent_fail_runs
        self.truncate_degraded_only = truncate_degraded_only
@property
def hparams(self) -> Dict[str, Any]:
"""A dictionary containing all input arguments of the constructor. This
information is used by the data modules to log their hyperparameters in
PyTorch Lightning."""
return {
"fd": self.fd,
"window_size": self.window_size,
"max_rul": self.max_rul,
"percent_broken": self.percent_broken,
"percent_fail_runs": self.percent_fail_runs,
"truncate_val": self.truncate_val,
"truncate_degraded_only": self.truncate_degraded_only,
}
    @property
    @abc.abstractmethod
    def fds(self) -> List[int]:
        """The indices of available sub-datasets."""
        raise NotImplementedError  # must be provided by concrete readers
    @abc.abstractmethod
    def prepare_data(self) -> None:
        """Prepare the data. This function should take care of things that need to be
        done once, before the data can be used. This may include downloading,
        extracting or transforming the data, as well as fitting scalers. It is best
        practice to check if a preparation step was completed before to avoid
        repeating it unnecessarily."""
        raise NotImplementedError  # must be provided by concrete readers
    @abc.abstractmethod
    def default_window_size(self, fd: int) -> int:
        """
        The default window size of the data set. This may vary from sub-dataset to
        sub-dataset.

        Args:
            fd: The index of a sub-dataset.

        Returns:
            The default window size for the sub-dataset.
        """
        raise NotImplementedError  # must be provided by concrete readers
    @abc.abstractmethod
    def load_complete_split(
        self, split: str, alias: str
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """
        Load a complete split without truncation.

        This function should return the features and targets of the desired split.
        Both should be contained in a list of numpy arrays. Each of the arrays
        contains one time series. The features should have a shape of `[num_windows,
        window_size, num_channels]` and the targets a shape of `[num_windows]`. The
        features should be scaled as desired. The targets should be capped by
        `max_rul`.

        By setting `alias`, it should be possible to load a split aliased as another
        split, e.g. load the test split and treat it as the dev split. The data of
        `split` should be loaded but all pre-processing steps of `alias` should be
        carried out.

        This function is used internally in [load_split]
        [rul_datasets.reader.abstract.AbstractReader.load_split] which takes care of
        truncation.

        Args:
            split: The name of the split to load.
            alias: The split as which the loaded data should be treated.

        Returns:
            features: The complete, scaled features of the desired split.
            targets: The capped target values corresponding to the features.
        """
        raise NotImplementedError  # must be provided by concrete readers
    def load_split(
        self, split: str, alias: Optional[str] = None
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """
        Load a split as tensors and apply truncation to it.

        This function loads the scaled features and the targets of a split into
        memory. Afterwards, truncation is applied if the `split` is set to `dev`. The
        validation set is also truncated with `percent_broken` if `truncate_val` is
        set to `True`.

        By setting `alias`, it is possible to load a split aliased as another split,
        e.g. load the test split and treat it as the dev split. The data of `split`
        is loaded but all pre-processing steps of `alias` are carried out.

        Args:
            split: The desired split to load.
            alias: The split as which the loaded data should be treated.

        Returns:
            features: The scaled, truncated features of the desired split.
            targets: The truncated targets of the desired split.
        """
        alias = alias or split
        features, targets = self.load_complete_split(split, alias)
        if alias == "dev":
            # dev data may be truncated in both dimensions (runs and run length)
            features, targets = truncating.truncate_runs(
                features,
                targets,
                self.percent_broken,
                self.percent_fail_runs,
                self.truncate_degraded_only,
            )
        elif alias == "val" and self.truncate_val:
            # val data keeps all runs but may hide data near failure
            features, targets = truncating.truncate_runs(
                features,
                targets,
                self.percent_broken,
                degraded_only=self.truncate_degraded_only,
            )

        return features, targets
    def get_compatible(
        self,
        fd: Optional[int] = None,
        percent_broken: Optional[float] = None,
        percent_fail_runs: Union[float, List[int], None] = None,
        truncate_val: Optional[bool] = None,
        consolidate_window_size: Literal["override", "min", "none"] = "override",
    ) -> "AbstractReader":
        """
        Create a new reader of the desired sub-dataset that is compatible to this one
        (see [check_compatibility]
        [rul_datasets.reader.abstract.AbstractReader.check_compatibility]). Useful for
        domain adaption.

        The values for `percent_broken`, `percent_fail_runs` and `truncate_val` of
        the new reader can be overridden.

        When constructing a compatible reader for another sub-dataset, the window
        size of this reader will be used to override the default window size of the
        new reader. This behavior can be changed by setting `consolidate_window_size`
        to `"min"`. The window size of this reader and the new one will be set to the
        minimum of this readers window size and the default window size of the new
        reader. Please be aware that this will change the window size of *this*
        reader, too. If the new reader should use its default window size,
        set `consolidate_window_size` to `"none"`.

        Args:
            fd: The index of the sub-dataset for the new reader.
            percent_broken: Override this value in the new reader.
            percent_fail_runs: Override this value in the new reader.
            truncate_val: Override this value in the new reader.
            consolidate_window_size: How to consolidate the window size of the readers.

        Returns:
            A compatible reader with optional overrides.
        """
        # deep copy keeps all constructor arguments of this reader intact
        other = deepcopy(self)
        if percent_broken is not None:
            other.percent_broken = percent_broken
        if percent_fail_runs is not None:
            other.percent_fail_runs = percent_fail_runs
        if truncate_val is not None:
            other.truncate_val = truncate_val

        if fd is not None:
            other.fd = fd
        self._consolidate_window_size(other, consolidate_window_size)

        return other
def _consolidate_window_size(
self, other: "AbstractReader", mode: Literal["override", "min", "none"]
) -> None:
if mode == "override":
other.window_size = self.window_size
elif mode == "min":
window_size = min(self.window_size, self.default_window_size(other.fd))
self.window_size = window_size
other.window_size = window_size
elif mode == "none":
other.window_size = self.default_window_size(other.fd)
    def get_complement(
        self,
        percent_broken: Optional[float] = None,
        truncate_val: Optional[bool] = None,
    ) -> "AbstractReader":
        """
        Get a compatible reader that contains all development runs that are not in
        this reader (see [check_compatibility]
        [rul_datasets.reader.abstract.AbstractReader.check_compatibility]). Useful for
        semi-supervised learning.

        The new reader will contain the development runs that were discarded in this
        reader due to truncation through `percent_fail_runs`. If `percent_fail_runs`
        was not set or this reader contains all development runs, it returns a reader
        with an empty development set.

        The values for `percent_broken`, and `truncate_val` of the new reader can be
        overridden.

        Args:
            percent_broken: Override this value in the new reader.
            truncate_val: Override this value in the new reader.

        Returns:
            A compatible reader with all development runs missing in this one.
        """
        # the complement indices become the new reader's percent_fail_runs
        complement_idx = self._get_complement_idx()
        other = self.get_compatible(
            percent_broken=percent_broken,
            percent_fail_runs=complement_idx,
            truncate_val=truncate_val,
        )

        return other
def _get_complement_idx(self) -> List[int]:
num_runs = self._NUM_TRAIN_RUNS[self.fd]
run_idx = list(range(num_runs))
if isinstance(self.percent_fail_runs, float):
split_idx = int(self.percent_fail_runs * num_runs)
complement_idx = run_idx[split_idx:]
elif isinstance(self.percent_fail_runs, Iterable):
complement_idx = list(set(run_idx).difference(self.percent_fail_runs))
else:
complement_idx = []
return complement_idx
def is_mutually_exclusive(self, other: "AbstractReader") -> bool:
"""
Check if this reader is mutually exclusive to another reader.
Two readers are mutually exclusive if:
* they are not of the same class and therefore do not share a dataset
* their `percent_fail_runs` arguments do not overlap (float arguments overlap
if they are greater than zero)
* one of them is empty
Args:
other: The reader to check exclusivity against.
Returns:
Whether the readers are mutually exclusive.
"""
self_runs = 1.0 if self.percent_fail_runs is None else self.percent_fail_runs
other_runs = 1.0 if other.percent_fail_runs is None else other.percent_fail_runs
if not isinstance(self, type(other)):
mutually_exclusive = True
elif self_runs == other and self_runs and other_runs:
mutually_exclusive = False # both the same and not empty
elif isinstance(self_runs, float) and isinstance(other_runs, float):
mutually_exclusive = False # both start with first run -> overlap
elif isinstance(self_runs, float) and isinstance(other_runs, Iterable):
mutually_exclusive = self._is_mutually_exclusive(self, other)
elif isinstance(self_runs, Iterable) and isinstance(other_runs, float):
mutually_exclusive = self._is_mutually_exclusive(other, self)
else:
mutually_exclusive = set(self_runs).isdisjoint(other_runs) # type: ignore
return mutually_exclusive
def _is_mutually_exclusive(
self, floated: "AbstractReader", listed: "AbstractReader"
) -> bool:
"""Listed is mutually exclusive if it is a subset of floated's complement."""
floated_complement = floated.get_complement().percent_fail_runs
listed = listed.percent_fail_runs # type: ignore
exclusive = set(listed).issubset(floated_complement) # type: ignore
return exclusive
    def check_compatibility(self, other: "AbstractReader") -> None:
        """
        Check if the other reader is compatible with this one.

        Compatibility of two readers ensures that training with both will probably
        succeed and produce valid results. Two readers are considered compatible, if
        they:

        * are both children of [AbstractReader]
          [rul_datasets.reader.abstract.AbstractReader]
        * have the same `window size`
        * have the same `max_rul`

        If any of these conditions is not met, the readers are considered
        misconfigured and a `ValueError` is thrown.

        Args:
            other: Another reader object.
        """
        if not isinstance(other, type(self)):
            raise ValueError(
                f"The other loader is not of class {type(self)} but {type(other)}."
            )
        if not self.window_size == other.window_size:
            raise ValueError(
                f"Window sizes are not compatible "
                f"{self.window_size} vs. {other.window_size}"
            )
        if not self.max_rul == other.max_rul:
            raise ValueError(
                f"Max RULs are not compatible " f"{self.max_rul} vs. {other.max_rul}"
            ) | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/reader/abstract.py | 0.953719 | 0.547283 | abstract.py | pypi
import copy
import pickle
from typing import List, Optional, Union, Tuple
import numpy as np
from sklearn import preprocessing as scalers # type: ignore
from sklearn.base import BaseEstimator, TransformerMixin # type: ignore
_Scaler = (
scalers.StandardScaler,
scalers.MinMaxScaler,
scalers.MaxAbsScaler,
scalers.RobustScaler,
)
Scaler = Union[
scalers.StandardScaler,
scalers.MinMaxScaler,
scalers.MaxAbsScaler,
scalers.RobustScaler,
]
"""
Supported scalers:
* [sklearn.preprocessing.StandardScaler][]
* [sklearn.preprocessing.MinMaxScaler][]
* [sklearn.preprocessing.MaxAbsScaler][]
* [sklearn.preprocessing.RobustScaler][]
"""
class OperationConditionAwareScaler(BaseEstimator, TransformerMixin):
    """An ensemble scaler that selects one of several identical base scalers,
    e.g. [sklearn.preprocessing.MinMaxScaler][], per sample based on an
    operation condition.
    Fitting and transforming take an additional operation condition array. A
    sample's condition value is compared against the boundary pairs given at
    construction time: values inside the first pair are handled by the first
    base scaler, and so on. A sample whose condition matches none of the
    boundaries raises an exception, signaling that the boundaries should be
    adjusted."""
    def __init__(
        self, base_scaler: Scaler, boundaries: List[Tuple[float, float]]
    ) -> None:
        """
        Create a new scaler aware of operation conditions.
        Each pair in `boundaries` is the lower and upper value of an inclusive
        interval. A deep copy of `base_scaler` is maintained per interval and
        used whenever a condition value falls inside that interval. The
        intervals must be mutually exclusive.
        Args:
            base_scaler: The scaler that should be used for each condition.
            boundaries: The pairs that form the inclusive boundaries of each condition.
        """
        self.base_scalers = [copy.deepcopy(base_scaler) for _ in boundaries]
        self.boundaries = boundaries
        self._check_boundaries_mutually_exclusive()
    def _check_boundaries_mutually_exclusive(self):
        # after sorting by lower bound, only neighboring intervals can overlap
        ordered = sorted(self.boundaries, key=lambda pair: pair[0])
        for (_, upper), (lower, _) in zip(ordered, ordered[1:]):
            if upper >= lower:  # intervals are inclusive, so touching counts
                raise ValueError(
                    "Boundaries are not mutually exclusive. Be aware that "
                    "the boundaries are inclusive, i.e. lower <= value <= upper."
                )
    @property
    def n_features_in_(self):
        """Number of expected input features."""
        return self.base_scalers[0].n_features_in_
    def partial_fit(
        self, features: np.ndarray, operation_conditions: np.ndarray
    ) -> "OperationConditionAwareScaler":
        """
        Fit the base scalers partially.
        Each base scaler receives a `partial_fit` call with the samples whose
        condition value lies inside the corresponding boundaries. An exception
        is raised if any sample matches none of the boundaries.
        Args:
            features: The feature array to be scaled.
            operation_conditions: The condition values compared against the boundaries.
        Returns:
            The partially fitted scaler.
        """
        num_handled = 0
        for base_scaler, (lower, upper) in zip(self.base_scalers, self.boundaries):
            mask = self._between(operation_conditions, lower, upper)
            num_selected = np.sum(mask)
            if num_selected:  # skip scalers that received no samples
                base_scaler.partial_fit(features[mask])
                num_handled += num_selected
        self._check_all_transformed(features, num_handled, "fitted")
        return self
    def transform(
        self, features: np.ndarray, operation_conditions: np.ndarray
    ) -> np.ndarray:
        """
        Scale the features with the appropriate condition aware scaler.
        Each base scaler receives a `transform` call with the samples whose
        condition value lies inside the corresponding boundaries. An exception
        is raised if any sample matches none of the boundaries.
        Args:
            features: The features to be scaled.
            operation_conditions: The condition values compared against the boundaries.
        Returns:
            The scaled features.
        """
        transformed = np.empty_like(features)
        num_handled = 0
        for base_scaler, (lower, upper) in zip(self.base_scalers, self.boundaries):
            mask = self._between(operation_conditions, lower, upper)
            num_selected = np.sum(mask)
            if num_selected:  # skip scalers that received no samples
                transformed[mask] = base_scaler.transform(features[mask])
                num_handled += num_selected
        self._check_all_transformed(features, num_handled, "scaled")
        return transformed
    def _check_all_transformed(self, features, total, activity):
        """Guard against unknown conditions"""
        unhandled = len(features) - total
        if unhandled:
            raise RuntimeError(
                f"{unhandled} samples had an unknown condition and could not be {activity}."
                "Please adjust the boundaries."
            )
    def _between(self, inputs: np.ndarray, lower: float, upper: float) -> np.ndarray:
        """Inclusive between."""
        return (lower <= inputs) & (inputs <= upper)
def fit_scaler(
    features: List[np.ndarray],
    scaler: Optional[Union[Scaler, OperationConditionAwareScaler]] = None,
    operation_conditions: Optional[List[np.ndarray]] = None,
) -> Union[Scaler, OperationConditionAwareScaler]:
    """
    Fit a given scaler to the RUL features. A StandardScaler is created if the
    `scaler` argument is omitted.
    Passing an [OperationConditionAwareScaler][
    rul_datasets.reader.scaling.OperationConditionAwareScaler] together with
    `operation_conditions` fits the scaler per operation condition.
    The last axis of the features is assumed to hold the channels. Windowed
    data can only be fit by scalers that are unaware of operation conditions.
    Args:
        features: The RUL features.
        scaler: The scaler to be fit. Defaults to a StandardScaler.
        operation_conditions: The operation conditions for condition aware scaling.
    Returns:
        The fitted scaler.
    """
    scaler = scaler or scalers.StandardScaler()
    if isinstance(scaler, Scaler.__args__):  # type: ignore[attr-defined]
        # plain sklearn scalers ignore the operation conditions entirely
        return _fit_scaler_naive(features, scaler)
    if operation_conditions is not None and isinstance(
        scaler, OperationConditionAwareScaler
    ):
        return _fit_scaler_operation_condition_aware(
            features, scaler, operation_conditions
        )
    raise ValueError("Unsupported combination of scaler type and operation conditions.")
def _fit_scaler_naive(features: List[np.ndarray], scaler: Scaler) -> Scaler:
    """Partially fit the scaler on each run, ignoring operation conditions."""
    # collapse a possible window dimension so each row is one sample
    for features_of_run in features:
        scaler.partial_fit(features_of_run.reshape(-1, features_of_run.shape[-1]))
    return scaler
def _fit_scaler_operation_condition_aware(
    features: List[np.ndarray],
    scaler: OperationConditionAwareScaler,
    operation_conditions: List[np.ndarray],
) -> OperationConditionAwareScaler:
    """Partially fit a condition aware scaler on each run and its conditions."""
    # per-sample scaler selection is only defined for un-windowed 2D runs
    assert len(features[0].shape) == 2, "Condition aware scaling can't fit window data"
    for run_features, run_conditions in zip(features, operation_conditions):
        scaler.partial_fit(run_features, run_conditions)
    return scaler
def save_scaler(scaler: Scaler, save_path: str) -> None:
    """
    Save a scaler to disk via pickle.
    Args:
        scaler: The scaler to be saved.
        save_path: The path to save the scaler to.
    """
    with open(save_path, "wb") as file_handle:
        pickle.dump(scaler, file_handle)
def load_scaler(save_path: str) -> Scaler:
    """
    Load a pickled scaler from disk.
    Args:
        save_path: The path the scaler was saved to.
    Returns:
        The loaded scaler.
    """
    with open(save_path, "rb") as file_handle:
        return pickle.load(file_handle)
def scale_features(
    features: List[np.ndarray],
    scaler: Union[Scaler, OperationConditionAwareScaler],
    operation_conditions: Optional[List[np.ndarray]] = None,
) -> List[np.ndarray]:
    """
    Scale the RUL features with a given scaler.
    The features may have a shape of `[num_time_steps, channels]` or
    `[num_windows, window_size, channels]`. The scaler works on the channel
    dimension; a `ValueError` is raised if it was fit on a different number of
    channels.
    Condition aware scalers require the `operation_conditions` argument and
    cannot handle windowed data.
    Args:
        features: The RUL features to be scaled.
        scaler: The already fitted scaler.
        operation_conditions: The operation conditions for condition aware scaling.
    Returns:
        The scaled features.
    """
    if operation_conditions is None:
        return _scale_features_naive(features, scaler)
    return _scale_features_condition_aware(features, scaler, operation_conditions)
def _scale_features_naive(
    features: List[np.ndarray], scaler: Scaler
) -> List[np.ndarray]:
    """Scale each run with the scaler, flattening windowed runs beforehand."""
    # shallow copy so the caller's list is not mutated in place
    scaled = copy.copy(features)
    for i, run in enumerate(scaled):
        _check_channels(run, scaler)
        if len(run.shape) == 3:  # windowed run needs flattening first
            scaled[i] = _scale_windowed_features(run, scaler)
        else:
            scaled[i] = scaler.transform(run)
    return scaled
def _scale_features_condition_aware(
    features: List[np.ndarray],
    scaler: OperationConditionAwareScaler,
    operation_conditions: List[np.ndarray],
) -> List[np.ndarray]:
    """Scale each run with the condition aware scaler and its conditions."""
    assert len(features[0].shape) == 2, "No condition aware scaling for window data"
    # shallow copy so the caller's list is not mutated in place
    scaled = copy.copy(features)
    for i, (run, conditions) in enumerate(zip(scaled, operation_conditions)):
        _check_channels(run, scaler)
        scaled[i] = scaler.transform(run, conditions)
    return scaled
def _check_channels(
    run: np.ndarray, scaler: Union[Scaler, OperationConditionAwareScaler]
) -> None:
    """Raise a ValueError if the run's channel count does not match the scaler.
    The channels are expected on the last axis, which covers both
    `[num_time_steps, channels]` and `[num_windows, window_size, channels]`
    shaped runs.
    Args:
        run: The feature array to check.
        scaler: The fitted scaler defining the expected number of channels.
    Raises:
        ValueError: If the channel counts do not match.
    """
    if not run.shape[-1] == scaler.n_features_in_:
        raise ValueError(
            f"The scaler was fit on {scaler.n_features_in_} "
            # fixed: previously reported shape[1], which is the window size
            # (not the channel count) for 3-D windowed runs
            f"channels but the features have {run.shape[-1]} channels."
        )
def _scale_windowed_features(features: np.ndarray, scaler: Scaler) -> np.ndarray:
    """Scale windowed features of shape [num_windows, window_size, channels].
    The windows are flattened to [num_windows * window_size, channels] so the
    scaler can operate on the channel axis, and restored afterwards."""
    num_channels = features.shape[2]
    window_size = features.shape[1]
    features = features.reshape(-1, num_channels)
    features = scaler.transform(features)
    features = features.reshape(-1, window_size, num_channels)
return features | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/reader/scaling.py | 0.941895 | 0.60013 | scaling.py | pypi |
import os.path
import tempfile
import zipfile
from typing import Tuple, List, Union, Dict, Optional
import numpy as np
from sklearn import preprocessing as scalers # type: ignore
from rul_datasets import utils
from rul_datasets.reader import saving, scaling
from rul_datasets.reader.abstract import AbstractReader
from rul_datasets.reader.data_root import get_data_root
XJTU_SY_URL = "https://kr0k0tsch.de/rul-datasets/XJTU-SY.zip"
class XjtuSyReader(AbstractReader):
    """
    This reader represents the XJTU-SY Bearing dataset. Each of its three
    sub-datasets contains five runs. By default, the reader assigns the first two to
    the development, the third to the validation and the remaining two to the test
    split. This run to split assignment can be overridden by setting `run_split_dist`.
    The features contain windows with two channels of acceleration data which are
    standardized to zero mean and one standard deviation. The scaler is fitted on the
    development data.
    Examples:
        Default splits:
        >>> import rul_datasets
        >>> fd1 = rul_datasets.reader.XjtuSyReader(fd=1)
        >>> fd1.prepare_data()
        >>> features, labels = fd1.load_split("dev")
        >>> features[0].shape
        (123, 32768, 2)
        Custom splits:
        >>> import rul_datasets
        >>> splits = {"dev": [5], "val": [4], "test": [3]}
        >>> fd1 = rul_datasets.reader.XjtuSyReader(fd=1, run_split_dist=splits)
        >>> fd1.prepare_data()
        >>> features, labels = fd1.load_split("dev")
        >>> features[0].shape
        (52, 32768, 2)
        Set first-time-to-predict:
        >>> import rul_datasets
        >>> fttp = [10, 20, 30, 40, 50]
        >>> fd1 = rul_datasets.reader.XjtuSyReader(fd=1, first_time_to_predict=fttp)
        >>> fd1.prepare_data()
        >>> features, labels = fd1.load_split("dev")
        >>> labels[0][:15]
        array([113., 113., 113., 113., 113., 113., 113., 113., 113., 113., 113.,
               112., 111., 110., 109.])
    """
    _XJTU_SY_ROOT: str = os.path.join(get_data_root(), "XJTU-SY")
    _NUM_TRAIN_RUNS: Dict[int, int] = {1: 5, 2: 5, 3: 5}
    def __init__(
        self,
        fd: int,
        window_size: Optional[int] = None,
        max_rul: Optional[int] = None,
        percent_broken: Optional[float] = None,
        percent_fail_runs: Optional[Union[float, List[int]]] = None,
        truncate_val: bool = False,
        run_split_dist: Optional[Dict[str, List[int]]] = None,
        first_time_to_predict: Optional[List[int]] = None,
        norm_rul: bool = False,
        truncate_degraded_only: bool = False,
    ) -> None:
        """
        Create a new XJTU-SY reader for one of the sub-datasets. By default, the RUL
        values are not capped. The default window size is 32768.
        Use `first_time_to_predict` to set an individual RUL inflection point for
        each run. It should be a list with an integer index for each run. The index
        is the time step after which RUL declines. Before the time step it stays
        constant. The `norm_rul` argument can then be used to scale the RUL of each
        run between zero and one.
        For more information about using readers refer to the [reader]
        [rul_datasets.reader] module page.
        Args:
            fd: Index of the selected sub-dataset
            window_size: Size of the sliding window. Defaults to 32768.
            max_rul: Maximum RUL value of targets.
            percent_broken: The maximum relative degradation per time series.
            percent_fail_runs: The percentage or index list of available time series.
            truncate_val: Truncate the validation data with `percent_broken`, too.
            run_split_dist: Dictionary that assigns each run idx to each split.
            first_time_to_predict: The time step for each time series before which RUL
                is constant.
            norm_rul: Normalize RUL between zero and one.
            truncate_degraded_only: Only truncate the degraded part of the data
                (< max RUL).
        """
        super().__init__(
            fd,
            window_size,
            max_rul,
            percent_broken,
            percent_fail_runs,
            truncate_val,
            truncate_degraded_only,
        )
        # 'first_time_to_predict' defines per-run RUL inflection points and is
        # therefore incompatible with a global 'max_rul' cap
        if (first_time_to_predict is not None) and (max_rul is not None):
            raise ValueError(
                # fixed copy-paste bug: message previously said 'FemtoReader'
                "XjtuSyReader cannot use 'first_time_to_predict' "
                "and 'max_rul' in conjunction."
            )
        self.first_time_to_predict = first_time_to_predict
        self.norm_rul = norm_rul
        self._preparator = XjtuSyPreparator(self.fd, self._XJTU_SY_ROOT, run_split_dist)
    @property
    def fds(self) -> List[int]:
        """Indices of available sub-datasets."""
        return list(self._NUM_TRAIN_RUNS)
    def prepare_data(self) -> None:
        """
        Prepare the XJTU-SY dataset. This function needs to be called before using the
        dataset and each custom split for the first time.
        The dataset is downloaded from a custom mirror and extracted into the data
        root directory. The whole dataset is converted from CSV files to NPY files to
        speed up loading it from disk. Afterwards, a scaler is fit on the development
        features. Previously completed steps are skipped.
        """
        if not os.path.exists(self._XJTU_SY_ROOT):
            _download_xjtu_sy(get_data_root())
        self._preparator.prepare_split()
    def load_complete_split(
        self, split: str, alias: str
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        features, targets = self._preparator.load_runs(split)
        features = [f[:, -self.window_size :, :] for f in features]  # crop to window
        features = scaling.scale_features(features, self._preparator.load_scaler())
        if self.max_rul is not None:
            targets = [np.minimum(t, self.max_rul) for t in targets]
        elif self.first_time_to_predict is not None:
            targets = self._clip_first_time_to_predict(targets, split)
            if self.norm_rul:
                targets = [t / np.max(t) for t in targets]
        return features, targets
    def _clip_first_time_to_predict(self, targets, split):
        # look up the first-time-to-predict of each run in this split and keep
        # the RUL constant before that time step
        fttp = [
            self.first_time_to_predict[i - 1]
            for i in self._preparator.run_split_dist[split]
        ]
        targets = [np.minimum(t, len(t) - fttp) for t, fttp in zip(targets, fttp)]
        return targets
    def default_window_size(self, fd: int) -> int:
        return XjtuSyPreparator.DEFAULT_WINDOW_SIZE
class XjtuSyPreparator:
    """Handles on-disk preparation of the XJTU-SY dataset: converting the raw
    CSV files to NPY files, fitting a feature scaler on the dev split and
    loading the prepared runs."""
    DEFAULT_WINDOW_SIZE: int = 32768
    # folder name of each sub-dataset (named after its operation condition)
    _FD_FOLDERS: Dict[int, str] = {1: "35Hz12kN", 2: "37.5Hz11kN", 3: "40Hz10kN"}
    _DEFAULT_RUN_SPLIT_DIST: Dict[str, List[int]] = {
        "dev": [1, 2],
        "val": [3],
        "test": [4, 5],
    }
    def __init__(
        self,
        fd: int,
        data_root: str,
        run_split_dist: Optional[Dict[str, List[int]]] = None,
    ) -> None:
        self.fd = fd
        self.data_root = data_root
        self.run_split_dist = run_split_dist or self._DEFAULT_RUN_SPLIT_DIST
    def prepare_split(self, split: Optional[str] = None) -> None:
        """Convert the raw CSVs to NPY files and fit the dev-split scaler,
        skipping steps whose output already exists. The `split` argument is
        currently unused."""
        run_file_path = self._get_run_file_path(1)
        if not saving.exists(run_file_path):
            runs = self._load_raw_runs()
            runs = self._sort_runs(runs)
            self._save_efficient(runs)
        if not os.path.exists(self._get_scaler_path()):
            features, _ = self.load_runs("dev")
            scaler = scaling.fit_scaler(features)
            scaling.save_scaler(scaler, self._get_scaler_path())
    def load_runs(self, split: str) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """Load the prepared feature and target arrays of all runs in `split`."""
        self._validate_split(split)
        run_idx = self.run_split_dist[split]
        run_paths = [self._get_run_file_path(idx) for idx in run_idx]
        features, targets = saving.load_multiple(run_paths)
        return features, targets
    def load_scaler(self) -> scalers.StandardScaler:
        """Load the scaler that was fitted on the dev split."""
        return scaling.load_scaler(self._get_scaler_path())
    def _load_raw_runs(self) -> Dict[int, Tuple[np.ndarray, np.ndarray]]:
        # features come from the CSV contents, targets from the file names
        file_paths = self._get_csv_file_paths()
        features = saving.load_raw(
            file_paths, self.DEFAULT_WINDOW_SIZE, columns=[0, 1], skip_rows=1
        )
        targets = utils.get_targets_from_file_paths(
            file_paths, self._timestep_from_file_path
        )
        runs = {idx: (features[idx], targets[idx]) for idx in features}
        return runs
    def _sort_runs(
        self, runs: Dict[int, Tuple[np.ndarray, np.ndarray]]
    ) -> Dict[int, Tuple[np.ndarray, np.ndarray]]:
        # order features and targets of each run by descending RUL
        sort_idx = {run_idx: np.argsort(t)[::-1] for run_idx, (_, t) in runs.items()}
        runs = {
            run_idx: (f[sort_idx[run_idx]], t[sort_idx[run_idx]])
            for run_idx, (f, t) in runs.items()
        }
        return runs
    def _get_csv_file_paths(self) -> Dict[int, List[str]]:
        # map each run index to the list of its raw CSV file paths
        fd_folder_path = self._get_fd_folder_path()
        file_paths = {}
        run_folders = self._get_run_folders(fd_folder_path)
        for run_idx, run_folder in run_folders.items():
            run_path = os.path.join(fd_folder_path, run_folder)
            feature_files = utils.get_files_in_path(run_path)
            file_paths[run_idx] = feature_files
        return file_paths
    def _get_run_folders(self, split_path: str) -> Dict[int, str]:
        # NOTE(review): the run index is taken from the folder name's last
        # character, which assumes single-digit run numbers — confirm for
        # custom data layouts
        all_folders = sorted(os.listdir(split_path))
        run_folders = {
            int(f[-1]): f
            for f in all_folders
            if os.path.isdir(os.path.join(split_path, f))
        }
        return run_folders
    @staticmethod
    def _timestep_from_file_path(file_path: str) -> int:
        # file names encode the time step as '<time_step>.csv'
        file_name = os.path.basename(file_path)
        time_step = int(file_name.replace(".csv", ""))
        return time_step
    def _save_efficient(self, runs) -> None:
        # store each run as a pair of NPY files for fast loading
        for run_idx, (features, targets) in runs.items():
            saving.save(self._get_run_file_path(run_idx), features, targets)
    def _validate_split(self, split: str) -> None:
        if split not in ["dev", "val", "test"]:
            raise ValueError(
                "XJTU-SY provides a dev, val and test split, "
                f"but you provided '{split}'."
            )
    def _get_scaler_path(self) -> str:
        # scaler file name is keyed by the dev runs it was fitted on
        file_name = f"scaler_{self.run_split_dist['dev']}.pkl"
        file_path = os.path.join(self._get_fd_folder_path(), file_name)
        return file_path
    def _get_run_file_path(self, run_idx: int) -> str:
        return os.path.join(self._get_fd_folder_path(), f"run_{run_idx}.npy")
    def _get_fd_folder_path(self) -> str:
        return os.path.join(self.data_root, self._FD_FOLDERS[self.fd])
def _download_xjtu_sy(data_root: str) -> None:
    """Download the XJTU-SY dataset from the mirror and extract it into
    `data_root`. The zip archive is kept in a temporary directory that is
    cleaned up automatically."""
    with tempfile.TemporaryDirectory() as tmp_path:
        print("Download XJTU-SY dataset")
        download_path = os.path.join(tmp_path, "XJTU-SY.zip")
        utils.download_file(XJTU_SY_URL, download_path)
        print("Extract XJTU-SY dataset")
        with zipfile.ZipFile(download_path, mode="r") as f:
f.extractall(data_root) | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/reader/xjtu_sy.py | 0.887668 | 0.602822 | xjtu_sy.py | pypi |
import os.path
from typing import Tuple, List, Dict, Literal, Optional
import numpy as np
from tqdm import tqdm # type: ignore
def save(save_path: str, features: np.ndarray, targets: np.ndarray) -> None:
    """
    Save features and targets of a run to .npy files.
    The arrays are written to two separate files, <save_path>_features.npy and
    <save_path>_targets.npy, which makes it possible to load them in memmap
    mode when RAM is short. A trailing .npy extension on `save_path` is
    handled transparently. Supply the same `save_path` to the
    [load][rul_datasets.reader.saving.load] function to restore the arrays.
    Args:
        save_path: The path including file name to save the arrays to.
        features: The feature array to save.
        targets: The targets array to save.
    """
    np.save(_get_feature_path(save_path), features, allow_pickle=False)
    np.save(_get_target_path(save_path), targets, allow_pickle=False)
def load(save_path: str, memmap: bool = False) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load features and targets of a run from .npy files.
    Restores runs that were written by the [save]
    [rul_datasets.reader.saving.save] function. Setting `memmap` to True
    avoids reading the arrays completely into memory, at the cost of slower
    processing.
    Args:
        save_path: Path that was supplied to the
                   [save][rul_datasets.reader.saving.save] function.
        memmap: whether to use memmap to avoid loading the whole run into memory
    Returns:
        features: The feature array saved in `save_path`
        targets: The target array saved in `save_path`
    """
    mmap_mode: Optional[Literal["r"]] = "r" if memmap else None
    features = np.load(_get_feature_path(save_path), mmap_mode, allow_pickle=False)
    targets = np.load(_get_target_path(save_path), mmap_mode, allow_pickle=False)
    return features, targets
def load_multiple(
    save_paths: List[str], memmap: bool = False
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
    """
    Load multiple runs with the [load][rul_datasets.reader.saving.load] function.
    Args:
        save_paths: The list of run files to load.
        memmap: See [load][rul_datasets.reader.saving.load]
    Returns:
        features: The feature arrays saved in `save_paths`
        targets: The target arrays saved in `save_paths`
    """
    if not save_paths:  # zip(*[]) would not yield the two expected lists
        return [], []
    runs = (load(save_path, memmap) for save_path in save_paths)
    features, targets = (list(arrays) for arrays in zip(*runs))
    return features, targets
def exists(save_path: str) -> bool:
    """
    Return if the files resulting from a `save` call with `save_path` exist.
    Args:
        save_path: the `save_path` the [save][rul_datasets.reader.saving.save]
                   function was called with
    Returns:
        Whether the files exist
    """
    paths = (_get_feature_path(save_path), _get_target_path(save_path))
    return all(os.path.exists(path) for path in paths)
def _get_feature_path(save_path):
if save_path.endswith(".npy"):
save_path = save_path[:-4]
feature_path = f"{save_path}_features.npy"
return feature_path
def _get_target_path(save_path):
if save_path.endswith(".npy"):
save_path = save_path[:-4]
target_path = f"{save_path}_targets.npy"
return target_path
def load_raw(
    file_paths: Dict[int, List[str]],
    window_size: int,
    columns: List[int],
    skip_rows: int = 0,
) -> Dict[int, np.ndarray]:
    """Load raw CSV feature files into one array per run, keyed by run index.
    Each run array has the shape [num_files, window_size, len(columns)]."""
    features = {}
    for run_idx, run_files in tqdm(file_paths.items(), desc="Runs"):
        run_features = np.empty((len(run_files), window_size, len(columns)))
        file_iter = tqdm(run_files, desc="Files", leave=False)
        for file_idx, file_path in enumerate(file_iter):
            run_features[file_idx] = _load_raw_features(file_path, skip_rows, columns)
        features[run_idx] = run_features
    return features
def _load_raw_features(
    file_path: str, skip_rows: int, columns: List[int]
) -> np.ndarray:
    """Load the selected columns of one raw CSV file."""
    # some raw files use ';' as delimiter; repair the file in place and retry
    try:
        features = _load_raw_unsafe(file_path, skip_rows)
    except ValueError:
        _replace_delimiters(file_path)
        features = _load_raw_unsafe(file_path, skip_rows)
    return features[:, columns]
def _load_raw_unsafe(file_path: str, skip_rows: int) -> np.ndarray:
return np.loadtxt(file_path, skiprows=skip_rows, delimiter=",")
def _replace_delimiters(file_path: str) -> None:
    """Rewrite the file in place, replacing ';' delimiters with ','."""
    with open(file_path, mode="r+t") as f:
        content = f.read()
        f.seek(0)  # overwrite the file from the start
        content = content.replace(";", ",")
        f.write(content)
f.truncate() | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/reader/saving.py | 0.837952 | 0.516108 | saving.py | pypi |
from typing import Tuple, List, Optional, Union
import numpy as np
from sklearn import preprocessing # type: ignore
from rul_datasets import utils
from rul_datasets.reader import AbstractReader, scaling
class DummyReader(AbstractReader):
    """
    This reader represents a simple, small dummy dataset that can be used to test or
    debug RUL estimation approaches. It contains ten runs for each split with a
    single feature which makes it easy to hold in memory even on low-end computers.
    The dataset is so simple that it can be sufficiently fit by a three-layer
    perceptron in less than 50 epochs.
    Each run is randomly generated by sampling a run length between 90 and 110 time
    steps and creating a piece-wise linear RUL function `y(t)` with a maximum value of
    `max_rul`. The feature `x(t)` is then calculated as:
    ```python
    x(t) = exp(-0.05 * y(t) + N(offset, 0.01)) + N(0, noise_factor)
    ```
    where `N(loc, scale)` is a function drawing a sample from a normal distribution
    with a mean of `loc` and a standard deviation of `scale`. The `dev`, `val` and
    `test` splits are all generated the same way with a different fixed random seed.
    This makes generating the dataset deterministic.
    The dummy dataset contains two sub-datasets. The first uses an `offset` of
    0.5 and a `noise_factor` of 0.01. The second uses an `offset` of 0.75 and a
    `noise_factor` of 0.02. Both use a default window size of 10 and are min-max
    scaled between -1 and 1 with a scaler fitted on the `dev` split.
    Examples:
        >>> import rul_datasets
        >>> fd1 = rul_datasets.reader.DummyReader(fd=1)
        >>> features, labels = fd1.load_split("dev")
        >>> features[0].shape
        (81, 10, 1)
    """
    _FDS = [1, 2]
    _DEFAULT_WINDOW_SIZE = 10
    # the two sub-datasets differ only in feature offset and noise level
    _NOISE_FACTOR = {1: 0.01, 2: 0.02}
    _OFFSET = {1: 0.5, 2: 0.75}
    # fixed seeds make split generation deterministic
    _SPLIT_SEED = {"dev": 42, "val": 1337, "test": 101}
    def __init__(
        self,
        fd: int,
        window_size: Optional[int] = None,
        max_rul: Optional[int] = 50,
        percent_broken: Optional[float] = None,
        percent_fail_runs: Optional[Union[float, List[int]]] = None,
        truncate_val: bool = False,
        truncate_degraded_only: bool = False,
    ):
        """
        Create a new dummy reader for one of the two sub-datasets. The maximum RUL
        value is set to 50 by default. Please be aware that changing this value will
        lead to different features, too, as they are calculated based on the RUL
        values.
        For more information about using readers refer to the [reader]
        [rul_datasets.reader] module page.
        Args:
            fd: Index of the selected sub-dataset
            window_size: Size of the sliding window. Default defined per sub-dataset.
            max_rul: Maximum RUL value of targets.
            percent_broken: The maximum relative degradation per time series.
            percent_fail_runs: The percentage or index list of available time series.
            truncate_val: Truncate the validation data with `percent_broken`, too.
            truncate_degraded_only: Only truncate the degraded part of the data
                (< max RUL).
        """
        super(DummyReader, self).__init__(
            fd,
            window_size,
            max_rul,
            percent_broken,
            percent_fail_runs,
            truncate_val,
            truncate_degraded_only,
        )
        # fit the min-max scaler on the generated dev split once at creation
        features, _ = self._generate_split("dev")
        scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1))
        self.scaler = scaling.fit_scaler(features, scaler)
    @property
    def fds(self) -> List[int]:
        """Indices of available sub-datasets."""
        return self._FDS
    def default_window_size(self, fd: int) -> int:
        return self._DEFAULT_WINDOW_SIZE
    def prepare_data(self) -> None:
        """This function has no effect as there is nothing to prepare."""
        pass
    def load_complete_split(
        self, split: str, alias: str
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        features, targets = self._generate_split(split, alias)
        features = scaling.scale_features(features, self.scaler)
        return features, targets
    def _generate_split(
        self, split: str, alias: Optional[str] = None
    ) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        # NOTE: the RNG call order below determines the generated data;
        # reordering the calls would change the dataset
        alias = split if alias is None else alias
        features = []
        targets = []
        rng = np.random.default_rng(self._SPLIT_SEED[split])
        for i in range(10):  # ten runs per split
            t = self._generate_targets(rng)
            f = self._generate_features(rng, t)
            f = utils.extract_windows(f, self.window_size)
            t = t[: -(self.window_size - 1)]  # trim targets to match window count
            features.append(f)
            targets.append(t)
        if alias == "test":
            features, targets = self._truncate_test_split(rng, features, targets)
        return features, targets
    def _generate_targets(self, rng):
        # piece-wise linear RUL: a countdown from a random run length,
        # clipped at max_rul
        length = rng.integers(90, 110)
        t = np.clip(np.arange(length, 1, -1), a_min=0, a_max=self.max_rul)
        t = t.astype(float)
        return t
    def _generate_features(self, rng, targets):
        # x(t) = exp(-0.05 * y(t) + N(offset, 0.01)) + N(0, noise_factor)
        steady = -0.05 * targets + self._OFFSET[self.fd] + rng.normal() * 0.01
        noise = rng.normal(size=targets.shape) * self._NOISE_FACTOR[self.fd]
        f = np.exp(steady) + noise
        return f[:, None]  # add a channel dimension
    def _truncate_test_split(self, rng, features, targets):
        """Extract a single window from a random position of the time series."""
        for i in range(len(features)):
            run_len = len(features[i])
            # cut somewhere in the second half of the run
            cutoff = rng.integers(run_len // 2, run_len - 1)
            features[i] = features[i][None, cutoff]
            targets[i] = targets[i][None, cutoff]
return features, targets | /rul_datasets-0.10.5.tar.gz/rul_datasets-0.10.5/rul_datasets/reader/dummy.py | 0.961043 | 0.979255 | dummy.py | pypi |
import os
import re
import tempfile
import warnings
import zipfile
from typing import List, Tuple, Union, Dict, Optional
import numpy as np
import sklearn.preprocessing as scalers # type: ignore
from rul_datasets import utils
from rul_datasets.reader import scaling, saving
from rul_datasets.reader.data_root import get_data_root
from rul_datasets.reader.abstract import AbstractReader
FEMTO_URL = "https://kr0k0tsch.de/rul-datasets/FEMTOBearingDataSet.zip"
class FemtoReader(AbstractReader):
"""
This reader represents the FEMTO (PRONOSTIA) Bearing dataset. Each of its three
sub-datasets contain a training and a test split. By default, the reader
constructs a validation split for sub-datasets 1 and 2 each by taking the first
run of the test split. For sub-dataset 3 the second training run is used for
validation because only one test run is available. The remaining training data is
denoted as the development split. This run to split assignment can be overridden
by setting `run_split_dist`.
The features contain windows with three channels. Only the two acceleration
channels are used because the test runs are missing the temperature channel.
These features are standardized to zero mean and one standard deviation. The
scaler is fitted on the development data.
Examples:
Default splits:
>>> import rul_datasets
>>> fd1 = rul_datasets.reader.FemtoReader(fd=1)
>>> fd1.prepare_data()
>>> features, labels = fd1.load_split("dev")
>>> features[0].shape
(2803, 2560, 2)
Custom splits:
>>> import rul_datasets
>>> splits = {"dev": [5], "val": [4], "test": [3]}
>>> fd1 = rul_datasets.reader.FemtoReader(fd=1, run_split_dist=splits)
>>> fd1.prepare_data()
>>> features, labels = fd1.load_split("dev")
>>> features[0].shape
(2463, 2560, 2)
Set first-time-to-predict:
>>> import rul_datasets
>>> fttp = [10, 20, 30, 40, 50]
>>> fd1 = rul_datasets.reader.FemtoReader(fd=1, first_time_to_predict=fttp)
>>> fd1.prepare_data()
>>> features, labels = fd1.load_split("dev")
>>> labels[0][:15]
array([2793., 2793., 2793., 2793., 2793., 2793., 2793., 2793., 2793.,
2793., 2793., 2792., 2791., 2790., 2789.])
"""
_FEMTO_ROOT: str = os.path.join(get_data_root(), "FEMTOBearingDataSet")
_NUM_TRAIN_RUNS: Dict[int, int] = {1: 2, 2: 2, 3: 2}
def __init__(
self,
fd: int,
window_size: Optional[int] = None,
max_rul: Optional[int] = None,
percent_broken: Optional[float] = None,
percent_fail_runs: Optional[Union[float, List[int]]] = None,
truncate_val: bool = False,
run_split_dist: Optional[Dict[str, List[int]]] = None,
first_time_to_predict: Optional[List[int]] = None,
norm_rul: bool = False,
truncate_degraded_only: bool = False,
) -> None:
"""
Create a new FEMTO reader for one of the sub-datasets. By default, the RUL
values are not capped. The default window size is 2560.
Use `first_time_to_predict` to set an individual RUL inflection point for
each run. It should be a list with an integer index for each run. The index
is the time step after which RUL declines. Before the time step it stays
constant. The `norm_rul` argument can then be used to scale the RUL of each
run between zero and one.
For more information about using readers refer to the [reader]
[rul_datasets.reader] module page.
Args:
fd: Index of the selected sub-dataset
window_size: Size of the sliding window. Defaults to 2560.
max_rul: Maximum RUL value of targets.
percent_broken: The maximum relative degradation per time series.
percent_fail_runs: The percentage or index list of available time series.
truncate_val: Truncate the validation data with `percent_broken`, too.
run_split_dist: Dictionary that assigns each run idx to each split.
first_time_to_predict: The time step for each time series before which RUL
is constant.
norm_rul: Normalize RUL between zero and one.
truncate_degraded_only: Only truncate the degraded part of the data
(< max RUL).
"""
super().__init__(
fd,
window_size,
max_rul,
percent_broken,
percent_fail_runs,
truncate_val,
truncate_degraded_only,
)
if (first_time_to_predict is not None) and (max_rul is not None):
raise ValueError(
"FemtoReader cannot use 'first_time_to_predict' "
"and 'max_rul' in conjunction."
)
self.first_time_to_predict = first_time_to_predict
self.norm_rul = norm_rul
self._preparator = FemtoPreparator(self.fd, self._FEMTO_ROOT, run_split_dist)
@property
def fds(self) -> List[int]:
"""Indices of available sub-datasets."""
return list(self._NUM_TRAIN_RUNS)
def prepare_data(self) -> None:
"""
Prepare the FEMTO dataset. This function needs to be called before using the
dataset and each custom split for the first time.
The dataset is downloaded from a custom mirror and extracted into the data
root directory. The whole dataset is converted from CSV files to NPY files to
speed up loading it from disk. Afterwards, a scaler is fit on the development
features. Previously completed steps are skipped.
"""
if not os.path.exists(self._FEMTO_ROOT):
_download_femto(get_data_root())
self._preparator.prepare_split("dev")
self._preparator.prepare_split("val")
self._preparator.prepare_split("test")
def load_complete_split(
self, split: str, alias: str
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
features, targets = self._preparator.load_runs(split)
features = [f[:, -self.window_size :, :] for f in features] # crop to window
features = scaling.scale_features(features, self._preparator.load_scaler())
if self.max_rul is not None:
targets = [np.minimum(t, self.max_rul) for t in targets]
elif self.first_time_to_predict is not None:
targets = self._clip_first_time_to_predict(targets, split)
if self.norm_rul:
targets = [t / np.max(t) for t in targets]
return features, targets
def _clip_first_time_to_predict(self, targets, split):
fttp = [
self.first_time_to_predict[i - 1]
for i in self._preparator.run_split_dist[split]
]
targets = [np.minimum(t, len(t) - fttp) for t, fttp in zip(targets, fttp)]
return targets
    def default_window_size(self, fd: int) -> int:
        """Return the default window size (2560 samples) of the FEMTO data.

        The `fd` argument is accepted for interface compatibility but unused,
        since all FEMTO sub-datasets share the same acquisition length.
        """
        return FemtoPreparator.DEFAULT_WINDOW_SIZE
class FemtoPreparator:
    """Converts, scales and loads runs of one FEMTO (PRONOSTIA) sub-dataset.

    Converts the raw CSV acceleration files into NPY files for fast loading,
    fits a feature scaler on the development split, and loads runs split-wise.
    """

    # Each raw acquisition CSV holds 2560 samples.
    DEFAULT_WINDOW_SIZE = 2560
    # 'val' and 'test' share the same source folder; they are distinguished
    # only by the run indices assigned in the run split distribution.
    _SPLIT_FOLDERS = {
        "dev": "Learning_set",
        "val": "Full_Test_Set",
        "test": "Full_Test_Set",
    }
    _DEFAULT_RUN_SPLIT_DIST = {
        1: {"dev": [1, 2], "val": [3], "test": [4, 5, 6, 7]},
        2: {"dev": [1, 2], "val": [3], "test": [4, 5, 6, 7]},
        3: {"dev": [1], "val": [2], "test": [3]},
    }

    def __init__(
        self,
        fd: int,
        data_root: str,
        run_split_dist: Optional[Dict[str, List[int]]] = None,
    ) -> None:
        # fd: index of the sub-dataset (1, 2 or 3).
        # data_root: directory containing the extracted FEMTO data.
        # run_split_dist: optional custom assignment of run indices to splits.
        self.fd = fd
        self._data_root = data_root
        self.run_split_dist = run_split_dist or self._DEFAULT_RUN_SPLIT_DIST[self.fd]

    def prepare_split(self, split: str) -> None:
        """Convert the split to NPY files and fit the dev scaler (idempotent)."""
        if not self._split_already_prepared(split):
            warnings.warn(f"First time use. Pre-process {split} split of FD{self.fd}.")
            runs = self._load_raw_runs(split)
            self._save_efficient(split, runs)
        if split == "dev" and not os.path.exists(self._get_scaler_path()):
            features, _ = self.load_runs(split)
            scaler = scaling.fit_scaler(features)
            scaling.save_scaler(scaler, self._get_scaler_path())

    def _split_already_prepared(self, split: str) -> bool:
        # NOTE(review): probes the *default* split distribution instead of
        # self.run_split_dist -- presumably because conversion always covers
        # all runs of a split; confirm when using custom splits.
        run_idx_in_split = self._DEFAULT_RUN_SPLIT_DIST[self.fd][split][0]
        run_file_path = self._get_run_file_path(split, run_idx_in_split)
        already_prepared = saving.exists(run_file_path)
        return already_prepared

    def load_runs(self, split: str) -> Tuple[List[np.ndarray], List[np.ndarray]]:
        """Load features and targets of all runs assigned to `split`."""
        self._validate_split(split)
        run_idx = self.run_split_dist[split]
        run_paths = [self._get_run_file_path(split, idx) for idx in run_idx]
        features, targets = saving.load_multiple(run_paths)
        return features, targets

    def load_scaler(self) -> scalers.StandardScaler:
        """Load the scaler fitted on the development split."""
        return scaling.load_scaler(self._get_scaler_path())

    def _load_raw_runs(self, split: str) -> Dict[int, Tuple[np.ndarray, np.ndarray]]:
        # Columns 4 and 5 of the CSV files carry the acceleration features.
        file_paths = self._get_csv_file_paths(split)
        features = saving.load_raw(file_paths, self.DEFAULT_WINDOW_SIZE, columns=[4, 5])
        targets = utils.get_targets_from_file_paths(
            file_paths, self._timestep_from_file_path
        )
        runs = {idx: (features[idx], targets[idx]) for idx in features}
        return runs

    def _get_csv_file_paths(self, split: str) -> Dict[int, List[str]]:
        """Map each run index to the list of its 'acc*' CSV file paths."""
        split_path = self._get_split_folder(split)
        run_folders = self._get_run_folders(split_path)
        file_paths = {}
        for run_idx, run_folder in run_folders.items():
            run_path = os.path.join(split_path, run_folder)
            feature_files = utils.get_files_in_path(
                run_path, lambda f: f.startswith("acc")
            )
            file_paths[run_idx] = feature_files
        return file_paths

    @staticmethod
    def _timestep_from_file_path(file_path: str) -> int:
        # File names look like 'acc_00042.csv'; characters 4:9 are the step.
        file_name = os.path.basename(file_path)
        time_step = int(file_name[4:9])
        return time_step

    def _save_efficient(
        self, split: str, runs: Dict[int, Tuple[np.ndarray, np.ndarray]]
    ) -> None:
        """Persist each run as one NPY file for fast subsequent loading."""
        for run_idx, (features, targets) in runs.items():
            saving.save(self._get_run_file_path(split, run_idx), features, targets)

    def _validate_split(self, split: str) -> None:
        # Raises for anything other than 'dev', 'val' or 'test'.
        if split not in self._SPLIT_FOLDERS:
            raise ValueError(f"Unsupported split '{split}' supplied.")

    def _get_run_folders(self, split_path: str) -> Dict[int, str]:
        """Map the run index (last digit of the folder name) to the folder."""
        pattern = self._get_run_folder_pattern()
        content = sorted(os.listdir(split_path))
        run_folders = {int(f[-1]): f for f in content if pattern.match(f) is not None}
        return run_folders

    def _get_run_folder_pattern(self) -> re.Pattern:
        # Folders are named e.g. 'Bearing1_3'.
        return re.compile(rf"Bearing{self.fd}_\d")

    def _get_scaler_path(self) -> str:
        # The dev run indices are baked into the file name so that different
        # custom splits produce different scaler files.
        file_name = f"scaler_{self.fd}_{self.run_split_dist['dev']}.pkl"
        file_path = os.path.join(self._data_root, file_name)
        return file_path

    def _get_run_file_path(self, split: str, run_idx: int) -> str:
        # NOTE(review): `split` is unused here; run file names depend only on
        # fd and run index -- confirm run indices never collide across splits.
        return os.path.join(self._data_root, f"run_{self.fd}_{run_idx}.npy")

    def _get_split_folder(self, split: str) -> str:
        return os.path.join(self._data_root, self._SPLIT_FOLDERS[split])
def _download_femto(data_root: str) -> None:
    """Download the FEMTO archive to a temp dir and extract it into `data_root`."""
    with tempfile.TemporaryDirectory() as tmp_path:
        print("Download FEMTO dataset")
        archive_path = os.path.join(tmp_path, "FEMTO.zip")
        utils.download_file(FEMTO_URL, archive_path)
        print("Extract FEMTO dataset")
        with zipfile.ZipFile(archive_path, mode="r") as archive:
            archive.extractall(data_root)
import math
import numpy as np
from rul_pm.dataset.lives_dataset import AbstractLivesDataset, FoldedDataset
from tqdm.auto import tqdm
from sklearn.base import clone
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection._split import _BaseKFold
import multiprocessing
class RULScorerWrapper:
    """Adapt a sklearn scorer so the ground truth comes from the estimator.

    The wrapped scorer is looked up by name via `sklearn.metrics.get_scorer`
    and invoked with `estimator.true_values(X)` as the target.
    """

    def __init__(self, scorer):
        # Deferred import keeps sklearn out of module import time.
        from sklearn.metrics import get_scorer
        self.scorer = get_scorer(scorer)

    def __call__(self, estimator, X, y=None):
        """Score `estimator` on X against its own derived true values."""
        return self.scorer(estimator, X, estimator.true_values(X))
class Fitter:
    """Callable evaluating one hyper-parameter combination via CV folds.

    Designed to be mapped over `enumerate(ParameterGrid(...))` items, e.g.
    from a multiprocessing pool.
    """

    def __init__(self, model, dataset, folds_genereator, fit_kwargs):
        self.model = model
        self.dataset = dataset
        self.folds_genereator = folds_genereator
        self.fit_kwargs = fit_kwargs

    def __call__(self, params):
        """Fit a fresh clone on every fold; return (params, per-fold metrics)."""
        _, hyper_params = params  # the enumeration index is unused
        estimator = clone(self.model)
        estimator.reset()
        estimator.set_params(**hyper_params)
        fold_metrics = []
        for train_idx, val_idx in self.folds_genereator.split(self.dataset):
            fold_train = FoldedDataset(self.dataset, train_idx)
            fold_val = FoldedDataset(self.dataset, val_idx)
            estimator.fit(fold_train, fold_val, **self.fit_kwargs)
            residuals = estimator.predict(fold_val) - estimator.true_values(fold_val)
            fold_metrics.append({"mse": np.mean(residuals ** 2)})
        return (hyper_params, fold_metrics)
class RULGridSearchCV:
    """Exhaustive grid search over model hyper-parameters with cross-validation.

    Each parameter combination is evaluated by `Fitter` over the folds produced
    by `folds_genereator`, in parallel worker processes.

    Parameters
    ----------
    model:
        Estimator to clone and fit for every parameter combination.
    params: dict
        Parameter grid in `sklearn.model_selection.ParameterGrid` format.
    folds_genereator: _BaseKFold
        Fold generator used to split the dataset.
    n_jobs: int, default 6
        Number of worker processes (previously hard-coded to 6).
    """

    def __init__(self, model, params: dict, folds_genereator: _BaseKFold, n_jobs: int = 6):
        self.params = params
        self.model = model
        self.folds_genereator = folds_genereator
        self.n_jobs = n_jobs
        self.param_list = []
        self.results = []

    def fit(self, dataset, **fit_kwargs):
        """Evaluate every parameter combination of the grid on `dataset`.

        Fills `param_list` with the evaluated combinations and `results`
        with the corresponding per-fold metric dicts.
        """
        # BUG FIX: the pool was previously never closed or joined, leaking
        # worker processes; the context manager terminates and joins them.
        with multiprocessing.Pool(self.n_jobs) as pool:
            self.param_list, self.results = zip(
                *pool.map(
                    Fitter(self.model, dataset, self.folds_genereator, fit_kwargs),
                    enumerate(list(ParameterGrid(self.params))),
                )
            )
class RULGridSearchPredefinedValidation:
    """Grid search evaluated on a fixed train/validation split (no CV).

    After `fit`, `param_list` holds the evaluated parameter combinations and
    `results` the corresponding fit outcomes (one single-element list each).
    """

    def __init__(self, model, params: dict):
        self.model = model
        self.params = params
        self.param_list = []
        self.results = []

    def fit(self, dataset_train, dataset_validation, **fit_kwargs):
        """Fit a fresh clone of the model for every grid point."""
        self.param_list = []
        self.results = []
        for candidate in tqdm(list(ParameterGrid(self.params))):
            estimator = clone(self.model)
            estimator.reset()
            estimator.set_params(**candidate)
            outcome = estimator.fit(dataset_train, dataset_validation, **fit_kwargs)
            self.param_list.append(candidate)
            self.results.append([outcome])
class GeneticAlgorithmFeatureSelection:
    """Feature selection with a simple binary genetic algorithm.

    Individuals are binary masks over the feature list; the fitness function
    is maximized over `max_iter` generations.

    Parameters
    ----------
    fitness_fun:
        Called as ``fitness_fun(train_dataset, val_dataset, feature_list)``;
        must return a scalar score (higher is better).
    population_size: int
        Number of individuals. Should be even, because the single-point
        crossover pairs individuals two by two.
    max_iter: int
        Number of generations to evolve.
    """

    def __init__(self, fitness_fun, population_size, max_iter: int):
        self.population_size = population_size
        self.fitness_fun = fitness_fun
        self.max_iter = max_iter

    def init_population(self, number_of_features):
        """Return a random binary population and an (unused) memory array."""
        # ceil(U(-0.5, 0.5)) yields 0 or 1 with equal probability.
        population = np.array(
            [
                [math.ceil(e) for e in pop]
                for pop in (
                    np.random.rand(self.population_size, number_of_features) - 0.5
                )
            ]
        )
        memory = np.zeros((2, number_of_features)) - 1
        return population, memory

    def single_point_crossover(self, population):
        """Swap the tails of consecutive individual pairs at a random cut.

        Assumes an even population size; an odd size would index out of
        bounds on the final pairing.
        """
        rows, cols, cut = (
            population.shape[0],
            population.shape[1],
            np.random.randint(1, population.shape[1]),
        )
        for i in range(0, rows, 2):
            population[i], population[i + 1] = (
                np.append(population[i][0:cut], population[i + 1][cut:cols]),
                np.append(population[i + 1][0:cut], population[i][cut:cols]),
            )
        return population

    def flip_mutation(self, population):
        """Flip every bit of the population (0 <-> max value)."""
        return population.max() - population

    def random_selection(self, population):
        """Resample the population uniformly at random, with replacement."""
        r = population.shape[0]
        new_population = population.copy()
        for i in range(r):
            new_population[i] = population[np.random.randint(0, r)]
        return new_population

    def get_fitness(self, train_dataset, val_dataset, feature_list, population):
        """Evaluate the fitness of every individual.

        NOTE(review): the individual's feature mask is currently not applied;
        `fitness_fun` always receives the full `feature_list` -- confirm
        whether masking by the individual's bits was intended.
        """
        fitness = []
        for _ in range(population.shape[0]):
            fitness.append(self.fitness_fun(train_dataset, val_dataset, feature_list))
        return fitness

    def run(
        self,
        train_dataset,
        validation_dataset,
        feature_list,
    ):
        """Evolve the population and return (best_mask, best_fitness)."""
        c = len(feature_list)
        population, memory = self.init_population(c)
        # BUG FIX: `fitness` was a plain Python list, so `fitness == value`
        # evaluated to a single bool and np.where() produced no valid index,
        # crashing with an IndexError. Converting to an array restores
        # element-wise comparison.
        fitness = np.asarray(
            self.get_fitness(train_dataset, validation_dataset, feature_list, population)
        )
        optimal_value = fitness.max()
        optimal_solution = population[np.where(fitness == optimal_value)][0]
        for _ in range(self.max_iter):
            population = self.random_selection(population)
            population = self.single_point_crossover(population)
            # Mutation is applied to the whole population with probability 0.3.
            if np.random.rand() < 0.3:
                population = self.flip_mutation(population)
            fitness = np.asarray(
                self.get_fitness(
                    train_dataset, validation_dataset, feature_list, population
                )
            )
            if fitness.max() > optimal_value:
                optimal_value = fitness.max()
                optimal_solution = population[np.where(fitness == optimal_value)][0]
        return optimal_solution, optimal_value
from typing import Optional
import numpy as np
from rul_pm.results.results import FittedLife
from temporis.dataset.transformed import TransformedDataset
class BaselineModel:
    """Predict the RUL using the mean or median life duration of a dataset.

    Parameters
    ----------
    mode: str
        Aggregation of the life durations; one of 'mean' or 'median'.
    RUL_threshold: Optional[float]
        Threshold forwarded to `FittedLife.compute_time_feature`.

    Raises
    ------
    ValueError
        If `mode` is not 'mean' or 'median'. (Previously an invalid mode was
        silently accepted and `fitted_RUL` was never set, causing a confusing
        AttributeError at predict time.)
    """

    def __init__(self, mode: str = "mean", RUL_threshold: Optional[float] = None):
        if mode not in ("mean", "median"):
            raise ValueError(f"Invalid mode '{mode}'. Expected 'mean' or 'median'.")
        self.mode = mode
        self.RUL_threshold = RUL_threshold

    def fit(self, ds: TransformedDataset):
        """Compute the mean or median total life over the given dataset.

        Parameters
        ----------
        ds : TransformedDataset
            Dataset from which the true RUL is obtained.
        """
        true = []
        for _, y, _ in ds:
            degrading_start, time = FittedLife.compute_time_feature(
                y, self.RUL_threshold
            )
            # Total life = RUL at the first sample plus the elapsed time
            # until degradation starts.
            true.append(y.iloc[0] + time[degrading_start])
        if self.mode == "mean":
            self.fitted_RUL = np.mean(true)
        else:
            self.fitted_RUL = np.median(true)

    def predict(self, ds: TransformedDataset):
        """Predict the whole life of each run using the fitted value.

        Parameters
        ----------
        ds : TransformedDataset
            Dataset iterator from which the time feature is obtained.

        Returns
        -------
        np.array
            Concatenated predicted target, clipped to [0, fitted_RUL].
        """
        output = []
        for _, y, _ in ds:
            _, time = FittedLife.compute_time_feature(
                y, self.RUL_threshold
            )
            y_pred = np.clip(self.fitted_RUL - time, 0, self.fitted_RUL)
            output.append(y_pred)
        return np.concatenate(output)
class FixedValueBaselineModel:
    """Baseline that always predicts a fixed RUL, clipped by elapsed time.

    Parameters
    ----------
    value: float
        The fixed RUL predicted at the start of every life.
    """

    def __init__(self, value: float):
        self.value = value

    def predict(
        self, ds: TransformedDataset, RUL_threshold: Optional[float] = None
    ):
        """Return the clipped fixed-value prediction for every life in `ds`.

        Parameters
        ----------
        ds : TransformedDataset
            Dataset iterator from which the time feature is obtained.

        Returns
        -------
        np.array
            Concatenated predicted target for all lives.
        """
        predictions = []
        for _, y, _ in ds:
            _, elapsed = FittedLife.compute_time_feature(y, RUL_threshold)
            predictions.append(np.clip(self.value - elapsed, 0, self.value))
        return np.concatenate(predictions)
from typing import Optional
import numpy as np
from rul_pm.iterators.batcher import get_batcher
from rul_pm.models.model import TrainableModel
from torchsummary import summary as model_summary
from tqdm.auto import tqdm
import torch
import torch.nn.functional as F
# Supported loss functions, keyed by the name accepted by TorchTrainableModel.
LOSSES = {"mae": F.l1_loss, "mse": F.mse_loss}
class TorchTrainableModel(TrainableModel):
    """Base class for PyTorch RUL models trained from windowed batchers.

    Parameters
    ----------
    learning_rate: float, default 0.001
        Learning rate for the optimizer created by `build_optimizer`.
    loss: str, default 'mse'
        Name of the loss function; one of the keys of `LOSSES`.
    """

    def __init__(self, learning_rate: float = 0.001, loss: str = 'mse', **kwargs):
        super(TorchTrainableModel, self).__init__(**kwargs)
        self.learning_rate = learning_rate
        self._optimizer = None
        self._scheduler = None
        self.loss = LOSSES[loss]

    def build_optimizer(self):
        """Create the optimizer for `self.model`. Subclasses must implement."""
        raise NotImplementedError

    def build_scheduler(self):
        """Create a learning-rate scheduler, or None when none is used.

        BUG FIX: the original body was `raise None`, which raised a
        TypeError as soon as the `scheduler` property was first accessed.
        Subclasses may override to return a real scheduler.
        """
        return None

    def _create_batchers(self, train_dataset, validation_dataset):
        train_batcher, val_batcher = super(TorchTrainableModel, self)._create_batchers(
            train_dataset, validation_dataset)
        # The epoch loop in `fit` drives iteration; the train batcher must
        # stop at the end of each epoch instead of restarting.
        train_batcher.restart_at_end = False
        return train_batcher, val_batcher

    @property
    def optimizer(self):
        """Lazily built optimizer."""
        if self._optimizer is None:
            self._optimizer = self.build_optimizer()
        return self._optimizer

    @property
    def scheduler(self):
        """Lazily built scheduler (None when the model uses no scheduler)."""
        if self._scheduler is None:
            self._scheduler = self.build_scheduler()
        return self._scheduler

    def summary(self):
        """Print a torchsummary summary of the underlying model."""
        model_summary(self.model, self.input_shape)

    def predict(self, dataset, step=None, batch_size=512, evenly_spaced_points: Optional[int] = None):
        """Predict the target for every window of `dataset`.

        Returns
        -------
        np.ndarray
            Concatenated predictions over all batches.
        """
        step = self.computed_step if step is None else step
        batcher = get_batcher(dataset,
                              self.window,
                              batch_size,
                              self.transformer,
                              step,
                              shuffle=False,
                              output_size=self.output_size,
                              cache_size=self.cache_size,
                              evenly_spaced_points=evenly_spaced_points,
                              restart_at_end=False)
        y_pred = []
        # Inference only: disable autograd to save memory and time.
        with torch.no_grad():
            for X, _ in batcher:
                y_pred.append(self.model(torch.Tensor(X)).numpy())
        return np.concatenate(y_pred)

    def fit(self, train_dataset, validation_dataset, epochs: int = 100, refit_transformer: bool = True,
            print_summary: bool = True):
        """Train the model, reporting running train/validation loss per epoch."""
        if refit_transformer:
            self.transformer.fit(train_dataset)
        if print_summary:
            self.summary()
        train_batcher, val_batcher = self._create_batchers(
            train_dataset, validation_dataset)
        for i in tqdm(range(epochs)):
            pbar = tqdm(total=len(train_batcher))
            train_loss = []
            for X, y in train_batcher:
                pbar.set_description('epoch %i' % (i+1))
                self.optimizer.zero_grad()
                y_pred = self.model(torch.Tensor(X))
                single_loss = self.loss(y_pred, torch.Tensor(y))
                single_loss.backward()
                self.optimizer.step()
                train_loss.append(single_loss.detach().numpy())
                pbar.set_postfix(train_loss=np.mean(train_loss))
                pbar.update()
            # Validation pass: no parameter updates, no gradient tracking.
            val_loss = []
            with torch.no_grad():
                for X, y in val_batcher:
                    y_pred = self.model(torch.Tensor(X))
                    single_loss = self.loss(y_pred, torch.Tensor(y))
                    val_loss.append(single_loss.numpy())
            pbar.set_postfix(train_loss=np.mean(train_loss),
                             val_loss=np.mean(val_loss))
            pbar.close()
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from rul_pm.models.keras.keras import KerasTrainableModel
from rul_pm.models.keras.losses import weighted_categorical_crossentropy
from rul_pm.models.keras.weibull import WeibullLayer
from sklearn.base import BaseEstimator, TransformerMixin
from tcn import TCN
from tensorflow.keras import Model
from tensorflow.keras.layers import (Attention, BatchNormalization, Dense,
Dropout, Input)
# Convenience alias for the TensorFlow Probability distributions namespace.
tfd = tfp.distributions
class VariationalWeibull(WeibullLayer):
    """Weibull output layer whose parameters are stochastically sampled.

    Draws uniform noise per batch element and transforms it into the Weibull
    scale (lambda) and shape (k), giving a stochastic forward pass
    (reparameterization-style sampling).
    """

    def __init__(self, hidden):
        # hidden: width of the intermediate dense layers.
        super(VariationalWeibull, self).__init__(
            return_params=True, name='')
        self.x1 = Dense(hidden,
                        activation='relu')
        self.scale_uniform = Dense(1, activation='relu')
        self.x2 = Dense(hidden,
                        activation='relu')
        self.k1 = Dense(1, activation='relu')
        self.k2 = Dense(1, activation='relu')
        # One U(0,1) sample per batch element.
        self.uniform_sampler = tfd.Sample(
            tfd.Uniform(),
            sample_shape=(1))

    def call(self, input):
        # Scale: exp() keeps lambda positive; scaled by uniform noise.
        lambda_ = self.x1(input)
        lambda_ = tf.math.exp(self.scale_uniform(lambda_))
        uniform = self.uniform_sampler.sample(tf.shape(input)[0])
        lambda_ = uniform*lambda_
        # Shape: inverse-CDF-style transform of fresh uniform noise;
        # softplus(... + 1) keeps k positive.
        k = self.x2(input)
        k1 = self.k1(k)
        k2 = self.k2(k)
        uniform1 = self.uniform_sampler.sample(tf.shape(input)[0])
        k = tf.nn.softplus(
            k1*tf.math.pow((-1 * tf.math.log(1-uniform1)), k2) + 1)
        return self._result(lambda_, k)
class RawAndBinClasses(BaseEstimator, TransformerMixin):
    """Target transformer outputting the raw RUL plus `nbins` bin indicators.

    Column 0 of the transformed array holds the raw RUL; columns 1..nbins are
    boolean indicators of the RUL falling into each of `nbins` equally spaced
    bins over [0, max_RUL].
    """

    def __init__(self, nbins):
        self.nbins = nbins

    def fit(self, X, y=None):
        """Compute the bin edges from the maximum RUL observed in X."""
        self.max_RUL = int(X.max())
        self.value_ranges = np.linspace(0, self.max_RUL, num=self.nbins+1)
        return self

    def transform(self, X):
        """Return an array of shape (n, 1 + nbins): raw RUL and bin flags."""
        v = X
        classes = []
        last = len(self.value_ranges) - 2
        for j in range(len(self.value_ranges)-1):
            lower = self.value_ranges[j]
            upper = self.value_ranges[j+1]
            if j == last:
                # BUG FIX: the last bin must include its upper edge;
                # previously the maximum RUL (which always exists, since
                # max_RUL = X.max()) belonged to no bin at all.
                classes.append((v >= lower) & (v <= upper))
            else:
                classes.append(((v >= lower) & (v < upper)))
        v = np.vstack((v, *classes)).T
        return v
class SoftmaxRegression(KerasTrainableModel):
    """RUL regression model built from a TCN, attention and dense layers.

    When `raw_and_bins` is given, targets are expected to be the raw RUL plus
    per-bin indicators (see RawAndBinClasses) and the custom `_loss` combines
    a weighted cross-entropy over the bins with an MSE on the RUL expectation
    derived from the softmax.

    NOTE(review): `compile()` uses a plain 'mse' loss and `build_model()`
    ends in a single linear unit, so `_loss`/`mse_softmax` appear unused in
    the current configuration -- confirm before relying on the bin path.
    """

    def __init__(self, raw_and_bins, alpha, window, batch_size, step, transformer, shuffle, models_path,
                 patience=4, cache_size=30, output_size=3, padding='same'):
        # alpha: weight of the softmax-expectation MSE term inside `_loss`.
        # NOTE(review): the literal 30 is passed below, ignoring the
        # `cache_size` parameter -- confirm whether that is intended.
        super(SoftmaxRegression, self).__init__(window,
                                                batch_size,
                                                step,
                                                transformer,
                                                shuffle,
                                                models_path,
                                                patience=patience,
                                                output_size=output_size,
                                                cache_size=30,
                                                callbacks=[])
        if raw_and_bins is not None:
            self.raw_and_bins = raw_and_bins
            # Uniform class weights for the weighted cross-entropy.
            weights = [1 for _ in range(self.raw_and_bins.nbins)]
            self.wcc = weighted_categorical_crossentropy(weights)
            self.output_size = self.raw_and_bins.nbins
        else:
            # NOTE(review): in this branch `self.raw_and_bins` and `self.wcc`
            # are never set, so `_loss`/`mse_softmax` would fail if called.
            self.output_size = 1
        self.alpha = alpha

    def _generate_batcher(self, train_batcher, val_batcher):
        """Wrap the train/val batchers into tf.data.Dataset generators."""
        n_features = self.transformer.n_features
        def gen_train():
            for X, y in train_batcher:
                yield X, y
        def gen_val():
            for X, y in val_batcher:
                yield X, y
        a = tf.data.Dataset.from_generator(
            gen_train, (tf.float32, tf.float32),
            (tf.TensorShape([None, self.window, n_features]),
             tf.TensorShape([None, self.output_size])))
        b = tf.data.Dataset.from_generator(
            gen_val, (tf.float32, tf.float32),
            (tf.TensorShape([None, self.window, n_features]),
             tf.TensorShape([None, self.output_size])))
        return a, b

    def _loss(self, y_true, y_pred):
        """Weighted cross-entropy over bins + alpha * MSE on the expectation."""
        # Column 0 is the raw RUL, the remaining columns the bin indicators.
        bin_true = y_true[:, 1:]
        cont_true = y_true[:, 0]
        y_pred_bins = y_pred
        cls_loss = self.wcc(bin_true, y_pred_bins)
        # Expected RUL under the softmax: sum of bin lower edges times
        # predicted bin probabilities.
        idx_tensor = self.raw_and_bins.value_ranges[:-1]
        pred_cont = tf.reduce_sum(y_pred_bins * idx_tensor, 1)
        rmse_loss_softmax = tf.losses.mean_squared_error(cont_true, pred_cont)
        # Total loss
        total_loss = (cls_loss +
                      self.alpha * rmse_loss_softmax
                      )
        return total_loss

    def mse_softmax(self, y_true, y_pred):
        """RMSE between the true RUL and the softmax-expected RUL (metric)."""
        cont_true = y_true[:, 0]
        y_pred_bins = y_pred
        idx_tensor = self.raw_and_bins.value_ranges[:-1]
        pred_cont = tf.reduce_sum(y_pred_bins * idx_tensor, 1)
        return tf.sqrt(tf.losses.mean_squared_error(cont_true, pred_cont))

    def mse_rul(self, y_true, y_pred):
        """MSE between the true RUL and the directly regressed RUL (metric)."""
        cont_true = y_true[:, 0]
        y_pred_rul = y_pred[:, 0]
        return tf.losses.mean_squared_error(cont_true, y_pred_rul)

    def compile(self):
        # NOTE(review): compiles with plain 'mse', bypassing `_loss`.
        self.compiled = True
        self.model.compile(loss='mse',
                           optimizer=tf.keras.optimizers.Adam(lr=0.0001))

    def build_model(self):
        """TCN -> attention -> dense head ending in one linear output unit."""
        # NOTE(review): `splitter` is defined but never used.
        def splitter(x):
            return [x[:, :, i:i+1] for i in range(n_features)]
        n_features = self.transformer.n_features
        i = Input(shape=(self.window, n_features))
        m = TCN(use_skip_connections=True,
                use_batch_norm=True,
                return_sequences=True,
                dropout_rate=0.1)(i)
        m = Attention(64, self.window-1)(m)
        m = Dense(100, activation='relu')(m)
        m = Dropout(0.5)(m)
        m = BatchNormalization()(m)
        proba = Dense(150, activation='relu')(m)
        proba = BatchNormalization()(proba)
        proba = Dropout(0.1)(proba)
        proba = Dense(1, activation='linear')(proba)
        return Model(inputs=i, outputs=proba)

    @property
    def name(self):
        # NOTE(review): the reported name does not match the class name.
        return 'ConvolutionalSimple'

    def get_params(self, deep=False):
        params = super().get_params()
        params.update({
        })
        return params
import logging
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from temporis.iterators.iterators import WindowedDatasetIterator
from rul_pm.graphics.plots import plot_predictions
from tensorflow.keras.callbacks import Callback
from temporis.iterators.utils import true_values
import tensorflow as tf
from sklearn.metrics import mean_absolute_error as mae
from rul_pm.results.results import PredictionResult
logger = logging.getLogger(__name__)
class PredictionCallback(Callback):
    """Generate a plot after each epoch with the predictions.

    Parameters
    ----------
    model : tf.keras.Model
        The model used to predict
    output_path : Path
        Path of the output image
    dataset : tf.data.Dataset
        The dataset that should be plotted
    units : str
        Units label forwarded to the prediction plot
    filename_suffix : str
        Optional prefix inserted before the output file stem
    """
    def __init__(
        self,
        model: tf.keras.Model,
        output_path: Path,
        dataset: tf.data.Dataset,
        units: str='',
        filename_suffix: str = "",
    ):
        super().__init__()
        self.output_path = output_path
        self.dataset = dataset
        self.pm_model = model
        self.units = units
        self.suffix = filename_suffix
        if len(filename_suffix) > 0:
            # Prepend the suffix to the file stem; Path.with_stem requires
            # Python >= 3.9.
            self.output_path = self.output_path.with_stem(
                filename_suffix + "_" + self.output_path.stem
            )
    def on_epoch_end(self, epoch, logs={}):
        """Plot predictions vs. true values and save the figure to disk."""
        y_pred = self.pm_model.predict(self.dataset)
        y_true = true_values(self.dataset)
        ax = plot_predictions(
            PredictionResult('Model', y_true, y_pred),
            figsize=(17, 5),
            units=self.units,
        )
        ax.legend()
        ax.figure.savefig(self.output_path, dpi=ax.figure.dpi)
        # Close to avoid accumulating open matplotlib figures across epochs.
        plt.close(ax.figure)
class TerminateOnNaN(Callback):
    """Callback that terminates training when a NaN or Inf loss is encountered.

    Also signals the attached batcher to stop iterating.
    """

    def __init__(self, batcher):
        super().__init__()
        self.batcher = batcher

    def on_batch_end(self, batch, logs=None):
        """Stop the model and the batcher as soon as the loss is not finite."""
        loss = (logs or {}).get("loss")
        if loss is None:
            return
        if not np.isfinite(loss):
            logger.info("Batch %d: Invalid loss, terminating training" % (batch))
            self.model.stop_training = True
            self.batcher.stop = True
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Add, Conv1D, Dense, Dropout, Lambda,
Permute)
class Attention(tf.keras.Model):
    """
    Temporal pattern attention for multivariate time series forecasting
    Shun-Yao Shih, Fan-Keng Sun & Hung-yi Lee
    They propose using a set of filters to extract time-invariant temporal patterns,
    similar to transforming time series data into its “frequency domain”.
    Then they propose a novel attention mechanism to select relevant time series,
    and use its frequency domain information for multivariate forecasting.
    """
    def __init__(self, number_of_filters, attention_size, dropout=0.1):
        # number_of_filters: convolutional filters extracting temporal patterns.
        # attention_size: kernel length of the pattern-extraction convolution.
        super(Attention, self).__init__()
        self.filter_size = 1
        self.number_of_filters = number_of_filters
        self.attention_size = attention_size
        self.dropout = dropout
        self.permute = Permute((2, 1))
        self.expand = Lambda(lambda x: K.expand_dims(x))
        # NOTE(review): the float rate stored above is immediately replaced
        # by the Dropout layer object itself.
        self.dropout = Dropout(self.dropout)
        self.squeeze = Lambda(lambda x: K.squeeze(x, 1))
        # All hidden states except the last vs. only the last hidden state.
        self.prev_states = Lambda(lambda x: x[:, :-1, :])
        self.last_state = Lambda(lambda x: x[:, -1, :])
        self.permute1 = Permute((2, 1))
    def build(self, input_shape):
        # W projects the last hidden state into the filter space for scoring.
        self.W = self.add_weight(name="att_weight",
                                 shape=(self.number_of_filters,
                                        input_shape[2]),
                                 trainable=True)
        self.conv_layer = Conv1D(self.number_of_filters,
                                 self.attention_size,
                                 padding='same')
        self.denseHt = Dense(self.number_of_filters, use_bias=False)
        self.denseVt = Dense(self.number_of_filters, use_bias=False)
        super(Attention, self).build(input_shape)
    def call(self, x):
        # Extract temporal patterns from all but the last time step.
        prev_states = self.prev_states(x)
        prev_states = self.permute(prev_states)
        prev_states = self.conv_layer(prev_states)
        prev_states = self.dropout(prev_states)
        last_state = self.last_state(x)
        # Sigmoid attention scores between the patterns and the last state.
        score = K.sigmoid(K.batch_dot(
            prev_states, K.dot(last_state, K.transpose(self.W))))
        score = K.expand_dims(score, axis=2)
        score = K.repeat_elements(score, rep=prev_states.shape[2], axis=2)
        # Context vector: score-weighted sum of the temporal patterns,
        # combined with a projection of the last hidden state.
        vt = K.sum(tf.math.multiply(score, prev_states), axis=2)
        vt = Add()([self.denseHt(vt), self.denseVt(last_state)])
        return vt
import numpy as np
import tensorflow as tf
from scipy.special import loggamma
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Concatenate, Dense, Lambda, Multiply
class TFWeibullDistribution:
    """TensorFlow implementations of Weibull log-likelihood and KL divergence."""

    @staticmethod
    def log_likelihood(x: tf.Tensor, alpha: tf.Tensor, beta: tf.Tensor):
        """Mean Weibull log-likelihood of x under scale `alpha`, shape `beta`.

        The small epsilon keeps log() finite when x == 0.
        NOTE(review): compared to the exact log-pdf this omits a -log(x)
        term, which is constant w.r.t. alpha/beta and thus irrelevant as a
        training loss -- confirm if exact likelihood values are needed.
        """
        ya = (x + 0.00000000001) / alpha
        return tf.reduce_mean(K.log(beta) + (beta * K.log(ya)) - K.pow(ya, beta))

    @staticmethod
    def kl_divergence(l1: tf.Tensor, k1: tf.Tensor, l2: tf.Tensor, k2: tf.Tensor):
        """Mean KL divergence KL(Weibull(l1, k1) || Weibull(l2, k2)).

        0.5772 approximates the Euler-Mascheroni constant.
        """
        term_1 = K.log(k1 / K.pow(l1, k1))
        term_2 = K.log(k2 / K.pow(l2, k2))
        term_3 = (k1 - k2) * (K.log(l1) - 0.5772 / k1)
        term_4 = K.pow((l1 / l2), k2) * K.exp(tf.math.lgamma((k2 / k1) + 1))
        # BUG FIX: removed a leftover debug `tf.print(...)` that printed all
        # four terms to stdout on every evaluation of the loss.
        return K.mean(term_1 - term_2 + term_3 + term_4 - 1)
class WeibullDistribution:
    """Closed-form statistics of the two-parameter Weibull(alpha, beta).

    `alpha` is the scale, `beta` the shape parameter. All methods accept
    scalars or NumPy arrays.
    """

    @staticmethod
    def mean(alpha, beta):
        """E[X] = alpha * Gamma(1 + 1/beta)."""
        return alpha * np.exp(loggamma(1 + (1 / beta)))

    @staticmethod
    def mode(alpha, beta):
        """Mode of the distribution; 0 whenever beta <= 1.

        Generalized to accept scalars: the original in-place masking
        (`vmode[beta <= 1] = 0`) required array inputs and raised invalid
        power warnings for beta < 1.
        """
        alpha = np.asarray(alpha, dtype=float)
        beta = np.asarray(beta, dtype=float)
        # Use a safe shape where beta <= 1 to avoid invalid powers; the
        # result there is overwritten with 0 anyway.
        safe_beta = np.where(beta > 1, beta, 2.0)
        vmode = alpha * np.power((safe_beta - 1) / safe_beta, 1 / safe_beta)
        return np.where(beta > 1, vmode, 0.0)

    @staticmethod
    def median(alpha, beta):
        """Median = alpha * ln(2)^(1/beta)."""
        return alpha * (np.power(np.log(2.0), (1 / beta)))

    @staticmethod
    def variance(alpha, beta):
        """Var[X] = alpha^2 * (Gamma(1 + 2/beta) - Gamma(1 + 1/beta)^2)."""
        return alpha ** 2 * (
            np.exp(loggamma(1 + (2 / beta))) - (np.exp(loggamma(1 + (1 / beta)))) ** 2
        )

    @staticmethod
    def quantile(q, alpha, beta):
        """Inverse CDF: alpha * (-ln(1 - q))^(1/beta)."""
        return alpha * np.power(-np.log(1 - q), 1 / beta)
class NotCensoredWeibull(tf.keras.losses.Loss):
    """Loss combining an MAE on the predicted RUL with a Weibull log-likelihood.

    Expects predictions of shape (batch, 3): predicted RUL, Weibull scale
    (alpha) and Weibull shape (beta).
    """
    def __init__(self, regression_weight: float = 5, likelihood_weight:float = 1):
        # regression_weight: weight of the MAE regression term.
        # likelihood_weight: weight of the negative log-likelihood term.
        super().__init__()
        self.regression_weight = regression_weight
        self.likelihood_weight = likelihood_weight
    def call(self, y_true, y_pred):
        """Return likelihood_weight * NLL + regression_weight * MAE."""
        pRUL = y_pred[:, 0]
        alpha = y_pred[:, 1]
        beta = y_pred[:, 2]
        y_true = tf.squeeze(y_true)
        reg_loss = tf.keras.losses.MeanAbsoluteError()(pRUL, y_true)
        log_liks = TFWeibullDistribution.log_likelihood(y_true, alpha, beta)
        # Negative sign: maximizing likelihood while penalizing RUL error.
        loss = -self.likelihood_weight*log_liks + self.regression_weight * reg_loss
        return loss
class WeibullLayer(tf.keras.layers.Layer):
    """Base layer turning Weibull parameters (alpha, beta) into an output.

    Depending on `regression`, the point prediction is the mode, mean or
    median of the Weibull distribution.
    """
    def __init__(
        self,
        return_params=True,
        regression="mode",
        name="WeibullParams",
        *args,
        **kwargs
    ):
        # return_params: when True, the layer outputs the concatenated
        #   (alpha, beta) parameters instead of the point estimate.
        # regression: which distribution statistic to use ('mode', 'mean'
        #   or 'median'); anything else leaves `self.fun` unset.
        super().__init__(name=name, *args, **kwargs)
        self.return_params = return_params
        if self.return_params:
            self.params = Concatenate(name="Weibullparams")
        if regression == "mode":
            self.fun = self.mode
        elif regression == "mean":
            self.fun = self.mean
        elif regression == "median":
            self.fun = self.median
    def mean(self, lambda_pipe, k_pipe):
        """E[X] = lambda * Gamma(1 + 1/k)."""
        inner_gamma = Lambda(lambda x: tf.math.exp(tf.math.lgamma(1 + (1 / x))))(k_pipe)
        return Multiply(name="RUL")([lambda_pipe, inner_gamma])
    def median(self, lambda_pipe, k_pipe):
        """Median = lambda * ln(2)^(1/k)."""
        return lambda_pipe * (tf.math.pow(tf.math.log(2.0), tf.math.reciprocal(k_pipe)))
    def mode(self, alpha, beta):
        """Mode = alpha * ((beta-1)/beta)^(1/beta), masked to 0 for beta <= 1."""
        mask = K.cast(K.greater(beta, 1), tf.float32)
        # Clipping keeps the power well-defined; the mask zeroes those entries.
        beta = tf.clip_by_value(beta, 1 + 0.00000000001, np.inf)
        return mask * alpha * tf.math.pow((beta - 1) / beta, (1 / beta))
    def _result(self, alpha, beta):
        # NOTE(review): the point estimate is computed even when
        # return_params is True, in which case it is discarded and only
        # (alpha, beta) are returned -- confirm this is intended.
        RUL = self.fun(alpha, beta)
        if self.return_params:
            return self.params([alpha, beta])
        else:
            return RUL
class WeibullParameters(WeibullLayer):
    """Dense head producing Weibull (alpha, beta) plus the mode-based RUL.

    The output is the concatenation [RUL, alpha, beta] along axis 1.
    """
    def __init__(self, hidden, regression="mode", return_params=True, *args, **kwargs):
        # hidden: width of the intermediate dense layers.
        # NOTE(review): the `return_params` argument is ignored; the super
        # call hard-codes return_params=True -- confirm intended.
        super(WeibullParameters, self).__init__(
            return_params=True, regression=regression, name="", *args, **kwargs
        )
        self.W = Dense(hidden, activation="relu")
        self.xalpha1 = Dense(hidden, activation="relu")
        # softplus keeps alpha and beta strictly positive.
        self.xalpha2 = Dense(1, name="w_alpha", activation='softplus')
        self.xbeta1 = Dense(hidden, activation="relu")
        self.xbeta2 = Dense(1, name="w_beta", activation='softplus')
    def call(self, input_tensor, training=False):
        """Return Concatenate([RUL, alpha, beta]) for the given features."""
        x = self.W(input_tensor)
        alpha = self.xalpha1(x)
        alpha = self.xalpha2(alpha)
        beta = self.xbeta1(x)
        beta = self.xbeta2(beta)
        # Point estimate: Weibull mode (0 whenever beta <= 1).
        RUL = self.mode(alpha, beta)
        x = Concatenate(axis=1)([RUL, alpha, beta])
        return x
from typing import Tuple
from rul_pm.models.keras.keras import KerasTrainableModel
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import (Concatenate, Conv2D, Dense, Dropout,
Flatten, Permute, Reshape)
def MVCNN(input_shape:Tuple[int, int],
          dropout: float,
          window: int,
          batch_size: int,
          step: int, transformer,
          shuffle, models_path,
          patience: int = 4,
          cache_size: int = 30,):
    """Multi-branch 2D-convolutional network for RUL estimation.

    Builds parallel Conv2D branches with 1x1/2x2/3x3/5x5 kernels, merges
    them, and regresses the RUL through two dropout-regularized dense layers.
    Loosely follows "Remaining useful life estimation in prognostics using
    deep convolution neural networks".

    Parameters
    ----------
    input_shape: Tuple[int, int]
        (window, n_features) shape of one input sample.
    dropout: float
        Dropout rate of the dense head.
    window: int
        NOTE(review): shadowed below by ``window = input_shape[0]``; the
        argument itself is never used -- confirm before cleaning up.
    batch_size, step, transformer, shuffle, models_path, patience, cache_size:
        Accepted for interface compatibility but unused inside this
        function -- they look like leftovers from a trainer-style API.

    Returns
    -------
    A Keras Model mapping (window, n_features) inputs to a scalar output.
    """
    n_features = input_shape[1]
    window = input_shape[0]
    input = Input(shape=(window, n_features))
    x = input
    x = Permute((2, 1))(x)
    # NOTE(review): reshaping the permuted (n_features, window) tensor into
    # (window, n_features, window) changes the element count unless the
    # dimensions are compatible -- verify this builds for the intended shapes.
    x = Reshape((input_shape[0], input_shape[1], window))(x)
    x = Conv2D(window, (1, 1), activation='relu', padding='same')(x)
    # Parallel branches with increasing receptive fields.
    x1 = Conv2D(window, (2, 2), activation='relu', padding='same')(x)
    x1 = Conv2D(window, (2, 2), activation='relu', padding='same')(x1)
    x2 = Conv2D(window, (3, 3), activation='relu', padding='same')(x)
    x2 = Conv2D(window, (3, 3), activation='relu', padding='same')(x2)
    x3 = Conv2D(window, (5, 5), activation='relu', padding='same')(x)
    x3 = Conv2D(window, (5, 5), activation='relu', padding='same')(x3)
    x = Concatenate(axis=1)([x, x1, x2, x3])
    # Valid-padding convolution with a full-size kernel collapses the
    # spatial dimensions before the dense head.
    x = Conv2D(window, input_shape)(x)
    x = Flatten()(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(dropout)(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(dropout)(x)
    output = Dense(1)(x)
    model = Model(
        inputs=[input],
        outputs=[output],
    )
    return model
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.layers import (BatchNormalization, Concatenate, MaxPool1D, Activation)
from rul_pm.models.keras.keras import KerasTrainableModel
from rul_pm.models.keras.layers import ExpandDimension, RemoveDimension
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import (
Layer,
LayerNormalization,
MultiHeadAttention,
Add,
Conv1D,
Conv2D,
Dense,
Embedding,
Dropout,
Flatten,
GlobalAveragePooling1D,
)
class InceptionTime(KerasTrainableModel):
    """InceptionTime network for RUL regression.

    Stacks ``depth`` inception modules — each concatenating parallel
    convolutions of decreasing kernel sizes plus a max-pool branch — with an
    optional residual shortcut every three modules, followed by global
    average pooling and a single-unit regression head.

    Parameters
    ----------
    nb_filters : int
        Number of filters of each convolution inside an inception module.
    use_residual : bool
        Whether to add a shortcut connection every three modules.
    use_bottleneck : bool
        Whether to reduce channels with a 1x1 convolution before the
        parallel convolutions.
    depth : int
        Number of stacked inception modules.
    kernel_size : int
        Base kernel size; the largest branch uses ``kernel_size - 1`` and
        the others divide it by successive powers of two.
    bottleneck_size : int
        Number of filters of the bottleneck convolution.
    inception_number : int
        Number of parallel convolution branches per module.
    """

    def __init__(
        self,
        nb_filters=32,
        use_residual=True,
        use_bottleneck=True,
        depth=6,
        kernel_size=41,
        bottleneck_size: int = 32,
        inception_number: int = 3,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.nb_filters = nb_filters
        self.use_residual = use_residual
        self.use_bottleneck = use_bottleneck
        self.depth = depth
        # The largest branch kernel is kernel_size - 1; smaller branches
        # divide it by powers of two (see _inception_module).
        self.kernel_size = kernel_size - 1
        self.bottleneck_size = bottleneck_size
        self.inception_number = inception_number

    def compile(self):
        """Compile the underlying Keras model with an Adam optimizer."""
        self.compiled = True
        self.model.compile(
            loss=self.loss,
            optimizer=optimizers.Adam(
                lr=self.learning_rate,
                beta_1=0.85,
                beta_2=0.9,
                epsilon=0.001,
                amsgrad=True,
            ),
            metrics=self.metrics,
        )

    def _inception_module(self, input_tensor, stride=1, activation="linear"):
        """Build one inception module on top of ``input_tensor``.

        Note: the stray debug ``print(self.bottleneck_size)`` of the
        original implementation was removed.
        """
        if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
            input_inception = Conv1D(
                filters=self.bottleneck_size,
                kernel_size=1,
                padding="same",
                activation=activation,
                use_bias=False,
            )(input_tensor)
        else:
            input_inception = input_tensor

        # Parallel convolutions with kernel sizes k, k//2, k//4, ...
        kernel_size_s = [
            self.kernel_size // (2 ** i) for i in range(self.inception_number)
        ]
        conv_list = []
        for branch_kernel_size in kernel_size_s:
            conv_list.append(
                Conv1D(
                    filters=self.nb_filters,
                    kernel_size=branch_kernel_size,
                    strides=stride,
                    padding="same",
                    activation=activation,
                    use_bias=False,
                )(input_inception)
            )

        # Max-pool branch followed by a 1x1 convolution.
        max_pool_1 = MaxPool1D(pool_size=3, strides=stride, padding="same")(
            input_tensor
        )
        conv_6 = Conv1D(
            filters=self.nb_filters,
            kernel_size=1,
            padding="same",
            activation=activation,
            use_bias=False,
        )(max_pool_1)
        conv_list.append(conv_6)

        x = Concatenate(axis=2)(conv_list)
        x = BatchNormalization()(x)
        x = Activation(activation="relu")(x)
        return x

    def _shortcut_layer(self, input_tensor, out_tensor):
        """Residual connection: project input to out_tensor's channels and add."""
        shortcut_y = Conv1D(
            filters=int(out_tensor.shape[-1]),
            kernel_size=1,
            padding="same",
            use_bias=False,
        )(input_tensor)
        shortcut_y = BatchNormalization()(shortcut_y)
        x = Add()([shortcut_y, out_tensor])
        x = Activation("relu")(x)
        return x

    def build_model(self, input_shape):
        """Build the full network: inception stack -> GAP -> Dense(1)."""
        input = Input(shape=input_shape)
        x = input
        input_res = input

        for d in range(self.depth):
            x = self._inception_module(x)
            # A shortcut every third module, as in the reference architecture.
            if self.use_residual and d % 3 == 2:
                x = self._shortcut_layer(input_res, x)
                input_res = x

        gap_layer = GlobalAveragePooling1D()(x)
        output_layer = Dense(1, activation="relu")(gap_layer)
        model = Model(inputs=input, outputs=output_layer)
        return model

    def get_params(self, deep=False):
        """Return the model hyperparameters.

        Fix: ``inception_number`` is now included (it was missing, so the
        parameter was lost on clone/serialization).
        """
        d = super().get_params()
        d["nb_filters"] = self.nb_filters
        d["use_residual"] = self.use_residual
        d["use_bottleneck"] = self.use_bottleneck
        d["depth"] = self.depth
        d["kernel_size"] = self.kernel_size
        d["bottleneck_size"] = self.bottleneck_size
        d["inception_number"] = self.inception_number
        return d

    @property
    def name(self):
        return "InceptionTime"
import numpy as np
import tensorflow as tf
from rul_pm.models.keras.keras import KerasTrainableModel
from rul_pm.models.keras.layers import ExpandDimension, RemoveDimension
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import (Layer, LayerNormalization, MultiHeadAttention,
Add, Conv1D, Conv2D, Dense, Embedding,
Dropout, Flatten, GlobalAveragePooling1D)
class Patches(Layer):
    """Split an image-like (batch, window, features, 1) tensor into flat patches.

    Each patch covers ``patch_size`` time steps over all ``features``
    columns; patches do not overlap (stride equals the patch size).
    """

    def __init__(self, patch_size, features):
        super(Patches, self).__init__()
        self.patch_size = patch_size
        self.features = features

    def call(self, images):
        """Extract non-overlapping patches, flattened to (batch, num_patches, dims).

        Fix: the original recomputed ``patch_dims`` after the reshape — a
        dead duplicate statement — which was removed.
        """
        batch_size = tf.shape(images)[0]
        patches = tf.image.extract_patches(
            images=images,
            sizes=[1, self.patch_size, self.features, 1],
            strides=[1, self.patch_size, self.features, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        patch_dims = patches.shape[-1]
        patches = tf.reshape(patches, [batch_size, -1, patch_dims])
        return patches
class PatchEncoder(Layer):
    """Linearly project patches and add a learned positional embedding."""

    def __init__(self, num_patches, projection_dim):
        super(PatchEncoder, self).__init__()
        self.num_patches = num_patches
        self.projection = Dense(units=projection_dim)
        self.position_embedding = Embedding(
            input_dim=num_patches, output_dim=projection_dim
        )

    def call(self, patch):
        """Return projection(patch) + positional embedding of each index."""
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        return self.projection(patch) + self.position_embedding(positions)
def mlp(x, hidden_units, dropout_rate):
    """Stack one Dense(GELU) + Dropout pair per entry of ``hidden_units``."""
    for units in hidden_units:
        dense_layer = Dense(units, activation=tf.nn.gelu)
        x = Dropout(dropout_rate)(dense_layer(x))
    return x
class VisionTransformer(KerasTrainableModel):
    """Vision-Transformer-style model over sliding windows of sensor data.

    The input window is cut into patches, each patch is projected and
    position-encoded, and a stack of pre-norm transformer encoder blocks
    processes the sequence before an MLP head regresses the RUL.

    Parameters
    ----------
    patch_size : int
        Number of time steps per patch.
    projection_dim : int
        Dimension of the patch embeddings (and attention key dim).
    num_heads : int
        Number of attention heads per transformer block.
    transformer_layers : int
        Number of stacked transformer blocks.
    mlp_head_units : list
        Hidden sizes of the final MLP head.
        NOTE(review): mutable default argument — shared across instances if
        ever mutated; consider a None sentinel.
    """

    def __init__(self,
                 patch_size: int = 5,
                 projection_dim: int = 64,
                 num_heads: int = 4,
                 transformer_layers: int = 8,
                 mlp_head_units=[2048, 1024],
                 **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        # NOTE(review): assumes self.window is set by the base-class
        # constructor before this line runs — confirm.
        self.num_patches = (self.window // patch_size)
        self.projection_dim = projection_dim
        self.num_heads = num_heads
        # Hidden sizes of the per-block feed-forward MLP (expand, contract).
        self.transformer_units = [
            projection_dim * 2,
            projection_dim,
        ]
        self.transformer_layers = transformer_layers
        self.mlp_head_units = mlp_head_units

    def compile(self):
        """Compile the underlying Keras model with an Adam optimizer."""
        self.compiled = True
        self.model.compile(
            loss=self.loss,
            optimizer=optimizers.Adam(lr=self.learning_rate,
                                      beta_1=0.85,
                                      beta_2=0.9,
                                      epsilon=0.001,
                                      amsgrad=True),
            metrics=self.metrics)

    def build_model(self):
        """Assemble the network: patches -> encoder stack -> MLP head -> Dense(1)."""
        n_features = self.transformer.n_features
        input = Input(shape=(self.window, n_features))
        # Add a trailing channel dimension so the window is image-like.
        x = ExpandDimension()(input)
        patches = Patches(self.patch_size, n_features)(x)
        encoded_patches = PatchEncoder(self.num_patches, self.projection_dim)(patches)

        for _ in range(self.transformer_layers):
            # Pre-norm multi-head self-attention with residual connection.
            x1 = LayerNormalization(epsilon=1e-6)(encoded_patches)
            attention_output = MultiHeadAttention(
                num_heads=self.num_heads, key_dim=self.projection_dim, dropout=0.1
            )(x1, x1)
            x2 = Add()([attention_output, encoded_patches])
            # Pre-norm feed-forward block with residual connection.
            x3 = LayerNormalization(epsilon=1e-6)(x2)
            x3 = mlp(x3, hidden_units=self.transformer_units, dropout_rate=0.1)
            encoded_patches = Add()([x3, x2])

        # Create a [batch_size, projection_dim] tensor.
        representation = LayerNormalization(epsilon=1e-6)(encoded_patches)
        representation = Flatten()(representation)
        representation = Dropout(0.5)(representation)
        features = mlp(representation, hidden_units=self.mlp_head_units, dropout_rate=0.5)
        logits = Dense(1, activation='relu')(features)
        # Create the Keras model.
        model = Model(inputs=input, outputs=logits)
        return model

    def get_params(self, deep=False):
        """Return base-class hyperparameters (this class adds none here)."""
        d = super().get_params()
        return d

    @property
    def name(self):
        return "VisionTransformer"
from typing import List
import tensorflow as tf
from rul_pm.models.keras.keras import KerasTrainableModel
from rul_pm.models.keras.losses import time_to_failure_rul
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import LSTM, Dense
class MultiTaskRUL(KerasTrainableModel):
    """
    A Multi task network that learns to regress the RUL and the Time to failure

    Two Birds with One Network: Unifying Failure Event Prediction and Time-to-failure Modeling
    Karan Aggarwal, Onur Atan, Ahmed K. Farahat, Chi Zhang, Kosta Ristovski, Chetan Gupta

    The target is two-dimensional: column 0 holds the RUL head output and
    column 1 the time-to-failure (sigmoid) head output.

    Parameters
    -----------
    layers_lstm : List[int]
        Sizes of the stacked LSTM layers
    layers_dense : List[int]
        Sizes of the dense layers after the LSTM stack
    window: int
    batch_size: int
    step: int
    transformer
    shuffle
    models_path
    patience: int = 4
    cache_size: int = 30
    """

    def __init__(self,
                 layers_lstm: List[int],
                 layers_dense: List[int],
                 window: int,
                 batch_size: int,
                 step: int, transformer, shuffle, models_path,
                 patience: int = 4, cache_size: int = 30):
        # BUG FIX: patience and cache_size are forwarded to the base class.
        # The original hard-coded patience=4, cache_size=30, silently
        # ignoring the constructor arguments.
        super().__init__(window,
                         batch_size,
                         step,
                         transformer,
                         shuffle,
                         models_path,
                         patience=patience,
                         cache_size=cache_size)
        self.layers_dense = layers_dense
        self.layers_lstm = layers_lstm

    def compile(self):
        """Compile with the combined RUL + time-to-failure loss."""
        super().compile()
        self.model.compile(
            optimizer=optimizers.Adam(lr=0.001),
            loss=time_to_failure_rul(weights={
                0: 1.,
                1: 2.
            }),
            loss_weights=[1.0, 1.0],
        )

    @property
    def name(self):
        return "MultiTaskRULTTF"

    def build_model(self):
        """Build the LSTM -> dense network with two output heads (rul, ttf)."""
        n_features = self.transformer.n_features
        input = Input(shape=(self.window, n_features))
        x = input

        # NOTE(review): when len(layers_lstm) <= 1 the configured size is
        # ignored and the single LSTM uses 15 units — confirm intent.
        n_filters = 15
        if len(self.layers_lstm) > 1:
            for n_filters in self.layers_lstm:
                x = LSTM(n_filters, recurrent_dropout=0.2,
                         return_sequences=True)(x)
        x = LSTM(n_filters, recurrent_dropout=0.2, return_sequences=False)(x)
        for n_filters in self.layers_dense:
            x = Dense(n_filters, activation='elu')(x)
        RUL_output = Dense(1, activation='elu', name='rul')(x)
        FP_output = Dense(1, activation='sigmoid', name='ttf')(x)
        output = tf.keras.layers.Concatenate(axis=1)([RUL_output, FP_output])
        model = Model(
            inputs=[input],
            outputs=[output],
        )
        return model

    def _generate_batcher(self, train_batcher, val_batcher):
        """Wrap the batchers into tf.data Datasets with (window, 2) targets."""
        n_features = self.transformer.n_features

        def gen_train():
            for X, y in train_batcher:
                yield X, y

        def gen_val():
            for X, y in val_batcher:
                yield X, y

        a = tf.data.Dataset.from_generator(
            gen_train, (tf.float32, tf.float32), (tf.TensorShape(
                [None, self.window, n_features]), tf.TensorShape([None, 2])))
        b = tf.data.Dataset.from_generator(
            gen_val, (tf.float32, tf.float32), (tf.TensorShape(
                [None, self.window, n_features]), tf.TensorShape([None, 2])))
        return a, b
import tensorflow as tf
from rul_pm.models.keras.keras import KerasTrainableModel
from tcn import TCN
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import (AveragePooling1D, Concatenate, Conv1D,
Dense, Dropout, Flatten, Lambda,
UpSampling1D)
class EncoderDecoder(KerasTrainableModel):
    """TCN-based encoder/decoder with an auxiliary RUL regression head.

    The encoder compresses the input window with a Temporal Convolutional
    Network; the decoder reconstructs the signal ('signal' output) while a
    dense head regresses the RUL ('rul' output). Both losses are weighted
    equally during training.

    Parameters
    -----------
    hidden_size : int
        Number of TCN filters of the encoder
    dropout : float
        Dropout rate used in the TCN and the dense head
    window: int
    batch_size: int
    step: int
    transformer
    shuffle
    models_path
    patience: int = 4
    cache_size: int = 30
    """

    def __init__(self,
                 hidden_size: int,
                 dropout: float,
                 window: int,
                 batch_size: int,
                 step: int, transformer,
                 shuffle, models_path,
                 patience: int = 4,
                 cache_size: int = 30,
                 **kwargs):
        super().__init__(window,
                         batch_size,
                         step,
                         transformer,
                         shuffle,
                         models_path,
                         patience=patience,
                         cache_size=cache_size,
                         **kwargs)
        self.hidden_size = hidden_size
        self.dropout = dropout

    def compile(self):
        """Compile with equal loss weights for the two heads."""
        self.compiled = True
        self.model.compile(
            loss=self.loss,
            optimizer=optimizers.Adam(lr=self.learning_rate),
            metrics=self.metrics,
            loss_weights={'rul': 1, 'signal': 1})

    def _generate_keras_batcher(self, train_batcher, val_batcher):
        """Build tf.data Datasets yielding (X, {'signal': X, 'rul': y}).

        The input window is replayed as the reconstruction target.
        """
        n_features = self.transformer.n_features

        def gen_train():
            for X, y in train_batcher:
                yield X, {'signal': X, 'rul': y}

        def gen_val():
            for X, y in val_batcher:
                yield X, {'signal': X, 'rul': y}

        a = tf.data.Dataset.from_generator(
            gen_train,
            (tf.float32, {'signal': tf.float32, 'rul': tf.float32}),
            (
                tf.TensorShape([None, self.window, n_features]),
                {
                    'signal': tf.TensorShape([None, self.window, n_features]),
                    'rul': tf.TensorShape([None, 1])
                }
            )
        )
        b = tf.data.Dataset.from_generator(
            gen_val,
            (tf.float32, {'signal': tf.float32, 'rul': tf.float32}),
            (
                tf.TensorShape([None, self.window, n_features]),
                {
                    'signal': tf.TensorShape([None, self.window, n_features]),
                    'rul': tf.TensorShape([None, 1])
                }
            )
        )
        return a, b

    def build_model(self):
        """Build encoder (TCN + pooling), decoder (upsample + conv) and RUL head."""
        n_features = self.transformer.n_features
        input = Input(shape=(self.window, n_features))
        x = input

        encoder = TCN(self.hidden_size, kernel_size=5, use_batch_norm=True, use_skip_connections=True,
                      dropout_rate=self.dropout, return_sequences=True, dilations=(1, 2, 4))(x)
        encoder = AveragePooling1D(2)(encoder)

        # Decoder reconstructs the input signal from the pooled encoding.
        decoder = UpSampling1D(2)(encoder)
        decoder = Conv1D(n_features, kernel_size=5, padding='same',
                         activation='relu', name='signal')(decoder)

        # The RUL head consumes both the flattened encoding and the last
        # decoded time step.
        encoder_last_state = Flatten()(encoder)
        decoder_last_state = Lambda(lambda X: X[:, -1, :])(decoder)

        output = Concatenate()([encoder_last_state, decoder_last_state])
        output = Dense(100, activation='relu')(output)
        output = Dropout(self.dropout)(output)
        output = Dense(50, activation='relu')(output)
        output = Dropout(self.dropout)(output)
        output = Dense(1, name='rul')(output)
        model = Model(
            inputs=[input],
            outputs={'signal': decoder, 'rul': output},
        )
        return model

    @property
    def name(self):
        return "ConvolutionalEncoderDecoder"
from typing import List, Tuple
import numpy as np
class Segment:
    """A linear segment fitted incrementally by online least squares.

    Accumulators (``xx``, ``xy``, ``yy``) are running means of products of
    the offsets relative to the segment's initial point; ``B`` is the
    current slope estimate and ``segment_error`` the history of fit SSR
    values, used to decide when a new segment should start.
    """

    def __init__(self, initial_point: Tuple[float, float], not_increasing: bool = False):
        self.n = 1
        self.initial = initial_point
        self.xx = 0
        self.xy = 0
        self.yy = 0
        self.B = 0
        self.segment_error = []
        self.std_deviation = 2
        self.not_increasing = not_increasing

    def compute_endpoint(self, current_t: float):
        """Project the fitted line up to ``current_t`` and store it as ``final``."""
        t0, s0 = self.initial
        # When constrained, positive slopes are clamped to 0.
        slope = min(self.B, 0) if self.not_increasing else self.B
        self.final = (current_t, s0 + slope * (current_t - t0))

    def can_add(self, current_t: float, value: float) -> bool:
        """Return True if (current_t, value) still fits this segment.

        The point is tentatively folded into the accumulators; once more
        than 15 points were seen, an SSR larger than mean + 1.5 std of the
        error history signals that a new segment is needed.
        """
        n = self.n + 1
        dy = value - self.initial[1]
        dt = current_t - self.initial[0]
        xx = self.xx + (dt * dt - self.xx) / n
        xy = self.xy + (dt * dy - self.xy) / n
        yy = self.yy + (dy * dy - self.yy) / n
        slope = xy / xx if xx > 0 else 0
        ssr = (yy - slope * xy) * self.n
        if self.n <= 15:
            return True
        mean_error = np.mean(self.segment_error)
        std_error = np.std(self.segment_error)
        return not (ssr > mean_error + 1.5 * std_error)

    def add(self, current_t: float, value: float):
        """Fold (current_t, value) into the segment and refresh slope and endpoint."""
        self.n += 1
        dy = value - self.initial[1]
        dt = current_t - self.initial[0]
        self.xx += (dt * dt - self.xx) / self.n
        self.xy += (dt * dy - self.xy) / self.n
        self.yy += (dy * dy - self.yy) / self.n
        self.B = self.xy / self.xx if self.xx > 0 else 0
        self.segment_error.append((self.yy - self.B * self.xy) * self.n)
        self.compute_endpoint(current_t)
class PiecewesieLinearFunction:
    """Function defined piecewise with linear segments.

    Each segment contributes a (slope, intercept) pair valid within its
    (t_start, t_end) limits; points outside every segment are extrapolated
    with the last segment's line.

    Parameters
    ----------
    segments: List[Segment]
        List of segments that compose the function
    """

    def __init__(self, segments: List["Segment"]):
        self.parameters = []  # (slope, intercept) per segment
        self.limits = []  # (t_start, t_end) per segment
        for s in segments:
            t1, y1 = s.initial
            t2, y2 = s.final
            # Zero-length segments get slope 0 to avoid dividing by zero.
            m = 0 if (t2 - t1) == 0 else (y2 - y1) / (t2 - t1)
            b = y1 - m * t1
            self.parameters.append((m, b))
            self.limits.append((t1, t2))

    def predict_line(self, x_values):
        """Evaluate the function at every point of ``x_values``."""
        return [self.predict(x) for x in x_values]

    def predict(self, x):
        """Evaluate the function at ``x``.

        Points beyond the covered range are extrapolated with the last
        segment's line.
        """
        for (l1, l2), (m, b) in zip(self.limits, self.parameters):
            if l1 <= x <= l2:
                return m * x + b
        m, b = self.parameters[-1]
        return m * x + b

    def zero(self):
        """Return the x at which the function reaches 0.

        Returns the root of the first non-constant segment whose root lies
        inside its own limits; otherwise extrapolates the last segment.
        If that segment is constant (slope 0) there is no root and 0 is
        returned. (The original expressed the constant-slope fallback as a
        ``while``/``return`` — rewritten as a plain ``if``.)
        """
        for (l1, l2), (m, b) in zip(self.limits, self.parameters):
            if m == 0:
                continue
            x_0 = -b / m
            if l1 <= x_0 <= l2:
                return x_0
        m, b = self.parameters[-1]
        if m == 0:
            return 0
        return -b / m
class PiecewiseLinearRegression:
    """Perform a piecewise linear regression.

    The method is the one presented in:
    Time and Memory Efficient Online Piecewise Linear Approximation of Sensor Signals
    Florian Grützmacher, Benjamin Beichler, Albert Hein, Thomas Kirste and Christian Haubelt
    """

    def __init__(self, not_increasing: bool = False):
        self.segments = []
        self.not_increasing = not_increasing

    def add_point(
        self,
        t: float,
        s: float,
    ):
        """Feed one observation into the regression.

        Parameters
        ----------
        t : float
            x component
        s : float
            y component
        """
        if not self.segments:
            # First point ever seen: open the initial segment.
            self.segments.append(Segment((t, s), not_increasing=self.not_increasing))
            return
        current = self.segments[-1]
        if current.can_add(t, s):
            current.add(t, s)
            return
        # The point breaks the current fit: start a new segment anchored
        # at the previous segment's endpoint so the function is continuous.
        fresh = Segment(current.final, not_increasing=self.not_increasing)
        fresh.add(t, s)
        self.segments.append(fresh)

    def finish(self) -> PiecewesieLinearFunction:
        """Complete the last unfinished segment and return the fitted model.

        Returns
        -------
        PiecewesieLinearFunction
            The piecewise linear model fitted
        """
        return PiecewesieLinearFunction(self.segments)
import logging
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error as mae
from sklearn.metrics import mean_absolute_percentage_error as mape
from sklearn.metrics import mean_squared_error as mse
from uncertainties import ufloat

from rul_pm.results.picewise_regression import (PiecewesieLinearFunction,
                                                PiecewiseLinearRegression)
logger = logging.getLogger(__name__)
@dataclass
class MetricsResult:
    """Error metrics (and timing) of a single prediction run."""

    mae: float  # mean absolute error of the predictions
    mse: float  # mean squared error of the predictions
    fitting_time: float = 0  # time spent fitting (set by the caller; units unchecked here)
    prediction_time: float = 0  # time spent predicting (set by the caller)
@dataclass
class PredictionResult:
    """True and predicted RUL values of one model run, plus its metrics.

    ``metrics`` uses a ``default_factory`` so each instance owns its own
    ``MetricsResult``. The original used a shared default instance, which
    is mutated across every ``PredictionResult`` by ``compute_metrics``
    (and rejected outright as a mutable default on Python >= 3.11).
    """

    name: str  # identifier of the model/run
    true_RUL: np.ndarray
    predicted_RUL: np.ndarray
    metrics: MetricsResult = field(default_factory=lambda: MetricsResult(0, 0))

    def compute_metrics(self):
        """Fill ``metrics`` with the MAE/MSE of the stored predictions."""
        self.metrics.mae = mae(self.true_RUL, self.predicted_RUL)
        self.metrics.mse = mse(self.true_RUL, self.predicted_RUL)
def compute_sample_weight(sample_weight, y_true, y_pred, c: float = 0.9):
    """Build the per-sample weights used by the error metrics.

    When ``sample_weight == "relative"`` each sample is weighted by its
    relative error |y_true - y_pred| / max(y_true, c); any other value
    yields a uniform weight of 1. ``c`` bounds the denominator away from 0.
    """
    if sample_weight != "relative":
        return 1
    denominator = np.clip(y_true, c, np.inf)
    return np.abs(y_true - y_pred) / denominator
def compute_rul_line(rul: float, n: int, tt: Optional[np.array] = None):
    """Build a piecewise-linear RUL target of length ``n`` starting at ``rul``.

    ``tt`` holds the per-step increments (default: -1 per step). Values
    are clipped at zero and the walk stops once it reaches (numerically)
    zero, leaving the remaining entries at 0.
    """
    if tt is None:
        tt = -np.ones(n)
    line = np.zeros(n)
    line[0] = rul
    for i in range(len(tt) - 1):
        nxt = max(line[i] + tt[i], 0)
        line[i + 1] = nxt
        if nxt < 0.0000000000001:
            break
    return line
class CVResults:
    """Per-bin error summary of a cross-validation run.

    The true-RUL range is divided into bins and, for every fold, the mean
    error, MAE and MSE of the predictions falling in each bin are stored.

    Parameters
    ----------
    y_true: List[List]
        True values of each hold-out set of a cross validation
    y_pred: List[List]
        Predictions of each hold-out set of a cross validation
    nbins: int
        Number of bins of the histogram (ignored when ``bin_edges`` is given)
    bin_edges: Optional[np.array]
        Explicit bin edges; computed from the data when omitted
    """

    def __init__(
        self,
        y_true: List[List],
        y_pred: List[List],
        nbins: int = 5,
        bin_edges: Optional[np.array] = None,
    ):
        if bin_edges is None:
            highest = np.max([np.max(fold) for fold in y_true])
            bin_edges = np.linspace(0, highest, nbins + 1)
        self.n_folds = len(y_true)
        self.n_bins = len(bin_edges) - 1
        self.bin_edges = bin_edges
        shape = (self.n_folds, self.n_bins)
        self.mean_error = np.zeros(shape)
        self.mae = np.zeros(shape)
        self.mse = np.zeros(shape)
        self.errors = []
        for fold, (fold_pred, fold_true) in enumerate(zip(y_pred, y_true)):
            self._add_fold_result(fold, fold_pred, fold_true)

    def _add_fold_result(self, fold: int, y_pred: np.array, y_true: np.array):
        """Accumulate the binned errors of one fold into the result arrays."""
        y_pred = np.squeeze(y_pred)
        y_true = np.squeeze(y_true)
        for j in range(self.n_bins):
            lo, hi = self.bin_edges[j], self.bin_edges[j + 1]
            selected = np.where((y_true >= lo) & (y_true <= hi))[0]
            if len(selected) == 0:
                continue
            errors = y_true[selected] - y_pred[selected]
            self.mean_error[fold, j] = np.mean(errors)
            self.mae[fold, j] = np.mean(np.abs(errors))
            self.mse[fold, j] = np.mean(errors ** 2)
            self.errors.append(errors)
def model_cv_results(
    results: List[PredictionResult],
    nbins: Optional[int] = None,
    bin_edges: Optional[np.ndarray] = None,
) -> CVResults:
    """Build a CVResults from a list of per-fold prediction results.

    Either ``nbins`` or ``bin_edges`` must be provided; the missing one is
    derived from the other (edges are spread evenly up to the largest true
    RUL observed).

    Raises
    ------
    ValueError
        If both ``nbins`` and ``bin_edges`` are None.
    """
    if nbins is None and bin_edges is None:
        raise ValueError("nbins and bin_edges cannot be both None")
    if nbins is None:
        nbins = len(bin_edges) - 1
    if bin_edges is None:
        max_y_value = np.max([r.true_RUL.max() for r in results])
        bin_edges = np.linspace(0, max_y_value, nbins + 1)
    # Fix: the original used `for results in results`, shadowing the
    # parameter with the loop variable.
    trues = [r.true_RUL for r in results]
    predicted = [r.predicted_RUL for r in results]
    return CVResults(trues, predicted, nbins=nbins, bin_edges=bin_edges)
def models_cv_results(
    results_dict: Dict[str, List[PredictionResult]], nbins: int
) -> Tuple[np.ndarray, Dict[str, CVResults]]:
    """Create a dictionary with the result of each cross validation of the model.

    A single set of bin edges — spanning the largest true RUL of every
    model — is shared by all models so their histograms are comparable.
    """
    all_maxima = [
        r.true_RUL.max()
        for fold_results in results_dict.values()
        for r in fold_results
    ]
    bin_edges = np.linspace(0, np.max(all_maxima), nbins + 1)
    model_results = {
        model_name: model_cv_results(fold_results, bin_edges=bin_edges)
        for model_name, fold_results in results_dict.items()
    }
    return bin_edges, model_results
class FittedLife:
    """Represent a Fitted Life.

    Wraps the true and predicted RUL of one run-to-failure life, derives a
    time axis, and fits least-squares lines to both series so slope- and
    end-of-life-based metrics can be computed.

    Parameters
    ----------
    y_true: np.array
        The true RUL target
    y_pred: np.array
        The predicted target
    time: Optional[Union[np.array, int]]
        Time feature. If an array it is used as-is; any other non-None
        value produces an even grid from 0 to y_true[0].
    fit_line_not_increasing: Optional[bool] = False
        Whether the fitted line can increase or not.
    RUL_threshold: Optional[float]
        Indicates the thresholding value used during the fit. By default None
    """

    def __init__(
        self,
        y_true: np.array,
        y_pred: np.array,
        time: Optional[Union[np.array, int]] = None,
        fit_line_not_increasing: Optional[bool] = False,
        RUL_threshold: Optional[float] = None,
    ):
        self.fit_line_not_increasing = fit_line_not_increasing
        y_true = np.squeeze(y_true)
        y_pred = np.squeeze(y_pred)

        if time is not None:
            self.degrading_start = FittedLife._degrading_start(y_true, RUL_threshold)
            if isinstance(time, np.ndarray):
                self.time = time
            else:
                # BUG FIX: np.linspace's size keyword is `num`, not `n`
                # (the original call raised TypeError).
                self.time = np.linspace(0, y_true[0], num=len(y_true))
        else:
            self.degrading_start, self.time = FittedLife.compute_time_feature(
                y_true, RUL_threshold
            )

        self.RUL_threshold = RUL_threshold
        self.y_pred = y_pred
        self.y_true = y_true

        # Least-squares lines fitted to the predicted and true RUL series.
        self.y_pred_fitted_coefficients = np.polyfit(self.time, self.y_pred, 1)
        p = np.poly1d(self.y_pred_fitted_coefficients)
        self.y_pred_fitted = p(self.time)

        self.y_true_fitted_coefficients = np.polyfit(self.time, self.y_true, 1)
        p = np.poly1d(self.y_true_fitted_coefficients)
        self.y_true_fitted = p(self.time)

    @staticmethod
    def compute_time_feature(y_true: np.array, RUL_threshold: Optional[float] = None):
        """Return (degrading_start_index, time_vector) derived from the true RUL."""
        degrading_start = FittedLife._degrading_start(y_true, RUL_threshold)
        time = FittedLife._compute_time(y_true, degrading_start)
        return degrading_start, time

    @staticmethod
    def _degrading_start(
        y_true: np.array, RUL_threshold: Optional[float] = None
    ) -> float:
        """Obtain the index when the life value is lower than the RUL_threshold.

        Parameters
        ----------
        y_true : np.array
            Array of true values of the RUL of the life
        RUL_threshold : float

        Returns
        -------
        float
            If RUL_threshold is None, the degrading start is the first index.
            Otherwise it is the first index in which y_true < RUL_threshold
        """
        degrading_start = 0
        if RUL_threshold is not None:
            degrading_start_i = np.where(y_true < RUL_threshold)
            if len(degrading_start_i[0]) > 0:
                degrading_start = degrading_start_i[0][0]
        return degrading_start

    @staticmethod
    def _compute_time(y_true: np.array, degrading_start: int) -> np.array:
        """Compute the passage of time from the true RUL.

        The passage of time is computed as the cumulative sum of the first
        difference of the true labels. In case there are thresholded values,
        the time steps of the thresholded zone are assumed to be the median
        of the time steps computed in the zones of the life for which we
        have information.

        Parameters
        ----------
        y_true : np.array
            The true RUL labels
        degrading_start : int
            The index in which the true RUL values start to be lower than
            the threshold

        Returns
        -------
        np.array
            The cumulative time vector
        """
        if len(y_true) == 1:
            return np.array([0])
        time_diff = np.diff(np.squeeze(y_true)[degrading_start:][::-1])
        time = np.zeros(len(y_true))
        if degrading_start > 0:
            if len(time_diff) > 0:
                time[0 : degrading_start + 1] = np.median(time_diff)
            else:
                time[0 : degrading_start + 1] = 1
            time[degrading_start + 1 :] = time_diff
        else:
            time[1:] = time_diff
        return np.cumsum(time)

    def _fit_picewise_linear_regression(self, y: np.array) -> "PiecewesieLinearFunction":
        """Fit the array through a piecewise linear regression.

        Parameters
        ----------
        y : np.array
            Points to be fitted

        Returns
        -------
        PiecewesieLinearFunction
            The piecewise linear function fitted
        """
        pwlr = PiecewiseLinearRegression(not_increasing=self.fit_line_not_increasing)
        for j in range(len(y)):
            pwlr.add_point(self.time[j], y[j])
        line = pwlr.finish()
        return line

    def rmse(self, sample_weight=None) -> float:
        """Root mean squared error of the predictions (optionally weighted)."""
        N = len(self.y_pred)
        sw = compute_sample_weight(sample_weight, self.y_true[:N], self.y_pred)
        return np.sqrt(np.mean(sw * (self.y_true[:N] - self.y_pred) ** 2))

    def mae(self, sample_weight=None) -> float:
        """Mean absolute error of the predictions (optionally weighted)."""
        N = len(self.y_pred)
        sw = compute_sample_weight(sample_weight, self.y_true[:N], self.y_pred)
        return np.mean(sw * np.abs(self.y_true[:N] - self.y_pred))

    def noisiness(self) -> float:
        """How much the predictions resemble a line.

        This metric is computed as the MAE of the predictions with respect
        to their own least-squares fitted line.
        """
        return mae(self.y_pred_fitted, self.y_pred)

    def slope_resemblance(self):
        """Similarity of the fitted slopes of true and predicted RUL.

        1 when both least-squares lines have the same slope; the angle
        between them is normalized by pi/2, so the value decreases as the
        slopes diverge.

        BUG FIX: the original divided the angle by (pi/2) twice,
        compressing the score towards 1.
        """
        m1 = self.y_true_fitted_coefficients[0]
        m2 = self.y_pred_fitted_coefficients[0]
        angle = np.arctan((m1 - m2) / (1 + m1 * m2))
        return 1 - np.abs(angle / (np.pi / 2))

    def predicted_end_of_life(self):
        """Time at which the predicted RUL reaches 0 (extrapolated if it never does)."""
        z = np.where(self.y_pred == 0)[0]
        if len(z) == 0:
            return self.time[len(self.y_pred) - 1] + self.y_pred[-1]
        else:
            return self.time[z[0]]

    def end_of_life(self):
        """Time at which the true RUL reaches 0 (extrapolated if it never does)."""
        z = np.where(self.y_true == 0)[0]
        if len(z) == 0:
            return self.time[len(self.y_pred) - 1] + self.y_true[-1]
        else:
            return self.time[z[0]]

    def maintenance_point(self, m: float = 0):
        """Compute the maintenance point.

        The maintenance point is computed as the predicted end of life - m

        Parameters
        -----------
        m: float, optional
            Fault horizon. Defaults to 0.

        Returns
        --------
        float
            Time of maintenance
        """
        return self.predicted_end_of_life() - m

    def unexploited_lifetime(self, m: float = 0):
        """Compute the unexploited lifetime given a fault horizon window.

        Machine Learning for Predictive Maintenance: A Multiple Classifiers Approach
        Susto, G. A., Schirru, A., Pampuri, S., McLoone, S., & Beghi, A. (2015).

        Parameters
        ----------
        m: float, optional
            Fault horizon window. Defaults to 0.

        Returns
        -------
        float
            Unexploited lifetime (0 when maintenance falls at or after EOL)
        """
        if self.maintenance_point(m) < self.end_of_life():
            return self.end_of_life() - self.maintenance_point(m)
        else:
            return 0

    def unexpected_break(self, m: float = 0, tolerance: float = 0):
        """Compute whether an unexpected break would occur with fault horizon m.

        Machine Learning for Predictive Maintenance: A Multiple Classifiers Approach
        Susto, G. A., Schirru, A., Pampuri, S., McLoone, S., & Beghi, A. (2015).

        Parameters
        ----------
        m: float, optional
            Fault horizon window. Defaults to 0.
        tolerance: float, optional
            Slack subtracted from the maintenance point before comparing.

        Returns
        -------
        bool
            True when maintenance would happen at or after the end of life
        """
        if self.maintenance_point(m) - tolerance < self.end_of_life():
            return False
        else:
            return True
def split_lives_indices(y_true: np.array):
    """Obtain a list of index ranges, one per life.

    A new life starts wherever the RUL target increases; single-element
    spans are discarded.

    Parameters
    ----------
    y_true : np.array
        True vector with the RUL (requires at least two samples)

    Returns
    -------
    List[List[int]]
        A list with the indices belonging to each life
    """
    assert len(y_true) >= 2
    rul = np.squeeze(y_true)
    boundaries = [0]
    boundaries.extend((np.where(np.diff(rul) > 0)[0] + 1).tolist())
    boundaries.append(len(y_true))
    ranges = []
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        span = range(start, end)
        if len(span) > 1:
            ranges.append(span)
    return ranges
def split_lives(
    results: PredictionResult,
    RUL_threshold: Optional[float] = None,
    fit_line_not_increasing: Optional[bool] = False,
    time: Optional[int] = None,
) -> List[FittedLife]:
    """Divide one prediction result into a list of FittedLife objects.

    Parameters
    ----------
    results : PredictionResult
        Result holding the true and predicted RUL arrays (possibly
        containing several lives back to back)
    RUL_threshold : Optional[float], optional
        Thresholding value used during the fit, by default None
    fit_line_not_increasing : Optional[bool], optional
        Whether the fit line can increase, by default False
    time : Optional[int], optional
        A vector with timestamps. If omitted will be computed from the
        true RUL, by default None

    Returns
    -------
    List[FittedLife]
        One FittedLife per (NaN-free) life found in the result
    """
    lives = []
    for r in split_lives_indices(results.true_RUL):
        # Lives containing NaN predictions cannot be fitted; skip them.
        if np.any(np.isnan(results.predicted_RUL[r])):
            continue
        lives.append(
            FittedLife(
                results.true_RUL[r],
                results.predicted_RUL[r],
                RUL_threshold=RUL_threshold,
                fit_line_not_increasing=fit_line_not_increasing,
                time=time,
            )
        )
    return lives
def unexploited_lifetime(d: List[PredictionResult], window_size: int, step: int):
    """Mean/std of unexploited lifetime over ``step`` window sizes in [0, window_size].

    ``d`` is the list of per-fold prediction results (the original
    annotation claimed a single PredictionResult, but the value is iterated).
    """
    bb = [split_lives(cv) for cv in d]
    return unexploited_lifetime_from_cv(bb, window_size, step)
def unexploited_lifetime_from_cv(
lives: List[List[FittedLife]], window_size: int, n: int
):
std_per_window = []
mean_per_window = []
windows = np.linspace(0, window_size, n)
for m in windows:
jj = []
for r in lives:
ul_cv_list = [life.unexploited_lifetime(m) for life in r]
jj.extend(ul_cv_list)
mean_per_window.append(np.mean(jj))
std_per_window.append(np.std(jj))
return windows, np.array(mean_per_window), np.array(std_per_window)
def unexpected_breaks(
    d: List[PredictionResult], window_size: int, step: int
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute the risk of unexpected breaks with respect to the maintenance window size.

    Parameters
    ----------
    d : List[PredictionResult]
        Per-fold prediction results
    window_size : int
        Maximum size of the maintenance windows
    step : int
        Number of points in which to compute the risks;
        step different maintenance windows will be used.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        * Maintenance window size evaluated
        * Risk computed for every window size used
    """
    per_fold_lives = []
    for fold in d:
        per_fold_lives.append(split_lives(fold))
    return unexpected_breaks_from_cv(per_fold_lives, window_size, step)
def unexpected_breaks_from_cv(
lives: List[List[FittedLife]], window_size: int, n: int
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the risk of unexpected breaks given a Cross-Validation results
Parameters
----------
lives : List[List[FittedLife]]
Cross validation results.
window_size : int
Maximum size of the maintenance window
n : int
Number of points to evaluate the risk of unexpected breaks
Returns
-------
Tuple[np.ndarray, np.ndarray]
* Maintenance window size evaluated
* Risk computed for every window size used
"""
std_per_window = []
mean_per_window = []
windows = np.linspace(0, window_size, n)
for m in windows:
jj = []
for r in lives:
ul_cv_list = [life.unexpected_break(m) for life in r]
jj.extend(ul_cv_list)
mean_per_window.append(np.mean(jj))
std_per_window.append(np.std(jj))
return windows, np.array(mean_per_window), np.array(std_per_window)
def metric_J_from_cv(lives: List[List[FittedLife]], window_size: int, n: int, q1, q2):
J = []
windows = np.linspace(0, window_size, n)
for m in windows:
J_of_m = []
for r in lives:
ub_cv_list = np.array([life.unexpected_break(m) for life in r])
ub_cv_list = (ub_cv_list / (np.max(ub_cv_list) + 0.0000000001)) * q1
ul_cv_list = np.array([life.unexploited_lifetime(m) for life in r])
ul_cv_list = (ul_cv_list / (np.max(ul_cv_list) + 0.0000000001)) * q2
values = ub_cv_list + ul_cv_list
mean_J = np.mean(values)
std_ul_cv = np.std(values)
J_of_m.append(mean_J)
J.append(np.mean(J_of_m))
return windows, J
def metric_J(d, window_size: int, step: int, q1: float = 1.0, q2: float = 1.0):
    """Compute the J metric from a list of per-fold prediction results.

    BUG FIX: the original called ``metric_J_from_cv`` without its required
    ``q1``/``q2`` arguments, raising a TypeError on every call. They are
    now parameters with neutral defaults (weight 1 for each term), keeping
    the call signature backward compatible.
    """
    lives_cv = [split_lives(cv) for cv in d]
    return metric_J_from_cv(lives_cv, window_size, step, q1, q2)
def _safe_metric(metric_fn, y_true, y_pred, **kwargs):
    """Evaluate a metric, returning NaN instead of propagating failures.

    Replaces the original bare ``except:`` blocks (which also swallowed
    KeyboardInterrupt/SystemExit) with a narrow ``except Exception``.
    """
    try:
        return metric_fn(y_true, y_pred, **kwargs)
    except Exception:
        return np.nan


def cv_regression_metrics_single_model(
    results: List[PredictionResult], threshold: float = np.inf
):
    """Aggregate regression metrics (MAE, MSE, MAPE and sample-weighted
    variants) over the folds of one model.

    Parameters
    ----------
    results : List[PredictionResult]
        Per-fold prediction results
    threshold : float, optional
        Only samples whose true RUL is <= threshold are evaluated

    Returns
    -------
    dict
        Metric name -> ufloat(mean over folds, std over folds)
    """
    errors = {
        "MAE": [],
        "MAE SW": [],
        "MSE": [],
        "MSE SW": [],
        "MAPE": [],
    }
    for result in results:
        y_mask = np.where(result.true_RUL <= threshold)[0]
        y_true = np.squeeze(result.true_RUL[y_mask])
        y_pred = np.squeeze(result.predicted_RUL[y_mask])

        # Drop non-finite predictions before computing any metric.
        mask = np.isfinite(y_pred)
        y_pred = y_pred[mask]
        y_true = y_true[mask]

        # A constant prediction vector gives degenerate metrics; skip the fold.
        if len(np.unique(y_pred)) == 1:
            continue

        sw = compute_sample_weight("relative", y_true, y_pred)
        errors["MAE"].append(_safe_metric(mae, y_true, y_pred))
        errors["MAE SW"].append(_safe_metric(mae, y_true, y_pred, sample_weight=sw))
        errors["MSE"].append(_safe_metric(mse, y_true, y_pred))
        errors["MSE SW"].append(_safe_metric(mse, y_true, y_pred, sample_weight=sw))
        errors["MAPE"].append(_safe_metric(mape, y_true, y_pred))

    # The unused `lives = split_lives(result)` of the original was removed.
    return {k: ufloat(np.mean(v), np.std(v)) for k, v in errors.items()}
def cv_regression_metrics(
    results_dict: Dict[str, List[PredictionResult]], threshold: float = np.inf
) -> dict:
    """Compute the cross-validated regression metrics for each model.

    Parameters
    ----------
    results_dict : Dict[str, List[PredictionResult]]
        Model name -> list of per-fold prediction results
    threshold : float, optional
        Compute metric errors only on RUL values less than the threshold,
        by default np.inf

    Returns
    -------
    dict
        Model name -> dict of metric name ('MAE', 'MAE SW', 'MSE', ...) to
        its aggregated value over the folds
    """
    return {
        model_name: cv_regression_metrics_single_model(model_results, threshold)
        for model_name, model_results in results_dict.items()
    }
import gzip
import io
import logging
import os
import pickle
import tarfile
from enum import Enum
from pathlib import Path
from typing import List, Optional, Union
import gdown
import pandas as pd
from joblib import Memory
from rul_pm import CACHE_PATH, DATASET_PATH
from rul_pm.datasets.lives_dataset import AbstractLivesDataset
from tqdm.auto import tqdm
import shutil
import numpy as np
logger = logging.getLogger(__name__)
memory = Memory(CACHE_PATH, verbose=0)
COMPRESSED_FILE = "phm_data_challenge_2018.tar.gz"
FOLDER = "phm_data_challenge_2018"
URL = "https://drive.google.com/uc?id=15Jx9Scq9FqpIGn8jbAQB_lcHSXvIoPzb"
OUTPUT = COMPRESSED_FILE
def download(path: Path):
    """Download the PHM 2018 data-challenge archive into *path*."""
    logger.info("Downloading dataset...")
    destination = path / OUTPUT
    gdown.download(URL, str(destination), quiet=False)
def prepare_raw_dataset(path: Path):
    """Download and decompress the raw dataset into ``path / 'raw'``.

    The archive is fetched only if not already present, extracted, the
    ``train``/``test`` folders are moved up one level, and the tarball is
    removed afterwards.
    """
    def track_progress(members):
        # The archive is known to hold ~70 members; `total` only sizes the bar.
        for member in tqdm(members, total=70):
            yield member
    path = path / "raw"
    path.mkdir(parents=True, exist_ok=True)
    # Skip the download when the tarball is already on disk.
    if not (path / OUTPUT).resolve().is_file():
        download(path)
    logger.info("Decompressing dataset...")
    # NOTE(review): extractall on a hostile tarball can write outside `path`
    # (path traversal). The archive comes from a fixed URL here; consider
    # tarfile's `filter="data"` on Python >= 3.12.
    with tarfile.open(path / OUTPUT, "r") as tarball:
        tarball.extractall(path=path, members=track_progress(tarball))
    # Flatten the top-level folder the archive creates, then clean up.
    shutil.move(str(path / "phm_data_challenge_2018" / "train"), str(path / "train"))
    shutil.move(str(path / "phm_data_challenge_2018" / "test"), str(path / "test"))
    shutil.rmtree(str(path / "phm_data_challenge_2018"))
    (path / OUTPUT).unlink()
class FailureType(Enum):
    """Failure modes present in the PHM 2018 fault files."""

    FlowCoolPressureDroppedBelowLimit = "FlowCool Pressure Dropped Below Limit"
    FlowcoolPressureTooHighCheckFlowcoolPump = (
        "Flowcool Pressure Too High Check Flowcool Pump"
    )
    FlowcoolLeak = "Flowcool leak"

    @staticmethod
    def that_starts_with(s: str) -> Optional["FailureType"]:
        """Return the failure type whose value is a prefix of *s*.

        Parameters
        ----------
        s : str
            Fault name as found in the fault files.

        Returns
        -------
        Optional[FailureType]
            The matching failure type, or None when no value prefixes *s*.
        """
        for f in FailureType:
            if s.startswith(f.value):
                return f
        return None

    # Backward-compatible alias: the original (misspelled) name is kept so
    # existing callers such as prepare_dataset() keep working.
    that_starth_with = that_starts_with
from typing import List, Optional, Union
def merge_data_with_faults(
    data_file: Union[str, Path], fault_data_file: Union[str, Path]
) -> pd.DataFrame:
    """Merge the raw sensor data with the fault information.

    Parameters
    ----------
    data_file : Union[str, Path]
        Path where the raw sensor data is located
    fault_data_file : Union[str, Path]
        Path where the fault information is located

    Returns
    -------
    pd.DataFrame
        Dataframe indexed by time with the raw sensors and faults.
        The dataframe also contains a ``fault_number`` column.
    """
    # Keep "time" as a regular column until the very end: pd.merge_asof joins
    # on a *column* when ``on="time"`` is given, so setting the index before
    # merging would make the join key unavailable. merge_asof also requires
    # both frames sorted by the key.
    data = pd.read_csv(data_file).dropna().sort_values("time")
    fault_data = (
        pd.read_csv(fault_data_file)
        .drop_duplicates(subset=["time"])
        .sort_values("time")
    )
    # Sequential id for each fault; later used to split the data into lives.
    fault_data["fault_number"] = range(fault_data.shape[0])
    # Each sensor sample is matched with the *next* fault (direction="forward").
    return pd.merge_asof(data, fault_data, on="time", direction="forward").set_index(
        "time"
    )
def prepare_dataset(dataset_path: Path):
    """Split each tool's merged data into lives and pickle them.

    For every (tool, fault) pair a gzipped pickle containing the life data
    with a ``RUL`` column is written under ``processed/lives``, together with
    a ``lives_db.csv`` catalogue of all generated lives.
    """
    (dataset_path / "processed" / "lives").mkdir(exist_ok=True, parents=True)
    files = list(Path(dataset_path / "raw" / "train").resolve().glob("*.csv"))
    faults_files = list(
        Path(dataset_path / "raw" / "train" / "train_faults").resolve().glob("*.csv")
    )
    # The first 6 characters of the file stem identify the tool.
    files = {file.stem[0:6]: file for file in files}
    faults_files = {file.stem[0:6]: file for file in faults_files}
    dataset_data = []
    for filename in tqdm(faults_files.keys(), "Processing files"):
        tool = filename[0:6]
        data_file = files[tool]
        logger.info(f"Loading data file {files[tool]}")
        fault_data_file = faults_files[filename]
        data = merge_data_with_faults(data_file, fault_data_file)
        # Every fault_number delimits one run-to-failure life.
        for life_index, life_data in data.groupby("fault_number"):
            if life_data.shape[0] == 0:
                continue
            failure_type = FailureType.that_starth_with(life_data["fault_name"].iloc[0])
            output_filename = (
                f"Life_{int(life_index)}_{tool}_{failure_type.name}.pkl.gzip"
            )
            dataset_data.append(
                (tool, life_data.shape[0], failure_type.value, output_filename)
            )
            life = life_data.copy()
            # RUL decreases linearly from n-1 down to 0 within each life.
            life["RUL"] = np.arange(life.shape[0] - 1, -1, -1)
            with gzip.open(
                dataset_path / "processed" / "lives" / output_filename, "wb"
            ) as file:
                # BUG FIX: persist the life *with* the RUL column. Previously
                # the pre-copy `life_data` was pickled, silently discarding
                # the computed RUL.
                pickle.dump(life, file)
    df = pd.DataFrame(
        dataset_data, columns=["Tool", "Number of samples", "Failure Type", "Filename"]
    )
    df.to_csv(dataset_path / "processed" / "lives" / "lives_db.csv")
class PHMDataset2018(AbstractLivesDataset):
    """PHM 2018 data-challenge dataset of run-to-failure lives.

    On first use the raw data is downloaded, decompressed and split into
    gzipped per-life pickles; subsequent constructions only read the
    ``lives_db.csv`` catalogue and filter it.
    """

    def __init__(
        self,
        failure_types: Union[FailureType, List[FailureType]] = [l for l in FailureType],
        tools: Union[str, List[str]] = "all",
        path: Path = DATASET_PATH,
    ):
        # NOTE(review): the default for `failure_types` is a mutable list
        # shared across calls; harmless here since it is never mutated.
        super().__init__()
        if not isinstance(failure_types, list):
            failure_types = [failure_types]
        self.failure_types = failure_types
        # A single tool name (other than the "all" sentinel) becomes a list.
        if isinstance(tools, str) and tools != "all":
            tools = [tools]
        self.tools = tools
        self.path = path
        self.dataset_path = path / FOLDER
        self.procesed_path = self.dataset_path / "processed" / "lives"
        self.lives_table_filename = self.procesed_path / "lives_db.csv"
        # Lazily build the processed dataset (download + split) if missing.
        if not self.lives_table_filename.is_file():
            if not (self.dataset_path / "raw" / "train").is_dir():
                prepare_raw_dataset(self.dataset_path)
            prepare_dataset(self.dataset_path)
        self.lives = pd.read_csv(self.lives_table_filename)
        # Filter the catalogue by tool and by failure type.
        if tools != "all":
            self.lives = self.lives[self.lives["Tool"].isin(self.tools)]
        self.lives = self.lives[
            self.lives["Failure Type"].isin([a.value for a in self.failure_types])
        ]
        # Keep only lives with more than 1200 samples; this requires loading
        # each pickled life once, which makes construction I/O-bound.
        valid = []
        for i, (j, r) in enumerate(self.lives.iterrows()):
            df = self._load_life(r["Filename"])
            if df.shape[0] > 1200:
                valid.append(i)
        self.lives = self.lives.iloc[valid, :]

    @property
    def n_time_series(self) -> int:
        """Number of lives remaining after filtering."""
        return self.lives.shape[0]

    def _load_life(self, filename: str) -> pd.DataFrame:
        """Load one gzipped pickled life from the processed folder."""
        with gzip.open(self.procesed_path / filename, "rb") as file:
            df = pickle.load(file)
        return df

    def get_time_series(self, i: int) -> pd.DataFrame:
        """Return the data of life *i*.

        Parameters
        ----------
        i : int
            Index of the life within the filtered catalogue.

        Returns
        -------
        pd.DataFrame
            DataFrame with the data of the life i, with a freshly computed
            ``RUL`` column decreasing from n-1 to 0.
        """
        df = self._load_life(self.lives.iloc[i]["Filename"])
        # df = df[df['FIXTURESHUTTERPOSITION'] == 1].copy().dropna()
        # df = df[df['ETCHAUXSOURCETIMER'].diff() != 0]
        # df = df[df['ETCHSOURCEUSAGE'].diff() != 0]
        df["RUL"] = np.arange(df.shape[0] - 1, -1, -1)
        return df

    @property
    def rul_column(self) -> str:
        """Name of the target (remaining useful life) column."""
        return "RUL"
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from rul_pm.datasets.lives_dataset import AbstractLivesDataset
from temporis import DATA_PATH
CMAPSS_PATH = DATA_PATH / "C_MAPSS"
# Features used by
# Multiobjective Deep Belief Networks Ensemble forRemaining Useful Life Estimation in
# Prognostics Chong Zhang, Pin Lim, A. K. Qin,Senior Member, IEEE, and Kay Chen Tan,Fellow, IEEE
sensor_indices = np.array([2, 3, 4, 7, 8, 9, 11, 12, 13, 14, 15, 17, 20, 21]) + (4 - 1)
dependent_vars = ["RemainingUsefulLife"]
index_columns_names = ["UnitNumber", "Cycle"]
operational_settings_columns_names = ["OpSet" + str(i) for i in range(1, 4)]
sensor_measure_columns_names = ["SensorMeasure" + str(i) for i in range(1, 22)]
input_file_column_names = (
index_columns_names
+ operational_settings_columns_names
+ sensor_measure_columns_names
)
operation_mode = {"FD001": 0, "FD002": 1, "FD003": 2, "FD004": 3}
engines = ["FD001", "FD002", "FD003", "FD004"]
def process_file_test(file):
    """Load a C-MAPSS test split and attach the true RUL to every cycle.

    The ground-truth file gives the remaining cycles after the last observed
    cycle of each unit; the total life is reconstructed from it and a per-row
    ``RUL`` column is derived.
    """
    observations = pd.read_csv(
        CMAPSS_PATH / ("test_" + file + ".txt"),
        names=input_file_column_names,
        delimiter=r"\s+",
        header=None,
    )
    truth = pd.read_csv(
        CMAPSS_PATH / ("RUL_" + file + ".txt"), delimiter=r"\s+", header=None
    )
    truth.columns = ["truth"]
    # Units are numbered 1..n, matching the row order of the truth file.
    truth["UnitNumber"] = np.arange(1, truth.shape[0] + 1)
    # Last observed cycle per unit; total life = elapsed + remaining truth.
    last_cycle = observations.groupby("UnitNumber")["Cycle"].max().reset_index()
    last_cycle.columns = ["UnitNumber", "Elapsed"]
    last_cycle = last_cycle.merge(truth, on=["UnitNumber"], how="left")
    last_cycle["Max"] = last_cycle["Elapsed"] + last_cycle["truth"]
    observations = observations.merge(last_cycle, on=["UnitNumber"], how="left")
    observations["RUL"] = observations["Max"] - observations["Cycle"]
    observations.drop(["Max"], axis=1, inplace=True)
    return observations
def prepare_train_data(data, factor: float = 0):
    """Add a ``RUL`` column to a training dataframe.

    The RUL of each row is the number of cycles remaining until the last
    observed cycle of its unit.

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe with the file content. Must contain the ``UnitNumber``
        and ``Cycle`` columns.
    factor: float
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    pd.DataFrame
        Copy of ``data`` with an additional ``RUL`` column.
    """
    df = data.copy()
    # Last cycle per unit, broadcast back to every row of that unit. Using
    # transform avoids the merge/drop round-trip and preserves the index.
    max_cycle = df.groupby("UnitNumber")["Cycle"].transform("max")
    df["RUL"] = max_cycle - df["Cycle"]
    return df
def process_file_train(file):
    """Read one C-MAPSS training file and add the RUL and OpMode columns."""
    raw = pd.read_csv(
        CMAPSS_PATH / ("train_" + file + ".txt"),
        sep=r"\s+",
        names=input_file_column_names,
        header=None,
    )
    raw = prepare_train_data(raw)
    # Tag every row with the numeric id of its sub-dataset (FD001..FD004).
    raw["OpMode"] = operation_mode[file]
    return raw
class CMAPSSDataset(AbstractLivesDataset):
    """C-MAPSS turbofan degradation dataset as a collection of lives.

    Each life is the series of cycles of one engine unit of the selected
    sub-datasets (FD001..FD004), with a ``RUL`` target column.
    """

    def __init__(
        self, train: bool = True, models: Optional[Union[str, List[str]]] = None
    ):
        # models: optional subset of sub-datasets ("FD001".."FD004") to load;
        # None loads all of them.
        super().__init__()
        if models is not None and isinstance(models, str):
            models = [models]
        self._validate_model_names(models)
        # Train and test splits have different on-disk layouts and loaders.
        if train:
            processing_fun = process_file_train
        else:
            processing_fun = process_file_test
        self.lives = []
        for engine in engines:
            if models is not None and engine not in models:
                continue
            # One life per engine unit; the unit id is dropped and the
            # sub-dataset name kept as an "Engine" column.
            for _, g in processing_fun(engine).groupby("UnitNumber"):
                g.drop(columns=["UnitNumber"], inplace=True)
                g["Engine"] = engine
                self.lives.append(g)

    def _validate_model_names(self, models):
        """Raise ValueError if any requested sub-dataset name is unknown."""
        if models is not None:
            for model in models:
                if model not in operation_mode:
                    raise ValueError(
                        f"Invalid model: valid model are {list(operation_mode.keys())}"
                    )

    def get_time_series(self, i):
        """Return the data of life *i*.

        Returns
        -------
        pd.DataFrame
            DataFrame with the data of the life i
        """
        return self.lives[i]

    @property
    def n_time_series(self):
        """Number of loaded lives (engine units)."""
        return len(self.lives)

    @property
    def rul_column(self) -> str:
        """Name of the target (remaining useful life) column."""
        return "RUL"
import math
from typing import Dict, Iterable, List, Optional, Union
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rul_pm.graphics.utils.curly_brace import curlyBrace
from rul_pm.results.results import (FittedLife, PredictionResult,
models_cv_results, split_lives,
unexpected_breaks, unexploited_lifetime)
from temporis.dataset.transformed import TransformedDataset
def plot_lives(ds: TransformedDataset):
    """Plot the target series of every life in the dataset on one axis.

    Returns
    -------
    fig, ax
    """
    fig, ax = plt.subplots()
    for _, target in ds:
        ax.plot(target)
    return fig, ax
def cv_plot_errors_wrt_RUL(bin_edges, error_histogram, **kwargs):
    """Bar plot of the mean error per RUL bin, with std as error bars.

    Returns
    -------
    fig, ax
    """
    fig, ax = plt.subplots(**kwargs)
    positions = list(range(len(error_histogram)))
    heights = [np.mean(errors) for errors in error_histogram]
    deviations = [np.std(errors) for errors in error_histogram]
    labels = [f"[{bin_edges[i]:.1f}, {bin_edges[i+1]:.1f})" for i in positions]
    ax.bar(height=heights, x=positions, yerr=deviations, tick_label=labels)
    ax.set_xlabel("RUL")
    ax.set_ylabel("RMSE")
    return fig, ax
def _boxplot_errors_wrt_RUL_multiple_models(
    bin_edge: np.array,
    model_results: dict,
    ax=None,
    y_axis_label: Optional[str] = None,
    x_axis_label: Optional[str] = None,
    hold_out=False,
    **kwargs,
):
    """Draw grouped boxplots of the per-bin errors of several models.

    One group of boxes is drawn per RUL bin, one box per model, plus two
    curly braces on the right annotating over/under estimation regions.
    Returns (figure, axis).
    """
    def set_box_color(bp, color):
        # Color every artist of a boxplot with the model's palette color.
        plt.setp(bp["boxes"], color=color)
        plt.setp(bp["whiskers"], color=color)
        plt.setp(bp["caps"], color=color)
        plt.setp(bp["medians"], color=color)
    if ax is None:
        _, ax = plt.subplots(**kwargs)
    labels = []
    n_models = len(model_results)
    nbins = len(bin_edge) - 1
    for i in range(nbins):
        labels.append(f"[{bin_edge[i]:.1f}, {bin_edge[i+1]:.1f})")
    # Track the global error range to place the curly-brace annotations.
    max_value = -np.inf
    min_value = np.inf
    colors = sns.color_palette("hls", n_models)
    for model_number, model_name in enumerate(model_results.keys()):
        model_data = model_results[model_name]
        # The `hold_out` parameter is overwritten per model: a single fold
        # means hold-out evaluation (raw errors), otherwise CV mean errors.
        hold_out = model_data.n_folds == 1
        if hold_out:
            for errors in model_data.errors:
                min_value = min(min_value, np.min(errors))
                max_value = max(max_value, np.max(errors))
        else:
            min_value = min(min_value, np.min(model_data.mean_error))
            max_value = max(max_value, np.max(model_data.mean_error))
        # Offset each model's boxes by 0.5 within each bin group.
        positions = []
        for i in range(nbins):
            positions.append((model_number * 0.5) + (i * n_models))
        if hold_out:
            box = ax.boxplot(
                np.array(model_data.errors, dtype=object),
                positions=positions,
                widths=0.2,
            )
        else:
            box = ax.boxplot(model_data.mean_error, positions=positions, widths=0.2)
        set_box_color(box, colors[model_number])
        # Empty plot so the model shows up in the legend with its color.
        ax.plot([], c=colors[model_number], label=model_name)
    # Place each tick at the mean position of the bin's group of boxes.
    ticks = []
    for i in range(nbins):
        x = np.mean(
            [(model_number * 0.5) + (i * n_models) for model_number in range(n_models)]
        )
        ticks.append(x)
    max_x = np.max(ticks) + 1
    ax.set_xlabel("RUL" + ("" if x_axis_label is None else x_axis_label))
    ax.set_ylabel("$y - \hat{y}$" + ("" if y_axis_label is None else y_axis_label))
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels)
    ax.legend()
    # Twin axis only hosts the curly-brace annotations; hide its own axes.
    ax2 = ax.twinx()
    ax2.set_xlim(ax.get_xlim())
    ax2.set_ylim(ax.get_ylim())
    curlyBrace(
        ax.figure, ax2, (max_x, 0), (max_x, min_value), str_text="Over estim.", c="#000"
    )
    ax2.get_xaxis().set_visible(False)
    ax2.get_yaxis().set_visible(False)
    curlyBrace(
        ax.figure,
        ax2,
        (max_x, max_value),
        (max_x, 0),
        str_text="Under estim.",
        c="#000",
    )
    return ax.figure, ax
def boxplot_errors_wrt_RUL(
    results_dict: Dict[str, List[PredictionResult]],
    nbins: int,
    y_axis_label: Optional[str] = None,
    x_axis_label: Optional[str] = None,
    ax=None,
    **kwargs,
):
    """Boxplots of difference between true and predicted RUL over CV results.

    Parameters
    ----------
    results_dict: Dict[str, List[PredictionResult]]
        Dictionary with the results of the fitted models
    nbins: int
        Number of bins in which the RUL range is divided
    y_axis_label: Optional[str]. Default None
        Optional string to be appended to the y axis label
    x_axis_label: Optional[str]. Default None
        Optional string to be appended to the x axis label
    ax: Optional. Default None
        Optional axis in which the plot will be drawn.
        If an axis is not provided, it will create one.

    Returns
    -------
    fig, ax
    """
    if ax is None:
        _, ax = plt.subplots(**kwargs)
    bin_edges, model_results = models_cv_results(results_dict, nbins)
    # BUG FIX: the helper takes only `ax` (it derives the figure itself).
    # Previously a spurious `fig=fig` kwarg was passed and silently swallowed
    # by the helper's **kwargs.
    return _boxplot_errors_wrt_RUL_multiple_models(
        bin_edges,
        model_results,
        ax=ax,
        y_axis_label=y_axis_label,
        x_axis_label=x_axis_label,
    )
def _cv_barplot_errors_wrt_RUL_multiple_models(
    bin_edges,
    model_results: dict,
    fig=None,
    ax=None,
    y_axis_label: Optional[str] = None,
    x_axis_label: Optional[str] = None,
    color_palette: str = "hls",
    bar_width: float=1/1.5,
    **kwargs,
):
    """Grouped bar plot of the per-bin MAE of several models.

    One group of bars per RUL bin, one bar per model (mean MAE across folds,
    std as error bars). Models are re-ordered by their MAE in the first bin.

    Parameters
    ----------
    bin_edges: np.ndarray
        Edges of the RUL bins (len = nbins + 1)
    model_results: dict
        Mapping model name -> CV results with a ``mae`` attribute of shape
        (folds, bins)
    fig, ax:
        Figure and axis; created with **kwargs when `fig` is None.
        NOTE(review): when `fig` is given but `ax` is None, `ax` stays None
        and drawing fails — callers always pass both.
    y_axis_label, x_axis_label: Optional[str]
        Strings appended to the axis labels
    color_palette: str
        Seaborn palette name
    bar_width: float
        Width of each individual bar

    Returns
    -------
    (fig, ax)
    """
    if fig is None:
        fig, ax = plt.subplots(**kwargs)
    labels = []
    n_models = len(model_results)
    nbins = len(bin_edges) - 1
    for i in range(nbins):
        labels.append(f"[{bin_edges[i]:.1f}, {bin_edges[i+1]:.1f})")
    colors = sns.color_palette(color_palette, n_models)
    # Aggregate mean/std of the MAE across folds, one row per model.
    mean_data = []
    std_data = []
    model_names = []
    for model_number, model_name in enumerate(model_results.keys()):
        model_data = model_results[model_name]
        mean_data.append(np.mean(model_data.mae, axis=0))
        std_data.append(np.std(model_data.mae, axis=0))
        model_names.append(model_name)
    model_names = np.array(model_names)
    # Horizontal layout: each bin occupies `bar_group_width` plus a gap.
    bar_group_width = n_models*(bar_width+1)
    group_separation = int(bar_group_width/2)
    mean_data = np.vstack(mean_data)
    std_data = np.vstack(std_data)
    n_models, n_bins = mean_data.shape
    # Sort the models by their error in the first bin so bars are ordered.
    indices = np.argsort(mean_data[:, 0])
    for i in range(n_bins):
        mean_data[:, i] = mean_data[indices, i]
        std_data[:, i] = std_data[indices, i]
    for model_name, model_index in zip(model_names[indices], range(n_models)):
        positions = model_index+np.array(range(n_bins)) * (bar_group_width + group_separation)
        rect = ax.bar(
            positions,
            mean_data[model_index, :],
            yerr=std_data[model_index, :],
            label=model_name,
            width=bar_width,
            color=colors[model_index],
        )
    # Center one tick under each group of bars.
    ticks = []
    dx = 0
    for i in range(nbins):
        ticks.append( dx + bar_group_width/2)
        dx += bar_group_width + group_separation
    ax.set_xlabel("RUL" + ("" if x_axis_label is None else x_axis_label))
    ax.set_ylabel("$y - \hat{y}$" + ("" if y_axis_label is None else y_axis_label))
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels)
    ax.legend()
    return fig, ax
def barplot_errors_wrt_RUL(
    results_dict: Dict[str, List[PredictionResult]],
    nbins: int,
    y_axis_label=None,
    x_axis_label=None,
    fig=None,
    ax=None,
    color_palette: str = "hls",
    **kwargs,
):
    """Grouped bar plot of the per-bin error of each model over CV results.

    Parameters
    ----------
    results_dict: Dict[str, List[PredictionResult]]
        Dictionary with the results of the fitted models
    nbins: int
        Number of RUL bins
    y_axis_label, x_axis_label:
        Optional strings appended to the axis labels
    fig, ax:
        Optional figure and axis; created with **kwargs when missing
    color_palette: str
        Seaborn palette name

    Returns
    -------
    fig, ax
    """
    if fig is None:
        fig, ax = plt.subplots(**kwargs)
    bin_edges, model_results = models_cv_results(results_dict, nbins)
    return _cv_barplot_errors_wrt_RUL_multiple_models(
        bin_edges, model_results,
        fig=fig, ax=ax,
        y_axis_label=y_axis_label, x_axis_label=x_axis_label,
        color_palette=color_palette,
    )
def _cv_shadedline_plot_errors_wrt_RUL_multiple_models(
    bin_edges,
    model_results,
    fig=None,
    ax=None,
    y_axis_label=None,
    x_axis_label=None,
    **kwargs,
):
    """Shaded-line plot of the per-bin error of several models.

    For each model a mean-error curve is drawn over the RUL bins with a
    shaded band of +/- one standard deviation, plus two curly braces
    annotating the over/under estimation regions.

    Parameters
    ----------
    bin_edges:
        Edges of the RUL bins (len = nbins + 1)
    model_results:
        Mapping model name -> CV results (``n_folds``, ``errors`` or
        ``mean_error`` attributes)
    fig, ax:
        Figure and axis; created with **kwargs when `fig` is None
    y_axis_label, x_axis_label:
        Strings appended to the axis labels

    Returns
    -------
    (fig, ax)
    """
    if fig is None:
        fig, ax = plt.subplots(**kwargs)
    labels = []
    n_models = len(model_results)
    nbins = len(bin_edges) - 1
    width = 1.0 / n_models
    for i in range(nbins):
        labels.append(f"[{bin_edges[i]:.1f}, {bin_edges[i+1]:.1f})")
    # Track the global error range to place the curly-brace annotations.
    max_value = -np.inf
    min_value = np.inf
    colors = sns.color_palette("hls", n_models)
    for model_number, model_name in enumerate(model_results.keys()):
        model_data = model_results[model_name]
        # A single fold means hold-out evaluation (raw errors per bin),
        # otherwise mean errors across CV folds are used.
        hold_out = model_data.n_folds == 1
        if hold_out:
            for errors in model_data.errors:
                min_value = min(min_value, np.min(errors))
                max_value = max(max_value, np.max(errors))
        else:
            min_value = min(min_value, np.min(model_data.mean_error))
            max_value = max(max_value, np.max(model_data.mean_error))
        # Offset each model's curve by `width` within each bin group.
        positions = []
        for i in range(nbins):
            positions.append((model_number * width) + (i * n_models))
        if not hold_out:
            mean_error = np.mean(model_data.mean_error, axis=0)
            std_error = np.std(model_data.mean_error, axis=0)
        else:
            mean_error = np.array([np.mean(e, axis=0) for e in model_data.errors])
            std_error = np.array([np.std(e, axis=0) for e in model_data.errors])
        rect = ax.plot(
            positions, mean_error, label=model_name, color=colors[model_number]
        )
        ax.fill_between(
            positions,
            mean_error - std_error,
            mean_error + std_error,
            alpha=0.3,
            color=colors[model_number],
        )
    # NOTE(review): ticks use a fixed 0.5 offset while the curves use
    # `width` = 1/n_models; for n_models != 2 ticks are slightly off-center.
    ticks = []
    for i in range(nbins):
        x = np.mean(
            [(model_number * 0.5) + (i * n_models) for model_number in range(n_models)]
        )
        ticks.append(x)
    ax.set_xlabel("RUL" + ("" if x_axis_label is None else x_axis_label))
    ax.set_ylabel("$y - \hat{y}$" + ("" if y_axis_label is None else y_axis_label))
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels)
    ax.legend()
    max_x = np.max(ticks) + 1
    # Twin axis only hosts the curly-brace annotations; hide its own axes.
    ax2 = ax.twinx()
    ax2.set_xlim(ax.get_xlim())
    ax2.set_ylim(ax.get_ylim())
    curlyBrace(
        fig, ax2, (max_x, 0), (max_x, min_value), str_text="Over estim.", c="#000"
    )
    ax2.get_xaxis().set_visible(False)
    ax2.get_yaxis().set_visible(False)
    curlyBrace(
        fig, ax2, (max_x, max_value), (max_x, 0), str_text="Under estim.", c="#000"
    )
    return fig, ax
def shadedline_plot_errors_wrt_RUL(
    results_dict: dict,
    nbins: int,
    y_axis_label=None,
    x_axis_label=None,
    fig=None,
    ax=None,
    **kwargs,
):
    """Shaded-line plot of the difference between true and predicted RUL.

    The format of the input should be:

    .. highlight:: python
    .. code-block:: python

        {
            'Model Name': [
                {'true': np.array, 'predicted': np.array},
                ...
            ],
            'Model Name 2': [
                {'true': np.array, 'predicted': np.array},
                ...
            ]
        }

    Parameters
    ----------
    results_dict: dict
        Dictionary with the results of the fitted models
    nbins: int
        Number of RUL bins
    y_axis_label, x_axis_label:
        Optional strings appended to the axis labels
    fig, ax:
        Optional figure and axis; created with **kwargs when missing

    Returns
    -------
    fig, ax
    """
    if fig is None:
        fig, ax = plt.subplots(**kwargs)
    bin_edges, model_results = models_cv_results(results_dict, nbins)
    # BUG FIX: the helper has no `bins` parameter; the previous `bins=nbins`
    # argument was silently swallowed by its **kwargs (and would have crashed
    # plt.subplots had the helper needed to create the figure itself).
    return _cv_shadedline_plot_errors_wrt_RUL_multiple_models(
        bin_edges,
        model_results,
        fig=fig,
        ax=ax,
        y_axis_label=y_axis_label,
        x_axis_label=x_axis_label,
    )
def plot_unexploited_lifetime(
    results_dict: dict,
    max_window: int,
    n: int,
    ax=None,
    units: Optional[str] = "",
    add_shade: bool = True,
    **kwargs,
):
    """Plot the mean unexploited lifetime vs. the fault-window size, one
    curve per model, optionally shaded with +/- one standard deviation.

    Returns
    -------
    ax
        The axis on which the plot has been made
    """
    if ax is None:
        _, ax = plt.subplots(**kwargs)
    palette = sns.color_palette("hls", len(results_dict))
    for color, (model_name, model_results) in zip(palette, results_dict.items()):
        windows, mean_ul, std_ul = unexploited_lifetime(model_results, max_window, n)
        ax.plot(windows, mean_ul, label=model_name, color=color)
        if add_shade:
            ax.fill_between(
                windows, mean_ul + std_ul, mean_ul - std_ul, alpha=0.1, color=color
            )
    ax.legend()
    ax.set_title("Unexploited lifetime")
    ax.set_xlabel("Fault window size" + units)
    ax.set_ylabel(units)
    return ax
def plot_unexpected_breaks(
    results_dict: dict,
    max_window: int,
    n: int,
    ax: Optional[matplotlib.axes.Axes] = None,
    units: Optional[str] = "",
    add_shade: bool = True,
    **kwargs,
) -> matplotlib.axes.Axes:
    """Plot the risk of unexpected breaks with respect to the maintenance window

    Parameters
    ----------
    results_dict : dict
        Dictionary with the results per model
    max_window : int
        Maximum size of the maintenance window
    n : int
        Number of points used to evaluate the window size
    ax : Optional[matplotlib.axes.Axes], optional
        Axis on which to draw, by default None
    units : Optional[str], optional
        Units to use in the xlabel, by default ""
    add_shade : bool, optional
        Whether to shade +/- one standard deviation around each curve

    Returns
    -------
    matplotlib.axes.Axes
        The axis in which the plot was made
    """
    if ax is None:
        _, ax = plt.subplots(**kwargs)
    palette = sns.color_palette("hls", len(results_dict))
    for color, (model_name, model_results) in zip(palette, results_dict.items()):
        windows, mean_ub, std_ub = unexpected_breaks(model_results, max_window, n)
        ax.plot(windows, mean_ub, label=model_name, color=color)
        if add_shade:
            ax.fill_between(
                windows, mean_ub + std_ub, mean_ub - std_ub, alpha=0.1, color=color
            )
    ax.set_title("Unexpected breaks")
    ax.set_xlabel("Fault window size" + units)
    ax.set_ylabel("Risk of breakage")
    ax.legend()
    return ax
def plot_J_Cost(
    results: Dict[str, List[PredictionResult]],
    window: int,
    step: int,
    ax=None,
    ratio_min: float = 1 / 120,
    ratio_max: float = 1 / 5,
    ratio_n_points: int = 50,
):
    """Plot the combined cost J = UB + r * UL as a function of the cost
    ratio r between unexploited lifetime and unexpected breaks, one curve
    (with uncertainty band) per model.

    Parameters
    ----------
    results : Dict[str, List[PredictionResult]]
        Results of the fitted models
    window : int
        Maintenance window size used for both UB and UL curves
    step : int
        Step used to evaluate the window sizes
    ax : optional
        Axis to draw on; a new 17x5 figure is created when missing
    ratio_min, ratio_max : float
        Range of UL/UB cost ratios to evaluate
    ratio_n_points : int
        Number of ratios evaluated in the range

    Returns
    -------
    ax
        The axis on which the plot has been made
    """
    # Hoisted out of the per-model loop: optional third-party dependency.
    from uncertainties import unumpy

    def label_formatter(x):
        # BUG FIX: guard *before* dividing; x == 0 previously raised
        # ZeroDivisionError inside the tick-label computation.
        if x == 0:
            return ""
        return f"{int(1 / x)}:1"

    if ax is None:
        fig, ax = plt.subplots(figsize=(17, 5))
    ratio = np.linspace(ratio_min, ratio_max, ratio_n_points)
    colors = sns.color_palette("hls", len(results))
    for i, model_name in enumerate(results.keys()):
        window_ub, mean_ub, std_ub = unexpected_breaks(
            results[model_name], window_size=window, step=step
        )
        window_ul, mean_ul, std_ul = unexploited_lifetime(
            results[model_name], window_size=window, step=step
        )
        # Build the uncertain arrays once per model instead of per ratio.
        ub_u = unumpy.uarray(mean_ub, std_ub)
        ul_u = unumpy.uarray(mean_ul, std_ul)
        # J(r) = mean(UB * 1 + UL * r), with uncertainty propagation.
        v = [np.mean(ub_u + ul_u * r) for r in ratio]
        mean = unumpy.nominal_values(v)
        std = unumpy.std_devs(v)
        ax.plot(ratio, mean, "-o", label=model_name, color=colors[i])
        ax.fill_between(ratio, mean + std, mean - std, color=colors[i], alpha=0.2)
    ticks = ax.get_xticks().tolist()
    ticks.append(ratio[0])
    ax.set_xticks(ticks)
    ax.set_xticklabels([label_formatter(x) for x in ax.get_xticks()])
    ax.set_xlabel(
        "Ratio between UL and UB. How many minutes of UL are equal to 1 breakage"
    )
    ax.set_ylabel("J")
    return ax
def plot_life(
    life: FittedLife,
    ax=None,
    units: Optional[str] = "",
    markersize: float = 0.7,
    add_fitted: bool = False,
    plot_target:bool = True,
    add_regressed:bool = True,
    start_x:int= 0,
    label:str = '',
    **kwargs,
):
    """Plot the predicted and true RUL of a single life.

    Parameters
    ----------
    life : FittedLife
        Life whose predictions are plotted
    ax : optional
        Axis to draw on; a new one is created when missing
    units : Optional[str]
        Time units used for both axis labels
    markersize : float
        Size of the prediction markers
    add_fitted : bool
        Also draw the piecewise-fitted prediction and target curves
    plot_target : bool
        Draw the true RUL curve
    add_regressed : bool
        When the true RUL does not reach 0, extend it linearly to 0
    start_x : int
        First prediction index to plot
    label : str
        Name appended to the 'Predicted' legend entry

    Returns
    -------
    ax
        The axis on which the plot has been made
    """
    if ax is None:
        _, ax = plt.subplots(1, 1, **kwargs)
    time = life.time
    # Predictions as individual markers, optionally skipping the first points.
    ax.plot(
        life.time[start_x: len(life.y_pred)],
        life.y_pred[start_x:],
        "o",
        label=f"Predicted {label}",
        markersize=markersize,
    )
    if plot_target:
        ax.plot(life.time, life.y_true, label="True", linewidth=3)
    # If the life was censored (true RUL ends above 0), extrapolate the
    # true curve linearly down to 0.
    if add_regressed and life.y_true[-1] > 0:
        time1 = np.hstack((time[-1], time[-1] + life.y_true[-1]))
        ax.plot(time1, [life.y_true[-1], 0], label="Regressed true")
    if add_fitted:
        #time1 = np.hstack(
        #    (time[len(life.y_pred) - 1], time[len(life.y_pred) - 1] + life.y_pred[-1])
        #)
        #ax.plot(time1, [life.y_pred[-1], 0], label="Projected end")
        # NOTE(review): both fitted curves share the same legend label.
        ax.plot(
            life.time,
            life.y_pred_fitted,
            label="Picewise fitted",
        )
        ax.plot(
            life.time,
            life.y_true_fitted,
            label="Picewise fitted",
        )
    ax.set_ylabel(units)
    ax.set_xlabel(units)
    # Leave a 10% margin below 0 so markers at RUL=0 remain visible.
    _, max = ax.get_ylim()
    ax.set_ylim(0 - max / 10, max)
    legend = ax.legend(markerscale=15,)
    return ax
def plot_predictions_grid(
    results: Union[PredictionResult, List[PredictionResult]],
    ncols: int = 3,
    alpha=1.0,
    xlabel: Optional[str] = None,
    ylabel: Optional[str] = None,
    **kwargs,
):
    """Plot a matrix of predictions

    One subplot per life; when several models are given, their predictions
    for the same life are overlaid on the same subplot.

    Parameters
    ----------
    results : Union[PredictionResult, List[PredictionResult]]
        Result (or list of results, one per model) to plot
    ncols : int, optional
        Number of columns in the plot, by default 3
    alpha : float, optional
        Opacity of the predicted curves, by default 1.0
    xlabel : Optional[str], optional
        Xlabel, by default None
    ylabel : Optional[str], optional
        YLabel, by default None

    Return
    ------
    fig, ax:
        Figure and axis
    """
    def linear_to_subindices(i, ncols):
        # Map a flat subplot index to its (row, col) grid position.
        row = int(i / ncols)
        col = i % ncols
        return row, col
    if isinstance(results, PredictionResult):
        results = [results]
    # `init` is False only while processing the first model: the grid is
    # created and the "True" curves drawn once; later models only overlay
    # their predictions.
    init = False
    for model_results in results:
        lives_model = split_lives(model_results)
        NROW = math.ceil(len(lives_model) / ncols)
        if not init:
            fig, ax = plt.subplots(NROW, ncols, squeeze=False, **kwargs)
        for i, life in enumerate(lives_model):
            row, col = linear_to_subindices(i, ncols)
            if not init:
                ax[row, col].plot(life.time, life.y_true, label="True")
            ax[row, col].plot(
                life.time, life.y_pred, label=model_results.name, alpha=alpha
            )
            if xlabel is not None:
                ax[row, col].set_xlabel(xlabel)
            if ylabel is not None:
                ax[row, col].set_ylabel(ylabel)
        init = True
    # Remove the unused trailing subplots of the last row.
    for j in range(len(lives_model), NROW * ncols):
        row, col = linear_to_subindices(j, ncols)
        fig.delaxes(ax[row, col])
    for a in ax.flatten():
        a.legend()
    return fig, ax
def plot_predictions(
    result: PredictionResult,
    ax=None,
    units: str = "Hours [h]",
    markersize: float = 0.7,
    plot_fitted: bool = True,
    model_name: str = "",
    **kwargs,
):
    """Plot the predicted and the true remaining useful lives.

    Parameters
    ----------
    result : PredictionResult
        Result of a fitted model
    ax : optional
        Axis to plot. If it is missing a new figure will be created, by default None
    units : str, optional
        Units of time to be used in the axis labels, by default 'Hours [h]'
    markersize : float, optional
        Size of the prediction markers, by default 0.7
    plot_fitted : bool, optional
        Whether to overlay the piecewise-fitted prediction curve, by default True
    model_name : str, optional
        Name appended to the 'Predicted' legend entry, by default ''

    Returns
    -------
    ax
        The axis on which the plot has been made
    """
    if ax is None:
        _, ax = plt.subplots(1, 1, **kwargs)
    y_predicted = result.predicted_RUL
    y_true = result.true_RUL
    ax.plot(y_predicted, "o", label=f"Predicted {model_name}", markersize=markersize)
    ax.plot(y_true, label="True")
    if plot_fitted:
        # Best-effort overlay: the piecewise fit may fail for degenerate
        # predictions, in which case it is skipped. (Previously a bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.)
        try:
            fitted = np.hstack([life.y_pred_fitted for life in split_lives(result)])
            ax.plot(fitted)
        except Exception:
            pass
    ax.set_ylabel(units)
    ax.set_xlabel(units)
    legend = ax.legend()
    # NOTE(review): `legendHandles` is deprecated in recent matplotlib in
    # favour of `legend_handles`; kept for compatibility with older versions.
    for handle in legend.legendHandles:
        handle.set_markersize(6)
    return ax
from copy import copy
from typing import Callable, List, Optional, Tuple, Union
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from temporis.dataset.ts_dataset import AbstractTimeSeriesDataset
def add_vertical_line(ax, v_x, label, color, line, n_lines):
    """Draw a labelled vertical line, stacking the text labels evenly in
    the upper half of the axis so several lines do not overlap."""
    y_min, y_max = ax.get_ylim()
    ax.axvline(v_x, label=label, color=color)
    # Place the i-th label between 50% and 100% of the y range.
    y_pos = y_min + (y_max - y_min) * (0.5 + 0.5 * (line / n_lines))
    text = ax.text(v_x, y_pos, label, color=color, fontweight="semibold")
    # White outline keeps the text readable on top of the histogram.
    text.set_path_effects([PathEffects.withStroke(linewidth=2, foreground="w")])
def lives_duration_histogram(
    datasets: Union[AbstractTimeSeriesDataset, List[AbstractTimeSeriesDataset]],
    xlabel: str,
    label: Union[str, List[str]] = "",
    bins: int = 15,
    units: str = "m",
    vlines: Optional[List[Tuple[float, str]]] = None,
    ax=None,
    add_mean: bool = True,
    add_median: bool = True,
    transform: Callable[[float], float] = lambda x: x,
    threshold: float = np.inf,
    color=None,
    **kwargs,
):
    """Generate an histogram from the lives durations of the dataset

    Parameters
    ----------
    datasets : Union[AbstractTimeSeriesDataset, List[AbstractTimeSeriesDataset]]
        Dataset (or list of datasets) from which take the lives durations
    xlabel: str
        Label of the x axis
    label: Union[str, List[str]] = ''
        Label of each dataset to use in the plot legend
    bins : int, optional
        Number of bins to compute in the histogram, by default 15
    units : str, optional
        Units of time of the lives. Useful to generate labels, by default 'm'
    vlines : Optional[List[Tuple[float, str]]], optional
        Vertical lines to be added to the plot.
        Each element of the list should be the x position in the first element
        of the tuple, and the second element of the tuple should be the label
        of the line. By default None (no extra lines).
    ax : optional
        Axis where to draw the plot.
        If missing a new figure will be created, by default None
    add_mean : bool, optional
        whether to add a vertical line with the mean value, by default True
    add_median : bool, optional
        whether to add a vertical line with the median value, by default True
    transform : Callable[[float], float], optional
        A function to transform each duration, by default lambda x: x
    threshold : float, optional
        Includes durations less than the threshold, by default np.inf

    Returns
    -------
    fig, ax
    """
    # BUG FIX: avoid a mutable default argument and use the correct type
    # (a *list* of (position, label) tuples, not a single tuple).
    if vlines is None:
        vlines = []
    if isinstance(datasets, list):
        # One label per dataset is required when several datasets are given.
        assert len(datasets) == len(label)
    else:
        datasets = [datasets]
        label = [label]
    durations = [
        [transform(duration) for duration in ds.durations()] for ds in datasets
    ]
    return lives_duration_histogram_from_durations(
        durations,
        xlabel=xlabel,
        label=label,
        bins=bins,
        units=units,
        vlines=vlines,
        ax=ax,
        add_mean=add_mean,
        add_median=add_median,
        threshold=threshold,
        color=color,
        **kwargs,
    )
def lives_duration_histogram_from_durations(
    durations: Union[List[float], List[List[float]]],
    xlabel: str,
    label: Union[str, List[str]] = "",
    bins: int = 15,
    units: str = "m",
    vlines: List[Tuple[float, str]] = [],
    ax=None,
    add_mean: bool = True,
    add_median: bool = True,
    threshold: float = np.inf,
    color=None,
    alpha=1.0,
    **kwargs,
):
    """Generate an histogram from a set of lives durations.

    Parameters
    ----------
    durations : Union[List[float], List[List[float]]]
        Duration of each life, or a list of such lists (one per dataset)
    xlabel: str
        Label of the x axis
    label: Union[str, List[str]] = ''
        Label of each group of durations, used in the legend
    bins : int, optional
        Number of bins to compute in the histogram, by default 15
    units : str, optional
        Units of time of the lives. Useful to generate labels, by default 'm'
    vlines : List[Tuple[float, str]], optional
        Vertical lines to be added to the plot as (x position, label) tuples.
        NOTE(review): mutable default argument; it is copied before any
        append below, so the shared default is never mutated.
    ax : optional
        Axis where to draw the plot.
        If missing a new figure will be created, by default None
    add_mean : bool, optional
        whether to add a vertical line with the mean value, by default True
    add_median : bool, optional
        whether to add a vertical line with the median value, by default True
    threshold : float, optional
        Includes durations less than the threshold, by default np.inf
    color :
        Color passed to `ax.hist` for every group
    alpha : float
        Opacity of the histogram bars

    Returns
    -------
    (fig, ax)
    """
    if ax is None:
        _, ax = plt.subplots(1, 1, **kwargs)
    # Normalize to a list of duration lists, one per labelled group.
    if isinstance(durations[0], list):
        assert isinstance(label, list)
        assert len(durations) == len(label)
    else:
        durations = [durations]
        label = [label]
    for l, dur in zip(label, durations):
        if len(l) > 0:
            l += " "
        # Copy before appending so the caller's list (and the shared default)
        # is left untouched; mean/median lines accumulate across groups.
        vlines = copy(vlines)
        if add_mean:
            vlines.append((np.mean(dur), l + "Mean"))
        if add_median:
            vlines.append((np.median(dur), l + "Median"))
        # The threshold filters the drawn bars but not the mean/median above.
        dur = [d for d in dur if d < threshold]
        ax.hist(dur, bins, color=color, alpha=alpha, label=l)
    ax.set_xlabel(xlabel)
    ax.set_ylabel("Number of lives")
    colors = sns.color_palette("hls", len(vlines))
    for i, (v_x, l) in enumerate(vlines):
        label = f"{l}: {v_x:.2f} {units}"
        add_vertical_line(ax, v_x, label, colors[i], i, len(vlines))
    ax.legend()
    return ax.figure, ax
def durations_boxplot(
datasets: Union[AbstractTimeSeriesDataset, List[AbstractTimeSeriesDataset]],
xlabel: Union[str, List[str]],
ylabel: str,
ax=None,
hlines: List[Tuple[float, str]] = [],
units: str = "m",
transform: Callable[[float], float] = lambda x: x,
maxy: Optional[float] = None,
**kwargs,
):
"""Generate boxplots of the lives duration
Parameters
----------
datasets : Union[AbstractLivesDataset, List[AbstractLivesDataset]]
[description]
xlabel : Union[str, List[str]]
[description]
ylabel : str
[description]
ax : [type], optional
[description], by default None
hlines : List[Tuple[float, str]], optional
[description], by default []
units : str, optional
[description], by default 'm'
transform : Callable[[float], float], optional
[description], by default lambdax:x
maxy : Optional[float], optional
[description], by default None
Returns
-------
[type]
[description]
"""
if isinstance(datasets, list):
assert isinstance(xlabel, list)
assert len(datasets) == len(xlabel)
else:
datasets = [datasets]
xlabel = [xlabel]
durations = []
for ds in datasets:
durations.append([transform(duration) for duration in ds.durations()])
return durations_boxplot_from_durations(
durations,
xlabel=xlabel,
ylabel=ylabel,
ax=ax,
hlines=hlines,
units=units,
maxy=maxy,
**kwargs,
)
def durations_boxplot_from_durations(
durations: Union[List[float], List[List[float]]],
xlabel: Union[str, List[str]],
ylabel: str,
ax=None,
hlines: List[Tuple[float, str]] = [],
units: str = "m",
maxy: Optional[float] = None,
**kwargs,
):
"""Generate an histogram from a list of durations
Parameters
----------
durations : Union[List[float], List[List[float]]]
[description]
xlabel : Union[str, List[str]]
[description]
ylabel : str
[description]
ax : [type], optional
[description], by default None
hlines : List[Tuple[float, str]], optional
[description], by default []
units : str, optional
[description], by default 'm'
maxy : Optional[float], optional
[description], by default None
Returns
-------
[type]
[description]
"""
if isinstance(durations[0], list):
assert isinstance(xlabel, list)
assert len(durations) == len(xlabel)
else:
durations = [durations]
xlabel = [xlabel]
if ax is None:
fig, ax = plt.subplots(**kwargs)
ax.boxplot(durations, labels=xlabel)
ax.set_ylabel(ylabel)
if maxy is not None:
miny, _ = ax.get_ylim()
ax.set_ylim(miny, maxy)
colors = sns.color_palette("hls", len(hlines))
for i, (pos, label) in enumerate(hlines):
ax.axhline(pos, label=f"{label}: {pos:.2f} {units}", color=colors[i])
_, labels = ax.get_legend_handles_labels()
if len(labels) > 0:
ax.legend()
return ax.figure, ax | /rul_pm-1.1.0.tar.gz/rul_pm-1.1.0/rul_pm/graphics/duration.py | 0.945676 | 0.538983 | duration.py | pypi |
from typing import List, Optional, Type
import matplotlib.cm as cm
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm, Normalize
def time_series_importance(
n_features:int,
window_size:int,
coefficients:np.ndarray,
column_names: List[str],
t: Optional[float] = None,
ax=None,
colormap: str = "Greens",
features_to_plot: Optional[int] = None,
base_alpha:float =0.2,
normalizer_cls: Type[Normalize] = Normalize
):
"""Plot the feature importance by time-stamp and feature
Parameters
----------
n_features : int
Total number of features used in the model
window_size : int
Window size used in the model
coefficients : np.ndarray
Coefficient array with shape (1 x n_features*window_size)
column_names : List[str]
Name of the columns
t : Optional[float], optional
[description], by default None
ax : [type], optional
Axis where to put the graphic, by default None
colormap : str, optional
Color map to use, by default "Greens"
features_to_plot : Optional[int], optional
Maximum number of features to plot, by default None
If it is omitted all the features will be used.
The features_to_plot most important features are going to be plotted.
The importance will be computed as the sum of the timestamp importance
per feature
normalizer_cls : Type[Normalize], by default Normalize
Color mapper class
"""
def color_threshold(importance, t):
if importance > t:
color = "green"
alpha = 1.0
else:
color = "black"
alpha = 0.2
return color, alpha
if ax is None:
fig, ax = plt.subplots(figsize=(17, 5))
else:
fig = ax.get_figure()
im = coefficients.reshape(window_size, n_features)
cmap = plt.get_cmap(colormap)
norm = normalizer_cls(np.min(im), np.max(im), clip=True)
importance = np.sum(im, axis=0)
features_order = np.argsort(importance)[::-1]
if features_to_plot is None:
features_to_plot = len(features_order)
features_order = features_order[:features_to_plot][::-1]
n_selected_features = len(features_order)
for w in range(window_size):
for y, f in enumerate(features_order):
importance = im[w, f]
if t is not None:
color, alpha = color_threshold(importance, 0)
else:
color, alpha = cmap(norm(importance)), np.clip(
norm(importance) + base_alpha, 0, 1
)
ax.scatter(w, y, color=color, alpha=alpha, marker="s", s=75)
ax.set_yticks(list(range(n_selected_features)))
ax.set_yticklabels(column_names[features_order])
ax.set_xlim(-1, window_size + 0.5)
ax.set_ylim(-1, n_selected_features + 0.5)
cbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap))
disp = (np.max(im) - np.min(im)) * 0.05
cbar.set_ticks([np.min(im), np.max(im) - disp])
cbar.ax.set_yticklabels(["Less important", "More Important"])
ax.set_xlabel("Time window")
return ax | /rul_pm-1.1.0.tar.gz/rul_pm-1.1.0/rul_pm/graphics/feature_importance.py | 0.960431 | 0.692291 | feature_importance.py | pypi |
class _UNDEFINED(object):
def __bool__(self):
return False
__name__ = 'UNDEFINED'
__nonzero__ = __bool__
def __repr__(self):
return self.__name__
UNDEFINED = _UNDEFINED()
"""
A sentinel value to specify that something is undefined. When evaluated, the value is falsy.
.. versionadded:: 2.0.0
"""
class EngineError(Exception):
"""The base exception class from which other exceptions within this package inherit."""
def __init__(self, message=''):
"""
:param str message: A text description of what error occurred.
"""
self.message = message
"""A text description of what error occurred."""
def __repr__(self):
return "<{} message={!r} >".format(self.__class__.__name__, self.message)
class EvaluationError(EngineError):
"""
An error raised for issues which occur while the rule is being evaluated. This can occur at parse time while AST
nodes are being evaluated during the reduction phase.
"""
pass
class SyntaxError(EngineError):
"""A base error for syntax related issues."""
class DatetimeSyntaxError(SyntaxError):
"""An error raised for issues regarding the use of improperly formatted datetime expressions."""
def __init__(self, message, value):
"""
:param str message: A text description of what error occurred.
:param str value: The datetime value which contains the syntax error which caused this exception to be raised.
"""
super(DatetimeSyntaxError, self).__init__(message)
self.value = value
"""The datetime value which contains the syntax error which caused this exception to be raised."""
class FloatSyntaxError(SyntaxError):
"""
An error raised for issues regarding the use of improperly formatted float expressions.
.. versionadded:: 4.0.0
"""
def __init__(self, message, value):
"""
:param str message: A text description of what error occurred.
:param str value: The float value which contains the syntax error which caused this exception to be raised.
"""
super(FloatSyntaxError, self).__init__(message)
self.value = value
"""The float value which contains the syntax error which caused this exception to be raised."""
class TimedeltaSyntaxError(SyntaxError):
"""
An error raised for issues regarding the use of improperly formatted timedelta expressions.
.. versionadded:: 3.5.0
"""
def __init__(self, message, value):
"""
:param str message: A text description of what error occurred.
:param str value: The timedelta value which contains the syntax error which caused this exception to be raised.
"""
super(TimedeltaSyntaxError, self).__init__(message)
self.value = value
"""The timedelta value which contains the syntax error which caused this exception to be raised."""
class RegexSyntaxError(SyntaxError):
"""An error raised for issues regarding the use of improper regular expression syntax."""
def __init__(self, message, error, value):
"""
:param str message: A text description of what error occurred.
:param error: The :py:exc:`re.error` exception from which this error was triggered.
:type error: :py:exc:`re.error`
:param str value: The regular expression value which contains the syntax error which caused this exception to be
raised.
"""
super(RegexSyntaxError, self).__init__(message)
self.error = error
"""The :py:exc:`re.error` exception from which this error was triggered."""
self.value = value
"""The regular expression value which contains the syntax error which caused this exception to be raised."""
class RuleSyntaxError(SyntaxError):
"""An error raised for issues identified while parsing the grammar of the rule text."""
def __init__(self, message, token=None):
"""
:param str message: A text description of what error occurred.
:param token: The PLY token (if available) which is related to the syntax error.
"""
if token is None:
position = 'EOF'
else:
position = "line {0}:{1}".format(token.lineno, token.lexpos)
message = message + ' at: ' + position
super(RuleSyntaxError, self).__init__(message)
self.token = token
"""The PLY token (if available) which is related to the syntax error."""
class AttributeResolutionError(EvaluationError):
"""
An error raised with an attribute can not be resolved to a value.
.. versionadded:: 2.0.0
"""
def __init__(self, attribute_name, object_, thing=UNDEFINED, suggestion=None):
"""
:param str attribute_name: The name of the symbol that can not be resolved.
:param object_: The value that *attribute_name* was used as an attribute for.
:param thing: The root-object that was used to resolve *object*.
:param str suggestion: An optional suggestion for a valid attribute name.
.. versionchanged:: 3.2.0
Added the *suggestion* parameter.
"""
self.attribute_name = attribute_name
"""The name of the symbol that can not be resolved."""
self.object = object_
"""The value that *attribute_name* was used as an attribute for."""
self.thing = thing
"""The root-object that was used to resolve *object*."""
self.suggestion = suggestion
"""An optional suggestion for a valid attribute name."""
super(AttributeResolutionError, self).__init__("unknown attribute: {0!r}".format(attribute_name))
def __repr__(self):
return "<{} message={!r} suggestion={!r} >".format(self.__class__.__name__, self.message, self.suggestion)
class AttributeTypeError(EvaluationError):
"""
An error raised when an attribute with type information is resolved to a Python value that is not of that type.
"""
def __init__(self, attribute_name, object_type, is_value, is_type, expected_type):
"""
:param str attribute_name: The name of the symbol that can not be resolved.
:param object_type: The value that *attribute_name* was used as an attribute for.
:param is_value: The native Python value of the incompatible attribute.
:param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible attribute.
:param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this
attribute.
"""
self.attribute_name = attribute_name
"""The name of the attribute that is of an incompatible type."""
self.object_type = object_type
"""The object on which the attribute was resolved."""
self.is_value = is_value
"""The native Python value of the incompatible attribute."""
self.is_type = is_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible attribute."""
self.expected_type = expected_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this attribute."""
message = "attribute {0!r} resolved to incorrect datatype (is: {1}, expected: {2})".format(
attribute_name,
is_type.name,
expected_type.name
)
super(AttributeTypeError, self).__init__(message)
class LookupError(EvaluationError):
"""
An error raised when a lookup operation fails to obtain and *item* from a *container*. This is analogous to a
combination of Python's builtin :py:exc:`IndexError` and :py:exc:`KeyError` exceptions.
.. versionadded:: 2.4.0
"""
def __init__(self, container, item):
"""
:param container: The container object that the lookup was performed on.
:param item: The item that was used as either the key or index of *container* for the lookup.
"""
self.container = container
"""The container object that the lookup was performed on."""
self.item = item
"""The item that was used as either the key or index of *container* for the lookup."""
super(LookupError, self).__init__('lookup operation failed')
class SymbolResolutionError(EvaluationError):
"""An error raised when a symbol name is not able to be resolved to a value."""
def __init__(self, symbol_name, symbol_scope=None, thing=UNDEFINED, suggestion=None):
"""
:param str symbol_name: The name of the symbol that can not be resolved.
:param str symbol_scope: The scope of where the symbol should be valid for resolution.
:param thing: The root-object that was used to resolve the symbol.
:param str suggestion: An optional suggestion for a valid symbol name.
.. versionchanged:: 2.0.0
Added the *thing* parameter.
.. versionchanged:: 3.2.0
Added the *suggestion* parameter.
"""
self.symbol_name = symbol_name
"""The name of the symbol that can not be resolved."""
self.symbol_scope = symbol_scope
"""The scope of where the symbol should be valid for resolution."""
self.thing = thing
"""The root-object that was used to resolve the symbol."""
self.suggestion = suggestion
"""An optional suggestion for a valid symbol name."""
super(SymbolResolutionError, self).__init__("unknown symbol: {0!r}".format(symbol_name))
def __repr__(self):
return "<{} message={!r} suggestion={!r} >".format(self.__class__.__name__, self.message, self.suggestion)
class SymbolTypeError(EvaluationError):
"""An error raised when a symbol with type information is resolved to a Python value that is not of that type."""
def __init__(self, symbol_name, is_value, is_type, expected_type):
"""
:param str symbol_name: The name of the symbol that is of an incompatible type.
:param is_value: The native Python value of the incompatible symbol.
:param is_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible symbol.
:param expected_type: The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this
symbol.
"""
self.symbol_name = symbol_name
"""The name of the symbol that is of an incompatible type."""
self.is_value = is_value
"""The native Python value of the incompatible symbol."""
self.is_type = is_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` of the incompatible symbol."""
self.expected_type = expected_type
"""The :py:class:`rule-engine type<rule_engine.ast.DataType>` that was expected for this symbol."""
message = "symbol {0!r} resolved to incorrect datatype (is: {1}, expected: {2})".format(
symbol_name,
is_type.name,
expected_type.name
)
super(SymbolTypeError, self).__init__(message)
class FunctionCallError(EvaluationError):
"""
An error raised when there is an issue calling a function.
.. versionadded:: 4.0.0
"""
def __init__(self, message, error=None, function_name=None):
super(FunctionCallError, self).__init__(message)
self.error = error
"""The exception from which this error was triggered."""
self.function_name = function_name | /rule-engine-4.1.0.tar.gz/rule-engine-4.1.0/lib/rule_engine/errors.py | 0.796015 | 0.30571 | errors.py | pypi |
import collections
import collections.abc
import datetime
import decimal
import functools
import math
import random
from ._utils import parse_datetime, parse_float, parse_timedelta
from . import ast
from . import errors
from . import types
import dateutil.tz
def _builtin_filter(function, iterable):
return tuple(filter(function, iterable))
def _builtin_map(function, iterable):
return tuple(map(function, iterable))
def _builtin_parse_datetime(builtins, string):
return parse_datetime(string, builtins.timezone)
def _builtin_random(boundary=None):
if boundary:
if not types.is_natural_number(boundary):
raise errors.FunctionCallError('argument #1 (boundary) must be a natural number')
return random.randint(0, int(boundary))
return random.random()
def _builtins_split(string, sep=None, maxsplit=None):
if maxsplit is None:
maxsplit = -1
elif types.is_natural_number(maxsplit):
maxsplit = int(maxsplit)
else:
raise errors.FunctionCallError('argument #3 (maxsplit) must be a natural number')
return tuple(string.split(sep=sep, maxsplit=maxsplit))
class BuiltinValueGenerator(object):
"""
A class used as a wrapper for builtin values to differentiate between a value that is a function and a value that
should be generated by calling a function. A value that is generated by calling a function is useful for determining
the value during evaluation for things like the current time.
.. versionadded:: 4.0.0
"""
__slots__ = ('callable',)
def __init__(self, callable):
self.callable = callable
def __call__(self, builtins):
return self.callable(builtins)
class Builtins(collections.abc.Mapping):
"""
A class to define and provide variables to within the builtin context of rules. These can be accessed by specifying
a symbol name with the ``$`` prefix.
"""
scope_name = 'built-in'
"""The identity name of the scope for builtin symbols."""
def __init__(self, values, namespace=None, timezone=None, value_types=None):
"""
:param dict values: A mapping of string keys to be used as symbol names with values of either Python literals or
a function which will be called when the symbol is accessed. When using a function, it will be passed a
single argument, which is the instance of :py:class:`Builtins`.
:param str namespace: The namespace of the variables to resolve.
:param timezone: A timezone to use when resolving timestamps.
:type timezone: :py:class:`~datetime.tzinfo`
:param dict value_types: A mapping of the values to their datatypes.
.. versionchanged:: 2.3.0
Added the *value_types* parameter.
"""
self.__values = values
self.__value_types = value_types or {}
self.namespace = namespace
self.timezone = timezone or dateutil.tz.tzlocal()
def resolve_type(self, name):
"""
The method to use for resolving the data type of a builtin symbol.
:param str name: The name of the symbol to retrieve the data type of.
:return: The data type of the symbol or :py:attr:`~rule_engine.ast.DataType.UNDEFINED`.
"""
return self.__value_types.get(name, ast.DataType.UNDEFINED)
def __repr__(self):
return "<{} namespace={!r} keys={!r} timezone={!r} >".format(self.__class__.__name__, self.namespace, tuple(self.keys()), self.timezone)
def __getitem__(self, name):
value = self.__values[name]
if isinstance(value, collections.abc.Mapping):
if self.namespace is None:
namespace = name
else:
namespace = self.namespace + '.' + name
return self.__class__(value, namespace=namespace, timezone=self.timezone)
elif callable(value) and isinstance(value, BuiltinValueGenerator):
value = value(self)
return value
def __iter__(self):
return iter(self.__values)
def __len__(self):
return len(self.__values)
@classmethod
def from_defaults(cls, values=None, **kwargs):
"""Initialize a :py:class:`Builtins` instance with a set of default values."""
now = BuiltinValueGenerator(lambda builtins: datetime.datetime.now(tz=builtins.timezone))
# there may be errors here if the decimal.Context precision exceeds what is provided by the math constants
default_values = {
# mathematical constants
'e': decimal.Decimal(repr(math.e)),
'pi': decimal.Decimal(repr(math.pi)),
# timestamps
'now': now,
'today': BuiltinValueGenerator(lambda builtins: now(builtins).replace(hour=0, minute=0, second=0, microsecond=0)),
# functions
'abs': abs,
'any': any,
'all': all,
'sum': sum,
'map': _builtin_map,
'max': max,
'min': min,
'filter': _builtin_filter,
'parse_datetime': BuiltinValueGenerator(lambda builtins: functools.partial(_builtin_parse_datetime, builtins)),
'parse_float': parse_float,
'parse_timedelta': parse_timedelta,
'random': _builtin_random,
'split': _builtins_split
}
default_values.update(values or {})
default_value_types = {
# mathematical constants
'e': ast.DataType.FLOAT,
'pi': ast.DataType.FLOAT,
# timestamps
'now': ast.DataType.DATETIME,
'today': ast.DataType.DATETIME,
# functions
'abs': ast.DataType.FUNCTION('abs', return_type=ast.DataType.FLOAT, argument_types=(ast.DataType.FLOAT,)),
'all': ast.DataType.FUNCTION('all', return_type=ast.DataType.BOOLEAN, argument_types=(ast.DataType.ARRAY,)),
'any': ast.DataType.FUNCTION('any', return_type=ast.DataType.BOOLEAN, argument_types=(ast.DataType.ARRAY,)),
'sum': ast.DataType.FUNCTION('sum', return_type=ast.DataType.FLOAT, argument_types=(ast.DataType.ARRAY(ast.DataType.FLOAT),)),
'map': ast.DataType.FUNCTION('map', return_type=ast.DataType.ARRAY, argument_types=(ast.DataType.FUNCTION, ast.DataType.ARRAY)),
'max': ast.DataType.FUNCTION('max', return_type=ast.DataType.FLOAT, argument_types=(ast.DataType.ARRAY(ast.DataType.FLOAT),)),
'min': ast.DataType.FUNCTION('min', return_type=ast.DataType.FLOAT, argument_types=(ast.DataType.ARRAY(ast.DataType.FLOAT),)),
'filter': ast.DataType.FUNCTION('filter', return_type=ast.DataType.ARRAY, argument_types=(ast.DataType.FUNCTION, ast.DataType.ARRAY)),
'parse_datetime': ast.DataType.FUNCTION('parse_datetime', return_type=ast.DataType.DATETIME, argument_types=(ast.DataType.STRING,)),
'parse_float': ast.DataType.FUNCTION('parse_float', return_type=ast.DataType.FLOAT, argument_types=(ast.DataType.STRING,)),
'parse_timedelta': ast.DataType.FUNCTION('parse_timedelta', return_type=ast.DataType.TIMEDELTA, argument_types=(ast.DataType.STRING,)),
'random': ast.DataType.FUNCTION('random', return_type=ast.DataType.FLOAT, argument_types=(ast.DataType.FLOAT,), minimum_arguments=0),
'split': ast.DataType.FUNCTION(
'split',
return_type=ast.DataType.ARRAY(ast.DataType.STRING),
argument_types=(ast.DataType.STRING, ast.DataType.STRING, ast.DataType.FLOAT),
minimum_arguments=1
)
}
default_value_types.update(kwargs.pop('value_types', {}))
return cls(default_values, value_types=default_value_types, **kwargs) | /rule-engine-4.1.0.tar.gz/rule-engine-4.1.0/lib/rule_engine/builtins.py | 0.636127 | 0.285908 | builtins.py | pypi |
import ast as pyast
import collections
import threading
import types as pytypes
from . import ast
from . import errors
from ._utils import timedelta_regex
import ply.lex as lex
import ply.yacc as yacc
# re-export the standard library's literal_eval; the stdlib ast module is imported
# as pyast because the package's own ast module shadows the name in this file
literal_eval = pyast.literal_eval
class _DeferredAstNode(object):
	"""
	A placeholder describing an :py:class:`~rule_engine.ast.ASTNodeBase` subclass whose
	construction is postponed until the entire parse tree has been assembled.
	"""
	__slots__ = ('cls', 'args', 'kwargs', 'method')
	def __init__(self, cls, *, args, kwargs=None, method='build'):
		"""
		:param cls: The AST node class to construct later (must subclass ASTNodeBase).
		:param tuple args: Positional arguments for the deferred constructor.
		:param dict kwargs: Optional keyword arguments for the deferred constructor.
		:param str method: The name of the classmethod used to construct the node.
		"""
		if not issubclass(cls, ast.ASTNodeBase):
			raise TypeError('cls is not a subclass of AstNodeBase')
		self.cls = cls
		self.args = args
		self.kwargs = kwargs or {}
		self.method = method
	def build(self):
		"""Construct the real AST node from this deferred definition."""
		# look the constructor up by name at build time so subclasses may override it
		constructor = getattr(self.cls, self.method)
		return constructor(*self.args, **self.kwargs)
class ParserBase(object):
	"""
	A base class for parser objects to inherit from. This does not provide any
	grammar related definitions.
	"""
	precedence = ()
	"""The precedence for operators."""
	tokens = ()
	reserved_words = {}
	"""
	A mapping of literal words which are reserved to their corresponding grammar
	names.
	"""
	__mutex = threading.Lock()
	def __init__(self, debug=False):
		"""
		:param bool debug: Whether or not to enable debugging features when
			using the ply API.
		"""
		self.debug = debug
		self.context = None
		# build the lexer and parser from the t_* / p_* definitions provided
		# by the subclass (ply discovers them through the module argument)
		self._lexer = lex.lex(module=self, debug=self.debug)
		self._parser = yacc.yacc(module=self, debug=self.debug, write_tables=self.debug)
	def parse(self, text, context, **kwargs):
		"""
		Parse the specified text in an abstract syntax tree of nodes that can later be evaluated. This is done in two
		phases. First, the syntax is parsed and a tree of deferred / uninitialized AST nodes are constructed. Next each
		node is built recursively using it's respective :py:meth:`rule_engine.ast.ASTNodeBase.build`.
		:param str text: The grammar text to parse into an AST.
		:param context: A context for specifying parsing and evaluation options.
		:type context: :py:class:`~rule_engine.engine.Context`
		:return: The parsed AST statement.
		:rtype: :py:class:`~rule_engine.ast.Statement`
		"""
		kwargs['lexer'] = kwargs.pop('lexer', self._lexer)
		with self.__mutex:
			self.context = context
			try:
				# phase 1: parse the string into a tree of deferred nodes
				result = self._parser.parse(text, **kwargs)
			finally:
				# always reset the context, even when parsing raises (e.g. a
				# RuleSyntaxError), so a stale context can not leak into a
				# subsequent call on this shared parser instance
				self.context = None
		# phase 2: initialize each AST node recursively, providing them with an opportunity to define assignments
		return result.build()
class Parser(ParserBase):
"""
The parser class for the rule grammar. This class contains many ply specific
members to define the various components of the grammar allowing it to be
parsed and reduced into an abstract syntax tree (AST). Once the AST has been
constructed it can then be evaluated multiple times. To make the evaluation
more efficient, nodes within the AST that are able to be reduced are while
the parsing is taking place. This reduction phase involves evaluation,
causing :py:exc:`~rule_engine.errors.EvaluationError` exceptions to be
raised during parsing.
"""
op_names = {
# arithmetic operators
'+': 'ADD', '-': 'SUB',
'**': 'POW', '*': 'MUL',
'/': 'TDIV', '//': 'FDIV', '%': 'MOD',
# bitwise operators
'&': 'BWAND', '|': 'BWOR', '^': 'BWXOR',
'<<': 'BWLSH', '>>': 'BWRSH',
# comparison operators
'==': 'EQ', '=~': 'EQ_FZM', '=~~': 'EQ_FZS',
'!=': 'NE', '!~': 'NE_FZM', '!~~': 'NE_FZS',
'>': 'GT', '>=': 'GE',
'<': 'LT', '<=': 'LE',
# logical operators
'and': 'AND', 'or': 'OR', 'not': 'NOT',
'for': 'FOR', 'if': 'IF',
# other operators
'.': 'ATTR',
'&.': 'ATTR_SAFE',
'in': 'IN',
}
reserved_words = {
# booleans
'true': 'TRUE',
'false': 'FALSE',
# float constants
'inf': 'FLOAT_INF',
'nan': 'FLOAT_NAN',
# null
'null': 'NULL',
# operators
'and': 'AND',
'in': 'IN',
'or': 'OR',
'not': 'NOT',
'for': 'FOR',
'if': 'IF'
}
tokens = (
'DATETIME', 'TIMEDELTA', 'FLOAT', 'STRING', 'SYMBOL',
'LPAREN', 'RPAREN', 'QMARK', 'COLON', 'COMMA',
'LBRACKET', 'RBRACKET', 'LBRACE', 'RBRACE', 'COMMENT'
) + tuple(set(list(reserved_words.values()) + list(op_names.values())))
t_ignore = ' \t'
# Tokens
t_BWAND = r'\&'
t_BWOR = r'\|'
t_BWXOR = r'\^'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_EQ = r'=='
t_NE = r'!='
t_QMARK = r'\?'
t_COLON = r'\:'
t_ADD = r'\+'
t_SUB = r'\-'
t_MOD = r'\%'
t_COMMA = r'\,'
t_LBRACKET = r'((?<=\S)&)?\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_FLOAT = r'0(b[01]+|o[0-7]+|x[0-9a-fA-F]+)|[0-9]+(\.[0-9]*)?([eE][+-]?[0-9]+)?|\.[0-9]+([eE][+-]?[0-9]+)?'
# attributes must be valid symbol names so the right side is more specific
t_ATTR = r'(?<=\S)\.(?=[a-zA-Z_][a-zA-Z0-9_]*)'
t_ATTR_SAFE = r'(?<=\S)&\.(?=[a-zA-Z_][a-zA-Z0-9_]*)'
# Tokens are listed from highest to lowest precedence, ones that appear
# later are effectively evaluated last
# see:
# * https://en.wikipedia.org/wiki/Order_of_operations#Programming_languages
# * https://docs.python.org/3/reference/expressions.html
precedence = tuple(
# reverse the order to lowest to highest for ply
reversed((
('nonassoc', 'LPAREN', 'RPAREN'),
('left', 'ATTR', 'ATTR_SAFE'),
('right', 'UMINUS'),
('left', 'POW'),
('left', 'MUL', 'TDIV', 'FDIV', 'MOD'),
('left', 'BWLSH', 'BWRSH'),
('left', 'ADD', 'SUB'),
('nonassoc', 'EQ', 'NE', 'EQ_FZM', 'EQ_FZS', 'NE_FZM', 'NE_FZS', 'GE', 'GT', 'LE', 'LT', 'IN'), # Nonassociative operators
('right', 'QMARK', 'COLON'),
('left', 'BWAND'),
('left', 'BWXOR'),
('left', 'BWOR'),
('right', 'NOT'),
('left', 'AND'),
('left', 'OR'),
)
))
@classmethod
def get_token_regex(cls, token_name):
"""
Return the regex that is used by the specified token.
:param str token_name: The token for which to return the regex.
:rtype: str
"""
obj = getattr(cls, 't_' + token_name, None)
if isinstance(obj, str):
return obj
elif isinstance(obj, pytypes.FunctionType):
return obj.__doc__
raise ValueError('unknown token: ' + token_name)
	def t_POW(self, t):
		r'\*\*?'
		# ** and * share a prefix; reclassify a single * as multiplication
		# (the docstring above is the token regex consumed by ply -- do not edit)
		if t.value == '*':
			t.type = 'MUL'
		return t
	def t_FDIV(self, t):
		r'\/\/?'
		# // and / share a prefix; reclassify a single / as true division
		if t.value == '/':
			t.type = 'TDIV'
		return t
	def t_LT(self, t):
		r'<([=<])?'
		# disambiguate <, <= and << which all start with the same character
		t.type = {'<': 'LT', '<=': 'LE', '<<': 'BWLSH'}[t.value]
		return t
	def t_GT(self, t):
		r'>([=>])?'
		# disambiguate >, >= and >> which all start with the same character
		t.type = {'>': 'GT', '>=': 'GE', '>>': 'BWRSH'}[t.value]
		return t
	def t_EQ_FZS(self, t):
		r'=~~?'
		# =~ (fuzzy match) and =~~ (fuzzy search) share a prefix; reclassify
		if t.value == '=~':
			t.type = 'EQ_FZM'
		return t
	def t_NE_FZS(self, t):
		r'!~~?'
		# !~ (fuzzy match) and !~~ (fuzzy search) share a prefix; reclassify
		if t.value == '!~':
			t.type = 'NE_FZM'
		return t
	def t_DATETIME(self, t):
		r'd(?P<quote>["\'])([^\\\n]|(\\.))*?(?P=quote)'
		# strip only the leading d marker; the surrounding quotes are kept
		t.value = t.value[1:]
		return t
	def t_TIMEDELTA(self, t):
		# strip the leading t marker and both quotes, e.g. t'1h' -> 1h
		t.value = t.value[2:-1]
		return t
	# the token regex (which ply reads from __doc__) is assigned dynamically so
	# it can embed the shared timedelta_regex pattern
	t_TIMEDELTA.__doc__ = r't(?P<quote>["\'])' + timedelta_regex + r'(?P=quote)'
	def t_STRING(self, t):
		r's?(?P<quote>["\'])([^\\\n]|(\\.))*?(?P=quote)'
		# drop the optional explicit string prefix (s"...") when present
		if t.value[0] == 's':
			t.value = t.value[1:]
		return t
	def t_SYMBOL(self, t):
		r'\$?[a-zA-Z_][a-zA-Z0-9_]*'
		# reject keywords reserved for future grammar versions, then reclassify
		# any reserved word (true, null, and, ...) to its dedicated token type
		if t.value in ('elif', 'else', 'while'):
			raise errors.RuleSyntaxError("syntax error (the {} keyword is reserved for future use)".format(t.value))
		t.type = self.reserved_words.get(t.value, 'SYMBOL')
		return t
	def t_COMMENT(self, t):
		r'\#.*$'
		# comments run to end of line and are kept as tokens (see p_statement_expr)
		return t
	def t_newline(self, t):
		r'\n+'
		# track line numbers so syntax errors can report an accurate position
		t.lexer.lineno += t.value.count("\n")
	def t_error(self, t):
		# called by ply when no token rule matches the remaining input
		raise errors.RuleSyntaxError("syntax error (illegal character {0!r})".format(t.value[0]), t)
# Parsing Rules
	def p_error(self, token):
		# token is None when the error occurs at the end of the input (EOF)
		raise errors.RuleSyntaxError('syntax error', token)
	def p_statement_expr(self, p):
		"""
		statement : expression
		          | expression COMMENT
		"""
		# the docstring above is the ply grammar production -- do not edit it
		kwargs = {}
		if len(p) == 3:
			# a trailing comment is present; strip the leading # and whitespace
			kwargs['comment'] = ast.Comment(p[2][1:].strip())
		p[0] = _DeferredAstNode(ast.Statement, args=(self.context, p[1]), kwargs=kwargs)
	def p_expression_getattr(self, p):
		"""
		object : object ATTR SYMBOL
		       | object ATTR_SAFE SYMBOL
		"""
		# the safe attribute access variant (&.) is flagged via the 'safe' kwarg
		op_name = self.op_names.get(p[2])
		p[0] = _DeferredAstNode(ast.GetAttributeExpression, args=(self.context, p[1], p[3]), kwargs={'safe': op_name == 'ATTR_SAFE'})
	def p_expression_object(self, p):
		"""
		expression : object
		"""
		# an object is also a valid expression; pass the node through unchanged
		p[0] = p[1]
	def p_expression_ternary(self, p):
		"""
		expression : expression QMARK expression COLON expression
		"""
		# condition ? case_true : case_false
		condition, _, case_true, _, case_false = p[1:6]
		p[0] = _DeferredAstNode(ast.TernaryExpression, args=(self.context, condition, case_true, case_false))
	def p_expression_arithmetic(self, p):
		"""
		expression : expression MOD expression
		           | expression MUL expression
		           | expression FDIV expression
		           | expression TDIV expression
		           | expression POW expression
		"""
		# defer construction of an ArithmeticExpression for %, *, //, / and **
		left, op, right = p[1:4]
		op_name = self.op_names[op]
		p[0] = _DeferredAstNode(ast.ArithmeticExpression, args=(self.context, op_name, left, right))
	def p_expression_add(self, p):
		"""
		expression : expression ADD expression
		"""
		# addition has its own node type (distinct from the other arithmetic ops)
		left, op, right = p[1:4]
		op_name = self.op_names[op]
		p[0] = _DeferredAstNode(ast.AddExpression, args=(self.context, op_name, left, right))
	def p_expression_sub(self, p):
		"""
		expression : expression SUB expression
		"""
		# subtraction has its own node type (distinct from the other arithmetic ops)
		left, op, right = p[1:4]
		op_name = self.op_names[op]
		p[0] = _DeferredAstNode(ast.SubtractExpression, args=(self.context, op_name, left, right))
	def p_expression_bitwise(self, p):
		"""
		expression : expression BWAND expression
		           | expression BWOR expression
		           | expression BWXOR expression
		"""
		# defer construction of a BitwiseExpression for &, | and ^
		left, op, right = p[1:4]
		op_name = self.op_names[op]
		p[0] = _DeferredAstNode(ast.BitwiseExpression, args=(self.context, op_name, left, right))
	def p_expression_bitwise_shift(self, p):
		"""
		expression : expression BWLSH expression
		           | expression BWRSH expression
		"""
		# defer construction of a BitwiseShiftExpression for << and >>
		left, op, right = p[1:4]
		op_name = self.op_names[op]
		p[0] = _DeferredAstNode(ast.BitwiseShiftExpression, args=(self.context, op_name, left, right))
	def p_expression_contains(self, p):
		"""
		expression : expression IN expression
		           | expression NOT IN expression
		"""
		# "x in y" reduces to a ContainsExpression; "x not in y" additionally
		# wraps that node in a NOT UnaryExpression
		if len(p) == 4:
			member, _, container = p[1:4]
			p[0] = _DeferredAstNode(ast.ContainsExpression, args=(self.context, container, member))
		else:
			member, _, _, container = p[1:5]
			p[0] = _DeferredAstNode(ast.ContainsExpression, args=(self.context, container, member))
			p[0] = _DeferredAstNode(ast.UnaryExpression, args=(self.context, 'NOT', p[0]))
def p_expression_comparison(self, p):
"""
expression : expression EQ expression
| expression NE expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = _DeferredAstNode(ast.ComparisonExpression, args=(self.context, op_name, left, right))
def p_expression_arithmetic_comparison(self, p):
"""
expression : expression GT expression
| expression GE expression
| expression LT expression
| expression LE expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = _DeferredAstNode(ast.ArithmeticComparisonExpression, args=(self.context, op_name, left, right))
def p_expression_fuzzy_comparison(self, p):
"""
expression : expression EQ_FZM expression
| expression EQ_FZS expression
| expression NE_FZM expression
| expression NE_FZS expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = _DeferredAstNode(ast.FuzzyComparisonExpression, args=(self.context, op_name, left, right))
def p_expression_logic(self, p):
"""
expression : expression AND expression
| expression OR expression
"""
left, op, right = p[1:4]
op_name = self.op_names[op]
p[0] = _DeferredAstNode(ast.LogicExpression, args=(self.context, op_name, left, right))
	def p_expression_group(self, p):
		'object : LPAREN expression RPAREN'
		# A parenthesized expression reduces to the inner expression unchanged.
		p[0] = p[2]
	def p_expression_negate(self, p):
		'expression : NOT expression'
		# Logical negation wraps the operand in a unary NOT node.
		p[0] = _DeferredAstNode(ast.UnaryExpression, args=(self.context, 'NOT', p[2]))
def p_expression_symbol(self, p):
'object : SYMBOL'
name = p[1]
scope = None
if name[0] == '$':
scope = 'built-in'
name = name[1:]
p[0] = _DeferredAstNode(ast.SymbolExpression, args=(self.context, name), kwargs={'scope': scope})
	def p_expression_uminus(self, p):
		'expression : SUB expression %prec UMINUS'
		# %prec UMINUS gives unary minus higher precedence than binary SUB.
		# The dict lookup maps the '-' token to the UMINUS operator name.
		names = {'-': 'UMINUS'}
		p[0] = _DeferredAstNode(ast.UnaryExpression, args=(self.context, names[p[1]], p[2]))
# Literal expressions
	def p_expression_boolean(self, p):
		"""
		expression : TRUE
				| FALSE
		"""
		# Token text is compared against the literal 'true' to get a Python bool.
		p[0] = _DeferredAstNode(ast.BooleanExpression, args=(self.context, p[1] == 'true'))
	def p_expression_datetime(self, p):
		'object : DATETIME'
		# literal_eval unquotes the token; DatetimeExpression.from_string parses the value.
		p[0] = _DeferredAstNode(ast.DatetimeExpression, args=(self.context, literal_eval(p[1])), method='from_string')
	def p_expression_timedelta(self, p):
		'object : TIMEDELTA'
		# The raw token is handed to TimedeltaExpression.from_string for parsing.
		p[0] = _DeferredAstNode(ast.TimedeltaExpression, args=(self.context, p[1]), method='from_string')
def p_expression_float(self, p):
"""
expression : FLOAT
| FLOAT_NAN
| FLOAT_INF
"""
str_val = p[1]
p[0] = _DeferredAstNode(ast.FloatExpression, args=(self.context, str_val), method='from_string')
	def p_expression_null(self, p):
		'object : NULL'
		# null is an object because of the safe operator
		p[0] = _DeferredAstNode(ast.NullExpression, args=(self.context,))
	def p_expression_set(self, p):
		"""
		object : LBRACE ary_members RBRACE
			| LBRACE ary_members COMMA RBRACE
		"""
		# ary_members is a deque built by p_expression_array_members; a trailing comma is tolerated.
		p[0] = _DeferredAstNode(ast.SetExpression, args=(self.context, tuple(p[2])))
	def p_expression_string(self, p):
		'object : STRING'
		# literal_eval unquotes the token and processes escape sequences.
		p[0] = _DeferredAstNode(ast.StringExpression, args=(self.context, literal_eval(p[1])))
def p_expression_array(self, p):
"""
object : LBRACKET RBRACKET
| LBRACKET ary_members RBRACKET
| LBRACKET ary_members COMMA RBRACKET
"""
if len(p) < 4:
p[0] = _DeferredAstNode(ast.ArrayExpression, args=(self.context, tuple()))
else:
p[0] = _DeferredAstNode(ast.ArrayExpression, args=(self.context, tuple(p[2])))
def p_expression_array_comprehension(self, p):
"""
object : LBRACKET expression FOR SYMBOL IN expression RBRACKET
| LBRACKET expression FOR SYMBOL IN expression IF expression RBRACKET
"""
condition = None
if len(p) == 10:
condition = p[8]
p[0] = _DeferredAstNode(ast.ComprehensionExpression, args=(self.context, p[2], p[4], p[6]), kwargs={'condition': condition})
def p_expression_array_members(self, p):
"""
ary_members : expression
| ary_members COMMA expression
"""
if len(p) == 2:
deque = collections.deque()
deque.append(p[1])
else:
deque = p[1]
deque.append(p[3])
p[0] = deque
def p_expression_mapping(self, p):
"""
object : LBRACE RBRACE
| LBRACE map_members RBRACE
| LBRACE map_members COMMA RBRACE
"""
if len(p) < 4:
p[0] = _DeferredAstNode(ast.MappingExpression, args=(self.context, tuple()))
else:
p[0] = _DeferredAstNode(ast.MappingExpression, args=(self.context, tuple(p[2])))
	def p_expression_mapping_member(self, p):
		"""
		map_member : expression COLON expression
		"""
		# A single key: value pair, kept as a plain (key, value) tuple.
		p[0] = (p[1], p[3])
	def p_expression_mapping_members(self, p):
		"""
		map_members : map_member
			| map_members COMMA map_member
		"""
		# The accumulation logic is identical to array members, so delegate to it.
		return self.p_expression_array_members(p)
def p_expression_getitem(self, p):
"""
object : object LBRACKET expression RBRACKET
"""
container, lbracket, item = p[1:4]
p[0] = _DeferredAstNode(ast.GetItemExpression, args=(self.context, container, item), kwargs={'safe': lbracket == '&['})
	def p_expression_getslice(self, p):
		"""
		object : object LBRACKET COLON RBRACKET
			| object LBRACKET COLON expression RBRACKET
			| object LBRACKET expression COLON RBRACKET
			| object LBRACKET expression COLON expression RBRACKET
		"""
		container = p[1]
		# '&[' is the safe (null-tolerant) variant of the slice operator.
		safe = p[2] == '&['
		# Locate the COLON within the production values; its position plus the
		# production length disambiguates which of start / stop were supplied.
		colon_index = p[1:].index(':')
		if colon_index == 2 and len(p) == 5:
			# obj[:]  -- neither bound given
			start, stop = None, None
		elif colon_index == 2 and len(p) == 6:
			# obj[:stop]
			start, stop = None, p[4]
		elif colon_index == 3 and len(p) == 6:
			# obj[start:]
			start, stop = p[3], None
		elif colon_index == 3 and len(p) == 7:
			# obj[start:stop]
			start, _, stop = p[3:6]
		else:
			raise errors.RuleSyntaxError('invalid get slice expression')
		p[0] = _DeferredAstNode(ast.GetSliceExpression, args=(self.context, container, start, stop), kwargs={'safe': safe})
def p_expression_function_call(self, p):
"""
object : expression LPAREN RPAREN
| expression LPAREN ary_members RPAREN
"""
function = p[1]
if len(p) == 4:
arguments = collections.deque()
elif len(p) == 5:
arguments = p[3]
else:
raise errors.RuleSyntaxError('invalid function call expression')
p[0] = _DeferredAstNode(ast.FunctionCallExpression, args=(self.context, function, arguments)) | /rule-engine-4.1.0.tar.gz/rule-engine-4.1.0/lib/rule_engine/parser.py | 0.632049 | 0.260866 | parser.py | pypi |
# Public API of the splits module: the generic BinarySplit base plus its
# concrete single-condition and multi-range subclasses.
__all__ = [
    'BinarySplit',
    'IsInSplit',
    'GreaterThanSplit',
    'GreaterEqualThanSplit',
    'LesserThanSplit',
    'LesserEqualThanSplit',
    'RangeSplit',
    'MultiRangeSplit',
    'MultiRangeAnySplit'
]
from typing import Union, List, Dict, Tuple
import numpy as np
import pandas as pd
from igraph import Graph
from .businessrule import BusinessRule, generate_range_mask
from .rules import EmptyRule
class BinarySplit(BusinessRule):
    """Base class for rules that split incoming data into two branches.

    Subclasses implement ``__rule__(X)`` returning a boolean mask: rows where
    the mask holds are handled by ``self.if_true``, the rest by
    ``self.if_false``. Most methods below recurse into both branches to find,
    score, replace or remove rules by their ``_rule_id``.
    """
    def __init__(self):
        # NOTE(review): _store_child_params(level=2) presumably captures the
        # subclass __init__ arguments (col, cutoff, if_true, ...) from the
        # caller's frame and assigns them as attributes -- confirm in businessrule.
        self._store_child_params(level=2)
        if not hasattr(self, "default"):
            self.default = None
        if self.default is None:
            # np.nan marks "no prediction" throughout this module
            self.default = np.nan
        assert hasattr(self, "if_true")
        if self.if_true is None:
            # missing branches are filled with EmptyRule so recursion never hits None
            self.if_true = EmptyRule()
            self._stored_params['if_true'] = self.if_true
        assert isinstance(self.if_true, BusinessRule)
        assert hasattr(self, "if_false")
        if self.if_false is None:
            self.if_false = EmptyRule()
            self._stored_params['if_false'] = self.if_false
        assert isinstance(self.if_false, BusinessRule)
    def predict(self, X:pd.DataFrame)->np.ndarray:
        """Predict: rows matching __rule__ go to if_true, the rest to if_false;
        any remaining nan predictions are filled with self.default."""
        y = np.full(len(X), np.nan)
        mask = self.__rule__(X)
        y[mask] = self.if_true.predict(X[mask])
        y[~mask] = self.if_false.predict(X[~mask])
        if not np.isnan(self.default):
            y = np.where(np.isnan(y), self.default, y)
        return y
    def score_rule(self, X:pd.DataFrame, y:Union[pd.Series, np.ndarray],
                    scores_df:pd.DataFrame=None, is_classifier:bool=False)->pd.DataFrame:
        """Score this split and, recursively, both branches; returns scores_df
        with one row appended per rule."""
        # first predict without filling in the default
        if not np.isnan(self.default):
            old_default = self.default
            self.default = np.nan
            y_preds = self.predict(X)
            self.default = old_default
        else:
            y_preds = self.predict(X)
        mask = np.invert(np.isnan(y_preds))
        scores_df = self._score_rule(y, y_preds, mask,
                        prediction=np.nan, default=self.default,
                        scores_df=scores_df, is_classifier=is_classifier)
        # branches are scored only on the rows they actually receive
        rule_mask = pd.Series(self.__rule__(X)).values
        scores_df = self.if_true.score_rule(X[rule_mask], y[rule_mask], scores_df, is_classifier)
        scores_df = self.if_false.score_rule(X[~rule_mask], y[~rule_mask], scores_df, is_classifier)
        return scores_df
    def set_rule_id(self, rule_id:int=0)->int:
        """Assign sequential ids depth-first: self, then if_true, then if_false."""
        rule_id = super().set_rule_id(rule_id)
        rule_id = self.if_true.set_rule_id(rule_id)
        rule_id = self.if_false.set_rule_id(rule_id)
        return rule_id
    def get_max_rule_id(self, max_rule_id:int=0)->int:
        """Return the highest rule id in this subtree."""
        max_rule_id = super().get_max_rule_id(max_rule_id)
        max_rule_id = self.if_true.get_max_rule_id(max_rule_id)
        max_rule_id = self.if_false.get_max_rule_id(max_rule_id)
        return max_rule_id
    def get_rule(self, rule_id:int)->BusinessRule:
        """Return the rule with rule_id from this subtree, or None if absent."""
        if self._rule_id is not None and self._rule_id == rule_id:
            return self
        if_true_rule = self.if_true.get_rule(rule_id)
        if if_true_rule is not None:
            return if_true_rule
        if_false_rule = self.if_false.get_rule(rule_id)
        if if_false_rule is not None:
            return if_false_rule
    def get_rule_input(self, rule_id:int, X:pd.DataFrame, y:Union[pd.Series, np.ndarray]=None
                        )->Union[pd.DataFrame, Tuple[pd.DataFrame, Union[pd.Series, np.ndarray]]]:
        """Return the subset of (X, y) that flows into rule rule_id, recursing
        through both branches; returns None (or (None, None)) when not found."""
        if y is not None:
            input_X, input_y = super().get_rule_input(rule_id, X, y)
            if input_X is not None:
                return input_X, input_y
        else:
            input_X = super().get_rule_input(rule_id, X)
            if input_X is not None:
                return input_X
        # not this rule: route the data into the two branches and keep searching
        rule_mask = pd.Series(self.__rule__(X)).values
        if y is not None:
            input_X, input_y = self.if_true.get_rule_input(rule_id, X[rule_mask], y[rule_mask])
            if input_X is not None:
                return input_X, input_y
        else:
            input_X = self.if_true.get_rule_input(rule_id, X[rule_mask])
            if input_X is not None:
                return input_X
        if y is not None:
            input_X, input_y = self.if_false.get_rule_input(rule_id, X[~rule_mask], y[~rule_mask])
            if input_X is not None:
                return input_X, input_y
        else:
            input_X = self.if_false.get_rule_input(rule_id, X[~rule_mask])
            if input_X is not None:
                return input_X
        if y is not None:
            return None, None
        else:
            return None
    def get_rule_leftover(self, rule_id:int, X:pd.DataFrame, y:Union[pd.Series, np.ndarray]=None
                        )->Union[pd.DataFrame, Tuple[pd.DataFrame, Union[pd.Series, np.ndarray]]]:
        """Return the subset of (X, y) that rule rule_id leaves unlabeled
        (prediction still nan), recursing through both branches."""
        if self._rule_id is not None and self._rule_id == rule_id:
            y_preds = self.predict(X)
            mask = np.isnan(y_preds)
            if y is not None:
                return X[mask], y[mask]
            else:
                return X[mask]
        rule_mask = pd.Series(self.__rule__(X)).values
        if y is not None:
            leftover_X, leftover_y = self.if_true.get_rule_leftover(rule_id, X[rule_mask], y[rule_mask])
            if leftover_X is not None:
                return leftover_X, leftover_y
        else:
            leftover_X = self.if_true.get_rule_leftover(rule_id, X[rule_mask])
            if leftover_X is not None:
                return leftover_X
        if y is not None:
            leftover_X, leftover_y = self.if_false.get_rule_leftover(rule_id, X[~rule_mask], y[~rule_mask])
            if leftover_X is not None:
                return leftover_X, leftover_y
        else:
            leftover_X = self.if_false.get_rule_leftover(rule_id, X[~rule_mask])
            if leftover_X is not None:
                return leftover_X
        if y is not None:
            return None, None
        else:
            return None
    def replace_rule(self, rule_id:int, new_rule:BusinessRule)->None:
        """Replace the rule with rule_id by new_rule anywhere in this subtree."""
        replace_rule = super().replace_rule(rule_id, new_rule)
        if replace_rule is None and hasattr(self, "if_true"):
            replace_rule = self.if_true.replace_rule(rule_id, new_rule)
        if replace_rule is None and hasattr(self, "if_false"):
            replace_rule = self.if_false.replace_rule(rule_id, new_rule)
        return replace_rule
    def remove_rule(self, rule_id:int):
        """Remove the rule with rule_id by swapping in an EmptyRule; returns the
        removed (replaced) rule or a falsy value when not found."""
        if self.if_true._rule_id is not None and self.if_true._rule_id == rule_id:
            self.replace_rule(self.if_true._rule_id, EmptyRule())
            self._stored_params['if_true'] = self.if_true
            return self.if_true
        elif self.if_false._rule_id is not None and self.if_false._rule_id == rule_id:
            self.replace_rule(self.if_false._rule_id, EmptyRule())
            self._stored_params['if_false'] = self.if_false
            return self.if_false
        else:
            removed = self.if_true.remove_rule(rule_id)
            if not removed:
                removed = self.if_false.remove_rule(rule_id)
            return removed
    def get_rule_params(self, rule_id:int)->dict:
        """Return the stored params of the rule with rule_id, or None."""
        params = super().get_rule_params(rule_id)
        if params is not None:
            return params
        params = self.if_true.get_rule_params(rule_id)
        if params is not None:
            return params
        params = self.if_false.get_rule_params(rule_id)
        if params is not None:
            return params
    def set_rule_params(self, rule_id:int, **params)->None:
        """Set params on the rule with rule_id (no-op in subtrees without it)."""
        super().set_rule_params(rule_id, **params)
        self.if_true.set_rule_params(rule_id, **params)
        self.if_false.set_rule_params(rule_id, **params)
    def _get_casewhens(self, casewhens:dict=None):
        # collect CaseWhen membership info from the whole subtree
        casewhens = super()._get_casewhens(casewhens)
        casewhens = self.if_true._get_casewhens(casewhens)
        casewhens = self.if_false._get_casewhens(casewhens)
        return casewhens
    def _get_binarynodes(self, binarynodes:dict=None):
        # map this split's rule id to the ids of its two branch roots
        binarynodes = super()._get_binarynodes(binarynodes)
        if self._rule_id is not None:
            binarynodes[self._rule_id] = dict(if_true=self.if_true._rule_id, if_false=self.if_false._rule_id)
        binarynodes = self.if_true._get_binarynodes(binarynodes)
        binarynodes = self.if_false._get_binarynodes(binarynodes)
        return binarynodes
    def add_to_igraph(self, graph:Graph=None)->Graph:
        """Add this split and both branches to the igraph Graph, with edges
        labeled by which branch they lead to."""
        graph = super().add_to_igraph(graph)
        self.if_true.add_to_igraph(graph)
        self.if_false.add_to_igraph(graph)
        if self._rule_id is not None and self.if_true._rule_id is not None:
            graph.add_edge(self._rule_id, self.if_true._rule_id, binary_node="if_true")
        if self._rule_id is not None and self.if_false._rule_id is not None:
            graph.add_edge(self._rule_id, self.if_false._rule_id, binary_node="if_false")
        return graph
    def __rulerepr__(self):
        return "BinarySplit"
class IsInSplit(BinarySplit):
    """Split on a categorical column: rows whose value is in `cats` go if_true."""
    def __init__(self, col:str, cats:List[str],
                 if_true:BusinessRule=None, if_false:BusinessRule=None, default=None):
        # NOTE(review): the parameters look unused, but BinarySplit.__init__
        # presumably captures them from this frame via _store_child_params -- do
        # not remove them.
        super().__init__()
        if not isinstance(self.cats, list):
            # allow passing a single category instead of a list
            self.cats = [self.cats]
    def __rule__(self, X:pd.DataFrame)->pd.Series:
        return X[self.col].isin(self.cats)
    def __rulerepr__(self)->str:
        return f"Split if {self.col} in {self.cats}"
class GreaterThanSplit(BinarySplit):
    """Split on a numerical column: rows with col > cutoff go if_true."""
    def __init__(self, col:str, cutoff:float,
                 if_true:BusinessRule=None, if_false:BusinessRule=None, default=None):
        # parameters are captured by BinarySplit via frame inspection -- keep them
        super().__init__()
    def __rule__(self, X:pd.DataFrame)->pd.Series:
        return X[self.col] > self.cutoff
    def __rulerepr__(self)->str:
        return f"Split if {self.col} > {self.cutoff}"
class GreaterEqualThanSplit(BinarySplit):
    """Split on a numerical column: rows with col >= cutoff go if_true."""
    def __init__(self, col:str, cutoff:float,
                 if_true:BusinessRule=None, if_false:BusinessRule=None, default=None):
        # parameters are captured by BinarySplit via frame inspection -- keep them
        super().__init__()
    def __rule__(self, X:pd.DataFrame)->pd.Series:
        return X[self.col] >= self.cutoff
    def __rulerepr__(self)->str:
        return f"Split if {self.col} >= {self.cutoff}"
class LesserThanSplit(BinarySplit):
    """Split on a numerical column: rows with col < cutoff go if_true."""
    def __init__(self, col:str, cutoff:float,
                 if_true:BusinessRule=None, if_false:BusinessRule=None, default=None):
        # parameters are captured by BinarySplit via frame inspection -- keep them
        super().__init__()
    def __rule__(self, X:pd.DataFrame)->pd.Series:
        return X[self.col] < self.cutoff
    def __rulerepr__(self)->str:
        return f"Split if {self.col} < {self.cutoff}"
class LesserEqualThanSplit(BinarySplit):
    """Split on a numerical column: rows with col <= cutoff go if_true."""
    def __init__(self, col:str, cutoff:float,
                 if_true:BusinessRule=None, if_false:BusinessRule=None, default=None):
        # parameters are captured by BinarySplit via frame inspection -- keep them
        super().__init__()
    def __rule__(self, X:pd.DataFrame)->pd.Series:
        return X[self.col] <= self.cutoff
    def __rulerepr__(self)->str:
        return f"Split if {self.col} <= {self.cutoff}"
class RangeSplit(BinarySplit):
    """Split on a numerical column: rows with min <= col <= max go if_true."""
    def __init__(self, col:str, min:float, max:float,
                 if_true:BusinessRule=None, if_false:BusinessRule=None,
                 default=None):
        # parameters are captured by BinarySplit via frame inspection -- keep them
        super().__init__()
    def __rule__(self, X:pd.DataFrame):
        return (X[self.col] >= self.min) & (X[self.col] <= self.max)
    def __rulerepr__(self):
        # FIX: removed stray trailing space so the repr matches sibling classes
        return f"Split if {self.min} <= {self.col} <= {self.max}"
class MultiRangeSplit(BinarySplit):
    def __init__(self, range_dict, if_true:BusinessRule=None, if_false:BusinessRule=None, default=None):
        """
        Switches to if_true rule if all range conditions hold for all cols in
        range_dict. range_dict can contain multiple ranges per col.
        range_dict should be of the format
        ```
        range_dict = {
            'petal length (cm)': [[4.1, 4.7], [5.2, 7.5]],
            'petal width (cm)': [1.6, 2.6]
        }
        ```
        """
        # parameters are captured by BinarySplit via frame inspection -- keep them
        super().__init__()
    def __rule__(self, X):
        # kind='all': every column condition must hold for a row to go if_true
        return generate_range_mask(self.range_dict, X, kind='all')
    def __rulerepr__(self):
        return ("Split if " + " AND ".join([f"{k} in {v}" for k, v in self.range_dict.items()]))
class MultiRangeAnySplit(BinarySplit):
    def __init__(self, range_dict, if_true:BusinessRule=None, if_false:BusinessRule=None, default=None):
        """
        Switches to if_true rule if any range conditions hold for any cols in
        range_dict. range_dict can contain multiple ranges per col.
        range_dict should be of the format
        ```
        range_dict = {
            'petal length (cm)': [[4.1, 4.7], [5.2, 7.5]],
            'petal width (cm)': [1.6, 2.6]
        }
        ```
        """
        # parameters are captured by BinarySplit via frame inspection -- keep them
        super().__init__()
    def __rule__(self, X):
        # kind='any': a single holding column condition sends a row if_true
        return generate_range_mask(self.range_dict, X, kind='any')
    def __rulerepr__(self):
        return ("Split if " + " OR ".join([f"{k} in {v}" for k, v in self.range_dict.items()]))
__all__ = ['RuleClassifierDashboard']
from math import log10, floor
from typing import List, Tuple, Dict, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from sklearn.model_selection import train_test_split
from .businessrule import *
from .splits import *
from .rules import *
from .estimators import RuleClassifier
from .plotting import *
instructions_markdown = """
This dashboard allows you to create a set of business rules that act as a classifier:
predicting an outcome label based on input features. This classifier can then be used
just like any other machine learning classifier from the python-based scikit-learn machine
learning library.
### Adding new rules
New rules are either based on a **single feature** or **multiple features**. Single feature
rules are either based on a *cutoff* (for numerical features), or a *list of categories*
(for categorical features). Multi feature rules can be a combination of numerical
feature ranges and categorical feature lists.
You can add three kinds of rules: "Split", "Predict Selected" and "Predict All".
A **Split** divides the data points in two branches: all input data for which the conditions holds
go left, and all other input data goes right. You can then add additional rules
both to the left branch and to the right branch. Once you have finished with one
branch you can select the other branch by clicking in the model graph.
**Predict Selected** applies a prediction label to all incoming data for which the
condition holds, i.e. the data that is selected by this rule. Data for which the condition
does not hold (i.e. unselected data) goes unlabeled (or rather: predicted nan).
Unlabeled data will be passed on the next rule.
**Predict All** unconditionally applies a prediction label to all incoming data.
After this rule there will be no remaining unlabeled data in this branch. You can
add a *Predict All* rule at the end of every branch to make sure that every input
receives a label.
The dashboard offers "suggest" buttons that helps you with selecting appropriate rules.
The feature suggest button will select a good feature for a new rule (based on maximum gini reduction,
similar to DecisionTree algorithms). The cutoff suggest button will suggest a good
cutoff (again to maximize gini reduction). Once you have added a rule, automatically
the best next feature and cutoff for the remaining data will be selected.
When you append a rule, the model automatically generates a `CaseWhen` block if needed. A CaseWhen
block evaluates a list of rules one by one, applying predictions if the rule condition holds
and otherwise passing the data to the next rule.
By default the dashboard will append a new rule to the currently selected rule (creating
a new CaseWhen block if needed). Thus when you select a rule, the plots will display only the
data points that have not been labeled after this rule. You can also select to replace the current rule. In that
case the dashboard will display all the data coming into the current rule.
### Inspecting the model
The decision rules together form a "model" that takes input data and outputs predicted
labels. You can inspect the model in the "Model" section. You can view a Graphical
representation of the model or a textual Description.
Once you are happy with your model you can export a scikit-learn compatible model as a
python pickle file from the navbar "save as..." menu. You can also export a `model.yaml` file
that you can use to instantiate the model from python with `RuleClassifier.from_yaml("model.yaml")`.
Finally you can also copy-paste the python code that will generate the model. Using the upload
button you can load `.pkl` or `.yaml` files that you have previously exported.
You can delete individual rules or reset the entire model, but these actions cannot be undone, so be careful.
### Performance
The dashboard offers a few basic metrics for measuring the performance of the model.
A confusion matrix shows the absolute and relative number of True Negatives, True Positives, False Negatives
and False Positives.
Accuracy (number of correctly predicted labels),
precision (number of positively labeled predictions that were in fact positive),
recall (number of positively labeled data points that were in fact labeled positive)
as well as the f1-score (combination of precision and recall) are calculated.
Coverage is defined as the fraction of input data that receive a label.
### Training data vs Validation data
It is good practice to split your data into a training set and a validation set.
You construct your rules using the training set, and then evaluate them using the
validation set. There is a toggle in the navbar to switch between the two data sets.
Ideally you would keep a test set apart completely which you only use to evaluate
your final model.
"""
class RuleClassifierDashboard:
def __init__(self, X, y, X_val=None, y_val=None, val_size=None, model=None, labels=None, port=8050):
self.model = model if model is not None else RuleClassifier()
if X_val is not None and y_val is not None:
self.X, self.y = X, pd.Series(y)
self.X_val, self.y_val = X_val, pd.Series(y_val)
elif val_size is not None:
self.X, self.X_val, self.y, self.y_val = train_test_split(X, y, test_size=0.2, stratify=y)
self.y, self.y_val = pd.Series(self.y), pd.Series(self.y_val)
else:
self.X, self.y = X, pd.Series(y)
self.X_val, self.y_val = None, None
if labels is None:
self.labels = [str(i) for i in range(self.y.nunique())]
else:
self.labels = labels
self.cats = [col for col in X.columns if not is_numeric_dtype(X[col])]
self.non_cats = [col for col in X.columns if is_numeric_dtype(X[col])]
self.initial_col = self.model._sort_cols_by_gini_reduction(X, y)[0]
self.port = port
self.app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
self.app.title = "RuleClassifier"
self.app.layout = self.layout()
self.register_callbacks(self.app)
@staticmethod
def _process_constraintrange(ranges:List, ticktext:List=None, sig:int=4)->List:
"""helper function to format selections in a go.Parcoords plot.
For numerical features range bounds are rounded to four significant digits.
For categorical features the ranges are converted to a list of categories.
Args:
ranges (List[List]) either a single list of two bounds or a list of lists of two bounds
ticktext (List): the ticktext property of a go.Parcoords fig lists the order
in which the categorical features are displayed in the plot. This is used
to convert ranges to a list of categories.
sig (int): number of significant digits to round to. So e.g 123553 is round to 123600
and 0.000034512553 is round to 0.00003451
"""
def round_sig(x, sig):
return round(x, sig-int(floor(log10(abs(x))))-1)
def round_range(range_list:List, sig)->List:
return [round_sig(range_list[0], sig), round_sig(range_list[1], sig)]
def range_to_cats(range_list, ticktext):
return [tickval for i, tickval in enumerate(ticktext) if i >= range_list[0] and i <= range_list[1]]
def process_range(range_list, sig, ticktext):
if ticktext is not None:
return range_to_cats(range_list, ticktext)
else:
return round_range(range_list, sig)
if len(ranges) == 2 and not isinstance(ranges[0], list):
return process_range(ranges, sig, ticktext)
else:
return [process_range(range_list, sig, ticktext) for range_list in ranges]
@staticmethod
def _get_stepsize(min, max, steps=500, sig=3):
"""returns step size range (min, max) into steps, rounding to sig significant digits"""
return round((max-min)/steps, sig-int(floor(log10(abs((max-min)/steps))))-1)
@staticmethod
def _round_cutoff(cutoff, min, max, steps=500, sig=3):
"""returns cutoff for dividing div into steps, rounding to sig significant digits"""
return round(cutoff, sig-int(floor(log10(abs((max-min)/steps))))-1)
def _get_range_dict_from_parallel_plot(self, fig):
"""
Extracts a range_dict from a go.Parcoords() figure.
a range_dict is of the format e.g.:
range_dict = {
'petal length (cm)': [[4.1, 4.7], [5.2, 7.5]],
'petal width (cm)': [1.6, 2.6],
'sex' : ['male']
}
"""
plot_data = fig['data'][0].get('dimensions', None)
range_dict = {}
for col_data in plot_data:
if col_data['label'] != 'y' and 'constraintrange' in col_data:
range_dict[col_data['label']] = self._process_constraintrange(
col_data['constraintrange'], col_data['ticktext'] if 'ticktext' in col_data else None)
return range_dict
@staticmethod
def _cats_to_range(cats:List, cats_order:List):
"""converts a list of categories of a categorical feature to a list of ranges
for a go.Parcoords plot.
Args:
cats: list of categories to encode
cats_order: list of cats in the order that they are displayed in the go.Parcoords plot
"""
return [[max(0, cats_order.index(cat)-0.25), min(len(cats_order)-1, cats_order.index(cat)+0.25)] for cat in cats]
    @staticmethod
    def _get_callback_trigger():
        """Returns the dash id of the component that triggered a callback
        Is used when there are multiple Input's and the Output depends on which
        Input triggered the callback.
        """
        # dash records the trigger as "<component-id>.<property>"; keep only the id
        return dash.callback_context.triggered[0]['prop_id'].split('.')[0]
def _get_model(self, json_model=None):
"""Returns a model instance from a json model definition.
If the json_model is still empty (=None) then returns self.model"""
if json_model:
return RuleClassifier.from_json(json_model)
else:
return RuleClassifier.from_json(self.model.to_json())
def _get_X_y(self, train_or_val='train'):
"""Returns either the full training set (X,y ) or the full validation set (X, y)"""
X, y = (self.X, self.y) if train_or_val == 'train' else (self.X_val, self.y_val)
return X, y
@staticmethod
def _infer_after(append_or_replace):
"""infer whether to use data that has not been assigned after applying a
rule with rule_id (after=True) or to use all data that reaches a certain rule_id
(after=False) by checking the append_or_replace toggle"""
return (append_or_replace == 'append')
    def _get_model_X_y(self, model=None, train_or_val='train', rule_id=0, after=False):
        """returns a (model, X, y) tuple
        Args:
            model: a RuleClassifier, a json model definition, or None (use self.model)
            train_or_val: return 'train' data or 'val' data
            rule_id: return data for rule rule_id
            after: If True return data that has not been assigned
                a prediction after rule rule_id. Can also be a string
                in {'append', 'replace}, in which case it will get inferred.
        """
        if model is None or not isinstance(model, RuleClassifier):
            # deserialize (or copy self.model) when not already a RuleClassifier
            model = self._get_model(model)
        X, y = self._get_X_y(train_or_val)
        if not isinstance(after, bool):
            if after in {'append', 'replace'}:
                after = self._infer_after(after)
            else:
                raise ValueError(f"After should either be a bool or in 'append', 'replace',"
                                f" but you passed {after}!")
        if rule_id == 0 and not after:
            # root rule with after=False sees the complete data set
            return model, X, y
        X, y = model.get_rule_input(rule_id, X, y, after)
        return model, X, y
def _change_model(self, model, rule_id, new_rule, append_or_replace='append'):
"""Update a model with a new rule.
Args:
model: model to be updated
rule_id: rule to which new rule should be appended or replaced
new_rule: instance of new rule
append_or_replace ({'append', 'replace'})
"""
if not isinstance(model, RuleClassifier):
model = self._get_model(model)
if append_or_replace=='append':
new_rule_id = model.append_rule(rule_id, new_rule)
elif append_or_replace=='replace':
new_rule = model.replace_rule(rule_id, new_rule)
new_rule_id = new_rule._rule_id
return new_rule_id, model
def layout(self):
"""Returns the dash layout of the dashboard. """
return dbc.Container([
dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink(children=[html.Div("Instructions")], id="instructions-open-button", n_clicks=0)),
dbc.NavItem(dbc.NavLink(children=[html.Div("Upload")], id="upload-button", n_clicks=0)),
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("as .yaml", id='download-yaml-button', n_clicks=None),
dcc.Download(id='download-pickle'),
dbc.DropdownMenuItem("as pickle", id='download-pickle-button', n_clicks=None),
dcc.Download(id='download-yaml'),
],
nav=True,
in_navbar=True,
label="Save model",
),
html.Div([
dbc.Select(
options=[{'label':'Training data', 'value':'train'},
{'label':'Validation data', 'value':'val'}],
value='train',
id='train-or-val'
),
dbc.Tooltip("Display values using the training data set or the "
"validation data set. Use the training set to build your rules, "
"and the validation to measure performance and find rules that "
"do not generalize well.", target='train-or-val'),
], style=dict(display="none") if self.X_val is None else dict()),
],
brand="RuleClassifierDashboard",
brand_href="https://github.com/oegedijk/rule_estimator/",
color="primary",
dark=True,
),
dbc.Modal(
[
dbc.ModalHeader("Dashboard Intructions"),
dbc.ModalBody([dcc.Markdown(instructions_markdown)]),
dbc.ModalFooter(
dbc.Button("Close", id="instructions-close-button", className="ml-auto", n_clicks=0)
),
],
id="instructions-modal",
is_open=True,
size="xl",
),
# helper storages as a workaround for dash limitation that each output can
# only be used in a single callback
dcc.Store(id='updated-rule-id'),
dcc.Store(id='parallel-updated-rule-id'),
dcc.Store(id='density-num-updated-rule-id'),
dcc.Store(id='density-cats-updated-rule-id'),
dcc.Store(id='removerule-updated-rule-id'),
dcc.Store(id='resetmodel-updated-rule-id'),
dcc.Store(id='model-store'),
dcc.Store(id='parallel-updated-model'),
dcc.Store(id='density-num-updated-model'),
dcc.Store(id='density-cats-updated-model'),
dcc.Store(id='removerule-updated-model'),
dcc.Store(id='resetmodel-updated-model'),
dcc.Store(id='uploaded-model'),
dcc.Store(id='update-model-performance'),
dcc.Store(id='update-model-graph'),
dcc.Store(id='added-num-density-rule'),
dcc.Store(id='added-cats-density-rule'),
html.Div(id='upload-div', children=[
dcc.Upload(
id='upload-model',
children=html.Div([
'Drag and drop a .yaml or .pkl model or ',
html.A('Select File')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
multiple=False
),
], style=dict(display="none")),
dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardHeader([
dbc.Row([
dbc.Col([
html.H3("Add New Rule", className='card-title'),
], md=8),
dbc.Col([
dbc.FormGroup([
dbc.Select(
options=[{'label':'Append new rule after (show data out)', 'value':'append'},
{'label':'Replace rule (show data in)', 'value':'replace'}],
value='append',
id='append-or-replace'),
dbc.Tooltip("When you select to 'append' a rule, the plots will display all the data "
"that is still unlabeled after the selected rule. When you add a rule it will appended"
" with a CaseWhen block. "
"When you select 'replace' the plots will display all the data going *into* "
"the selected rule. When you add a rule, it will replace the existing rule.",
target='append-or-replace'),
]),
], md=4),
]),
]),
dbc.CardBody([
dcc.Tabs(id='rule-tabs', value='density-tab',
children=[
dcc.Tab(label="Single Feature Rules", id='density-tab', value='density-tab', children=[
dbc.Card([
dbc.CardBody([
dbc.Row([
dbc.Col([
dbc.Label("Feature"),
dcc.Dropdown(id='density-col',
options=[{'label':col, 'value':col} for col in self.X.columns],
value=self.initial_col,
clearable=False),
], md=9),
dbc.Col([
dbc.FormGroup([
html.Div([
dbc.Button("Suggest", id='density-col-suggest-button', color="primary", size="sm",
style={'position':'absolute', 'bottom':'25px'}),
dbc.Tooltip("Select the feature with the largest gini reduction potential.",
target='density-col-suggest-button'),
], style={'display': 'flex', 'flex-direction':'column'}),
]),
], md=1, ),
dbc.Col([
dbc.FormGroup([
dbc.Label("Sort features", html_for='parallel-sort'),
dbc.Select(id='density-sort',
options=[dict(label=s, value=s) for s in ['dataframe', 'alphabet', 'histogram overlap', 'gini reduction']],
value='gini reduction',
style=dict(height=40, width=150, horizontalAlign = 'right'),
bs_size="sm",
),
dbc.Tooltip("You can sort the features by their potential gini reduction (default), "
"or alphabetically, by order in the dataframe, or by histogram overlap.",
target='density-sort'),
]),
], md=2),
]),
html.Div([
dbc.Row([
dbc.Col([
dcc.Graph(id='density-num-plot', config=dict(modeBarButtons=[[]], displaylogo=False)),
], md=10),
dbc.Col([
dbc.Label("All", id='density-num-pie-all-label'),
dcc.Graph(id='density-num-pie-all', config=dict(modeBarButtons=[[]], displaylogo=False), style=dict(marginBottom=20)),
dbc.Label("Selected", id='density-num-pie-selected-label'),
dcc.Graph(id='density-num-pie-selected', config=dict(modeBarButtons=[[]], displaylogo=False), style=dict(marginBottom=20)),
dbc.Label("Not Selected", id='density-num-pie-not-selected-label'),
dcc.Graph(id='density-num-pie-not-selected', config=dict(modeBarButtons=[[]], displaylogo=False)),
], md=2),
]),
dbc.Row([
dbc.Col([
html.Div([
dbc.FormGroup([
dcc.RangeSlider(id='density-num-cutoff',
allowCross=False, min=0, max=10,
tooltip=dict(always_visible=True)),
]),
], style=dict(marginLeft=45)),
], md=9),
dbc.Col([
dbc.FormGroup([
dbc.Select(
options=[
{'label':'Range', 'value':'range'},
{'label':'Lesser than', 'value':'lesser_than'},
{'label':'Greater than', 'value':'greater_than'}
],
value='lesser_than',
id='density-num-ruletype',
bs_size="sm",
),
dbc.Tooltip("LesserThan rules ignore the lower bound and GreaterThan "
"rules ignore the upper bound. RangeRules respect both upper and lower bounds.",
target='density-num-ruletype'),
]),
], md=2),
dbc.Col([
dbc.FormGroup([
html.Div([
dbc.Button("Suggest", id='density-num-suggest-button', color="primary", size="sm"),
dbc.Tooltip("Select the best split point that minimizes the weighted average gini after "
"the split, similar to a DecisionTree", target='density-num-suggest-button'),
], style={'vertical-align': 'bottom'})
]),
], md=1),
], form=True),
dbc.Row([
dbc.Col([
html.Hr()
])
]),
dbc.Row([
dbc.Col([
dbc.FormGroup([
dbc.Button("Split", id='density-num-split-button',
color="primary", size="m", style=dict(width=150)),
dbc.Tooltip("Generate a Split rule where all observations where the condition holds go left "
"and all other observations go right.", target='density-num-split-button'),
]),
], md=6),
dbc.Col([
dbc.FormGroup([
html.Div([
dbc.Select(id='density-num-prediction', options=[
{'label':f"{y}. {label}", 'value':str(y)} for y, label in enumerate(self.labels)],
value=str(len(self.labels)-1), bs_size="md"),
dbc.Button("Predict Selected", id='density-num-predict-button', color="primary",
size="m", style=dict(marginLeft=10, width=400)),
dbc.Tooltip("Apply the prediction for all observation for which the condition holds. All "
"other observations will either be covered by later rules, or predicted nan.",
target='density-num-predict-button'),
dbc.Button(children="Predict All", id='density-num-predict-all-button', color="primary",
size="m", style=dict(marginLeft=10, width=400)),
dbc.Tooltip(children="Apply the prediction to all observations regardless whether the condition holds. Use this "
"rule as the final rule in order to prevent any nan's showing up in your predictions.",
target='density-num-predict-all-button'),
], style={'width': '100%', 'display': 'flex', 'align-items': 'right', 'justify-content': 'right'}),
], row=True),
], md=6),
], form=True),
], id='density-num-div', style=dict(display="none")),
html.Div([
dbc.Row([
dbc.Col([
dcc.Graph(id='density-cats-plot', config=dict(modeBarButtons=[[]], displaylogo=False)),
], md=10),
dbc.Col([
dbc.Label("All", id='density-cats-pie-all-label'),
dcc.Graph(id='density-cats-pie-all', config=dict(modeBarButtons=[[]], displaylogo=False), style=dict(marginBottom=20)),
dbc.Label("Selected", id='density-cats-pie-selected-label'),
dcc.Graph(id='density-cats-pie-selected', config=dict(modeBarButtons=[[]], displaylogo=False), style=dict(marginBottom=20)),
dbc.Label("Not selected", id='density-cats-pie-not-selected-label'),
dcc.Graph(id='density-cats-pie-not-selected', config=dict(modeBarButtons=[[]], displaylogo=False)),
], md=2),
]),
dbc.Row([
dbc.Col([
dbc.FormGroup([
dcc.Dropdown(id='density-cats-cats', value=[], multi=True,
style=dict(marginBottom=20)),
]),
], md=8),
dbc.Col([
dbc.FormGroup([
html.Div([
dbc.Button("Invert", id='density-cats-invert-button', color="primary", size="sm"),
dbc.Tooltip("Invert the category selection: all selected will be unselected and "
"all not selected will be selected", target='density-cats-invert-button'),
], style={'vertical-align': 'bottom'}),
]),
], md=1),
dbc.Col([
dbc.FormGroup([
html.Div([
dbc.Button("Suggest", id='density-cats-suggest-button', color="primary", size="sm"),
dbc.Tooltip("Suggest best single category to select to minimize weighted gini. Either the category "
"or the inverse will be selected, whichever has the lowest gini", target='density-cats-suggest-button'),
], style={'vertical-align': 'bottom'})
]),
], md=1),
dbc.Col([
html.Div([
dbc.FormGroup([
dbc.Checklist(
options=[{"label": "Display Relative", "value": True}],
value=[],
id='density-cats-percentage',
inline=True,
switch=True,
),
dbc.Tooltip("Display barcharts as percentages instead of counts.", target='density-cats-percentage'),
]),
], style={'display': 'flex', 'align-items': 'right', 'justify-content': 'flex-end'}),
], md=2),
], form=True),
dbc.Row([
dbc.Col([
html.Hr()
])
]),
dbc.Row([
dbc.Col([
dbc.FormGroup([
dbc.Button("Split", id='density-cats-split-button',
color="primary", size="m", style=dict(width=150)),
dbc.Tooltip("Generate a Split rule where all observations where the condition holds go left "
"and all other observations go right.", target='density-cats-split-button'),
]),
], md=6),
dbc.Col([
dbc.FormGroup([
html.Div([
dbc.Select(id='density-cats-prediction', options=[
{'label':f"{y}. {label}", 'value':str(y)} for y, label in enumerate(self.labels)],
value=str(len(self.labels)-1), #clearable=False,
bs_size="md"),
dbc.Button("Predict Selected", id='density-cats-predict-button', color="primary",
size="m", style=dict(marginLeft=10, width=400)),
dbc.Tooltip("Apply the prediction for all observation for which the condition holds. All "
"other observations will either be covered by later rules, or predicted nan.",
target='density-cats-predict-button'),
dbc.Button("Predict All", id='density-cats-predict-all-button', color="primary",
size="m", style=dict(marginLeft=10, width=400)),
dbc.Tooltip("Apply the prediction to all observations regardless whether the condition holds. Use this "
"rule as the final rule in order to prevent any nan's showing up in your predictions.",
target='density-cats-predict-all-button'),
], style = {'width': '100%', 'display': 'flex', 'align-items': 'right', 'justify-content': 'right'})
], row=True),
], md=6),
], form=True),
], id='density-cats-div'),
]),
]),
]),
dcc.Tab(label="Multi Feature Rules", id='parallel-tab', value='parallel-tab', children=[
dbc.Card([
dbc.CardBody([
dbc.Row([
dbc.Col([
dbc.FormGroup([
dbc.Label("Display Features", html_for='parallel-cols', id='parallel-cols-label'),
dcc.Dropdown(id='parallel-cols', multi=True,
options=[{'label':col, 'value':col}
for col in self.X.columns],
value = self.X.columns.tolist()),
dbc.Tooltip("Select the features to be displayed in the Parallel Plot",
target='parallel-cols-label'),
]),
], md=10),
dbc.Col([
dbc.FormGroup([
dbc.Label("Sort features", html_for='parallel-sort', id='parallel-sort-label'),
dbc.Select(id='parallel-sort',
options=[dict(label=s, value=s) for s in ['dataframe', 'alphabet', 'histogram overlap', 'gini reduction']],
value='gini reduction',
style=dict(height=40, width=150, horizontalAlign = 'right'),
bs_size="sm",
),
dbc.Tooltip("Sort the features in the plot from least histogram overlap "
"(feature distributions look the most different for different "
"values of y) on the right, to highest histogram overlap on the left.",
target='parallel-sort-label'),
])
], md=2),
], form=True),
dbc.Row([
dbc.Col([
html.Small('You can select multiple ranges in the parallel plot to define a multi feature rule.', className="text-muted",),
dcc.Graph(id='parallel-plot'),
html.Div(id='parallel-description'),
])
]),
dbc.Row([
dbc.Col([
html.Div([
dbc.Row(dbc.Col([
dbc.Label("All", id='parallel-pie-all-label', html_for='parallel-pie-all'),
dbc.Tooltip("Label distribution for all observations in the parallel plot above.",
target='parallel-pie-all-label'),
dcc.Graph(id='parallel-pie-all', config=dict(modeBarButtons=[[]], displaylogo=False)),
])),
], style={'display':'flex', 'justify-content': 'center', 'align-items': 'center'}),
]),
dbc.Col([
html.Div([
dbc.Row(dbc.Col([
dbc.Label("Selected", id='parallel-pie-selection-label'),
dbc.Tooltip("Label distribution for all the feature ranges selected above",
target='parallel-pie-selection-label'),
dcc.Graph(id='parallel-pie-selection', config=dict(modeBarButtons=[[]], displaylogo=False)),
])),
], style={'display':'flex', 'justify-content': 'center', 'align-items': 'center'}),
]),
dbc.Col([
html.Div([
dbc.Row(dbc.Col([
dbc.Label("Not selected", id='parallel-pie-non-selection-label'),
dbc.Tooltip("Label distribution for all the feature ranges not selected above",
target='parallel-pie-non-selection-label'),
dcc.Graph(id='parallel-pie-non-selection', config=dict(modeBarButtons=[[]], displaylogo=False)),
])),
], style={'display':'flex', 'justify-content': 'center', 'align-items': 'center'}),
]),
]),
dbc.Row([
dbc.Col([
html.Hr()
]),
]),
dbc.Row([
dbc.Col([
dbc.FormGroup([
dbc.Button("Split", id='parallel-split-button',
color="primary", size="m", style=dict(width=150)),
dbc.Tooltip("Make a split using the selection in the Parallel Plot. "
"Data in the selected ranges goes left (true), all other data "
"goes right (false).", target='parallel-split-button'),
]),
], md=6),
dbc.Col([
dbc.FormGroup([
html.Div([
dbc.Select(id='parallel-prediction', options=[
{'label':f"{y}. {label}", 'value':str(y)} for y, label in enumerate(self.labels)],
value=str(len(self.labels)-1),
bs_size="md"),
dbc.Tooltip("The prediction to be applied. Either to all data ('Predict All'), "
"or to the selected data ('Predict Selected'). Will get automatically "
"Inferred from the selected data in the Parallel Plot.", target='parallel-prediction'),
dbc.Button("Predict Selected", id='parallel-predict-button',
color="primary", size="m", style=dict(marginLeft=10, width=400)),
dbc.Tooltip("Apply the prediction to all data within the ranges "
"selected in the Parallel Plot.", target='parallel-predict-button'),
dbc.Button("Predict All", id='parallel-predict-all-button',
color="primary", size="m", style=dict(marginLeft=10, width=400)),
dbc.Tooltip("Add a PredictionRule: Apply a single uniform prediction to all the "
"data without distinction.", target='parallel-predict-all-button'),
], style = {'width': '100%', 'display': 'flex', 'align-items': 'right', 'justify-content': 'right'})
], row=True),
], md=6),
], form=True),
]),
]),
]),
], style=dict(marginTop=5)),
]),
]),
]),
], style=dict(marginBottom=20, marginTop=20)),
dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardHeader([
dbc.Row([
dbc.Col([
html.H3("Model", className='card-title'),
], md=6),
dbc.Col([
html.Div([
dbc.FormGroup([
dbc.Label("Selected rule:", id='selected-rule-id-label',
html_for='selected-rule-id', className="mr-2"),
dcc.Dropdown(id='selected-rule-id', options=[
{'label':str(ruleid), 'value':int(ruleid)}
for ruleid in range(self.model._get_max_rule_id()+1)],
value=0, clearable=False, style=dict(width=80)),
dbc.Tooltip("You can either select a rule id here or by clicking in the model graph.",
target='selected-rule-id-label'),
dcc.ConfirmDialogProvider(
children=html.Button("Remove Rule", id='remove-rule-button', className="btn btn-danger btn-sm",
style=dict(marginLeft=5)),
id='remove-rule-confirm',
message='Warning! Once you have removed a rule there is no undo button or ctrl-z! Are you sure?'
),
dbc.Tooltip("Remove the selected rule from the model. Warning! Cannot be undone!",
target='remove-rule-button'),
dcc.ConfirmDialogProvider(
children=html.Button("Reset Model", id='reset-model-button', className="btn btn-danger btn-sm",
style=dict(marginLeft=10)),
id='reset-model-confirm',
message='Warning! Once you have reset the model there is no undo button or ctrl-z! Are you sure?'
),
dbc.Tooltip("Reset the model to the initial state. Warning! Cannot be undone!",
target='reset-model-button'),
], row=True),
], style={'display': 'flex', 'align-items': 'right', 'justify-content': 'flex-end'}),
], md=6),
], form=True, style={'marginLeft':8, 'marginTop':10}),
]),
dbc.CardBody([
dcc.Tabs(id='model-tabs', value='model-graph-tab', children=[
dcc.Tab(id='model-graph-tab', value='model-graph-tab', label='Graph',
children=html.Div([
dbc.Row([
dbc.Col([
dbc.FormGroup([
dbc.Label("Color scale: ", html_for='absolute-or-relative', className="mr-2"),
dbc.Select(id='model-graph-color-scale',
options=[{'label':'Absolute', 'value':'absolute'},
{'label':'Relative', 'value':'relative'}],
value='absolute',
bs_size="sm", style=dict(width=100)),
dbc.Tooltip("Color the rules by either by absolute accuracy (0%=red, 100%=green), "
"or by relative accuracy (lowest accuracy=red, highest=green)", target='model-graph-color-scale'),
], row=True),
], md=3),
dbc.Col([
dbc.FormGroup([
dbc.Label("Display: ", html_for='model-graph-scatter-text', className="mr-2"),
dbc.Select(id='model-graph-scatter-text',
options=[dict(label=o, value=o) for o in ['name', 'description', 'coverage', 'accuracy']],
value='description',
bs_size="sm",style=dict(width=130)),
dbc.Tooltip("You can display rule description, accuracy or coverage next to the markers on the model graph.",
target='model-graph-scatter-text'),
], row=True),
], md=3),
], form=True, style=dict(marginTop=8, marginLeft=16, marginRight=16)),
dbc.Row([
dbc.Col([
dcc.Graph(id='model-graph'),
]),
]),
dbc.Row([
dbc.Col([
html.P("You can select a rule by clicking on it in the Graph"),
])
])
])
),
dcc.Tab(id='model-description-tab', value='model-description-tab', label='Description',
children=html.Div([dbc.Row([dbc.Col([
html.Div([
dcc.Clipboard(
target_id="model-description",
title="copy"),
], style={'display': 'flex', 'align-items': 'right', 'justify-content': 'flex-end'}),
dcc.Markdown(id='model-description'),
])])])
),
dcc.Tab(id='model-yaml-tab', value='model-yaml-tab', label='.yaml',
children=html.Div([dbc.Row([dbc.Col([
html.Div("To instantiate a model from a .yaml file:", style=dict(marginBottom=10)),
dcc.Markdown("```\nfrom rule_estimator import RuleClassifier\n"
"model = RuleClassifier.from_yaml('model.yaml')\n"
"model.predict(X_test)\n```"),
html.B("model.yaml:"),
html.Div([
dcc.Clipboard(
target_id="model-yaml",
title="copy"),
], style={'display': 'flex', 'align-items': 'right', 'justify-content': 'flex-end'}),
dcc.Markdown(id='model-yaml'),
])])])
),
dcc.Tab(id='model-code-tab', value='model-code-tab', label='Python Code',
children=[
html.Div([
dcc.Clipboard(
target_id="model-code",
title="copy"),
], style={'marginTop':20, 'display': 'flex', 'align-items': 'right', 'justify-content': 'flex-end'}),
dcc.Markdown(id='model-code'),
]
),
]),
]),
]),
]),
], style=dict(marginBottom=20, marginTop=20)),
dbc.Row([
dbc.Col([
dbc.Card([
dbc.CardHeader([
html.H3("Performance"),
]),
dbc.CardBody([
dcc.Tabs(id='performance-tabs', value='performance-overview-tab', children=[
dcc.Tab(id='performance-rules-tab2', value='performance-overview-tab', label="Model Performance",
children=html.Div([
dbc.Row([
dbc.Col([
dbc.Select(id='model-performance-select',
options=[
dict(label='Full model', value='model'),
dict(label='Single Rule', value='rule'),
],
value='full_model',
),
dbc.Tooltip("Display performance for the model as a whole or "
"only for the currently selected rule", target='model-performance-select'),
]),
]),
dbc.Row([
dbc.Col([
dcc.Graph(id='model-performance-confmat'),
], md=6),
dbc.Col([
html.Div(id='model-performance-metrics'),
html.Div(id='model-performance-coverage'),
]),
]),
])),
dcc.Tab(id='performance-rules-tab', value='performance-rules-tab', label="All rules",
children=html.Div(id='model-performance')),
]),
]),
]),
]),
], style=dict(marginBottom=20, marginTop=20)),
])
def register_callbacks(self, app):
@app.callback(
Output("download-yaml", "data"),
Input("download-yaml-button", "n_clicks"),
State('model-store', 'data')
)
def download_yaml(n_clicks, model):
if n_clicks is not None:
model = self._get_model(model)
return dict(content=model.to_yaml(), filename="model.yaml")
raise PreventUpdate
@app.callback(
Output("download-pickle", "data"),
Input("download-pickle-button", "n_clicks"),
State('model-store', 'data')
)
def download_pickle(n_clicks, model):
if n_clicks is not None:
model = self._get_model(model)
return dcc.send_bytes(model.pickle().read(), "model.pkl")
raise PreventUpdate
@app.callback(
Output('updated-rule-id', 'data'),
Input('parallel-updated-rule-id', 'data'),
Input('density-num-updated-rule-id', 'data'),
Input('density-cats-updated-rule-id', 'data'),
Input('removerule-updated-rule-id', 'data'),
Input('resetmodel-updated-rule-id', 'data'),
Input('model-graph', 'clickData'),
Input('uploaded-model', 'data')
)
def update_model(parallel_rule_id, num_rule_id, cats_rule_id,
removerule_rule_id, resetmodel_rule_id, clickdata,
uploaded_model):
trigger = self._get_callback_trigger()
if trigger == 'uploaded-model':
model = self._get_model(uploaded_model)
if model.get_max_rule_id() > 0:
return 1
return 0
if trigger == 'model-graph':
if (clickdata is not None and clickdata['points'][0] is not None and
'hovertext' in clickdata['points'][0]):
rule = clickdata['points'][0]['hovertext'].split('rule:')[1].split('<br>')[0]
if rule is not None:
return int(rule)
if trigger == 'parallel-updated-rule-id':
return parallel_rule_id
if trigger == 'density-num-updated-rule-id':
return num_rule_id
if trigger == 'density-cats-updated-rule-id':
return cats_rule_id
elif trigger == 'removerule-updated-rule-id':
return removerule_rule_id
elif trigger == 'resetmodel-updated-rule-id':
return resetmodel_rule_id
raise PreventUpdate
@app.callback(
Output('model-store', 'data'),
Input('parallel-updated-model', 'data'),
Input('density-num-updated-model', 'data'),
Input('density-cats-updated-model', 'data'),
Input('removerule-updated-model', 'data'),
Input('resetmodel-updated-model', 'data'),
Input('uploaded-model', 'data'),
State('model-store', 'data')
)
def store_model(parallel_update, num_update, cats_update,
removerule_update, resetmodel_update, uploaded_model,
model):
trigger = self._get_callback_trigger()
if trigger == 'parallel-updated-model':
if parallel_update is not None: return parallel_update
elif trigger == 'density-num-updated-model':
if num_update is not None: return num_update
elif trigger == 'density-cats-updated-model':
if cats_update is not None: return cats_update
elif trigger == 'removerule-updated-model':
if removerule_update is not None: return removerule_update
elif trigger == 'resetmodel-updated-model':
if resetmodel_update is not None: return resetmodel_update
elif trigger == 'uploaded-model':
if uploaded_model is not None: return uploaded_model
if model is None:
return self.model.to_json()
raise PreventUpdate
@app.callback(
Output('update-model-graph', 'data'),
Output('model-description', 'children'),
Output('model-yaml', 'children'),
Output('model-code', 'children'),
Output('update-model-performance', 'data'),
Output('selected-rule-id', 'options'),
Output('selected-rule-id', 'value'),
Input('updated-rule-id', 'data'),
Input('model-store', 'data')
)
def update_model(rule_id, model):
model = self._get_model(model)
rule_id_options = [{'label':str(ruleid), 'value':int(ruleid)}
for ruleid in range(model._get_max_rule_id()+1)]
rule_id = dash.no_update if rule_id is None else int(rule_id)
return ("update_graph",
f"```\n{model.describe()}```",
f"```yaml\n{model.to_yaml()}\n```",
f"```python\nfrom rule_estimator import *\n\nmodel = {model.to_code()[1:]}\n```",
"update_performance",
rule_id_options, rule_id)
######### DENSITY NUM RULE CALLBACK
@app.callback(
Output('density-num-updated-rule-id', 'data'),
Output('density-num-updated-model', 'data'),
Output('added-num-density-rule', 'data'),
Input('density-num-split-button', 'n_clicks'),
Input('density-num-predict-button', 'n_clicks'),
Input('density-num-predict-all-button', 'n_clicks'),
State('selected-rule-id', 'value'),
State('append-or-replace', 'value'),
State('density-num-ruletype', 'value'),
State('density-col', 'value'),
State('density-num-cutoff', 'value'),
State('density-num-prediction', 'value'),
State('model-store', 'data'),
)
def update_model_rule(split_clicks, predict_clicks, all_clicks, rule_id, append_or_replace, rule_type,
col, cutoff, prediction, model):
new_rule = None
trigger = self._get_callback_trigger()
if trigger == 'density-num-predict-button':
if rule_type == 'lesser_than':
new_rule = LesserThan(col=col, cutoff=cutoff[1], prediction=int(prediction))
elif rule_type == 'greater_than':
new_rule = GreaterThan(col=col, cutoff=cutoff[0], prediction=int(prediction))
elif rule_type == 'range':
new_rule = RangeRule(col=col, min=cutoff[0], max=cutoff[1], prediction=int(prediction))
elif trigger == 'density-num-split-button':
if rule_type == 'lesser_than':
new_rule = LesserThanSplit(col=col, cutoff=cutoff[1])
elif rule_type == 'greater_than':
new_rule = GreaterThanSplit(col=col, cutoff=cutoff[0])
elif rule_type == 'range':
new_rule = RangeSplit(col=col, min=cutoff[0], max=cutoff[1])
elif trigger == 'density-num-predict-all-button':
new_rule = PredictionRule(prediction=int(prediction))
if new_rule is not None:
rule_id, model = self._change_model(model, rule_id, new_rule, append_or_replace)
return rule_id, model.to_json(), "trigger"
raise PreventUpdate
######### DENSITY CATS RULE CALLBACK
@app.callback(
Output('density-cats-updated-rule-id', 'data'),
Output('density-cats-updated-model', 'data'),
Output('added-cats-density-rule', 'data'),
Input('density-cats-split-button', 'n_clicks'),
Input('density-cats-predict-button', 'n_clicks'),
Input('density-cats-predict-all-button', 'n_clicks'),
State('selected-rule-id', 'value'),
State('append-or-replace', 'value'),
State('density-col', 'value'),
State('density-cats-cats', 'value'),
State('density-cats-prediction', 'value'),
State('model-store', 'data'),
)
def update_model_rule(split_clicks, predict_clicks, all_clicks, rule_id, append_or_replace, col, cats, prediction, model):
new_rule = None
trigger = self._get_callback_trigger()
if trigger == 'density-cats-split-button':
new_rule = IsInSplit(col=col, cats=cats)
elif trigger == 'density-cats-predict-button':
new_rule = IsInRule(col=col, cats=cats, prediction=int(prediction))
elif trigger == 'density-cats-predict-all-button':
new_rule = PredictionRule(prediction=int(prediction))
if new_rule is not None:
rule_id, model = self._change_model(model, rule_id, new_rule, append_or_replace)
return rule_id, model.to_json(), "trigger"
raise PreventUpdate
######### PARALLEL RULE CALLBACK
@app.callback(
Output('parallel-updated-rule-id', 'data'),
Output('parallel-updated-model', 'data'),
Input('parallel-split-button', 'n_clicks'),
Input('parallel-predict-button', 'n_clicks'),
Input('parallel-predict-all-button', 'n_clicks'),
State('selected-rule-id', 'value'),
State('append-or-replace', 'value'),
State('parallel-prediction', 'value'),
State('parallel-plot', 'figure'),
State('model-store', 'data'),
)
def update_model_parallel(split_clicks, predict_clicks, predict_all_clicks,
rule_id, append_or_replace, prediction, fig, model):
new_rule = None
trigger = self._get_callback_trigger()
if fig is not None:
model = self._get_model(model)
plot_data = fig['data'][0].get('dimensions', None)
range_dict = {}
for col_data in plot_data:
if col_data['label'] != 'y' and 'constraintrange' in col_data:
range_dict[col_data['label']] = self._process_constraintrange(
col_data['constraintrange'], col_data['ticktext'] if 'ticktext' in col_data else None)
if trigger == 'parallel-split-button':
new_rule = MultiRangeSplit(range_dict)
elif trigger == 'parallel-predict-button':
new_rule = MultiRange(range_dict, prediction=int(prediction))
elif trigger == 'parallel-predict-all-button':
new_rule = PredictionRule(prediction=int(prediction))
else:
raise PreventUpdate
rule_id, model = self._change_model(model, rule_id, new_rule, append_or_replace)
return rule_id, model.to_json()
raise PreventUpdate
@app.callback(
Output('removerule-updated-rule-id', 'data'),
Output('removerule-updated-model', 'data'),
Input('remove-rule-confirm', 'submit_n_clicks'),
State('selected-rule-id', 'value'),
State('model-store', 'data'),
)
def remove_rule(n_clicks, rule_id, model):
if n_clicks is not None:
model = self._get_model(model)
model.remove_rule(rule_id)
return min(rule_id, model._get_max_rule_id()), model.to_json()
raise PreventUpdate
@app.callback(
Output('resetmodel-updated-rule-id', 'data'),
Output('resetmodel-updated-model', 'data'),
Input('reset-model-confirm', 'submit_n_clicks'),
State('selected-rule-id', 'value'),
)
def reset_model(n_clicks, rule_id):
if n_clicks is not None:
return 0, self.model.to_json()
raise PreventUpdate
@app.callback(
Output('density-num-div', 'style'),
Output('density-cats-div', 'style'),
Input('density-col', 'value'),
)
def update_density_hidden_divs(col):
if col is not None:
if col in self.cats:
return dict(display="none"), {}
else:
return {}, dict(display="none")
else:
if self.X.columns[0] in self.cats:
return dict(display="none"), {}
else:
return {}, dict(display="none")
@app.callback(
Output('density-col', 'value'),
Output('rule-tabs', 'value'),
Input('selected-rule-id', 'value'),
Input('append-or-replace', 'value'),
Input('density-col-suggest-button', 'n_clicks'),
State('train-or-val', 'value'),
State('model-store', 'data'),
)
def update_model_node(rule_id, append_or_replace, n_clicks, train_or_val, model):
trigger = self._get_callback_trigger()
if trigger == 'density-col-suggest-button':
model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
return model._sort_cols_by_gini_reduction(X, y)[0], dash.no_update
if append_or_replace == 'replace' and rule_id is not None:
model = self._get_model(model)
rule = model.get_rule(rule_id)
if isinstance(rule, IsInRule):
return rule.col, "density-tab"
elif isinstance(rule, IsInSplit):
return rule.col, "density-tab"
elif isinstance(rule, LesserThan):
return rule.col, "density-tab"
elif isinstance(rule, GreaterThan):
return rule.col, "density-tab"
elif isinstance(rule, LesserThanSplit):
return rule.col, "density-tab"
elif isinstance(rule, GreaterThanSplit):
return rule.col, "density-tab"
elif isinstance(rule, MultiRange):
return dash.no_update, "density-tab"
elif isinstance(rule, RangeRule):
return dash.no_update, "density-tab"
elif isinstance(rule, RangeRule):
return dash.no_update, "density-tab"
elif isinstance(rule, MultiRangeSplit):
return dash.no_update, "parallel-tab"
raise PreventUpdate
@app.callback(
    Output('density-num-prediction', 'value'),
    Output('density-num-pie-all-label', 'children'),
    Output('density-num-pie-all', 'figure'),
    Output('density-num-pie-selected-label', 'children'),
    Output('density-num-pie-selected', 'figure'),
    Output('density-num-pie-not-selected-label', 'children'),
    Output('density-num-pie-not-selected', 'figure'),
    Input('density-num-cutoff', 'value'),
    Input('selected-rule-id', 'value'),
    Input('density-num-ruletype', 'value'),
    Input('train-or-val', 'value'),
    Input('append-or-replace', 'value'),
    State('density-col', 'value'),
    State('model-store', 'data'),
)
def update_density_num_pies(cutoff, rule_id, rule_type, train_or_val, append_or_replace, col, model):
    """Update the prediction field and the three label pies (all/selected/
    not selected) for the numeric rule described by the cutoff slider.

    BUGFIX: the old `if/elif/else` fallback ran its `else: prediction = 0`
    whenever the prediction had already been taken from the selected rule,
    clobbering it back to 0. The fallback now only applies when no
    prediction was found (same ordering as update_parallel_prediction).
    """
    if col is not None and col in self.non_cats and cutoff is not None:
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
        pie_size = 80
        # build the candidate split rule; cutoff is a [low, high] slider pair
        if rule_type == 'lesser_than':
            rule = LesserThanSplit(col, cutoff[1])
        elif rule_type == 'greater_than':
            rule = GreaterThanSplit(col, cutoff[0])
        elif rule_type == 'range':
            rule = RangeSplit(col, cutoff[0], cutoff[1])
        else:
            raise ValueError("rule_type should be either lesser_than or greater_than!")
        X_rule, y_rule = X[rule.__rule__(X)], y[rule.__rule__(X)]
        pie_all = plot_label_pie(model, X, y, size=pie_size)
        pie_selection = plot_label_pie(model, X_rule, y_rule, size=pie_size)
        pie_non_selection = plot_label_pie(model, X[~rule.__rule__(X)], y[~rule.__rule__(X)], size=pie_size)
        trigger = self._get_callback_trigger()
        prediction = None
        # a freshly selected rule in replace mode supplies its own prediction
        if append_or_replace=='replace' and trigger in ['selected-rule-id', 'append-or-replace']:
            rule = model.get_rule(rule_id)
            if isinstance(rule, (LesserThan, LesserThanSplit, GreaterThan,
                                 GreaterThanSplit, RangeRule, RangeSplit)):
                prediction = rule.prediction
        # otherwise fall back to the majority label of the selection / all data
        if prediction is None:
            if not X_rule.empty:
                prediction = y_rule.value_counts().index[0]
            elif not X.empty:
                prediction = y.value_counts().index[0]
            else:
                prediction = 0
        return (str(prediction),
                f"All ({len(X)})", pie_all,
                f"Selected ({len(X_rule)})", pie_selection,
                f"Not Selected ({len(X)-len(X_rule)})", pie_non_selection
                )
    raise PreventUpdate
@app.callback(
    Output('density-cats-prediction', 'value'),
    Output('density-cats-pie-all-label', 'children'),
    Output('density-cats-pie-all', 'figure'),
    Output('density-cats-pie-selected-label', 'children'),
    Output('density-cats-pie-selected', 'figure'),
    Output('density-cats-pie-not-selected-label', 'children'),
    Output('density-cats-pie-not-selected', 'figure'),
    Input('density-cats-cats', 'value'),
    Input('selected-rule-id', 'value'),
    Input('train-or-val', 'value'),
    Input('append-or-replace', 'value'),
    State('density-col', 'value'),
    State('model-store', 'data'),
)
def update_density_cats_pies(cats, rule_id, train_or_val, append_or_replace, col, model):
    """Update the prediction field and the three label pies for the
    categorical rule described by the selected categories.

    BUGFIX: the old `if/elif/else` fallback ran its `else: prediction = 0`
    whenever the prediction had already been taken from the selected rule,
    clobbering it back to 0. The fallback now only applies when no
    prediction was found.
    """
    if col is not None:
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
        pie_size = 80
        rule = IsInSplit(col, cats)
        X_rule, y_rule = X[rule.__rule__(X)], y[rule.__rule__(X)]
        pie_all = plot_label_pie(model, X, y, size=pie_size)
        pie_selection = plot_label_pie(model, X_rule, y_rule, size=pie_size)
        pie_non_selection = plot_label_pie(model, X[~rule.__rule__(X)], y[~rule.__rule__(X)], size=pie_size)
        trigger = self._get_callback_trigger()
        prediction = None
        # a freshly selected rule in replace mode supplies its own prediction
        if append_or_replace=='replace' and trigger in ['selected-rule-id', 'append-or-replace']:
            rule = model.get_rule(rule_id)
            if isinstance(rule, IsInRule):
                prediction = rule.prediction
        # otherwise fall back to the majority label of the selection / all data
        if prediction is None:
            if not X_rule.empty:
                prediction = y_rule.value_counts().index[0]
            elif not X.empty:
                prediction = y.value_counts().index[0]
            else:
                prediction = 0
        return (str(prediction),
                f"All ({len(X)})", pie_all,
                f"Selected ({len(X_rule)})", pie_selection,
                f"Not Selected ({len(X)-len(X_rule)})", pie_non_selection
                )
    raise PreventUpdate
@app.callback(
    Output('density-cats-plot', 'figure'),
    Output('density-num-plot', 'figure'),
    Input('density-col', 'value'),
    Input('selected-rule-id', 'value'),
    Input('train-or-val', 'value'),
    Input('append-or-replace', 'value'),
    Input('density-num-cutoff', 'value'),
    Input('density-cats-cats', 'value'),
    Input('density-cats-percentage', 'value'),
    State('model-store', 'data'),
)
def update_density_plot(col, rule_id, train_or_val, append_or_replace, cutoff, cats, percentage, model):
    """Redraw the density plot for the selected column: the categorical
    figure for categorical columns, the numerical one otherwise."""
    if col is None:
        raise PreventUpdate
    model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
    after = self._infer_after(append_or_replace)
    if col in self.cats:
        cats_fig = plot_cats_density(
            model, X, y, col, rule_id=rule_id, after=after,
            labels=self.labels, percentage=bool(percentage), highlights=cats)
        return cats_fig, dash.no_update
    if col in self.non_cats:
        num_fig = plot_density(
            model, X, y, col, rule_id=rule_id, after=after,
            labels=self.labels, cutoff=cutoff)
        return dash.no_update, num_fig
    # column belongs to neither group: leave both plots untouched
    raise PreventUpdate
@app.callback(
Output('density-col-suggest-button', 'n_clicks'),
Input('added-num-density-rule', 'data'),
Input('added-cats-density-rule', 'data'),
State('density-col-suggest-button', 'n_clicks'),
)
def trigger_new_suggested_col(num_trigger, cats_trigger, old_clicks):
    """Bump the suggest-button click counter whenever a density rule was
    added, so the suggested-column callback fires again."""
    return 1 if old_clicks is None else old_clicks + 1
@app.callback(
    Output('density-col', 'options'),
    Input('density-sort', 'value'),
    Input('selected-rule-id', 'value'),
    Input('train-or-val', 'value'),
    Input('append-or-replace', 'value'),
    State('model-store', 'data'),
    State('density-col', 'options'),
)
def update_density_col(sort, rule_id, train_or_val, append_or_replace, model, old_options):
    """Re-order the column dropdown options according to the sort setting.

    BUGFIX: removed an unreachable trailing `raise PreventUpdate` -- every
    branch of the if/elif/else already returns or raises.
    """
    if sort=='dataframe':
        return [dict(label=col, value=col) for col in self.X.columns]
    elif sort == 'alphabet':
        return [dict(label=col, value=col) for col in sorted(self.X.columns.tolist())]
    elif sort == 'histogram overlap':
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
        return [dict(label=col, value=col) for col in model._sort_cols_by_histogram_overlap(X, y)]
    elif sort == 'gini reduction':
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
        return [dict(label=col, value=col) for col in model._sort_cols_by_gini_reduction(X, y)]
    else:
        raise ValueError(f"Wrong sort value: {sort}!")
@app.callback(
    Output('density-cats-cats', 'value'),
    Output('density-num-cutoff', 'value'),
    Output('density-num-ruletype', 'value'),
    Input('density-cats-plot', 'clickData'),
    Input('density-col', 'value'),
    Input('selected-rule-id', 'value'),
    Input('train-or-val', 'value'),
    Input('append-or-replace', 'value'),
    Input('density-num-suggest-button', 'n_clicks'),
    Input('density-cats-suggest-button', 'n_clicks'),
    Input('density-cats-invert-button', 'n_clicks'),
    Input('density-num-ruletype', 'value'),
    Input('density-num-cutoff', 'value'),
    State('model-store', 'data'),
    State('density-cats-cats', 'value'),
    State('density-num-cutoff', 'min'),
    State('density-num-cutoff', 'max'),
    State('density-cats-cats', 'options'),
)
def check_cats_clicks(clickdata, col, rule_id, train_or_val, append_or_replace,
        num_suggest_n_clicks, cats_suggest_n_clicks, invert_n_clicks, num_ruletype,
        old_cutoff, model, old_cats, cutoff_min, cutoff_max, cats_options):
    """Central callback keeping the category selection, the numeric cutoff
    slider and the rule-type selector in sync with plot clicks, rule
    selection, and the suggest/invert buttons.

    BUGFIX: the cats-suggest branch without cats_options built the suggested
    category list but was missing its `return`, so the suggestion was
    discarded and the callback fell through to PreventUpdate.
    """
    trigger = self._get_callback_trigger()
    if trigger == 'density-cats-invert-button':
        # invert the current category selection
        new_cats = [cat['value'] for cat in cats_options if cat['value'] not in old_cats]
        return new_cats, dash.no_update, dash.no_update
    if append_or_replace=='replace' and trigger in ['selected-rule-id', 'append-or-replace']:
        # initialize the widgets from the rule that is being replaced
        model = self._get_model(model)
        rule = model.get_rule(rule_id)
        if isinstance(rule, (IsInRule, IsInSplit)):
            return rule.cats, dash.no_update, dash.no_update
        if isinstance(rule, (GreaterThan, GreaterThanSplit)):
            return dash.no_update, [rule.cutoff, cutoff_max], "greater_than"
        if isinstance(rule, (LesserThan, LesserThanSplit)):
            return dash.no_update, [cutoff_min, rule.cutoff], "lesser_than"
        if isinstance(rule, (RangeRule, RangeSplit)):
            return dash.no_update, [rule.min, rule.max], "range"
    if trigger == 'density-cats-plot':
        # toggle the clicked category in or out of the selection
        clicked_cat = clickdata['points'][0]['x']
        if old_cats is None:
            return [clicked_cat], dash.no_update, dash.no_update
        elif clicked_cat in old_cats:
            return [cat for cat in old_cats if cat != clicked_cat], dash.no_update, dash.no_update
        else:
            old_cats.append(clicked_cat)
            return old_cats, dash.no_update, dash.no_update
    model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
    if trigger == 'density-num-ruletype':
        # pin the unused end of the slider to the column min/max
        if num_ruletype == 'greater_than':
            if old_cutoff[0] == X[col].min():
                return dash.no_update, [old_cutoff[1], X[col].max()], dash.no_update
            return dash.no_update, [old_cutoff[0], X[col].max()], dash.no_update
        if num_ruletype == 'lesser_than':
            if old_cutoff[1] == X[col].max():
                return dash.no_update, [X[col].min(), old_cutoff[0]], dash.no_update
            return dash.no_update, [X[col].min(), old_cutoff[1]], dash.no_update
        return dash.no_update, old_cutoff, dash.no_update
    if trigger == 'density-num-cutoff':
        # keep the fixed end of the slider pinned while the user drags
        if num_ruletype == 'lesser_than' and old_cutoff[0] != X[col].min():
            return dash.no_update, [X[col].min(), old_cutoff[1]], dash.no_update
        if num_ruletype == 'greater_than' and old_cutoff[1] != X[col].max():
            return dash.no_update, [old_cutoff[0], X[col].max()], dash.no_update
        raise PreventUpdate
    if not X.empty:
        # suggest-button fired (or column changed): propose the best split
        if col in self.cats:
            cat, gini, single_cat = model.suggest_split(X, y, col)
            if single_cat:
                return [cat], dash.no_update, dash.no_update
            elif cats_options:
                return [cat_col['value'] for cat_col in cats_options if cat_col['value'] != cat], dash.no_update, dash.no_update
            else:
                # BUGFIX: `return` was missing here
                return [cat_col for cat_col in X[col].unique() if cat_col != cat], dash.no_update, dash.no_update
        elif col in self.non_cats:
            cutoff, gini, lesser_than = model.suggest_split(X, y, col)
            cutoff = self._round_cutoff(cutoff, X[col].min(), X[col].max())
            if lesser_than:
                return dash.no_update, [X[col].min(), cutoff], "lesser_than"
            else:
                return dash.no_update, [cutoff, X[col].max()], "greater_than"
    raise PreventUpdate
@app.callback(
    Output('density-num-suggest-button', 'n_clicks'),
    Output('density-cats-suggest-button', 'n_clicks'),
    Input('density-col', 'value'),
    State('density-num-suggest-button', 'n_clicks'),
    State('density-cats-suggest-button', 'n_clicks'),
)
def trigger_suggest_buttons_on_col(col, num_clicks, cats_clicks):
    """When a new column is selected, programmatically 'click' the matching
    suggest button (numeric or categorical) by bumping its n_clicks."""
    def _bump(clicks):
        # first programmatic click starts at 1
        return clicks + 1 if clicks else 1
    if col in self.cats:
        return dash.no_update, _bump(cats_clicks)
    if col in self.non_cats:
        return _bump(num_clicks), dash.no_update
    raise PreventUpdate
@app.callback(
    Output('density-num-cutoff', 'min'),
    Output('density-num-cutoff', 'max'),
    Output('density-num-cutoff', 'step'),
    Output('density-cats-cats', 'options'),
    Input('density-col', 'value'),
    Input('selected-rule-id', 'value'),
    Input('train-or-val', 'value'),
    Input('append-or-replace', 'value'),
    State('model-store', 'data'),
)
def update_density_widget_bounds(col, rule_id, train_or_val, append_or_replace, model):
    """Update the cutoff slider bounds/step (numerical column) or the
    category options (categorical column) when the selection changes.

    Renamed from ``update_density_plot``: that name was already used by the
    plot-drawing callback above, which this definition silently shadowed.
    The rename is safe because callbacks are only registered via the
    decorator, never called by name.
    """
    if col is not None:
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
        if col in self.cats:
            cats_options = [dict(label=cat, value=cat) for cat in X[col].unique()]
            return dash.no_update, dash.no_update, dash.no_update, cats_options
        elif col in self.non_cats:
            min_val, max_val = X[col].min(), X[col].max()
            return min_val, max_val, self._get_stepsize(min_val, max_val), dash.no_update
    raise PreventUpdate
@app.callback(
    Output('parallel-plot', 'figure'),
    Input('selected-rule-id', 'value'),
    Input('parallel-cols', 'value'),
    Input('append-or-replace', 'value'),
    Input('parallel-sort', 'value'),
    Input('train-or-val', 'value'),
    State('parallel-plot', 'figure'),
    State('model-store', 'data'),
)
def return_parallel_plot(rule_id, cols, append_or_replace, sort, train_or_val, old_fig, model):
    """Redraw the parallel-coordinates plot for the selected rule/columns.

    When an existing MultiRange(Split) rule is selected in 'replace' mode,
    its ranges are applied as constraintranges on the matching dimensions;
    when only the data split changed, the previous dimension state is kept.

    Cleanup: removed an unused `plot_data` local and collapsed the
    isinstance chain.
    """
    model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
    after = self._infer_after(append_or_replace)
    if sort=='dataframe':
        cols = [col for col in self.X.columns if col in cols]
    elif sort == 'alphabet':
        cols = sorted(cols)
    elif sort == 'histogram overlap':
        cols = model._sort_cols_by_histogram_overlap(X, y, cols, reverse=True)
    elif sort == 'gini reduction':
        cols = model._sort_cols_by_gini_reduction(X, y, cols, reverse=True)
    else:
        raise ValueError(f"Wrong sort value: {sort}!")
    fig = plot_parallel_coordinates(model, X, y, rule_id, cols=cols, labels=self.labels, after=after,
                ymin=self.y.min(), ymax=self.y.max())
    fig.update_layout(margin=dict(t=50, b=50, l=50, r=50))
    if fig['data'] and 'dimensions' in fig['data'][0]:
        trigger = self._get_callback_trigger()
        if append_or_replace=='replace' and trigger in ['selected-rule-id', 'append-or-replace']:
            rule = model.get_rule(rule_id)
            if isinstance(rule, (MultiRange, MultiRangeSplit)):
                for col, ranges in rule.range_dict.items():
                    for dimension in fig['data'][0]['dimensions']:
                        if dimension['label'] == col:
                            if isinstance(ranges[0], str) and 'ticktext' in dimension:
                                # categorical ranges: map category names to tick positions
                                dimension['constraintrange'] = self._cats_to_range(ranges, dimension['ticktext'])
                            else:
                                dimension['constraintrange'] = ranges
        if trigger == 'train-or-val' and old_fig['data'] and 'dimensions' in old_fig['data'][0]:
            # keep the user's selection when only the data split changed
            fig['data'][0]['dimensions'] = old_fig['data'][0]['dimensions']
    return fig
@app.callback(
    Output('parallel-prediction', 'value'),
    Output('parallel-pie-all-label', 'children'),
    Output('parallel-pie-all', 'figure'),
    Output('parallel-pie-selection-label', 'children'),
    Output('parallel-pie-selection', 'figure'),
    Output('parallel-pie-non-selection-label', 'children'),
    Output('parallel-pie-non-selection', 'figure'),
    Input('parallel-plot', 'restyleData'),
    Input('selected-rule-id', 'value'),
    Input('append-or-replace', 'value'),
    Input('train-or-val', 'value'),
    Input('parallel-plot', 'figure'),
    State('model-store', 'data'),
)
def update_parallel_prediction(restyle, rule_id, append_or_replace, train_or_val, fig, model):
    """Update the prediction field and the three label pies for the current
    parallel-coordinates selection (the constraintranges drawn on the plot)."""
    if fig is not None and fig['data']:
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id, append_or_replace)
        # build a candidate rule from the ranges currently drawn on the plot
        range_dict = self._get_range_dict_from_parallel_plot(fig)
        rule = MultiRangeSplit(range_dict)
        after = self._infer_after(append_or_replace)
        pie_size = 50
        # "all" pie uses the full dataset, restricted to this rule's input
        pie_all = plot_label_pie(model, self.X, self.y, rule_id=rule_id, after=after, size=pie_size)
        X_rule, y_rule = X[rule.__rule__(X)], y[rule.__rule__(X)]
        pie_selection = plot_label_pie(model, X_rule, y_rule, size=pie_size)
        pie_non_selection = plot_label_pie(model, X[~rule.__rule__(X)], y[~rule.__rule__(X)], size=pie_size)
        # default prediction: majority label of the selection, else of all rows
        if not X_rule.empty:
            prediction = y_rule.value_counts().index[0]
        elif not X.empty:
            prediction = y.value_counts().index[0]
        else:
            prediction = 0
        trigger = self._get_callback_trigger()
        # a freshly selected PredictionRule overrides the derived prediction
        if append_or_replace=='replace' and trigger in ['selected-rule-id','append-or-replace']:
            rule = model.get_rule(rule_id)
            if isinstance(rule, PredictionRule):
                prediction = rule.prediction
        return (str(prediction),
                f"All ({len(X)})", pie_all,
                f"Selected ({len(X_rule)})", pie_selection,
                f"Not selected ({len(X)-len(X_rule)})", pie_non_selection
                )
    raise PreventUpdate
@app.callback(
    Output('model-performance', 'children'),
    Input('update-model-performance', 'data'),
    Input('train-or-val', 'value'),
    State('model-store', 'data'),
)
def update_performance_table(update, train_or_val, model):
    """Render the per-rule performance table (coverage and accuracy
    formatted as percentages).

    Renamed from ``update_performance_metrics``: another callback below
    used the same name, shadowing this one in the enclosing scope. Safe
    because callbacks are only registered via the decorator.
    """
    if update:
        model = self._get_model(model)
        X, y = self._get_X_y(train_or_val)
        return dbc.Table.from_dataframe(
            model.score_rules(X, y)
            .assign(coverage = lambda df:df['coverage'].apply(lambda x: f"{100*x:.2f}%"))
            .assign(accuracy = lambda df:df['accuracy'].apply(lambda x: f"{100*x:.2f}%"))
        )
    raise PreventUpdate
@app.callback(
    Output('model-performance-confmat', 'figure'),
    Input('update-model-performance', 'data'),
    Input('train-or-val', 'value'),
    Input('selected-rule-id', 'value'),
    Input('model-performance-select', 'value'),
    State('model-store', 'data'),
)
def update_performance_confmat(update, train_or_val, rule_id, model_or_rule, model):
    """Render the confusion matrix for the whole model or the selected rule."""
    if not update:
        raise PreventUpdate
    if model_or_rule == 'rule':
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id=rule_id)
        return plot_confusion_matrix(model, X, y, labels=self.labels, rule_id=rule_id, rule_only=True)
    model, X, y = self._get_model_X_y(model, train_or_val)
    return plot_confusion_matrix(model, X, y, labels=self.labels)
@app.callback(
    Output('model-performance-metrics', 'children'),
    Input('update-model-performance', 'data'),
    Input('train-or-val', 'value'),
    Input('selected-rule-id', 'value'),
    Input('model-performance-select', 'value'),
    State('model-store', 'data'),
)
def update_performance_metrics(update, train_or_val, rule_id, model_or_rule, model):
    """Render the metrics table for the whole model or the selected rule."""
    if not update:
        raise PreventUpdate
    if model_or_rule == 'rule':
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id=rule_id)
        metrics_df = get_metrics_df(model, X, y, rule_id=rule_id, rule_only=True)
    else:
        model, X, y = self._get_model_X_y(model, train_or_val)
        metrics_df = get_metrics_df(model, X, y)
    return dbc.Table.from_dataframe(metrics_df)
@app.callback(
    Output('model-performance-coverage', 'children'),
    Input('update-model-performance', 'data'),
    Input('train-or-val', 'value'),
    Input('selected-rule-id', 'value'),
    Input('model-performance-select', 'value'),
    State('model-store', 'data'),
)
def update_performance_coverage(update, train_or_val, rule_id, model_or_rule, model):
    """Render the coverage table for the whole model or the selected rule."""
    if not update:
        raise PreventUpdate
    if model_or_rule == 'rule':
        model, X, y = self._get_model_X_y(model, train_or_val, rule_id=rule_id)
        coverage_df = get_coverage_df(model, X, y, rule_id=rule_id, rule_only=True)
    else:
        model, X, y = self._get_model_X_y(model, train_or_val)
        coverage_df = get_coverage_df(model, X, y)
    return dbc.Table.from_dataframe(coverage_df)
@app.callback(
    Output('model-graph', 'figure'),
    Input('update-model-graph', 'data'),
    Input('train-or-val', 'value'),
    Input('model-graph-color-scale', 'value'),
    Input('selected-rule-id', 'value'),
    Input('model-graph-scatter-text', 'value'),
    State('model-store', 'data'),
)
def update_model_graph(update, train_or_val, color_scale, highlight_id, scatter_text, model):
    """Redraw the model tree graph, optionally labeling each rule node with
    its coverage or accuracy on the selected data split."""
    if update:
        model = self._get_model(model)
        X, y = self._get_X_y(train_or_val)
        if scatter_text=='coverage':
            def format_cov(row):
                return f"coverage={100*row.coverage:.2f}% ({row.n_outputs}/{row.n_inputs})"
            # one label per rule node (scores_df has one row per rule after dedup)
            scatter_text = model.score_rules(X, y).drop_duplicates(subset=['rule_id'])[['coverage', 'n_inputs', 'n_outputs']].apply(
                lambda row: format_cov(row), axis=1).tolist()
        elif scatter_text=='accuracy':
            scatter_text = model.score_rules(X, y).drop_duplicates(subset=['rule_id'])['accuracy'].apply(lambda x: f"accuracy: {100*x:.2f}%").tolist()
        return plot_model_graph(model, X, y, color_scale=color_scale, highlight_id=highlight_id, scatter_text=scatter_text)
    raise PreventUpdate
@app.callback(
    Output('uploaded-model', 'data'),
    Output('upload-div', 'style'),
    Input('upload-model', 'contents'),
    Input('upload-button', 'n_clicks'),
    State('upload-model', 'filename'),
    State('upload-div', 'style'),
)
def update_output(contents, n_clicks, filename, style):
    """Toggle the upload area and parse an uploaded .yaml or .pkl model.

    Fixes: the two bare ``except:`` clauses also swallowed SystemExit and
    KeyboardInterrupt; they now catch ``Exception`` only. Removed an
    unused ``import io``.
    """
    trigger = self._get_callback_trigger()
    if trigger == 'upload-button':
        # toggle visibility of the upload area
        if not style:
            return dash.no_update, dict(display="none")
        return dash.no_update, {}
    if contents is not None:
        import base64
        # dash upload contents are "<content_type>,<base64 payload>"
        content_type, content_string = contents.split(',')
        decoded = base64.b64decode(content_string)
        if filename.endswith(".yaml"):
            try:
                model = RuleClassifier.from_yaml(config=decoded.decode('utf-8'))
                return model.to_json(), dict(display="none")
            except Exception:
                # unparseable yaml: ignore the upload, keep current state
                pass
        elif filename.endswith(".pkl"):
            import pickle
            try:
                # SECURITY: unpickling uploaded data executes arbitrary code;
                # only run this dashboard where uploads are trusted
                model = pickle.loads(decoded)
                if isinstance(model, RuleClassifier):
                    return model.to_json(), dict(display="none")
            except Exception:
                pass
    raise PreventUpdate
@app.callback(
    Output('instructions-modal', 'is_open'),
    Input('instructions-open-button', 'n_clicks'),
    Input('instructions-close-button', 'n_clicks')
)
def toggle_modal(open_clicks, close_clicks):
    """Open or close the instructions modal depending on which button fired."""
    trigger = self._get_callback_trigger()
    if trigger not in ('instructions-open-button', 'instructions-close-button'):
        raise PreventUpdate
    # open button -> True (open), close button -> False (closed)
    return trigger == 'instructions-open-button'
def run(self, debug=False):
self.app.run_server(port=self.port)#, use_reloader=False, debug=debug) | /rule_estimator-0.4.1-py3-none-any.whl/rule_estimator/dashboard.py | 0.891369 | 0.514156 | dashboard.py | pypi |
__all__ = [
'CaseWhen',
'EmptyRule',
'PredictionRule',
'IsInRule',
'GreaterThan',
'GreaterEqualThan',
'LesserThan',
'LesserEqualThan',
'RangeRule',
'MultiRange',
'MultiRangeAny'
]
from typing import Union, List, Dict, Tuple
import numpy as np
import pandas as pd
from igraph import Graph
from .businessrule import BusinessRule, generate_range_mask
class CaseWhen(BusinessRule):
    """Composite rule: applies each rule in ``self.rules`` in order, where
    every rule only sees the rows that all earlier rules left unpredicted
    (their prediction is still NaN). Rows that no rule covers receive
    ``self.default`` when that is not NaN.

    NOTE(review): attributes such as ``self.default``, ``self._rule_id``
    and ``self._stored_params`` are apparently populated by
    ``BusinessRule.__init__`` from the constructor arguments (no explicit
    assignments appear in the subclasses) -- confirm against businessrule.py.
    """
    def __init__(self, rules:List[BusinessRule]=None, default=None):
        super().__init__()
        if rules is None:
            self.rules = []
        # a single BusinessRule may be passed instead of a list
        if not isinstance(self.rules, list):
            self.rules = [self.rules]

    def predict(self, X:pd.DataFrame)->np.ndarray:
        """Return a prediction per row; NaN marks rows no rule fired on
        (filled with ``self.default`` when that is not NaN)."""
        y = np.full(len(X), np.nan)
        for rule in self.rules:
            # each rule only predicts the rows that are still unpredicted
            y[np.isnan(y)] = rule.predict(X[np.isnan(y)])
        if not np.isnan(self.default):
            y = np.where(np.isnan(y), self.default, y)
        return y

    def score_rule(self, X:pd.DataFrame, y:Union[pd.Series, np.ndarray],
                    scores_df:pd.DataFrame=None, is_classifier:bool=False)->pd.DataFrame:
        """Append a score row for this node and (recursively) for each child
        rule, each child being scored only on its leftover rows."""
        # first predict without filling in the default
        if not np.isnan(self.default):
            old_default = self.default
            self.default = np.nan
            y_preds = self.predict(X)
            self.default = old_default
        else:
            y_preds = self.predict(X)
        # mask: rows for which some child rule produced a prediction
        mask = np.invert(np.isnan(y_preds))
        scores_df = self._score_rule(y, y_preds, mask,
                    prediction=np.nan, default=self.default,
                    scores_df=scores_df, is_classifier=is_classifier)
        # score each child on the rows its predecessors left unpredicted
        y_temp = np.full(len(X), np.nan)
        for rule in self.rules:
            scores_df = rule.score_rule(X[np.isnan(y_temp)], y[np.isnan(y_temp)], scores_df, is_classifier)
            y_temp[np.isnan(y_temp)] = rule.predict(X[np.isnan(y_temp)])
        return scores_df

    def set_rule_id(self, rule_id:int=0)->int:
        """Assign consecutive rule ids depth-first; return the next free id."""
        rule_id = super().set_rule_id(rule_id)
        for rule in self.rules:
            rule_id = rule.set_rule_id(rule_id)
        return rule_id

    def get_max_rule_id(self, max_rule_id:int=0)->int:
        """Return the highest rule id found in this subtree."""
        max_rule_id = super().get_max_rule_id(max_rule_id)
        for rule in self.rules:
            max_rule_id = rule.get_max_rule_id(max_rule_id)
        return max_rule_id

    def get_rule(self, rule_id:int)->BusinessRule:
        """Return the (sub)rule with the given id, or None if not found."""
        if self._rule_id is not None and self._rule_id == rule_id:
            return self
        for rule in self.rules:
            return_rule = rule.get_rule(rule_id)
            if return_rule is not None:
                return return_rule

    def remove_rule(self, rule_id:int):
        """Remove and return the rule with the given id from this subtree."""
        if rule_id in [rule._rule_id for rule in self.rules]:
            # direct child: drop it from self.rules and the stored params
            rule = self.get_rule(rule_id)
            self.rules = [rule for rule in self.rules if rule._rule_id is not None and rule._rule_id !=rule_id]
            self._stored_params['rules'] = self.rules
            return rule
        else:
            # NOTE(review): if self.rules is empty, `remove` is never bound and
            # the return raises UnboundLocalError -- presumably callers only
            # reach this with non-empty rules; confirm upstream.
            for rule in self.rules:
                remove = rule.remove_rule(rule_id)
                if remove is not None:
                    break
            return remove

    def get_rule_input(self, rule_id:int, X:pd.DataFrame, y:Union[pd.Series, np.ndarray]=None
                            )->Union[pd.DataFrame, Tuple[pd.DataFrame, Union[pd.Series, np.ndarray]]]:
        """Return the subset of X (and y) that reaches the rule with rule_id,
        i.e. the rows left unpredicted by all rules before it."""
        if y is not None:
            input_X, input_y = super().get_rule_input(rule_id, X, y)
            if input_X is not None:
                return input_X, input_y
        else:
            input_X = super().get_rule_input(rule_id, X)
            if input_X is not None:
                return input_X
        # walk the children, shrinking X to the still-unpredicted rows
        y_temp = np.full(len(X), np.nan)
        for rule in self.rules:
            mask = np.isnan(y_temp)
            if y is not None:
                input_X, input_y = rule.get_rule_input(rule_id, X[mask], y[mask])
                if input_X is not None:
                    return input_X, input_y
            else:
                input_X = rule.get_rule_input(rule_id, X[mask])
                if input_X is not None:
                    return input_X
            y_temp[mask] = rule.predict(X[mask])
        if y is not None:
            return None, None
        else:
            return None

    def get_rule_leftover(self, rule_id:int, X:pd.DataFrame, y:Union[pd.Series, np.ndarray]=None
                            )->Union[pd.DataFrame, Tuple[pd.DataFrame, Union[pd.Series, np.ndarray]]]:
        """Return the subset of X (and y) that the rule with rule_id leaves
        unpredicted (rows it does not fire on)."""
        if self._rule_id is not None and self._rule_id == rule_id:
            # first predict without filling in the default to get the mask
            old_default = self.default
            self.default = np.nan
            y_preds = self.predict(X)
            self.default = old_default
            mask = np.isnan(y_preds)
            if y is not None:
                return X[mask], y[mask]
            else:
                return X[mask]
        # otherwise recurse, shrinking X to the still-unpredicted rows
        y_temp = np.full(len(X), np.nan)
        for rule in self.rules:
            mask = np.isnan(y_temp)
            if y is not None:
                leftover_X, leftover_y = rule.get_rule_leftover(rule_id, X[mask], y[mask])
                if leftover_X is not None:
                    return leftover_X, leftover_y
            else:
                leftover_X = rule.get_rule_leftover(rule_id, X[mask])
                if leftover_X is not None:
                    return leftover_X
            y_temp[mask] = rule.predict(X[mask])
        if y is not None:
            return None, None
        else:
            return None

    def append_rule(self, new_rule:BusinessRule, rule_id:int=None)->None:
        """Append new_rule to self.rules; when rule_id is given, insert it
        right after the direct child with that id."""
        if rule_id is not None:
            rule_ids = [rule._rule_id for rule in self.rules]
            if rule_id in rule_ids:
                self.rules.insert(rule_ids.index(rule_id)+1, new_rule)
            else:
                raise ValueError(f"rule_id {rule_id} can not be found in this CaseWhen!")
        else:
            self.rules.append(new_rule)
        # keep the stored params in sync for serialization
        self._stored_params['rules'] = self.rules

    def replace_rule(self, rule_id:int, new_rule:BusinessRule)->None:
        """Replace the rule with rule_id anywhere in this subtree."""
        replace_rule = super().replace_rule(rule_id, new_rule)
        if hasattr(self, "rules"):
            for rule in self.rules:
                if replace_rule is None:
                    replace_rule = rule.replace_rule(rule_id, new_rule)
        return replace_rule

    def get_rule_params(self, rule_id:int)->dict:
        """Return the stored constructor params of the rule with rule_id."""
        if self._rule_id is not None and self._rule_id == rule_id:
            return self.get_params()
        for rule in self.rules:
            params = rule.get_rule_params(rule_id)
            if params is not None:
                return params

    def set_rule_params(self, rule_id:int, **params)->None:
        """Set constructor params on the rule with rule_id (recursively)."""
        super().set_rule_params(rule_id, **params)
        for rule in self.rules:
            rule.set_rule_params(rule_id, **params)

    def _get_casewhens(self, casewhens:dict=None):
        """Collect a mapping {casewhen rule_id: [child rule_ids]} for the tree."""
        if self._rule_id is not None:
            if casewhens is None:
                casewhens = {self._rule_id:[rule._rule_id for rule in self.rules]}
            else:
                casewhens[self._rule_id] = [rule._rule_id for rule in self.rules]
        for rule in self.rules:
            casewhens = rule._get_casewhens(casewhens)
        return casewhens

    def _get_binarynodes(self, binarynodes:dict=None):
        """Collect binary-node metadata for this node and all children."""
        binarynodes = super()._get_binarynodes(binarynodes)
        for rule in self.rules:
            binarynodes = rule._get_binarynodes(binarynodes)
        return binarynodes

    def add_to_igraph(self, graph:Graph=None)->Graph:
        """Add this node and its children to the igraph Graph, chaining the
        children with casewhen edges."""
        graph = super().add_to_igraph(graph)
        previous_rule_id = self._rule_id
        for rule in self.rules:
            graph = rule.add_to_igraph(graph)
            if self._rule_id is not None and rule._rule_id is not None:
                graph.add_edge(previous_rule_id, rule._rule_id, casewhen=True)
                previous_rule_id = rule._rule_id
        return graph

    def __rulerepr__(self)->str:
        return "CaseWhen"
class EmptyRule(BusinessRule):
    """A rule that never selects any row (and therefore never predicts)."""
    def __init__(self):
        super().__init__()

    def __rule__(self, X):
        # no row is ever selected
        return pd.Series(np.zeros(len(X), dtype=bool))

    def __rulerepr__(self):
        return "EmptyRule"
class PredictionRule(BusinessRule):
    """A rule that selects every row and predicts a fixed value.

    The ``prediction`` argument is stored as an attribute by
    ``BusinessRule.__init__``.
    """
    def __init__(self, prediction=None):
        super().__init__()

    def __rule__(self, X):
        # every row is selected
        return pd.Series(np.ones(len(X), dtype=bool))

    def __rulerepr__(self):
        return f"All remaining predict {self.prediction}"
class IsInRule(BusinessRule):
    """Predict ``prediction`` for rows where ``col`` is one of ``cats``."""
    def __init__(self, col:str, cats:List[str], prediction:Union[float, int], default=None):
        # BusinessRule.__init__ stores the constructor args as attributes
        super().__init__()
        # allow a single category instead of a list
        self.cats = self.cats if isinstance(self.cats, list) else [self.cats]

    def __rule__(self, X:pd.DataFrame):
        return X[self.col].isin(self.cats)

    def __rulerepr__(self):
        return "If {} in {} then predict {}".format(self.col, self.cats, self.prediction)
class GreaterThan(BusinessRule):
    """Predict ``prediction`` for rows where ``col`` is strictly above ``cutoff``."""
    def __init__(self, col:str, cutoff:float, prediction:Union[float, int], default=None):
        # attributes are stored by BusinessRule.__init__
        super().__init__()

    def __rule__(self, X:pd.DataFrame):
        return X[self.col].gt(self.cutoff)

    def __rulerepr__(self):
        return "If {} > {} then predict {}".format(self.col, self.cutoff, self.prediction)
class GreaterEqualThan(BusinessRule):
    """Predict ``prediction`` for rows where ``col`` is at least ``cutoff``."""
    def __init__(self, col:str, cutoff:float, prediction:Union[float, int], default=None):
        # attributes are stored by BusinessRule.__init__
        super().__init__()

    def __rule__(self, X:pd.DataFrame):
        return X[self.col].ge(self.cutoff)

    def __rulerepr__(self):
        return "If {} >= {} then predict {}".format(self.col, self.cutoff, self.prediction)
class LesserThan(BusinessRule):
    """Predict ``prediction`` for rows where ``col`` is strictly below ``cutoff``."""
    def __init__(self, col:str, cutoff:float, prediction:Union[float, int], default=None):
        # attributes are stored by BusinessRule.__init__
        super().__init__()

    def __rule__(self, X:pd.DataFrame):
        return X[self.col].lt(self.cutoff)

    def __rulerepr__(self):
        return "If {} < {} then predict {}".format(self.col, self.cutoff, self.prediction)
class LesserEqualThan(BusinessRule):
    """Predict ``prediction`` for rows where ``col`` is at most ``cutoff``."""
    def __init__(self, col:str, cutoff:float, prediction:Union[float, int], default=None):
        # attributes are stored by BusinessRule.__init__
        super().__init__()

    def __rule__(self, X:pd.DataFrame):
        return X[self.col].le(self.cutoff)

    def __rulerepr__(self):
        return "If {} <= {} then predict {}".format(self.col, self.cutoff, self.prediction)
class RangeRule(BusinessRule):
    """Predict ``prediction`` for rows where ``min`` <= ``col`` <= ``max``."""
    def __init__(self, col:str, min:float, max:float, prediction:Union[float, int], default=None):
        # attributes are stored by BusinessRule.__init__
        super().__init__()

    def __rule__(self, X:pd.DataFrame):
        # inclusive on both ends, like the original (min <= x <= max)
        return X[self.col].between(self.min, self.max)

    def __rulerepr__(self):
        return "If {} <= {} <= {} then predict {}".format(self.min, self.col, self.max, self.prediction)
class MultiRange(BusinessRule):
    """Predict ``prediction`` when *all* range conditions in ``range_dict``
    hold. ``range_dict`` may contain one or multiple ranges per column::

        range_dict = {
            'petal length (cm)': [[4.1, 4.7], [5.2, 7.5]],
            'petal width (cm)': [1.6, 2.6]
        }
    """
    def __init__(self, range_dict, prediction, default=None):
        super().__init__()

    def __rule__(self, X):
        return generate_range_mask(self.range_dict, X, kind='all')

    def __rulerepr__(self):
        conditions = " AND ".join(f"{col} in {ranges}" for col, ranges in self.range_dict.items())
        return "If " + conditions + f" then predict {self.prediction}"
class MultiRangeAny(BusinessRule):
    """Predict ``prediction`` when *any* range condition in ``range_dict``
    holds. ``range_dict`` may contain one or multiple ranges per column::

        range_dict = {
            'petal length (cm)': [[4.1, 4.7], [5.2, 7.5]],
            'petal width (cm)': [1.6, 2.6]
        }
    """
    def __init__(self, range_dict, prediction, default=None):
        super().__init__()

    def __rule__(self, X):
        return generate_range_mask(self.range_dict, X, kind='any')

    def __rulerepr__(self):
        conditions = " OR ".join(f"{col} in {ranges}" for col, ranges in self.range_dict.items())
        return "If " + conditions + f" then predict {self.prediction}"
__all__ = [
'plot_model_graph',
'plot_label_pie',
'plot_parallel_coordinates',
'plot_density',
'plot_cats_density',
'plot_confusion_matrix',
'get_metrics_df',
'get_coverage_df',
]
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix
# Placeholder figure returned by plotting helpers when there is no data to
# show: hides both axes and displays a hint to the user instead of a chart.
empty_fig = go.Figure()
empty_fig.update_layout(
    xaxis = { "visible": False },
    yaxis = { "visible": False },
    annotations = [{
        "text": "No data!<br>Try setting after=False or selecting 'replace' instead of 'append'",
        "xref": "paper",
        "yref": "paper",
        "showarrow": False,
        "font": {"size": 14}
    }])
def plot_model_graph(model, X:pd.DataFrame=None, y:pd.Series=None,
                    color_scale:str='absolute', highlight_id:int=None, scatter_text='name'):
    """
    Returns a plotly Figure of the rules. Uses the Reingold-Tilford algorithm
    to generate the tree layout.

    Args:
        model: fitted rule model exposing get_igraph() and score_rules()
        X (pd.DataFrame, optional): input dataframe. If you pass both X and y
            then plot scatter markers will be colored by accuracy.
        y (pd.Series, optional): input labels. If you pass both X and y
            then plot scatter markers will be colored by accuracy.
        color_scale (str, {'absolute', 'relative'}): If 'absolute' scale
            the marker color from 0 to 1, which means that if all accuracies
            are relatively high, all the markers will look green. If 'relative'
            marker colors are scaled from lowest to highest.
        highlight_id (int, optional): node to highlight in the graph
        scatter_text (str, list, {'name', 'description'}): display the name
            (e.g. 'PredictionRule') or the description (e.g. 'Always predict 1')
            next to the scatter markers. If you provide a list of str, with
            the same length as the nodes, these will be displayed instead.

    Returns:
        plotly.graph_objs.Figure
    """
    graph = model.get_igraph()
    layout = graph.layout_reingold_tilford(mode="in", root=0)
    # stretch the x positions so sibling markers do not overlap
    nodes_x = [6*pos[0] for pos in layout]
    nodes_y = [pos[1] for pos in layout]
    # mirror the y coordinates so the root ends up at the top of the figure
    nodes_y = [2*max(nodes_y)-y_pos for y_pos in nodes_y]
    # each edge contributes an (x_start, x_end, None) triple; the None breaks
    # the plotly line between consecutive edges
    casewhen_x, casewhen_y = [], []
    for edge in graph.es.select(casewhen=True):
        casewhen_x += [nodes_x[edge.tuple[0]], nodes_x[edge.tuple[1]], None]
        casewhen_y += [nodes_y[edge.tuple[0]], nodes_y[edge.tuple[1]], None]
    node_true_x, node_true_y = [], []
    for edge in graph.es.select(binary_node="if_true"):
        node_true_x += [nodes_x[edge.tuple[0]], nodes_x[edge.tuple[1]], None]
        node_true_y += [nodes_y[edge.tuple[0]], nodes_y[edge.tuple[1]], None]
    node_false_x, node_false_y = [], []
    for edge in graph.es.select(binary_node="if_false"):
        node_false_x += [nodes_x[edge.tuple[0]], nodes_x[edge.tuple[1]], None]
        node_false_y += [nodes_y[edge.tuple[0]], nodes_y[edge.tuple[1]], None]
    if X is not None and y is not None:
        # NOTE(review): assumes score_rules() rows align one-to-one with the
        # igraph vertex order after dropping duplicate rule_ids — confirm
        rule_scores_df = model.score_rules(X, y).drop_duplicates(subset=['rule_id'], keep='first')
        rule_accuracy = rule_scores_df['accuracy'].values
        cmin = 0 if color_scale == 'absolute' else rule_scores_df['accuracy'].dropna().min()
        cmax = 1 if color_scale == 'absolute' else rule_scores_df['accuracy'].dropna().max()
        if cmin == cmax:
            cmin, cmax = 0, 1
        hovertext = [f"rule:{rule_id}<br>"
                     f"{descr}<br>"
                     f"Prediction:{pred}<br>"
                     f"Accuracy:{acc:.2f}<br>"
                     f"Coverage:{cov:.2f}<br>"
                     f"n_inputs:{n_in}<br>"
                     f"n_outputs:{n_out}<br>"
                     # n_in/n_out instead of input/output: don't shadow builtins
                     for rule_id, descr, pred, acc, cov, n_in, n_out in zip(
                         graph.vs['rule_id'],
                         graph.vs['description'],
                         rule_scores_df['prediction'].values,
                         rule_scores_df['accuracy'].values,
                         rule_scores_df['coverage'].values,
                         rule_scores_df['n_inputs'].values,
                         rule_scores_df['n_outputs'].values)]
    else:
        # no data passed: color markers neutrally (all-NaN accuracy)
        rule_accuracy = np.full(len(layout), np.nan)
        cmin, cmax = 0, 1
        hovertext = [f"rule:{rule_id}<br>"
                     f"{descr}<br>"
                     for rule_id, descr in zip(
                         graph.vs['rule_id'],
                         graph.vs['description'])]
    fig = go.Figure()
    if highlight_id is not None:
        # draw the highlight halo first so the node marker sits on top of it
        fig.add_trace(go.Scatter(
            x=[nodes_x[highlight_id]],
            y=[nodes_y[highlight_id]],
            mode='markers',
            name='highlight',
            hoverinfo='none',
            marker=dict(
                symbol='circle',
                size=40,
                color='purple',
                opacity=0.5,
                line=dict(width=3, color='violet'),
            ),
        ))
    fig.add_trace(go.Scatter(
        x=casewhen_x,
        y=casewhen_y,
        mode='lines',
        name='connections',
        line=dict(color='rgb(210,210,210)', width=2, dash='dot'),
        hoverinfo='none'
    ))
    fig.add_trace(go.Scatter(
        x=node_true_x,
        y=node_true_y,
        mode='lines',
        name='connections',
        text=[None, "if true", None] * int(len(node_true_x)/3),
        line=dict(color='rgb(210,210,210)', width=2, dash='dash'),
        hoverinfo='none'
    ))
    # annotate the midpoint of every "if true" edge (edges come in x,x,None triples)
    for (start_x, end_x, _), (start_y, end_y, _) in zip(zip(*[iter(node_true_x)]*3), zip(*[iter(node_true_y)]*3)):
        fig.add_annotation(x=(end_x+start_x)/2, y=(end_y+start_y)/2, text="true", showarrow=False)
    fig.add_trace(go.Scatter(
        x=node_false_x,
        y=node_false_y,
        mode='lines',
        name='connections',
        # BUG FIX: text length was derived from node_true_x instead of node_false_x
        text=[None, "if false", None] * int(len(node_false_x)/3),
        line=dict(color='rgb(210,210,210)', width=2, dash='dash'),
        hoverinfo='none'
    ))
    for (start_x, end_x, _), (start_y, end_y, _) in zip(zip(*[iter(node_false_x)]*3), zip(*[iter(node_false_y)]*3)):
        fig.add_annotation(x=(end_x+start_x)/2, y=(end_y+start_y)/2, text="false", showarrow=False)
    if isinstance(scatter_text, str) and scatter_text == 'name':
        scatter_text = graph.vs['name']
    elif isinstance(scatter_text, str) and scatter_text == 'description':
        scatter_text = graph.vs['description']
    elif isinstance(scatter_text, list) and len(scatter_text) == len(nodes_x):
        pass
    else:
        raise ValueError(f"ERROR: scatter_text should either be 'name', 'description' "
                         f"or a list of str of the right length, but you passed {scatter_text}!")
    fig.add_trace(go.Scatter(
        x=nodes_x,
        y=nodes_y,
        mode='markers+text',
        name='nodes',
        marker=dict(symbol='circle',
                    size=18,
                    color=rule_accuracy,
                    colorscale="temps",
                    reversescale=True,
                    cmin=cmin, cmax=cmax,
                    line=dict(color='rgb(50,50,50)', width=1),
                    ),
        text=[f"{rule_id}: {desc}" for rule_id, desc in zip(graph.vs['rule_id'], scatter_text)],
        textposition="top right",
        hovertemplate="<b>%{hovertext}</b>",
        hovertext=hovertext,
        opacity=0.8
    ))
    fig.update_layout(showlegend=False, dragmode='pan', margin=dict(b=0, t=0, l=0, r=0))
    fig.update_xaxes(visible=False, range=(min(nodes_x)-4, max(nodes_x)+4))
    fig.update_yaxes(visible=False)
    return fig
def plot_label_pie(model, X:pd.DataFrame, y:np.ndarray, rule_id:int=None, after=False,
                    size=120, margin=0, showlegend=False):
    """Small pie chart of the label distribution of the data reaching ``rule_id``.

    Only the majority slice carries a percentage label; the other slices get a
    blank label. When no data reaches the rule, an all-grey pie is returned.
    """
    if rule_id is not None:
        X, y = model.get_rule_input(rule_id, X, y, after)
    if X.empty:
        # nothing reaches this rule: single grey placeholder slice
        pie = go.Pie(values=[1.0], showlegend=False, marker=dict(colors=['grey']))
    else:
        # NOTE(review): value_counts() implies y is a pd.Series here despite the
        # np.ndarray annotation — confirm against callers
        counts = y.value_counts().sort_index()
        majority = counts.max()
        slice_labels = []
        for count in counts:
            if count == majority:
                slice_labels.append(str(round(100*count/len(y), 1))+'%')
            else:
                slice_labels.append(" ")
        pie = go.Pie(
            labels=slice_labels,
            values=counts.values,
            marker=dict(colors=[px.colors.qualitative.Plotly[i] for i in counts.index]),
            sort=False,
            insidetextorientation='horizontal',
        )
    fig = go.Figure(pie)
    fig.update_layout(showlegend=showlegend, width=size, height=size,
                      margin=dict(t=margin, b=margin, l=margin, r=margin),
                      uniformtext_minsize=6, uniformtext_mode='hide')
    fig.update_traces(textinfo='none', hoverinfo='percent', textposition='inside')
    return fig
def plot_parallel_coordinates(model, X:pd.DataFrame, y:np.ndarray, rule_id:int=None,
                            cols:List[str]=None, labels=None, after=False,
                            ymin=None, ymax=None):
    """Generate a parallel coordinates plot for data X, y. If rule_id is
    specified then only the data that reaches the rule with that rule_id is
    used. A sublist of columns can be selected by passing cols.

    Args:
        model: fitted rule model exposing get_rule_input()
        X (pd.DataFrame): input
        y (np.ndarray): labels. NOTE(review): .nunique()/.groupby() usage below
            implies a pd.Series is actually expected — confirm against callers.
        rule_id (int, optional): find the rule_id's with estimator.describe().
            Defaults to None (use all data).
        cols (List[str], optional): List of columns to display. Defaults to
            None (all columns of X).
        labels (List[str], optional): display names for the label values.
            Defaults to the stringified label integers.
        after (bool, optional): use the data remaining *after* the rule fired.
        ymin, ymax (optional): override the color range for the label axis.
    Returns:
        plotly.graph_objs.Figure
    """
    # Maps one column to a Parcoords dimension dict; categorical columns are
    # encoded as integer codes ordered (reversed) by the mean label per category.
    def encode_col(X, col):
        if is_numeric_dtype(X[col]):
            return dict(label=col, values=X[col])
        else:
            col_df = pd.DataFrame({col:reversed(y.groupby(X[col]).mean().index)})
            index_range = [0, len(col_df)-1] if len(col_df) > 1 else [0,1]
            return dict(range=index_range,
                        tickvals = col_df.index.tolist(), ticktext = col_df[col].tolist(),
                        label=col, values=X[col].replace(dict(zip(col_df[col], col_df.index))).values)
    if labels is None:
        labels = [str(i) for i in range(y.nunique())]
    if rule_id is not None:
        X, y = model.get_rule_input(rule_id, X, y, after)
    if cols is None:
        cols = X.columns.tolist()
    if X.empty:
        return empty_fig
    ymax = ymax if ymax is not None else y.max()
    ymin = ymin if ymin is not None else y.min()
    colors = px.colors.qualitative.Plotly
    # Build a stepwise (discrete) colorscale: each label value gets a solid
    # color band by inserting each boundary twice (end of one color, start of
    # the next).
    colorscale = []
    for a, b in enumerate(np.linspace(0.0, 1.0, int(ymax)+2, endpoint=True)):
        if b<0.01:
            colorscale.append((b, colors[a]))
        elif b > 0.01 and b < 0.99:
            colorscale.append((b, colors[a-1]))
            colorscale.append((b, colors[a]))
        else:
            colorscale.append((b, colors[a-1]))
    dimensions = [encode_col(X, col) for col in cols]
    # append the label itself as the right-most axis
    dimensions.append(dict(range=[0, len(labels)-1],
                        tickvals = list(range(len(labels))), ticktext = labels,
                        label="y", values=y))
    fig = go.Figure(data=
        go.Parcoords(
            line = dict(color=y,
                        cmin=ymin,
                        cmax=ymax,
                        colorscale=colorscale,
                        colorbar=dict(tickvals = list(range(len(labels))), ticktext = labels),
                        showscale=True),
            dimensions = dimensions
        )
    )
    return fig
def plot_density(model, X, y, col, rule_id=0, after=False, labels=None, cutoff=None):
    """Distribution plot (histogram plus optional KDE curve) of column ``col``,
    one distribution per label, for the data that reaches rule ``rule_id``.

    Args:
        model: fitted rule model exposing get_rule_input()
        X (pd.DataFrame): input data
        y (pd.Series): labels
        col (str): numeric column of X to plot
        rule_id (int, optional): restrict data to what reaches this rule;
            pass None to use all data. Defaults to 0.
        after (bool, optional): use the data remaining after the rule fired.
        labels (List[str], optional): display names per label value.
        cutoff (float or [float, float], optional): draw a vertical line at
            cutoff, or shade the region between the two cutoff values.

    Returns:
        plotly.graph_objs.Figure
    """
    if labels is None:
        labels = [str(i) for i in range(y.nunique())]
    if rule_id is not None:
        X, y = model.get_rule_input(rule_id, X, y, after)
    if X.empty:
        return empty_fig
    hist_data = [X[y==label][col] for label in y.unique()]
    labels = [labels[label] for label in y.unique()]
    colors = [px.colors.qualitative.Plotly[label] for label in y.unique()]
    # KDE needs a minimum number of points to be meaningful
    show_curve = len(X) > 10
    try:
        fig = ff.create_distplot(hist_data, labels, show_rug=False, colors=colors, show_curve=show_curve)
    except Exception:
        # KDE estimation can fail on degenerate data (e.g. zero variance):
        # retry without the curve. Was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        fig = ff.create_distplot(hist_data, labels, show_rug=False, colors=colors, show_curve=False)
    fig.update_layout(title_text=col, legend=dict(orientation="h"))
    if isinstance(cutoff, list):
        fig.add_vrect(
            x0=cutoff[0], x1=cutoff[1],
            fillcolor="LightSkyBlue", opacity=0.8,
            layer="below", line_width=0,
        )
    elif cutoff is not None:
        fig.add_vline(cutoff)
    return fig
def plot_cats_density(model, X:pd.DataFrame, y:pd.Series, col:str,
                        rule_id:int=0, after:bool=False, labels:List[str]=None,
                        percentage:bool=False, highlights:List=None)->go.Figure:
    """Stacked bar chart of the label distribution per category of column
    ``col`` for the data reaching rule ``rule_id``.

    Args:
        model: fitted rule model exposing get_rule_input()
        X (pd.DataFrame): input data
        y (pd.Series): labels
        col (str): categorical column of X to plot
        rule_id (int, optional): restrict data to what reaches this rule;
            pass None to use all data. Defaults to 0.
        after (bool, optional): use the data remaining after the rule fired.
        labels (List[str], optional): display names per label value.
        percentage (bool, optional): show per-category label fractions instead
            of raw counts. Defaults to False.
        highlights (List, optional): categories to outline with a thick border.

    Returns:
        plotly.graph_objs.Figure
    """
    if labels is None:
        labels = [str(i) for i in range(y.nunique())]
    if rule_id is not None:
        X, y = model.get_rule_input(rule_id, X, y, after)
    if X.empty:
        return empty_fig
    # explicit validation instead of `assert` (asserts are stripped under -O);
    # NOTE: this changes the raised type from AssertionError to ValueError
    if is_numeric_dtype(X[col]):
        raise ValueError(f"plot_cats_density expects a categorical column, but '{col}' is numeric!")
    fig = go.Figure()
    # categories ordered by mean label, matching encode_col in
    # plot_parallel_coordinates
    cats = y.groupby(X[col]).mean().index.tolist()
    if highlights is None:
        highlights = []
    line_widths = [4 if cat in highlights else 0 for cat in cats]
    for label in y.unique():
        if percentage:
            y_vals = [len(y[(X[col]==cat) & (y==label)])/len(y[(X[col]==cat)]) for cat in cats]
        else:
            y_vals = [len(y[(X[col]==cat) & (y==label)]) for cat in cats]
        fig.add_trace(go.Bar(
            x=cats,
            y=y_vals,
            name=labels[label],
            marker_color=px.colors.qualitative.Plotly[label]),
        )
    fig.update_layout(title=col, barmode='stack', legend=dict(orientation="h"))
    for bar in fig.data:
        bar.marker.line.color = 'darkmagenta'
        bar.marker.line.width = line_widths
    return fig
def plot_confusion_matrix(model, X:pd.DataFrame, y:pd.Series, rule_id:int=0, after:bool=False,
                            rule_only=False, labels=None, percentage=True, normalize='all'):
    """Annotated confusion-matrix heatmap for the predictions on the data that
    reaches rule ``rule_id``. NaN predictions (inputs no rule fired on) are
    excluded from the matrix.

    Args:
        model: fitted rule model exposing get_rule_input()/get_rule()/predict()
        X (pd.DataFrame): input data
        y (pd.Series): labels
        rule_id (int, optional): restrict data to what reaches this rule;
            pass None to use all data. Defaults to 0.
        after (bool, optional): use the data remaining after the rule fired.
        rule_only (bool, optional): score only the single rule rather than the
            whole model. Defaults to False.
        labels (List[str], optional): display names for the classes.
        percentage (bool, optional): show normalized percentages on top and raw
            counts in parentheses (True) or the other way around (False).
        normalize ({'observed', 'pred', 'all'}): normalization axis for the
            percentages.

    Returns:
        plotly.graph_objs.Figure
    """
    # validate up front instead of after running the (potentially costly) predict
    if normalize not in ['observed', 'pred', 'all']:
        raise ValueError("Error! parameters normalize must be one of {'observed', 'pred', 'all'} !")
    if rule_id is not None:
        X, y = model.get_rule_input(rule_id, X, y, after)
    if rule_only:
        rule = model.get_rule(rule_id)
        y_pred = rule.predict(X)
    else:
        y_pred = model.predict(X)
    if (~np.isnan(y_pred)).sum() == 0:
        # no prediction was made for any input: show an all-zero 2x2 matrix
        cm = np.array([[0, 0], [0, 0]])
    else:
        cm = confusion_matrix(y[~np.isnan(y_pred)], y_pred[~np.isnan(y_pred)])
    with np.errstate(all='ignore'):
        if normalize == 'all':
            cm_normalized = np.round(100*cm / cm.sum(), 1)
        elif normalize == 'observed':
            cm_normalized = np.round(100*cm / cm.sum(axis=1, keepdims=True), 1)
        elif normalize == 'pred':
            cm_normalized = np.round(100*cm / cm.sum(axis=0, keepdims=True), 1)
        # all-zero rows/columns divide to NaN: display them as 0
        cm_normalized = np.nan_to_num(cm_normalized)
    if labels is None:
        labels = [str(i) for i in range(cm.shape[0])]
    # leading space keeps plotly from interpreting numeric labels as values;
    # computed once instead of five times
    tick_labels = [f" {lab}" for lab in labels]
    zmax = 130  # keep the annotation text readable at 100% accuracy
    data = [go.Heatmap(
        z=cm_normalized,
        x=tick_labels,
        y=tick_labels,
        hoverinfo="skip",
        zmin=0, zmax=zmax, colorscale='Blues',
        showscale=False,
    )]
    layout = go.Layout(
        title="Confusion Matrix",
        xaxis=dict(title='predicted',
                   constrain="domain",
                   tickmode='array',
                   showgrid=False,
                   tickvals=tick_labels,
                   ticktext=tick_labels),
        yaxis=dict(title=dict(text='observed', standoff=20),
                   autorange="reversed",
                   side='left',
                   scaleanchor='x',
                   scaleratio=1,
                   showgrid=False,
                   tickmode='array',
                   tickvals=tick_labels,
                   ticktext=tick_labels),
        plot_bgcolor='#fff',
    )
    fig = go.Figure(data, layout)
    annotations = []
    # loop variables renamed from x/y: `y` used to shadow the y parameter
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            top_text = f"{cm_normalized[i, j]}%" if percentage else f"{cm[i, j]}"
            bottom_text = f"{cm_normalized[i, j]}%" if not percentage else f"{cm[i, j]}"
            annotations.extend([
                go.layout.Annotation(
                    x=fig.data[0].x[j],
                    y=fig.data[0].y[i],
                    text=top_text,
                    showarrow=False,
                    font=dict(size=20)
                ),
                go.layout.Annotation(
                    x=fig.data[0].x[j],
                    y=fig.data[0].y[i],
                    text=f" <br> <br> <br>({bottom_text})",
                    showarrow=False,
                    font=dict(size=12)
                )]
            )
    longest_label = max(len(label) for label in labels)
    fig.update_layout(annotations=annotations)
    # widen the left margin so long observed-axis labels are not clipped
    fig.update_layout(margin=dict(t=40, b=40, l=longest_label*7, r=40))
    return fig
def get_coverage_df(model, X:pd.DataFrame, y:pd.Series, rule_id:int=0, after:bool=False,
                    rule_only=False, labels=None, percentage=True, normalize='all'):
    """Return a two-column DataFrame ('coverage', 'count') summarizing how many
    of the inputs reaching rule ``rule_id`` actually received a (non-NaN)
    prediction.

    Args:
        model: fitted rule model exposing get_rule_input()/get_rule()/predict()
        X (pd.DataFrame): input data
        y (pd.Series): labels
        rule_id (int, optional): restrict data to what reaches this rule;
            pass None to use all data. Defaults to 0.
        after (bool, optional): use the data remaining after the rule fired.
        rule_only (bool, optional): only consider the single rule's predictions.
        labels, percentage, normalize: unused; kept for signature symmetry with
            get_metrics_df/plot_confusion_matrix.

    Returns:
        pd.DataFrame with rows n_input, predicted, predicted_nan and coverage.
    """
    if rule_id is not None:
        X, y = model.get_rule_input(rule_id, X, y, after)
    if rule_only:
        rule = model.get_rule(rule_id)
        y_pred = rule.predict(X)
    else:
        y_pred = model.predict(X)
    # rules that did not fire produce NaN predictions
    y_pred = y_pred[~np.isnan(y_pred)]
    n_input = len(X)
    coverage_dict = dict(
        n_input=n_input,
        predicted=len(y_pred),
        predicted_nan=n_input - len(y_pred),
        # guard: an empty selection used to raise ZeroDivisionError
        coverage=round(len(y_pred)/n_input, 3) if n_input > 0 else np.nan,
    )
    coverage_df = (pd.DataFrame(coverage_dict, index=["count"])
                   .T.rename_axis(index="coverage").reset_index())
    return coverage_df
def get_metrics_df(model, X:pd.DataFrame, y:pd.Series, rule_id:int=0, after:bool=False,
                    rule_only=False, labels=None, percentage=True, normalize='all'):
    """Return a DataFrame ('metric', 'score') with accuracy, precision, recall
    and f1 for the predictions on the data that reaches rule ``rule_id``.
    NaN predictions are excluded; all scores are rounded to 3 decimals.

    NOTE(review): precision/recall/f1 use sklearn's default binary averaging,
    so this only works for binary labels — confirm against callers.

    Args:
        model: fitted rule model exposing get_rule_input()/get_rule()/predict()
        X (pd.DataFrame): input data
        y (pd.Series): labels
        rule_id (int, optional): restrict data to what reaches this rule;
            pass None to use all data. Defaults to 0.
        after (bool, optional): use the data remaining after the rule fired.
        rule_only (bool, optional): only score the single rule's predictions.
        labels, percentage, normalize: unused; kept for signature symmetry with
            plot_confusion_matrix.

    Returns:
        pd.DataFrame with one row per metric; all-NaN when nothing was predicted.
    """
    if rule_id is not None:
        X, y = model.get_rule_input(rule_id, X, y, after)
    if rule_only:
        rule = model.get_rule(rule_id)
        y_pred = rule.predict(X)
    else:
        y_pred = model.predict(X)
    # rules that did not fire produce NaN predictions: evaluate only the rest
    predicted_mask = ~np.isnan(y_pred)
    y_true, y_pred = y[predicted_mask], y_pred[predicted_mask]
    if len(y_true) > 0:
        metrics_dict = {
            'accuracy': accuracy_score(y_true, y_pred),
            # zero_division=0 on all three (previously only precision had it):
            # same 0.0 result for degenerate predictions, without the sklearn
            # UndefinedMetricWarning
            'precision': precision_score(y_true, y_pred, zero_division=0),
            'recall': recall_score(y_true, y_pred, zero_division=0),
            'f1': f1_score(y_true, y_pred, zero_division=0),
        }
    else:
        metrics_dict = dict(accuracy=np.nan, precision=np.nan, recall=np.nan, f1=np.nan)
    metrics_df = (pd.DataFrame(metrics_dict, index=["score"])
                  .T.rename_axis(index="metric").reset_index()
                  .round(3))
    return metrics_df
__all__ = ['BusinessRule']
from typing import Union, List, Dict, Tuple
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score, mean_squared_error
from igraph import Graph
from .storable import Storable
def generate_range_mask(range_dict:dict, X:pd.DataFrame, kind:str='all')->pd.Series:
    """Generates a boolean mask for X based on a range_dict dictionary.

    range_dict should be of the format
    ```
    range_dict = {
        'petal length (cm)': [[4.1, 4.7], [5.2, 7.5]],
        'petal width (cm)': [1.6, 2.6]
    }
    ```
    Numeric ranges are *exclusive* intervals (lower < value < upper); a list of
    ranges matches when any sub-range matches. Can also be categorical (a list
    of allowed values):
    ```
    range_dict = {
        'gender': ['male']
    }
    ```

    Args:
        range_dict (dict): dictionary describing which ranges for which columns
            should be evaluated. An empty dict yields an all-False mask.
        X (pd.DataFrame): input dataframe
        kind (str, optional): Only return True if range conditions hold for all
            cols ('all') or if range conditions hold for any column ('any').
            Defaults to 'all'.

    Raises:
        ValueError: for any kind other than 'all' or 'any'.

    Returns:
        pd.Series: boolean mask
    """
    def get_mask(X, col, col_range):
        # str first element marks a categorical value list, else an
        # exclusive numeric interval
        if isinstance(col_range[0], str):
            return X[col].isin(col_range)
        else:
            return (X[col] > col_range[0]) & (X[col] < col_range[1])

    def AND_masks(masks):
        for i, mask in enumerate(masks):
            combined_mask = mask if i == 0 else combined_mask & mask  # noqa: F82
        return combined_mask

    def OR_masks(masks):
        for i, mask in enumerate(masks):
            combined_mask = mask if i == 0 else combined_mask | mask  # noqa: F82
        return combined_mask

    def column_mask(X, col, col_range):
        # renamed: the inner helper used to shadow generate_range_mask itself
        if isinstance(col_range[0], list):
            # list of ranges: a row matches if any sub-range matches
            return OR_masks([get_mask(X, col, cr) for cr in col_range])
        return get_mask(X, col, col_range)

    if not range_dict:
        # no conditions at all: select nothing
        return pd.Series(np.full(len(X), False), index=X.index)
    if kind == 'all':
        return AND_masks([column_mask(X, col, col_range) for col, col_range in range_dict.items()])
    elif kind == 'any':
        return OR_masks([column_mask(X, col, col_range) for col, col_range in range_dict.items()])
    else:
        raise ValueError("ValueError! Only kind='all' and kind='any' are supported!")
class BusinessRule(BaseEstimator, Storable):
    """Base class for a single business rule.

    Subclasses implement ``__rule__(X)`` (returning a boolean mask over X) and
    ``__rulerepr__()`` (a human readable description); this base class then
    provides prediction, scoring, rule-tree traversal helpers, igraph export
    and yaml (de)serialization.
    """

    def __init__(self, prediction=None, default=None):
        self._store_child_params(level=2)
        # a subclass __init__ may already have set these attributes
        if not hasattr(self, "prediction"):
            self.prediction = prediction
        if not hasattr(self, "default"):
            self.default = default
        # np.nan encodes "no prediction made"
        if self.prediction is None:
            self.prediction = np.nan
        if self.default is None:
            self.default = np.nan
        self._rule_id = None

    def fit(self, X:pd.DataFrame, y:Union[pd.Series, np.ndarray]=None):
        """No-op: plain business rules are not fitted on data."""
        pass

    def predict(self, X:pd.DataFrame)->np.ndarray:
        """Predict ``self.prediction`` where the rule fires, else ``self.default``."""
        assert hasattr(self, "__rule__"), "You need to implement the __rule__ method first!"
        return np.where(self.__rule__(X), self.prediction, self.default)

    def _score_rule(self, y, y_preds, mask, prediction, default,
                    scores_df=None, is_classifier=False)->pd.DataFrame:
        """Append one score row for the rule (and one for its default, if any)
        to scores_df and return the result."""
        if scores_df is None:
            scores_df = pd.DataFrame(columns=[
                'rule_id', 'name','description', 'prediction',
                'n_inputs', 'n_outputs','coverage',
                'accuracy' if is_classifier else 'rmse'
            ])
        score_dict = dict(
            rule_id=self._rule_id, name=self.__class__.__name__,
            description=self.__rulerepr__(),
            prediction = prediction,
            n_inputs=len(y), n_outputs=mask.sum(),
            coverage = mask.mean() if len(mask)>0 else np.nan,
        )
        if is_classifier:
            if len(y[mask]) > 0:
                score_dict['accuracy'] = accuracy_score(y[mask], y_preds[mask])
            else:
                score_dict['accuracy'] = np.nan
        else:
            if len(y[mask]) > 0:
                score_dict['rmse'] = mean_squared_error(y[mask], y_preds[mask], squared=False)
            else:
                score_dict['rmse'] = np.nan
        # DataFrame.append was removed in pandas 2.0: use pd.concat instead
        scores_df = pd.concat(
            [scores_df, pd.DataFrame([score_dict])], ignore_index=True)
        # NOTE(review): assumes default is numeric (np.isnan raises on str)
        if not np.isnan(default):
            default_score_dict = dict(
                rule_id=self._rule_id,
                name=chr(int("21B3", 16)),  # "↳": marks the default branch row
                description=f"default: predict {self.default}",
                prediction=default,
                n_inputs=len(y), n_outputs=np.invert(mask).sum(),
                coverage = np.invert(mask).mean() if len(mask)>0 else np.nan,
            )
            if is_classifier:
                if np.invert(mask).sum() > 0:
                    default_score_dict['accuracy'] = accuracy_score(
                        y[~mask], np.full(np.invert(mask).sum(), default))
                else:
                    default_score_dict['accuracy'] = np.nan
            else:
                if np.invert(mask).sum() > 0:
                    default_score_dict['rmse'] = mean_squared_error(
                        y[~mask], np.full(np.invert(mask).sum(), default), squared=False)
                else:
                    default_score_dict['rmse'] = np.nan
            scores_df = pd.concat(
                [scores_df, pd.DataFrame([default_score_dict])], ignore_index=True)
        return scores_df

    def score_rule(self, X:pd.DataFrame, y:Union[np.ndarray, pd.Series],
                    scores_df:pd.DataFrame=None, is_classifier:bool=False)->pd.DataFrame:
        """Evaluate this rule on X, y and append its score row(s) to scores_df."""
        mask = pd.Series(self.__rule__(X)).values
        y_preds = self.predict(X)
        return self._score_rule(y, y_preds, mask,
                                self.prediction, self.default,
                                scores_df, is_classifier)

    def set_rule_id(self, rule_id:int=0)->int:
        """Assign rule_id to this rule and return the next free id."""
        self._rule_id = rule_id
        return rule_id+1

    def get_max_rule_id(self, max_rule_id:int=0)->int:
        """Return the largest rule_id in this (sub)tree."""
        if self._rule_id is not None and self._rule_id > max_rule_id:
            return self._rule_id
        return max_rule_id

    def get_rule(self, rule_id:int):
        """Return this rule if it carries rule_id, else None."""
        if self._rule_id is not None and self._rule_id == rule_id:
            return self

    def replace_rule(self, rule_id:int, new_rule)->None:
        """Replace this rule in place (class and state) when it carries rule_id."""
        if self._rule_id is not None and self._rule_id == rule_id:
            assert isinstance(new_rule, BusinessRule)
            # swap identity in place so references held by parents stay valid
            self.__class__ = new_rule.__class__
            self.__dict__ = new_rule.__dict__
            return self

    def remove_rule(self, rule_id:int):
        """Leaf rules cannot remove children; composite subclasses override."""
        return None

    def get_rule_params(self, rule_id:int)->dict:
        """Return the stored parameters when this rule carries rule_id."""
        if self._rule_id is not None and self._rule_id == rule_id:
            return self.get_params()

    def set_rule_params(self, rule_id:int, **params)->None:
        """Update the stored parameters when this rule carries rule_id."""
        if self._rule_id is not None and self._rule_id == rule_id:
            self.set_params(**params)

    def get_rule_input(self, rule_id:int, X:pd.DataFrame, y:Union[pd.Series, np.ndarray]=None
                        )->Union[pd.DataFrame, Tuple[pd.DataFrame, Union[pd.Series, np.ndarray]]]:
        """Return the data that reaches the rule with rule_id (all of X for a
        leaf rule); (None, None)/None when this rule does not carry rule_id."""
        if self._rule_id is not None and self._rule_id == rule_id:
            if y is not None:
                return X, y
            else:
                return X
        if y is not None:
            return None, None
        else:
            return None

    def get_rule_leftover(self, rule_id:int, X:pd.DataFrame, y:Union[pd.Series, np.ndarray]=None
                        )->Union[pd.DataFrame, Tuple[pd.DataFrame, Union[pd.Series, np.ndarray]]]:
        """Return the data that the rule with rule_id did NOT fire on."""
        if self._rule_id is not None and self._rule_id == rule_id:
            mask = np.invert(pd.Series(self.__rule__(X)).values)
            if y is not None:
                return X[mask], y[mask]
            else:
                return X[mask]
        if y is not None:
            return None, None
        else:
            return None

    def get_params(self, deep:bool=True)->dict:
        """Return the parameters recorded by _store_child_params."""
        return self._stored_params

    def set_params(self, **params)->None:
        """Update both the stored-params dict and the matching attributes."""
        for k, v in params.items():
            if k in self._stored_params:
                self._stored_params[k] = v
                setattr(self, k, v)

    def _get_casewhens(self, casewhens:dict=None):
        """Normalize a casewhens mapping to a dict ({} when None)."""
        if casewhens is None:
            return {}
        else:
            return casewhens

    def _get_binarynodes(self, binarynodes:dict=None):
        """Normalize a binarynodes mapping to a dict ({} when None)."""
        if binarynodes is None:
            return {}
        else:
            return binarynodes

    def add_to_igraph(self, graph:Graph=None)->Graph:
        """Add this rule as a vertex to graph (creating the graph with the
        expected vertex/edge attributes when None) and return the graph."""
        if graph is None:
            graph = Graph()
            graph.vs.set_attribute_values('rule_id', [])
            graph.vs.set_attribute_values('name', [])
            graph.vs.set_attribute_values('description', [])
            graph.vs.set_attribute_values('rule', [])
            graph.es.set_attribute_values('casewhen', [])
            graph.es.set_attribute_values('binary_node', [])
        graph.add_vertex(
            rule_id=self._rule_id,
            name=self.__class__.__name__,
            description=self.__rulerepr__(),
            rule=self
        )
        return graph

    def __rulerepr__(self)->str:
        return "BusinessRule"

    def to_yaml(self, filepath:Union[Path, str]=None, return_dict:bool=False):
        """Store object to a yaml format.

        Args:
            filepath: file where to store the .yaml file. If None then just
                return the yaml as a str.
            return_dict: instead of returning a yaml str, return the raw dict.
        """
        # NOTE(review): self.describe() is not defined in this class; it is
        # presumably provided by subclasses — confirm
        return super().to_yaml(filepath, return_dict, comment=self.describe())
import math
from typing import Dict, List, Tuple
import numpy as np
from pandas import DataFrame
def support(subset: List[str], data_df: DataFrame) -> float:
    """Calculates the support for a given itemset over all transactions.

    Args:
        subset (List[str]): List containing a candidate itemset
        data_df (DataFrame): Contains all itemsets (one-hot encoded)

    Returns:
        float: Support for the itemset
    """
    # rows where every item of the subset is present
    contains_all = data_df.loc[:, subset].all(axis=1)
    return contains_all.sum() / len(data_df)


def get_frequent_1_itemsets(
    items: np.ndarray, transactions: DataFrame, support_threshold: float
) -> Dict[Tuple[str], float]:
    """Calculates all frequent 1-itemsets and returns them as well as their
    support.

    Args:
        items (np.ndarray): Numpy array of all items
        transactions (DataFrame): The set of all transactions
        support_threshold (float): Support threshold

    Returns:
        Dict[Tuple[str], float]: Frequent 1-itemsets and their support
    """
    item_supports = ((item, support([item], transactions)) for item in items)
    return {
        (item,): supp
        for item, supp in item_supports
        if supp >= support_threshold
    }
def lift(supp_antecedent: float, supp_consequent: float, supp_union: float) -> float:
    """Lift = supp(A∪C) / (supp(A) * supp(C)); inf when either support is 0."""
    if supp_antecedent * supp_consequent == 0:
        return float("inf")
    return supp_union / (supp_antecedent * supp_consequent)


def cosine(supp_antecedent: float, supp_consequent: float, supp_union: float) -> float:
    """Cosine = supp(A∪C) / sqrt(supp(A) * supp(C)); inf when either support is 0."""
    if supp_antecedent * supp_consequent == 0:
        return float("inf")
    return supp_union / math.sqrt(supp_antecedent * supp_consequent)


def independent_cosine(supp_antecedent: float, supp_consequent: float) -> float:
    """Cosine value the rule would have if antecedent and consequent were independent."""
    return math.sqrt(supp_consequent * supp_antecedent)


def imbalance_ratio(supp_antecedent: float, supp_consequent: float, supp_union: float) -> float:
    """Imbalance ratio = |supp(A) - supp(C)| / supp(A or C); 0 for an empty union."""
    if (supp_antecedent + supp_consequent - supp_union) == 0:
        return 0
    return abs(supp_antecedent - supp_consequent) / (supp_antecedent + supp_consequent - supp_union)


def kulczynski(supp_antecedent: float, supp_consequent: float, supp_union: float) -> float:
    """Kulczynski = mean of conf(A -> C) and conf(C -> A)."""
    return 0.5*(confidence(supp_antecedent, supp_union) + confidence(supp_consequent, supp_union))


def confidence(supp_antecedent: float, supp_union: float) -> float:
    """Confidence = supp(A∪C) / supp(A); 0 when the antecedent never occurs."""
    if supp_antecedent == 0:
        return 0
    return supp_union / supp_antecedent


def conviction(supp_antecedent: float, supp_consequent: float, supp_union: float) -> float:
    """Conviction = (1 - supp(C)) / (1 - conf(A -> C)); inf for confidence 1."""
    denominator = (1-confidence(supp_antecedent, supp_union))
    if denominator == 0:
        return float("inf")
    return (1-supp_consequent) / denominator


def measure_dict(ant_supp: float, con_supp: float, supp: float) -> Dict[str, float]:
    """Bundle all interestingness measures for one rule into a dict.

    NOTE: the keys 'idependent_cosine' and 'kulczynksi' are misspelled but kept
    as-is because downstream consumers may rely on them.
    """
    return {"cosine": cosine(ant_supp, con_supp, supp), "idependent_cosine": independent_cosine(ant_supp, con_supp),
            "lift": lift(ant_supp, con_supp, supp), "conviction": conviction(ant_supp, con_supp, supp),
            "imbalance_ratio": imbalance_ratio(ant_supp, con_supp, supp), "kulczynksi": kulczynski(ant_supp, con_supp, supp)}
import pkg_resources
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
def load_store_data() -> pd.DataFrame:
    """ Loads the stored_data.csv file and binarizes the data.

    Returns:
        pd.DataFrame: One-hot encoded store data, where each column
            corresponds to an item.
    """
    filename = pkg_resources.resource_filename(
        __name__, "datasets/store_data.csv")
    # each csv line is one transaction: a comma separated list of items
    with open(filename) as f:
        transactions = [
            [item.strip() for item in line.strip().rstrip().split(",")]
            for line in f
        ]
    encoder = TransactionEncoder()
    one_hot = encoder.fit_transform(transactions)
    return pd.DataFrame(one_hot, columns=encoder.columns_)
def load_shroom_data() -> pd.DataFrame:
    """Loads the mushroom dataset and names each column thereby. Further the
    stalk-root attribute is dropped since it's missing for roughly a quarter
    of all instances.

    Returns:
        pd.DataFrame: DataFrame storing the categorical value for each
            attribute, indexed by a synthetic integer 'id', with the
            stalk-root attribute dropped.
    """
    names = [
        "label",
        "cap-shape",
        "cap-surface",
        "cap-color",
        "bruises",
        "odor",
        "gill-attach",
        "gill-spacing",
        "gill-size",
        "gill-color",
        "stalk-shape",
        "stalk-root",
        "stalk-surf-ab-ring",
        "stalk-surface-be-ring",
        "stalk-color-ab-ring",
        "stalk-color-be-ring",
        "veil-type",
        "veil-color",
        "ring-number",
        "ring-type",
        "spore-print-color",
        "habitat",
        "population"]
    df = pd.read_csv(
        pkg_resources.resource_filename(
            __name__, "datasets/agaricus-lepiota.data"),
        names=names, index_col=False
    )
    # synthetic integer index (range() directly; no list comprehension needed)
    df["id"] = range(len(df))
    df.set_index("id", inplace=True)
    # Drop the stalk-root attribute since it has unknown values for 2480 instances
    df.drop("stalk-root", axis=1, inplace=True)
    return df
from copy import deepcopy
from math import floor
import random
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
class Gene:
    """Store the information associated with an individual attribute.

    For categorical attributes ``lower``/``upper`` are meaningless; for
    numerical ones ``value`` is meaningless.
    """

    def __init__(self, name: str, numerical: bool, lower: float, upper: float, value: Any) -> None:
        self.name = name
        self.numerical = numerical
        self.upper = upper
        self.lower = lower
        self.value = value

    def is_numerical(self) -> bool:
        return self.numerical

    def __repr__(self) -> str:
        if not self.numerical:
            return f"{self.name}: {self.value}"
        else:
            return f"{self.name}: [{self.lower}, {self.upper}]"

    def __eq__(self, __o: object) -> bool:
        # fix: comparing against a non-Gene used to raise AttributeError;
        # returning NotImplemented lets Python fall back to identity
        if not isinstance(__o, Gene):
            return NotImplemented
        if self.numerical and __o.numerical:
            return self.lower == __o.lower and self.upper == __o.upper
        return self.value == __o.value


class Individuum:
    """A candidate itemset for the genetic algorithm: a mapping from attribute
    name to Gene, plus bookkeeping fields maintained by the evolutionary
    search (fitness, coverage, marked)."""

    def __init__(self, items: Dict[str, Gene]) -> None:
        self.items = items
        self.fitness = 0.0   # set externally by the fitness evaluation
        self.coverage = 0
        self.marked = 0

    def num_attrs(self) -> int:
        return len(self.items)

    def get_fitness(self) -> float:
        return self.fitness

    def get_items(self) -> Dict[str, Gene]:
        return self.items

    def matches(self, record: pd.Series) -> bool:
        """Return True when the record satisfies every gene: numeric values
        must lie inside [lower, upper] (inclusive), categorical values must
        be equal."""
        for name, gene in self.items.items():
            val = record[name]
            if gene.is_numerical() and (val > gene.upper or val < gene.lower):
                return False
            elif not gene.is_numerical() and (val != gene.value):
                return False
        return True

    def crossover(self, other: Any, probability: float) -> Tuple[Any, Any]:
        """Performs crossover operator to generate two offsprings from two
        individuals. Common genes are swapped between the offspring with the
        given probability. Other genes are inherited by default.

        Args:
            other (Any): Individual to cross the current individual with
            probability (float): Crossover probability

        Returns:
            Tuple[Any, Any]: Two offsprings resulting from the crossover
        """
        other_genes = other.get_items()
        genes1 = deepcopy(self.get_items())
        genes2 = deepcopy(other_genes)
        common_genes = set(genes1).intersection(other_genes)
        rand_prob = np.random.rand(len(common_genes))
        for name, prob in zip(common_genes, rand_prob):
            # one draw per common gene: swap it in both offspring at once
            # (the original tested the same condition twice)
            if prob < probability:
                genes1[name] = deepcopy(other_genes[name])
                genes2[name] = deepcopy(self.get_items()[name])
        return (Individuum(genes1), Individuum(genes2))

    def mutate(self, db: pd.DataFrame, probability: float) -> None:
        """Mutates randomly selected genes. For numeric genes the interval
        boundaries are each shifted by a random amount from
        [0, interval_width/17] (docstring previously claimed /11, the code
        uses /17). In case of categorical attributes there's a 25% chance of
        changing the attribute to a random value of the domain.

        Args:
            db (pd.DataFrame): Database
            probability (float): Mutation probability
        """
        for gene in self.items.values():
            # Mutate in this case
            if random.random() < probability:
                name = gene.name
                if gene.is_numerical():
                    # Change the upper and lower bound of the interval
                    lower = db[name].min()
                    upper = db[name].max()
                    width_delta = (upper - lower) / 17
                    delta1 = random.uniform(0, width_delta)
                    delta2 = random.uniform(0, width_delta)
                    gene.lower += delta1 * (-1 if random.random() < 0.5 else 1)
                    gene.upper += delta2 * (-1 if random.random() < 0.5 else 1)
                    # clamp to the attribute domain and keep lower <= upper
                    gene.lower = max(lower, gene.lower)
                    gene.upper = min(upper, gene.upper)
                    if gene.lower > gene.upper:
                        gene.upper, gene.lower = gene.lower, gene.upper
                    gene.lower = max(lower, gene.lower)
                    gene.upper = min(upper, gene.upper)
                else:
                    # Only seldomly change the value of the categorical attribute
                    gene.value = gene.value if random.random(
                    ) < 0.75 else np.random.choice(db[name].to_numpy())

    def get_all_subsets(self) -> List[Any]:
        """Generates all (non-empty) subsets of the current itemset.

        Returns:
            List[Any]: List of all subsets.
        """
        seeds = []
        for gene in self.items.values():
            seeds = seeds + [Individuum({gene.name: gene})] + \
                [Individuum({gene.name: gene}) + ind for ind in seeds]
        return seeds

    def to_tuple(self) -> Tuple[str, ...]:
        """Converts an individual to a tuple of strings, where each
        gene and its values are respected.

        Returns:
            Tuple[str, ...]: String representation of the individuum
        """
        items = []
        for gene in self.items.values():
            if gene.is_numerical():
                items.append(f"{gene.name} = {gene.lower}..{gene.upper}")
            else:
                items.append(f"{gene.name} = {gene.value}")
        return tuple(items)

    def __repr__(self) -> str:
        return self.items.__repr__()

    def __add__(self, other: object) -> object:
        # fix: the original mutated self.items in place (and shared the dict
        # with the returned Individuum); build a fresh merged dict instead
        merged = dict(self.items)
        merged.update(other.get_items())
        return Individuum(merged)
def _get_lower_upper_bound(db: pd.DataFrame, num_cat_attrs: Dict[str, bool]) -> Dict[str, Tuple[float, float]]:
"""Determines a dictionary where for all numerical attributes the maximum and minimum value for
the intervals are obtained.
Args:
db (pd.DataFrame): The database storing the domain information.
num_cat_attrs (Dict[str, bool]): Mapping marking categorical and numerical attributes.
Raises:
Exception: When not all attributes in db given in num_cat_attrs, then this exception is raised.
Returns:
Dict[str, Tuple[float, float]]: Mapping from all numerical attributes to their bounding boxes [min,max].
"""
if len(num_cat_attrs) < len(list(db.columns)):
raise Exception(
"Need to specify the type for each attribute in the database.")
interval_boundaries = {}
for name, is_num in num_cat_attrs.items():
if is_num:
min_val = db[name].min()
max_val = db[name].max()
interval_boundaries[name] = (min_val, max_val)
return interval_boundaries
def _generate_first_population(db: pd.DataFrame, population_size: int, interval_boundaries: Dict[str, Tuple[float, float]], set_attribute: str) -> List[Individuum]:
    """Determines an initial population, where each individuum may have 2 to n randomly sampled attributes.
    Further, to come up with an individuum that is covered by at least one tuple, a random tuple from the db
    is sampled. For numeric attributes a random uniform number from 0 to 1/7 of the entire domain is added/
    subtracted from the interval boundaries.

    Note: There is no specification on how to exactly implement this in 'An Evolutionary Algorithm to Discover
    Numeric Association Rules'.

    Args:
        db (pd.DataFrame): Database to sample initial individuals from.
        population_size (int): Number of individuals in the initial population.
        interval_boundaries (Dict[str, Tuple[float, float]]): Result of _get_lower_upper_bound.
        set_attribute (str): Name of attribute that should be included in every itemset (may be None).

    Returns:
        List[Individuum]: Initial population.
    """
    individuums = []
    for i in range(population_size):
        item = {}
        items = list(db.columns)
        # Add two random attributes and then fill up with a coin toss for each attribute
        attrs = random.sample(items, 2)
        # If the target attribute is not sampled, replace the second sample with it
        if set_attribute and set_attribute not in attrs:
            attrs = attrs[0:1] + [set_attribute]
            assert set_attribute in attrs
        # Coin-toss every remaining attribute into the itemset.
        attrs = [itm for itm in items if itm not in attrs and random.random()
                 > 0.5] + attrs
        # Seed the individual from a randomly drawn record so it covers at
        # least that one tuple.
        row = floor(random.uniform(0, len(db)-1))
        register = db.iloc[row]
        for column in attrs:
            value = register[column]
            if interval_boundaries.get(column):
                # Add/Subtract at most 1/7th of the entire attribute domain,
                # clamped to the attribute's global [lower, upper] bounds.
                lower, upper = interval_boundaries[column]
                u = floor(random.uniform(0, (upper-lower) / 7))
                lower = max(lower, value - u)
                upper = min(upper, value + u)
                item[column] = Gene(column, True, lower, upper, lower)
            else:
                # Categorical gene: store the sampled record's concrete value.
                value = register[column]
                item[column] = Gene(column, False, value, value, value)
        individuums.append(Individuum(item))
    return individuums
def _process(db: pd.DataFrame, marked_rows: Dict[int, bool], population: List[Individuum]) -> None:
    """Count, for every individual, how many records it covers and how many
    of those covered records are already marked; both counters are stored on
    the individual (``coverage`` and ``marked``).

    Args:
        db (pd.DataFrame): Database.
        marked_rows (Dict[int, bool]): Rows already covered by some fittest itemset.
        population (List[Individuum]): Current population.
    """
    # Reset the counters before re-evaluating against the database.
    for member in population:
        member.coverage = 0
        member.marked = 0

    def __count_matches(record: pd.Series) -> None:
        already_marked = 1 if marked_rows[record.name] else 0
        for member in population:
            if member.matches(record):
                member.coverage += 1
                member.marked += already_marked

    db.apply(__count_matches, axis=1)
def _amplitude(intervals: Dict[str, Tuple[float, float]], ind: Individuum) -> float:
    """Average relative interval width over the individual's numerical genes.

    Each numerical gene contributes
    ``(gene.upper - gene.lower) / (attr.upper - attr.lower)``
    (or 1 for a degenerate attribute domain); the sum is divided by the
    number of numerical genes.

    Args:
        intervals (Dict[str, Tuple[float, float]]): Result of _get_lower_upper_bound.
        ind (Individuum): Individual to evaluate.

    Returns:
        float: The average amplitude used to penalize the fitness (0 if the
        individual has no numerical genes).
    """
    total = 0.0
    numeric_genes = 0
    for name, gene in ind.get_items().items():
        bounds = intervals.get(name)
        if not bounds:
            continue
        lower, upper = bounds
        domain_width = upper - lower
        total += (gene.upper - gene.lower) / domain_width if domain_width != 0 else 1
        numeric_genes += 1
    return total / numeric_genes if numeric_genes != 0 else 0
def _cross_over(population: List[Individuum], probability: float, number_offspring: int) -> List[Individuum]:
    """Produce offspring by crossing randomly sampled pairs of progenitors.

    Args:
        population (List[Individuum]): Progenitors that are crossed at random.
        probability (float): Crossover probability.
        number_offspring (int): Number of crossover events to perform.

    Returns:
        List[Individuum]: The generated offspring; every crossover event
        contributes a pair, so the list holds 2 * number_offspring individuals.
    """
    offspring_pool: List[Individuum] = []
    for _ in range(number_offspring):
        first, second = random.sample(population, k=2)
        offspring_pool.extend(first.crossover(second, probability))
    return offspring_pool
def _get_fittest(population: List[Individuum], selection_percentage: float) -> List[Individuum]:
    """Select the top ``selection_percentage`` of the population by fitness.

    Note: the population list is sorted IN PLACE (descending fitness) as a
    side effect; at least one individual is always selected.

    Args:
        population (List[Individuum]): Individuals of the current generation.
        selection_percentage (float): Fraction of the generation that survives.

    Returns:
        List[Individuum]: The fittest individuals; the remaining ones are
        subject to the crossover operator.
    """
    population.sort(key=lambda individual: individual.fitness, reverse=True)
    survivor_count = floor(selection_percentage * len(population) + 1)
    return population[:survivor_count]
def _update_marked_records(db: pd.DataFrame, marked_records: Dict[int, bool], chosen: Individuum) -> None:
    """Mark every record of the db that is covered by the chosen itemset.

    Records that were already marked stay marked.

    Args:
        db (pd.DataFrame): Database whose records will be marked.
        marked_records (Dict[int, bool]): Stores for each record whether it is already marked.
        chosen (Individuum): The fittest itemset of the fully evolved population.
    """
    def __mark(record: pd.Series) -> None:
        if chosen.matches(record):
            marked_records[record.name] = True

    db.apply(__mark, axis=1)
def gar(db: pd.DataFrame, num_cat_attrs: Dict[str, bool], num_sets: int, num_gens: int, population_size: int,
        omega: float, psi: float, mu: float, selection_percentage: float = 0.15, recombination_probability: float = 0.5,
        mutation_probability: float = 0.4, set_attribute: str = None) -> pd.DataFrame:
    """Implementation of the GAR evolutionary algorithm from 'An Evolutionary Algorithm to Discover Numeric Association Rules'.

    Coverage was assumed to be relative support, amplitude was defined as (gene.upper-gene.lower) / (upper-lower), tuples were
    marked when a chosen individual is supported by a row; a more elaborate marking could store the attributes that are covered
    for each row and use a normalized row sum. For the categorical attributes only a concrete value is stored and mutated with
    lower probability than the interval boundaries of numerical attributes.

    Note:
        Unfortunately many details of implementation were left open and some terms were not precisely defined, therefore some ideas
        from 'An evolutionary algorithm to discover quantitative association rules from huge databases without the need for
        an a priori discretization' were used, but this again did not cover all the details.

    Args:
        db (pd.DataFrame): Database.
        num_cat_attrs (Dict[str, bool]): Maps numerical attributes to True and categorical ones to False.
        num_sets (int): Number of itemsets to be generated.
        num_gens (int): Number of generations.
        population_size (int): Number of individuals used in each population.
        omega (float): Penalization factor for coverage of already-marked rows.
        psi (float): Penalization factor for amplitude.
        mu (float): Rewarding factor for attribute count.
        selection_percentage (float, optional): Percentage of fittest individuals for the next generation. Defaults to 0.15.
        recombination_probability (float, optional): Probability that the offspring inherits the genes from the other progenitor. Defaults to 0.5.
        mutation_probability (float, optional): Mutation probability of numerical attributes. Defaults to 0.4.
        set_attribute (str): Attribute that should be included in every individual. Defaults to None.

    Returns:
        pd.DataFrame: Fittest itemsets, as well as their subsets and support
        information; columns are ["itemsets", "support"].
    """
    def __update_counts(db: pd.DataFrame, marked_rows: Dict[int, bool], population: List[Individuum]) -> None:
        """Recompute coverage/marked counts and derive each individual's fitness."""
        _process(db, marked_rows, population)
        for individual in population:
            individual.fitness = _get_fitness(
                individual.coverage / len(db),
                individual.marked / len(db),
                _amplitude(intervals, individual),
                individual.num_attrs() / len(num_cat_attrs),
            )

    def _get_fitness(coverage: float, marked: float, amplitude: float, num_attr: float) -> float:
        # Reward coverage and attribute count; penalize rows that are already
        # marked and overly wide intervals.
        return coverage - marked * omega - amplitude * psi + num_attr * mu * coverage

    fittest_itemsets = []
    # Store which rows of the DB were marked
    marked_rows: Dict[int, bool] = {row: False for row in db.index}
    intervals = _get_lower_upper_bound(db, num_cat_attrs)
    for _ in range(num_sets):
        population = _generate_first_population(
            db, population_size, intervals, set_attribute)
        for _ in range(num_gens):
            # Evaluate the current generation. (This call replaces a block
            # that duplicated __update_counts inline.)
            __update_counts(db, marked_rows, population)
            next_population = _get_fittest(population, selection_percentage)
            offsprings = _cross_over(population, recombination_probability,
                                     len(population) - len(next_population))
            __update_counts(db, marked_rows, offsprings)
            # _cross_over yields pairs; keep the fitter offspring of each pair.
            offsprings = [
                offsprings[i] if offsprings[i].get_fitness() > offsprings[i + 1].get_fitness()
                else offsprings[i + 1]
                for i in range(0, len(offsprings), 2)
            ]
            next_population.extend(offsprings)
            for individual in next_population:
                individual.mutate(db, mutation_probability)
            population = next_population
        __update_counts(db, marked_rows, population)
        chosen_one = max(population, key=lambda item: item.get_fitness())
        _update_marked_records(db, marked_rows, chosen_one)
        fittest_itemsets.append(chosen_one)
    # Get all subsets of the itemsets and map into tuples to reuse the rule generation framework
    final_itemsets = []
    for itemset in fittest_itemsets:
        final_itemsets.extend(itemset.get_all_subsets())
    _process(db, marked_rows, final_itemsets)
    # Stuff into df
    final_itemsets_tuples = [{"itemsets": item.to_tuple(),
                              "support": item.coverage / len(db)} for item in final_itemsets]
    return pd.DataFrame(final_itemsets_tuples).drop_duplicates()
from collections import defaultdict
from typing import Dict, Iterator, List, Tuple
import numpy as np
import pandas as pd
from pandas import DataFrame
from algs.util import get_frequent_1_itemsets
def ais(dataframe: DataFrame, support_threshold: float = 0.005) -> DataFrame:
    """Mine all frequent itemsets satisfying the minimum support constraint
    with the AIS algorithm.

    Args:
        dataframe (DataFrame): All transactions, one-hot encoded.
        support_threshold (float, optional): Relative support threshold.
            Defaults to 0.005.

    Returns:
        DataFrame: Columns ``itemsets`` (tuple of items) and ``support``
        (relative support of that itemset).
    """
    column_items = np.array(dataframe.columns)
    n_transactions = len(dataframe)
    # Seed with the frequent 1-itemsets; `discovered` accumulates every
    # frequent itemset along with its support.
    discovered = get_frequent_1_itemsets(
        column_items, dataframe, support_threshold)
    current_level = [list(itemset) for itemset in discovered.keys()]
    while current_level:
        # Count candidate (k+1)-itemsets generated from the frequent k-itemsets.
        candidate_counts = defaultdict(int)
        for candidate in __generate_itemsets(current_level, dataframe):
            candidate_counts[candidate] += 1
        frequent = __get_frequent_k_itemsets(
            candidate_counts, n_transactions, support_threshold)
        discovered.update(frequent)
        current_level = [list(itemset) for itemset in frequent.keys()]
    # Materialize every frequent itemset with its support into a DataFrame.
    return pd.DataFrame(discovered.items(),
                        index=list(range(len(discovered))),
                        columns=['itemsets', 'support'])
def __generate_itemsets(frequent_k_itemsets: List[List[str]], transactions: DataFrame) -> Iterator[Tuple[str]]:
    """Iterates over all transactions and concatenates any item to the list of frequent k itemsets, when
    the transaction contains the k itemset and the item has a greater lexicographic order than the last
    element in the frequent k itemset. This implies the frequent k itemsets' items to be sorted.

    Args:
        frequent_k_itemsets (List[List[str]]): Frequent itemsets of length k.
        transactions (DataFrame): All transactions, one-hot encoded.

    Yields:
        Iterator[Tuple[str]]: Candidate itemset of length k+1.
    """
    for row in range(len(transactions)):
        # Boolean-select the True columns of this row to recover the
        # transaction's items. NOTE(review): indexing with .loc[row] /
        # .iloc[row] assumes the one-hot frame uses the default RangeIndex —
        # confirm for callers that re-index their transactions.
        transaction = list(transactions.loc[row, transactions.iloc[row]].index)
        for itemset in frequent_k_itemsets:
            if not all(item in transaction for item in itemset):
                continue
            last_element = itemset[-1]
            # Extend only with lexicographically larger items so every
            # candidate is emitted in sorted order (and once per transaction).
            for item in transaction:
                if last_element < item:
                    yield tuple(itemset + [item])
def __get_frequent_k_itemsets(candidate_sets: Dict[Tuple[str], int], num_transactions: int, support_threshold: float) -> Dict[Tuple[str], float]:
    """Filter the candidate counts down to the itemsets meeting min support.

    Args:
        candidate_sets (Dict[Tuple[str], int]): Candidate itemsets mapped to
            their absolute occurrence counts.
        num_transactions (int): Total number of transactions.
        support_threshold (float): Relative minimum support.

    Returns:
        Dict[Tuple[str], float]: Frequent itemsets mapped to their relative support.
    """
    min_count = num_transactions * support_threshold
    frequent = {}
    for itemset, count in candidate_sets.items():
        if count >= min_count:
            frequent[itemset] = count / num_transactions
    return frequent
from itertools import chain, combinations
from typing import Any, Dict, Iterator, List, Tuple
import pandas as pd
from pandas import DataFrame, Series
from algs.util import confidence, measure_dict
def generate_rules(frequent_itemsets: DataFrame,
                   min_conf: float = 0.5) -> DataFrame:
    """Generates all rules that satisfy the minimum confidence constraint for all frequent itemsets.
    This algorithm is described in 'Fast Algorithms for Mining Association Rules'
    on p.14 (ap-genrules).

    Args:
        frequent_itemsets (DataFrame): Frequent itemsets, which were found by e.g. using the apriori algorithm.
        min_conf (float, optional): Minimum confidence threshold. Defaults to 0.5.

    Returns:
        DataFrame: All rules satisfying the constraints.
    """
    # Itemset tuple -> support lookup used for all confidence computations.
    support_mapping = frequent_itemsets.set_index(
        "itemsets").to_dict()["support"]

    def __ap_genrules(itemset: Series, consequents: List[Tuple[str]],
                      m: int) -> Iterator[Dict[str, Any]]:
        """Checks the minimum confidence constraint for all rules that can be built with the consequents
        in the consequents argument and yields them. The consequents are extended as long as their size is
        smaller than the size of the corresponding itemset and the frontier is not empty.

        Args:
            itemset (Series): The itemset along its support.
            consequents (List[Tuple[str]]): Candidate consequents, each of size m.
            m (int): The size of the elements contained in consequents.

        Yields:
            Iterator[Dict[str, Any]]: Rule antecedents and consequents with objective measures.
        """
        new_consequents = []
        for consequent in consequents:
            support_rule = itemset["support"]
            if support_rule == 0:
                continue
            antecedent = tuple([
                item for item in itemset["itemsets"] if item not in consequent
            ])
            conf = confidence(support_mapping[antecedent], support_rule)
            if conf >= min_conf:
                # Only confident consequents seed the next, larger frontier.
                new_consequents.append(consequent)
                yield {
                    "antecedents":
                    antecedent,
                    "consequents":
                    consequent,
                    "support":
                    support_rule,
                    "confidence":
                    conf,
                    **measure_dict(support_mapping[antecedent], support_mapping[consequent], support_rule)
                }
        if len(itemset["itemsets"]) > m + 1:
            # BUG FIX: the (m+1)-item consequents must be built by joining the
            # m-item consequents on their first m-1 items, i.e. apriori-gen
            # with k=m. The original passed m-1, which joined on the wrong
            # prefix and silently dropped or corrupted every consequent of
            # size >= 3 (for m=1, k=0 and k=1 behave identically on 1-tuples,
            # so 2-item consequents are unaffected by this fix).
            yield from __ap_genrules(itemset,
                                     __apriori_gen(new_consequents, m),
                                     m + 1)

    rules = []
    for _, itemsets in frequent_itemsets.iterrows():
        itemset = itemsets["itemsets"]
        # Some algorithms prune itemsets, but their support information would still be
        # required. Those itemsets are added to the df with ignore set to True.
        if "ignore" in frequent_itemsets.columns and itemsets["ignore"] == True:
            continue
        if len(itemset) >= 2:
            consequents = __get_1_item_consequents(itemset)
            for rule in __ap_genrules(itemsets, consequents, 1):
                rules.append(rule)
    df = DataFrame(
        rules,
        index=[i for i in range(len(rules))],
    )
    return df
def __get_1_item_consequents(itemsets: List[str]) -> List[Tuple[str]]:
    """Wrap every item of a frequent itemset into a single-item consequent.

    Args:
        itemsets (List[str]): Frequent itemset.

    Returns:
        List[Tuple[str]]: One 1-tuple per item of the itemset.
    """
    return [(item, ) for item in itemsets]
def __apriori_gen(old_candidates: List[Tuple[str]],
                  k: int) -> List[Tuple[str]]:
    """Joins pairs of consequents from the previous pass that agree on their
    first k-1 items — requiring the item at position k-1 of the first to be
    lexicographically smaller than the second's — and then prunes every
    candidate that has an immediate sub-consequent missing from the previous
    pass, mirroring the apriori-gen procedure.

    Args:
        old_candidates (List[Tuple[str]]): Consequents of the last pass.
        k (int): Join position; the first k-1 positions must match.

    Returns:
        List[Tuple[str]]: Candidate consequents, each one item longer than
        the inputs.
    """
    joined = set()
    total = len(old_candidates)
    for i in range(total):
        for j in range(i + 1, total):
            first, second = old_candidates[i], old_candidates[j]
            # Prefix mismatch in the first k-1 positions disqualifies the pair.
            if any(first[pos] != second[pos] for pos in range(k - 1)):
                continue
            if first[k - 1] < second[k - 1]:
                joined.add(first + (second[k - 1], ))
    # Keep only candidates all of whose one-item-removed sub-tuples survived
    # the previous pass.
    return [
        candidate for candidate in joined
        if all(candidate[:pos] + candidate[pos + 1:] in old_candidates
               for pos in range(len(candidate)))
    ]
def minimal_non_redundant_rules(closed_frequent_itemsets: DataFrame,
                                min_conf: float = 0.5) -> DataFrame:
    """Determine the set of minimal non-redundant rules: first the generic
    basis, then the transitive reduction of the informative basis, both
    according to 'Mining minimal non-redundant association rules'.

    Args:
        closed_frequent_itemsets (DataFrame): All frequent closed itemsets and
            their generators, as determined by the AClose algorithm.
        min_conf (float, optional): Minimum confidence threshold. Defaults to 0.5.

    Returns:
        DataFrame: Minimal non-redundant association rules with confidence,
        support, antecedents and consequents.
    """
    # Map every generator to its closure and the closure's support.
    generator_to_closure = {}
    for _, record in closed_frequent_itemsets.iterrows():
        generator_to_closure[tuple(record["generators"])] = (
            tuple(record["closed_itemsets"]),
            record["support"],
        )
    basis = generic_basis(generator_to_closure)
    basis.extend(
        transitive_reduction_of_informative_basis(generator_to_closure, min_conf))
    return DataFrame(basis, index=[i for i in range(len(basis))])
def generic_basis(
    generators: Dict[Tuple[str], Tuple[Tuple[str], float]]
) -> List[Dict[str, Any]]:
    """Calculate the generic basis of exact (confidence 1) association rules
    as described in 'Mining minimal non-redundant association rules'.

    Args:
        generators (Dict[Tuple[str], Tuple[Tuple[str], float]]): Mapping from
            generators to their closures and support.

    Returns:
        List[Dict[str, Any]]: One dict per rule with antecedent and consequent
        tuples, as well as the support and confidence of the rule.
    """
    exact_rules = []
    for generator, (closure, support) in generators.items():
        if closure == generator:
            # The generator equals its closure: no proper consequent exists.
            continue
        consequent = tuple(sorted(set(closure) - set(generator)))
        exact_rules.append({
            "antecedents": generator,
            "consequents": consequent,
            "support": support,
            "confidence": 1,
        })
    return exact_rules
def transitive_reduction_of_informative_basis(
        generators: Dict[Tuple[str], Tuple[Tuple[str], float]],
        min_conf: float) -> List[Dict[str, Any]]:
    """Calculates the transitive reduction of the informative basis for approximate association
    rules according to the paper 'Mining minimal non-redundant association rules'.

    Args:
        generators (Dict[Tuple[str], Tuple[Tuple[str], float]]): Mapping from generators to their closures and support.
        min_conf (float): Minimum confidence threshold.

    Returns:
        List[Dict[str, Any]]: One dict per rule with antecedent and consequent
        tuples, as well as the support and confidence of the rule.
    """
    # Size of the longest frequent closed itemset, plus a partition of the
    # closed itemsets by their length.
    mu = 0
    FC_j: Dict[int, Dict[Tuple[str], float]] = {}
    for cls, supp in generators.values():
        size_cls = len(cls)
        mu = max(size_cls, mu)
        FC_j.setdefault(size_cls, {})[cls] = supp
    ib = []
    for generator, (closure_tuple, gen_supp) in generators.items():
        closure = set(closure_tuple)
        successors: List[set] = []
        # S[j] holds the closed itemsets of size len(closure) + j that are
        # proper supersets of this generator's closure.
        # BUG FIX: the original kept a `skip` dict keyed by the itemset SIZE
        # but queried it with the 0-based INDEX into S; whenever those
        # collided, a non-empty bucket was skipped and its valid rules were
        # silently dropped. Iterating an empty dict is harmless, so the skip
        # bookkeeping is removed entirely.
        S = []
        for size in range(len(closure), mu + 1):
            level = FC_j.get(size, {})
            S.append({fci: supp for fci, supp in level.items()
                      if closure < set(fci)})
        for j, bucket in enumerate(S):
            for fci in bucket:
                fci_set = set(fci)
                # Transitive reduction: only keep fci if no already accepted
                # successor is a proper subset of it.
                if any(fci_set > s for s in successors):
                    continue
                successors.append(fci_set)
                support_fc = bucket[fci]
                conf = support_fc / gen_supp
                if conf >= min_conf:
                    ib.append({
                        "antecedents": generator,
                        "consequents": tuple(sorted(fci_set - set(generator))),
                        "support": support_fc,
                        "confidence": conf,
                    })
    return ib
def classification_rules(frequent_itemsets: DataFrame,
                         label: str,
                         min_conf: float = 0.5) -> DataFrame:
    """Constructs association rules from frequent itemsets directly.
    The label is expected to be a single string which is the attribute
    of the consequent.

    Args:
        frequent_itemsets (DataFrame): Frequent itemsets to mine rules from.
        label (str): Name of the class label attribute.
        min_conf (float, optional): Minimum confidence threshold. Defaults to 0.5.

    Returns:
        DataFrame: All rules where the itemsets had the class label as an item.
        This item is placed in the consequent and the rest of the items constitutes
        the antecedent.
    """
    # Map each item to its support
    support_mapping = frequent_itemsets.set_index(
        "itemsets").to_dict()["support"]
    # Skip over too short rules or itemsets not containing the label
    frequent_itemsets = frequent_itemsets[
        (frequent_itemsets['itemsets'].map(len) >= 2)
        & (frequent_itemsets['itemsets'].map(lambda x: any(label in str(i)
                                                           for i in x))) &
        (frequent_itemsets['support'] != 0)]
    # Itemsets flagged ignore=True only carry support info; skip them.
    if "ignore" in frequent_itemsets.columns:
        frequent_itemsets = frequent_itemsets[(frequent_itemsets["ignore"] !=
                                               True)]
    rules = []
    for idx, row in frequent_itemsets.iterrows():
        itemset = row["itemsets"]
        support = row["support"]
        # Build antecedent and consequent.
        # NOTE(review): if several items contain the label string, only the
        # LAST one becomes the consequent and the others are silently dropped
        # from the antecedent — confirm that itemsets hold at most one label item.
        rule = {}
        antecedent = []
        consequent = None
        for item in itemset:
            if label not in str(item):
                antecedent.append(item)
            else:
                consequent = (item, )
        antecedent = tuple(antecedent)
        # Assumes every sub-itemset used here is present in support_mapping.
        conf = confidence(support_mapping[antecedent], support)
        if conf < min_conf:
            continue
        rule = {
            "antecedents": antecedent,
            "consequents": consequent,
            "support": support,
            "confidence": conf
        }
        rule.update(
            measure_dict(support_mapping[antecedent],
                         support_mapping[consequent], support))
        rules.append(rule)
    return DataFrame(
        rules,
        index=[i for i in range(len(rules))],
    )
def get_classification_rules(rules: DataFrame, label: str) -> DataFrame:
    """Post-process mined rules, keeping only those whose sole consequent is
    the classification label.

    Args:
        rules (DataFrame): Mined rules, a superset of the classification rules.
        label (str): Target attribute.

    Returns:
        DataFrame: All rules whose consequent consists of exactly one item
        starting with ``label``.
    """
    def __is_class_rule(consequent) -> bool:
        return len(consequent) == 1 and consequent[0].startswith(label)

    return rules.loc[rules["consequents"].apply(__is_class_rule)]
def prune_by_improvement(db: DataFrame,
                         rules: DataFrame,
                         minimp: float = 0.002) -> DataFrame:
    """Prune all rules that do not meet the minimum improvement constraint.

    The improvement of a rule is its confidence minus the maximum confidence
    of any of its sub-rules; sub-rules not present in ``rules`` are evaluated
    against the database directly.

    Args:
        db (DataFrame): Database the rules were mined from.
        rules (DataFrame): Mined rules.
        minimp (float, optional): Minimum improvement threshold. Defaults to 0.002.

    Returns:
        DataFrame: Pruned rule set containing only productive rules.
    """
    candidates = _compare_to_mined_rules(rules, minimp)
    subset_counts = _get_proper_subsets(candidates)
    subset_supports = _get_subset_supports(db, subset_counts)
    return _prune_by_improvement(candidates, subset_supports, minimp)
def _prune_by_improvement(rules: DataFrame, support_info: Dict[Tuple[int],
                                                               Any],
                          minimp: float) -> DataFrame:
    """Drop every rule whose confidence does not exceed the confidence of
    each of its sub-rules by at least ``minimp``.

    Args:
        rules (DataFrame): Mined rules.
        support_info (Dict[Tuple[int], Any]): Support counts of all proper
            antecedent subsets (with and without the consequent appended).
        minimp (float): Minimum improvement threshold.

    Returns:
        DataFrame: The rules with all unproductive ones removed.
    """
    unproductive = []
    for idx, rule_row in rules.iterrows():
        antecedent_items = sorted(rule_row["antecedents"])
        # Every non-empty proper subset of the antecedent defines a sub-rule.
        proper_subsets = chain.from_iterable(
            combinations(antecedent_items, size)
            for size in range(1, len(antecedent_items)))
        for subset in proper_subsets:
            sub_conf = (support_info[subset + rule_row["consequents"]] /
                        support_info[subset])
            if rule_row["confidence"] - sub_conf < minimp:
                unproductive.append(idx)
                break
    return rules.drop(index=unproductive)
def _compare_to_mined_rules(rules: DataFrame, minimp: float) -> DataFrame:
    """Checks the improvement constraint within the set of mined rules by
    searching for rules whose item sets are real subsets of one another.

    Args:
        rules (DataFrame): Set of mined rules, each with a single consequent.
        minimp (float): Minimum improvement threshold.

    Raises:
        Exception: When more than one attribute is present in the consequent.

    Returns:
        DataFrame: The rule set pruned by the above condition.
    """
    if (rules["consequents"].map(len) > 1).any():
        # BUG FIX: the message previously said "antecedent" although the
        # check above inspects the consequents.
        raise Exception("Only a single attribute as consequent allowed.")
    drop_rows = set()
    pd.options.mode.chained_assignment = None  # Disable the warning
    # Precompute each rule's full item set (antecedent + consequent); frozenset
    # comparison with `>` below means "proper superset".
    rules['rule_items'] = rules.apply(
        lambda x: frozenset(x['antecedents'] + x['consequents']), axis=1)
    pd.options.mode.chained_assignment = 'warn'  # Enable the warning
    if len(rules) > 100000:
        # Cheap pre-pass for very large rule sets: test the 1250 shortest
        # rules first, since short rules prune the most supersets.
        # NOTE(review): positional itertuples access: row[0] is the index,
        # row[4] the "confidence" column, row[-1] the appended "rule_items"
        # column — fragile if the column layout changes; confirm.
        for row in rules.sort_values(
                by="rule_items",
                key=lambda x: x.map(len)).iloc[:1250].itertuples():
            if row[0] in drop_rows:
                continue
            rule_items = row[-1]
            rule_conf = row[4]
            # Drop every proper-superset rule that does not improve on this
            # rule's confidence by at least minimp.
            temp = (rules.loc[rules["rule_items"] > rule_items, "confidence"] -
                    rule_conf < minimp)
            if temp.any():
                drop_rows.update(temp.loc[temp == True].index)
        rules = rules.drop(drop_rows)
        drop_rows.clear()
    for row in rules.itertuples():
        rule_items = row[-1]
        rule_conf = row[4]
        temp = (rules.loc[rules["rule_items"] > rule_items, "confidence"] -
                rule_conf < minimp)
        if temp.any():
            drop_rows.update(temp.loc[temp == True].index)
    return rules.drop(index=drop_rows).drop(labels=["rule_items"], axis=1)
def _get_proper_subsets(rules: DataFrame) -> Dict[Tuple[Any], int]:
    """Generates all proper subsets of the itemsets that make up a rule in the given set
    of potential rules (the already pruned rules do no longer have to be respected).

    Args:
        rules (DataFrame): Set of rules to get all subsets from.

    Returns:
        Dict[Tuple[Any], int]: Itemsets mapped to an initial count of 0.
    """
    required_sets = set()
    # If a rule's antecedent is a proper subset of another rule's antecedent
    # (same consequent), drop the smaller rule so the same subsets are not
    # recomputed twice.
    grouped_rules = rules.groupby("consequents")
    drop_list = []
    for _, group in grouped_rules:
        for i in range(len(group)):
            row = group.iloc[i]
            for j in range(i + 1, len(group)):
                other = group.iloc[j]
                if set(row["antecedents"]) < set(other["antecedents"]):
                    drop_list.append(row.name)
                    break
    rules = rules.drop(drop_list)
    for row in rules.itertuples(index=False, name=None):
        # row[0] is the antecedent tuple, row[1] the consequent tuple.
        rule = row[0]
        items = sorted(rule)
        # Every non-empty proper subset of the antecedent ...
        itemsets = set(
            chain.from_iterable(
                combinations(items, r) for r in range(1, len(items))))
        # ... is needed both with the consequent appended (rule support) and
        # on its own (antecedent support).
        for itemset in itemsets:
            required_sets.add(itemset + row[1])
        required_sets.update(itemsets)
    return {itemset: 0 for itemset in required_sets}
def _get_subset_supports(
        db: DataFrame, subsets: Dict[Tuple[Any],
                                     int]) -> Dict[Tuple[Any], int]:
    """Count the support of every subset produced by _get_proper_subsets by
    incrementing the count associated with each itemset in place.

    Args:
        db (DataFrame): Database that was initially mined.
        subsets (Dict[Tuple[Any], int]): All subsets with counts set to 0.

    Returns:
        Dict[Tuple[Any], int]: The same mapping, now holding the absolute
        support of every subset in the given db.
    """
    for _, record in db.iterrows():
        for itemset in subsets:
            if all(__compare_attribute(record, item) for item in itemset):
                subsets[itemset] += 1
    return subsets
def __compare_attribute(row: Series, item: str) -> bool:
    """Parse one item description and test whether the given row supports it.

    Three item formats are handled:
    * clustering items: ``{x,y} = [20,30] x [25,35]``
    * discretized items: ``x = 123..456`` (numeric) or ``x = foo`` (categorical)
    * plain binary items: the bare column name

    Args:
        row (Series): Row of the database to match with the item.
        item (str): Description of an item.

    Returns:
        bool: True when the item is supported by the row, False otherwise.
    """
    # Handle clustering {x,y} = [20,30] x [25,35]
    if item.startswith("{"):
        attrlist = item[1:item.find("}")]
        names = [name.strip() for name in attrlist.split(",")]
        # BUG FIX: the separator between the two interval groups was located
        # with item.find("x"), which matches an "x" inside attribute names
        # (the format's own "{x,y}" example breaks). Locate the two bracket
        # groups positionally instead.
        first_open = item.find("[")
        first_close = item.find("]", first_open)
        second_open = item.find("[", first_close)
        second_close = item.find("]", second_open)
        # NOTE(review): assumes the first bracket lists the lower bounds of
        # all attributes and the second the upper bounds — confirm against
        # the clustering output format.
        lower_boundaries = [
            s.strip() for s in item[first_open + 1:first_close].split(",")
        ]
        upper_boundaries = [
            s.strip() for s in item[second_open + 1:second_close].split(",")
        ]
        for i, name in enumerate(names):
            # BUG FIX: the original combined the two comparisons with `and`,
            # which can never be true for a non-empty interval (a value
            # cannot be below the lower AND above the upper bound), so every
            # clustering item matched every row.
            if row[name] < float(lower_boundaries[i]) or row[name] > float(
                    upper_boundaries[i]):
                return False
        return True
    elif "=" in item:
        name, _, value = item.partition("=")
        name = name.strip()
        value = value.strip()
        if ".." in value:
            # Numeric attributes: x = 123..456
            lower, _, upper = value.partition("..")
            return float(lower) <= row[name] <= float(upper)
        else:
            return str(row[name]) == value
    else:
        # Handle the binary case w/o discretization
        return row[item]
def get_tidlists(db: DataFrame, rules: DataFrame) -> DataFrame:
    """Return a copy of ``rules`` with a ``tidlists`` column holding, for every
    rule, the set of transaction ids (db index values) that support the whole
    rule (antecedent plus consequent).

    Args:
        db (DataFrame): Database that was initially mined.
        rules (DataFrame): Mined rules.

    Returns:
        DataFrame: Copy of the rules DataFrame with the additional
        ``tidlists`` column.
    """
    annotated = rules.copy()
    # One fresh set per rule. Although iterrows() yields row copies below, the
    # set objects themselves are shared, so .add() updates the frame in place.
    annotated["tidlists"] = annotated.apply(lambda _row: set(), axis=1)
    for tid, record in db.iterrows():
        for _, rule in annotated.iterrows():
            full_itemset = rule["antecedents"] + rule["consequents"]
            if all(__compare_attribute(record, item) for item in full_itemset):
                rule["tidlists"].add(tid)
    return annotated
from copy import deepcopy
import random
from math import floor
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
from algs.gar import Gene, _amplitude, _get_fittest, _get_lower_upper_bound
from algs.util import measure_dict
class RuleIndividuum:
def __init__(self, items: Dict[str, Gene], consequent: str) -> None:
    """Create a rule individual from a gene map and a consequent attribute.

    Args:
        items: Maps attribute names to their genes (antecedent and consequent).
        consequent: Name of the attribute acting as the rule's consequent.
    """
    self.items = items
    self.consequent = consequent
    # Evolution bookkeeping, filled in during fitness evaluation.
    self.fitness = 0.0
    self.re_coverage = 0.0
    # Absolute support counts for the whole rule, its antecedent and its consequent.
    self.support = 0
    self.antecedent_supp = 0
    self.consequent_supp = 0
    # Number of attributes (genes) this individual spans.
    self.attr_count = len(self.items)
def num_attrs(self) -> int:
    """Return the number of attributes (genes) in this individual."""
    return self.attr_count

def get_items(self) -> Dict[str, Gene]:
    """Return the mapping from attribute names to genes."""
    return self.items

def get_consequent(self) -> str:
    """Return the name of the consequent attribute."""
    return self.consequent

def confidence(self) -> float:
    """Return support / antecedent support, or 0 if the antecedent never occurs."""
    if self.antecedent_supp == 0.0:
        return 0
    return self.support / self.antecedent_supp
def to_tuple(self, attrs: List[str]) -> Tuple[str]:
    """Render the given attributes' genes as ``attr = value`` item strings.

    Args:
        attrs: Attribute names to render, in the desired order.

    Returns:
        Tuple[str]: One string per attribute; numerical genes become
        ``name = lower..upper``, categorical ones ``name = value``.
    """
    items = []
    for attr in attrs:
        gene = self.items[attr]
        if gene.is_numerical():
            items.append(f"{gene.name} = {gene.lower}..{gene.upper}")
        else:
            items.append(f"{gene.name} = {gene.value}")
    return tuple(items)
def to_dict(self, n: int) -> Dict[str, float]:
"""Converts the rule individual to an entry that is compatible with the rule
framework in rule_gen.
Args:
n (int): Number of database entries.
Returns:
Dict[str, float]: Map with all strings s.a. antecedents, consequents, cosine, etc.
mapped to their respective values.
"""
antecedents = tuple(
[item for item in self.items.keys() if item != self.consequent]
)
antecedents = self.to_tuple(antecedents)
consequent = self.to_tuple([self.consequent])
items = {
"antecedents": antecedents,
"consequents": consequent,
"support": self.support / n,
"confidence": self.support / self.antecedent_supp
if self.antecedent_supp != 0
else 0,
}
items.update(
measure_dict(
self.antecedent_supp / n, self.consequent_supp / n, self.support / n
)
)
return items
def matching_attributes(self, record: pd.Series) -> bool:
"""Matches a row of the database against the individual.
Args:
record (pd.Series): Row of the database
Returns:
bool: True, when the record is covered by the individuum, False elsewise.
"""
for name, gene in self.items.items():
val = record[name]
if gene.is_numerical():
if val > gene.upper or val < gene.lower:
return False
elif val != gene.value:
return False
return True
def crossover(self, other: Any, probability: float) -> Tuple[Any, Any]:
"""Performs crossover operator to generate two offsprings from two individuals.
Common genes are inherited by taking one at random with the given probability.
Other genes are inherited by default.
Args:
other (Any): Individual to cross the current individual with
probability (float): Crossover probability
Returns:
Tuple[Any, Any]: Two offsprings resulting from the crossover
"""
other_genes = other.items
genes1 = deepcopy(self.items)
genes2 = deepcopy(other_genes)
common_genes = set(genes1).intersection(other_genes)
rand_prob = np.random.rand(len(common_genes))
for name, prob in zip(common_genes, rand_prob):
if prob < probability:
genes1[name] = deepcopy(other_genes[name])
if prob < probability:
genes2[name] = deepcopy(self.items[name])
return (
RuleIndividuum(genes1, self.consequent),
RuleIndividuum(genes2, self.consequent),
)
def mutate(self, db: pd.DataFrame, probability: float) -> None:
"""Mutates randomly selected genes. For numeric genes the interval bounaries
are either increased or deacreased by [0, interval_width/11]. In case of
categorical attributes there's a 25% of changing the attribute to a random
value of the domain.
Args:
db (pd.DataFrame): Database
probability (float): Mutation probability
"""
random_numbers = np.random.random(size=len(self.items))
for i, gene in enumerate(self.items.values()):
name = gene.name
# Mutate in this case
if random_numbers[i] < probability:
name = gene.name
if gene.numerical:
# Change the upper and lower bound of the interval
lower = db[name].min()
upper = db[name].max()
width_delta = (upper - lower) / 17
delta1 = np.random.uniform(0, width_delta)
delta2 = np.random.uniform(0, width_delta)
rands = np.random.random(size=(2))
gene.lower += delta1 * (-1 if rands[0] < 0.5 else 1)
gene.upper += delta2 * (-1 if rands[1] < 0.5 else 1)
# All this mess ensures that the interval boundaries do not exceed DB [min, max]
gene.lower = min(upper, max(gene.lower, lower))
gene.upper = min(upper, max(gene.upper, lower))
if gene.lower > gene.upper:
gene.upper, gene.lower = gene.lower, gene.upper
else:
# Only seldomly change the value of the categorical attribute
gene.value = (
gene.value
if np.random.random() < 0.75
else np.random.choice(db[name].to_numpy())
)
def __repr__(self) -> str:
antecedent = [
item.__repr__()
for item in self.items.values()
if item.name != self.consequent
]
return f"{antecedent.__str__()} -> {self.items[self.consequent]}"
def _generate_first_rule_population(
    db: pd.DataFrame,
    population_size: int,
    interval_boundaries: Dict[str, Tuple[float, float]],
    set_attribute: str,
    attr_probability: float = 0.5,
) -> List[RuleIndividuum]:
    """Determines an initial population, where each individuum may have 2 to n randomly sampled attributes.
    Further to come up with an individuum that is covered by at least one tuple, a random tuple from the db
    is sampled. For numeric attributes a random uniform number from 0 to 1/7 of the entire domain is added/
    subtracted from the interval boundaries.

    Args:
        db (pd.DataFrame): Database to sample initial individuals from.
        population_size (int): Number of individuals in the inital population.
        interval_boundaries (Dict[str, Tuple[float, float]]): Result of _get_lower_upper_bound
        set_attribute (str): Name of attribute that should be included in every itemset.
        attr_probability (float): Probability that the attribute is not picked.

    Returns:
        List[RuleIndividuum]: Initial population.
    """
    individuums = []
    for _ in range(population_size):
        item = {}
        items = list(db.columns)
        # Add two random attributes and then fill up with a coin toss for each attribute
        attrs = random.sample(items, 2)
        # If the target attribute is not sampled, replace the second sample with it
        if set_attribute not in attrs:
            attrs = attrs[0:1] + [set_attribute]
        attrs = [
            itm
            for itm in items
            if itm not in attrs and np.random.random() > attr_probability
        ] + attrs
        # BUG FIX: np.random.randint's upper bound is exclusive, so the former
        # `len(db) - 1` could never pick the last row and raised a ValueError
        # for a single-row database.
        row = np.random.randint(0, len(db))
        register = db.iloc[row]
        for column in attrs:
            value = register[column]
            if interval_boundaries.get(column):
                # Widen the seed value to an interval by at most 1/7th of the
                # attribute's domain, clamped to the domain itself.
                lower, upper = interval_boundaries[column]
                u = floor(np.random.uniform(0, (upper - lower) / 7))
                lower = max(lower, value - u)
                upper = min(upper, value + u)
                item[column] = Gene(column, True, lower, upper, lower)
            else:
                # Categorical attribute: the gene carries the sampled value.
                item[column] = Gene(column, False, value, value, value)
        individuums.append(RuleIndividuum(item, set_attribute))
    return individuums
def _count_support(
    db: pd.DataFrame, marked_rows: pd.DataFrame, population: List[RuleIndividuum]
) -> None:
    """Updates the support count (antecedent and rule) and re-coverage for all the individuals
    in the population. Everytime a rule applies to a row, the sum of all
    the marks for that row are added and normalized by means of the row sum
    in marked_rows.

    Args:
        db (pd.DataFrame): Database to be mined
        marked_rows (pd.DataFrame): Counts how often each attribute and rule was covered
        population (List[RuleIndividuum]): Current Population
    """
    for individuum in population:
        relevant_rows = [name for name in individuum.items.keys()]
        # NOTE(review): columns=[relevant_rows] (list of list) builds a
        # one-level MultiIndex; the drop(..., level=0) below relies on this —
        # confirm this is intentional and survives pandas upgrades.
        relevant_db = pd.DataFrame(columns=[relevant_rows])
        # One boolean column per gene: True where the row satisfies the gene.
        for name, gene in individuum.items.items():
            if gene.numerical:
                relevant_db[name] = db[name].between(gene.lower, gene.upper)
            else:
                relevant_db[name] = db[name] == gene.value
        # A row supports the rule iff it satisfies every gene.
        individuum.support = relevant_db.all(axis=1).sum()
        # Antecedent support: same check with the consequent column removed.
        individuum.antecedent_supp = (
            relevant_db.drop(individuum.consequent, axis=1,
                             level=0).all(axis=1).sum()
        )
        # Re-coverage: restrict to rows that support the rule AND were already
        # marked by previously chosen rules, then take the fraction of marks
        # that fall on this rule's attributes.
        mask = (relevant_db.all(axis=1)) & (marked_rows.sum(axis=1) != 0)
        column_sums = marked_rows.loc[mask].sum(axis=1)
        if column_sums.any():
            relevant_coverage = (
                marked_rows[relevant_rows].loc[mask].sum(axis=1) / column_sums
            )
            individuum.re_coverage = relevant_coverage.sum()
def _count_consequent_support(
    db: pd.DataFrame, final_rule_set: List[RuleIndividuum]
) -> None:
    """Counts the support of each rule's consequent item and stores it on the
    respective RuleIndividuum (in place).

    Args:
        db (pd.DataFrame): Database
        final_rule_set (List[RuleIndividuum]): All fittest, mined rules
    """
    for rule in final_rule_set:
        target_gene = rule.items[rule.consequent]
        column = db[target_gene.name]
        if target_gene.is_numerical():
            # Numerical consequent covers the closed interval [lower, upper].
            covered = (column >= target_gene.lower) & (column <= target_gene.upper)
        else:
            covered = column == target_gene.value
        rule.consequent_supp = covered.sum()
def _cross_over(
    population: List[RuleIndividuum], probability: float, number_offspring: int
) -> List[RuleIndividuum]:
    """Produces offspring by repeatedly crossing two randomly sampled
    progenitors from the population.

    Args:
        population (List[RuleIndividuum]): Progenitors that are crossed at random
        probability (float): Crossover probability
        number_offspring (int): Number of crossover events to perform

    Returns:
        List[RuleIndividuum]: Two offspring per crossover event, i.e.
        2 * number_offspring individuals in total.
    """
    offspring_pool = []
    for _ in range(number_offspring):
        parent_a, parent_b = random.sample(population, k=2)
        offspring_pool.extend(parent_a.crossover(parent_b, probability))
    return offspring_pool
def _update_marked_records(
    db: pd.DataFrame, marked_records: pd.DataFrame, chosen: RuleIndividuum
) -> None:
    """In a postprocessing step, the itemset with the highest fitness is used to mark all the
    records in the db, that are covered by the itemset.

    Args:
        db (pd.DataFrame): Database whose records will be marked
        marked_records (pd.DataFrame): Per-record, per-attribute mark counters
        chosen (RuleIndividuum): The fittest itemset of the fully evolved population
    """
    covered_attrs = list(chosen.items.keys())
    # Only the rule's own attributes matter for matching and marking.
    projected = db[covered_attrs]
    for position in range(len(projected)):
        if chosen.matching_attributes(projected.iloc[position]):
            # marked_records is indexed 0..n-1, so the positional index is
            # also its label here.
            marked_records.loc[position, covered_attrs] += 1
def gar_plus(
    db: pd.DataFrame,
    num_cat_attrs: Dict[str, bool],
    num_rules: int,
    num_gens: int,
    population_size: int,
    w_s: float,
    w_c: float,
    n_a: float,
    w_a: float,
    w_recov: float,
    consequent: str,
    selection_percentage: float = 0.15,
    recombination_probability: float = 0.5,
    mutation_probability: float = 0.4,
    attr_probability: float = 0.5,
) -> pd.DataFrame:
    """Implements a version of the gar plus algorithm from 'An evolutionary algorithm to discover quantitative association rules from huge
    databases without the need for an a priori discretization', where the consequent can consist of a single item, which has to be determined
    a priori.

    Args:
        db (pd.DataFrame): Database
        num_cat_attrs (Dict[str, bool]): Maps numerical attributes to true and categorical ones to false
        num_rules (int): Number of rules to mine; one rule is extracted per evolutionary restart
        num_gens (int): Number of generations
        population_size (int): Number of individuals used in each population
        w_s (float): Weighting factor for support
        w_c (float): Weighting factor for confidence
        n_a (float): Weighting factor for number attributes
        w_a (float): Weighting factor for amplitude
        w_recov (float): Weighting factor for re-coverage
        consequent (str): Consequent attribute name
        selection_percentage (float, optional): Number of individuals passing to the next generation. Defaults to 0.15.
        recombination_probability (float, optional): Crossover probability. Defaults to 0.5.
        mutation_probability (float, optional): Mutation probability. Defaults to 0.4.
        attr_probability (float, optional): Probability with which attributes are chosen, when constructing the initial
            population. Defaults to 0.5.

    Returns:
        pd.DataFrame: Fittest rules with a bunch of measures added.
    """

    def __update_counts(
        db: pd.DataFrame, marked_rows: pd.DataFrame, population: List[RuleIndividuum]
    ) -> None:
        """Recomputes support counts and fitness for every individual."""
        _count_support(db, marked_rows, population)
        for individual in population:
            individual.fitness = _get_fitness(individual)

    def _get_fitness(ind: RuleIndividuum) -> float:
        """Weighted sum of support, confidence and attribute count, minus
        penalties for interval amplitude and re-coverage."""
        # BUG FIX: the amplitude term used the free variable `individual`
        # (a stale loop variable of the enclosing scope) instead of the
        # parameter `ind`, so offspring and final fitness were computed with
        # the wrong individual's amplitude.
        return (
            (ind.support / n * w_s)
            + (ind.confidence() * w_c)
            + (n_a * ind.attr_count / num_attrs)
            - (w_a * _amplitude(intervals, ind))
            - (w_recov * ind.re_coverage / n)
        )

    n = len(db)
    num_attrs = len(num_cat_attrs)
    best_rules_found = []
    intervals = _get_lower_upper_bound(db, num_cat_attrs)
    # Store a counter for each attribute, that is incremented when the row is covered by a rule
    marked_rows = pd.DataFrame(0, index=list(range(n)), columns=list(db.columns))
    for _ in range(num_rules):
        population = _generate_first_rule_population(
            db, population_size, intervals, consequent, attr_probability
        )
        for _generation in range(num_gens):
            __update_counts(db, marked_rows, population)
            # Get selection percentage of the best adapted individuals for the next gen
            next_population = _get_fittest(population, selection_percentage)
            # Crossover events produce two offspring each
            offsprings = _cross_over(
                population,
                recombination_probability,
                len(population) - len(next_population),
            )
            # Keep the better adapted of each offspring pair
            __update_counts(db, marked_rows, offsprings)
            offsprings = [
                offsprings[i]
                if offsprings[i].fitness > offsprings[i + 1].fitness
                else offsprings[i + 1]
                for i in range(0, len(offsprings), 2)
            ]
            next_population.extend(offsprings)
            for individual in next_population:
                individual.mutate(db, mutation_probability)
            population = next_population
        __update_counts(db, marked_rows, population)
        chosen_one = max(population, key=lambda item: item.fitness)
        # Penalize re-covering the chosen rule's rows in later restarts.
        _update_marked_records(db, marked_rows, chosen_one)
        best_rules_found.append(chosen_one)
    # Final count of consequent support to calculate the rule measures
    _count_consequent_support(db, best_rules_found)
    # Return a dataframe containing rules only
    return pd.DataFrame(
        [rule.to_dict(len(db)) for rule in best_rules_found]
    ).drop_duplicates()
from typing import DefaultDict, Dict, List, Tuple
import numpy as np
from pandas import DataFrame
from collections import defaultdict
from algs.util import get_frequent_1_itemsets
class FPNode:
    """Single node of an FP tree.

    Attributes:
        item: Item label stored at this node (None for the root).
        count: Number of transactions sharing the path up to this node.
        parent: Parent node (None for the root).
        children: Maps item labels to child nodes.
        node_link: Next node carrying the same item label, if any.
    """

    def __init__(self, item: str, parent: "FPNode", count: int = 1) -> None:
        self.item = item
        self.count = count
        self.parent = parent
        self.children = {}
        self.node_link = None
class FPTree:
    """Class used for fp trees and conditional fp trees.

    Attributes:
        root: Sentinel root node carrying no item.
        header_table: Maps an item to the first node of its node-link chain.
    """

    def __init__(self, header_table: Dict[str, None]) -> None:
        self.root = FPNode(None, None)
        self.header_table = header_table

    def add_transaction(self, transaction: List[str], node_count: int = 1) -> None:
        """Encodes a sorted (e.g. by support) transaction to a path in the fp tree.

        Args:
            transaction (List[str]): Sorted transaction
            node_count (int, optional): Should only be set when constructing
                conditional fp trees. Defaults to 1.
        """

        # FIX: the annotation was `depth: 0` (a value, not a type).
        def __add_transaction(depth: int, node: FPNode) -> None:
            if depth == len(transaction):
                return
            item_name = transaction[depth]
            child = node.children.get(item_name)
            # Shared prefixes only get their count increased; new items open a
            # new branch and are hooked into the node-link chain.
            if child is not None:
                child.count += node_count
            else:
                child = FPNode(item_name, node, node_count)
                node.children[item_name] = child
                self.__set_node_link(item_name, child)
            __add_transaction(depth + 1, child)

        __add_transaction(0, self.root)

    def __set_node_link(self, item_name: str, node: FPNode) -> None:
        """Set the node_link for a node or add an entry to the header table.

        Args:
            item_name (str): Name of the item
            node (FPNode): Node to link to
        """
        next_node = self.header_table.get(item_name)
        if next_node is None:
            self.header_table[item_name] = node
        else:
            # Walk to the end of the node-link chain and append there.
            while next_node is not None:
                previous_node = next_node
                next_node = next_node.node_link
            previous_node.node_link = node

    def add_transactions(self, transactions: DataFrame) -> None:
        """Iterates over the list of sorted (e.g. support) transactions
        and calls add_transaction.

        Args:
            transactions (DataFrame): All transactions to build the fp tree from.
        """
        for idx, row in transactions.iterrows():
            # `list(row)` is a boolean column mask; selecting with it and
            # listing the result yields the item (column) names of this row.
            transaction = list(transactions.loc[idx:, list(row)])
            self.add_transaction(transaction)

    def get_sum_item_counts(self, item: str) -> int:
        """Given a item in the header list, sum all counts of nodes
        that can be reached via node_links starting from the header link.

        Args:
            item (str): Item in the header list

        Returns:
            int: Total count for the respective item
        """
        header_item = self.header_table[item]
        count_sum = 0
        while header_item is not None:
            count_sum += header_item.count
            header_item = header_item.node_link
        return count_sum
def fp_growth(transactions: DataFrame, min_support: float = 0.05) -> DataFrame:
    """Uses the fp_growth method described by Han et al. to calculate all frequent itemsets
    satisfying the minimum support constraint.

    Args:
        transactions (DataFrame): One-Hot encoded dataframe containing all transactions.
        min_support (float, optional): Minimum support threshold. Defaults to 0.05.

    Returns:
        DataFrame: Dataframe where the first column contains a list of all items in the itemset and the second
        one contains the support for that itemset.
    """
    tree = construct_fp_tree(transactions, min_support)
    absolute_min_supp = int(min_support * len(transactions))
    patterns = fp_tree_growth(tree, absolute_min_supp)
    # Build df from the pattern dictionary.
    result = DataFrame(
        patterns.items(),
        index=list(range(len(patterns))),
        columns=["itemsets", "support"],
    )
    result["support"] = result["support"] / len(transactions)
    # Cut off itemsets that only passed the integer-truncated threshold.
    return result[result["support"] > min_support]
def get_transformed_dataframe(old_df: DataFrame, all_items: np.ndarray, frequent_items: Dict[str, float]) -> DataFrame:
    """Removes all infrequent items from the transactions and reorders the
    remaining columns in descending order of support (the insertion order of
    ``frequent_items``).

    Args:
        old_df (DataFrame): All transactions.
        all_items (np.ndarray): Items contained in the transactions.
        frequent_items (Dict[str, float]): All frequent items in the transactions.

    Returns:
        DataFrame: New dataframe with all infrequent items removed and remaining items sorted in
        descending order of support.
    """
    infrequent = [itm for itm in all_items if not frequent_items.get(itm)]
    reduced = old_df.drop(infrequent, inplace=False, axis=1)
    return reduced[frequent_items.keys()]
def construct_fp_tree(transactions: DataFrame, min_support: float) -> FPTree:
    """Constructs a fp_tree from the given transactions and minimum support
    threshold.

    Args:
        transactions (DataFrame): All transactions.
        min_support (float): Minimum support threshold.

    Returns:
        FPTree: fp tree containing all frequent itemset information.
    """
    all_items = np.array(transactions.columns)
    one_itemsets = get_frequent_1_itemsets(all_items, transactions, min_support)
    # Unwrap the 1-tuples to bare item names, ordered by descending support.
    ordered_items = {
        key[0]: supp
        for key, supp in sorted(
            one_itemsets.items(), key=lambda entry: entry[1], reverse=True)
    }
    sorted_transactions = get_transformed_dataframe(
        transactions, all_items, ordered_items)
    # Header table for node links, then insert every transaction.
    tree = FPTree(dict.fromkeys(ordered_items, None))
    tree.add_transactions(sorted_transactions)
    return tree
def conditional_pattern_base(item: str, fptree: FPTree, min_supp: int, header_table: Dict[str, int]) -> DefaultDict[Tuple[str], int]:
    """Generates the conditional base pattern for given fp tree and item. Further this method removes
    any non-frequent item from the paths (this is not a property of conditional pattern bases).
    Thereby it also populates a dictionary with frequent items and their support count.

    Args:
        item (str): The item to build a conditional base pattern on.
        fptree (FPTree): fp tree
        min_supp (int): Minimum support as count.
        header_table (Dict[str,int]): Empty header_table, that will be populated with frequent items
            and their count, respectively.

    Returns:
        DefaultDict[Tuple[str], int]: Count adjusted prefix-paths without item as leaf are stored as keys.
        The count of the prefix path is stored as value.
    """
    first_item = fptree.header_table.get(item)
    paths = {}
    frequent_items = defaultdict(int)
    # Follow the node-link chain of `item`: every linked node contributes one
    # prefix path, weighted by that node's count.
    while first_item != None:
        leaf_with_item_label = first_item
        first_item = first_item.parent
        # Create dictionary from one path and store the path string as tuple
        path_str = tuple()
        while first_item != fptree.root:
            path_str = (first_item.item,) + path_str
            frequent_items[first_item.item] += leaf_with_item_label.count
            first_item = first_item.parent
        # Plain assignment is safe: two nodes with the same label cannot share
        # an identical prefix path in an fp tree.
        paths[path_str] = leaf_with_item_label.count
        first_item = leaf_with_item_label.node_link
    # Calculate frequent items over all paths
    frequent_items = {path: supp for path,
                      supp in frequent_items.items() if supp >= min_supp}
    # Re-map every path onto its frequent items only; shortened paths may now
    # coincide, hence the += aggregation below.
    paths_with_frequent_items = defaultdict(int)
    for items, supp in paths.items():
        adjusted_path = tuple()
        # NOTE: `item` shadows the parameter here; the parameter is no longer
        # needed at this point.
        for item in items:
            if item in frequent_items:
                adjusted_path += (item,)
        if len(adjusted_path) > 0:
            paths_with_frequent_items[adjusted_path] += supp
    header_table.update(frequent_items)
    return paths_with_frequent_items
def conditional_fp_tree(pattern_base: DefaultDict[Tuple[str], int], header_table: Dict[str, int]) -> FPTree:
    """Constructs a conditional fp tree under the pattern_base for an item.
    It uses the frequent_items and their support to construct an ordered header table
    for the fp tree.

    Args:
        pattern_base (DefaultDict[Tuple[str], int]): Result of the conditional pattern base function.
        header_table (Dict[str, int]): Frequent items and their counts.

    Returns:
        FPTree: Conditional fp tree under the item, whose pattern base was provided.
    """
    # BUG FIX: the keys of `header_table` are plain item names (filled by
    # conditional_pattern_base), not 1-tuples, so the former `k[0]` indexed
    # the first *character* of each item. That polluted the conditional
    # tree's header table with bogus entries and broke its support ordering.
    ordered_header = {k: None for k, v in sorted(
        header_table.items(), key=lambda item: item[1], reverse=True)}
    tree = FPTree(ordered_header)
    for path, count in pattern_base.items():
        tree.add_transaction(list(path), count)
    return tree
def generate_patterns_single_path(suffix: Tuple[str], path: Tuple[str], count: int) -> Dict[Tuple[str], int]:
    """Single path optimisation for a conditional fp tree. Builds every
    non-empty subsequence of the prefix path and appends the suffix to each.
    All combinations share the same support count, since the path's count has
    already been adjusted.

    Args:
        suffix (Tuple[str]): Current frequent itemset suffix.
        path (Tuple[str]): Single path in the conditional tree under the suffix.
        count (int): Support count of all path combinations.

    Returns:
        Dict[Tuple[str], int]: All path combinations concatenated to the suffix and their count.
    """
    patterns = {}
    prefixes = []
    for path_item in path:
        # Extend every previously built subsequence by the current item, then
        # add the singleton; all of them become patterns (with suffix glued on).
        grown = [previous + (path_item,) for previous in prefixes]
        grown.append((path_item,))
        for candidate in grown:
            patterns[candidate + suffix] = count
        prefixes.extend(grown)
    return patterns
def fp_tree_growth(fptree: FPTree, min_supp: int) -> Dict[Tuple[str], int]:
    """FP_growth algorithm to calculate all frequent itemsets satisfying the
    minimum support constraint from a given fp tree.

    Args:
        fptree (FPTree): fp tree to obtain frequent itemsets from.
        min_supp (int): Minimum support threshold as count.

    Returns:
        Dict[Tuple[str], int]: All frequent itemsets
    """
    mined = {}

    def _mine(suffix: Tuple[str], tree: FPTree, acc: Dict[Tuple[str], int]):
        head = suffix[0]
        table = {}
        total = tree.get_sum_item_counts(head)
        if total >= min_supp:
            acc[suffix] = total
        base = conditional_pattern_base(head, tree, min_supp, table)
        # Empty pattern base: nothing left to grow.
        if len(base) == 0:
            return
        # Single-path optimisation: enumerate subsequences directly.
        if len(base) == 1:
            path, path_count = next(iter(base.items()))
            acc.update(generate_patterns_single_path(suffix, path, path_count))
            return
        # General case: recurse into the conditional tree, least frequent first.
        cond_tree = conditional_fp_tree(base, table)
        for candidate in reversed(cond_tree.header_table.keys()):
            _mine((candidate,) + suffix, cond_tree, acc)

    for candidate in reversed(fptree.header_table.keys()):
        _mine((candidate,), fptree, mined)
    return mined
import pandas as pd
import numpy as np
from typing import Dict, Iterator, List, Tuple
from pandas import DataFrame
from algs.util import get_frequent_1_itemsets
from algs.hash_tree import HashTree
def apriori(dataframe: DataFrame, support_threshold: float = 0.005) -> DataFrame:
    """Calculate all frequent itemsets for the given transactions and support
    threshold.

    Args:
        dataframe (DataFrame): All transactions stored in the dataframe. Needs to be one hot encoded.
        support_threshold (float, optional): Min threshold used to prune candidate itemsets

    Returns:
        DataFrame: Dataframe where the first column contains a list of all items in the itemset and the second
        one contains the support for that itemset.
    """
    item_universe = np.array(dataframe.columns)
    all_frequent = get_frequent_1_itemsets(item_universe, dataframe, support_threshold)
    current_level = list(all_frequent.keys())
    k = 1
    while current_level:
        # Generate candidate (k+1)-itemsets, prune by the apriori property
        # and count their occurrences with a hash tree.
        candidate_tree = HashTree()
        for candidate in _generate_itemsets_by_join(current_level, k):
            if _is_candidate(current_level, candidate):
                candidate_tree.add_itemset(candidate)
        _count_transactions(dataframe, candidate_tree, k)
        frequent = candidate_tree.get_frequent_itemsets(
            support_threshold, len(dataframe)
        )
        all_frequent.update(frequent)
        # The join step requires lexicographically sorted itemsets.
        current_level = sorted(frequent.keys())
        k += 1
    # Generate dataframe from all frequent itemsets and their support
    return pd.DataFrame(
        all_frequent.items(),
        index=list(range(len(all_frequent))),
        columns=["itemsets", "support"],
    )
def _generate_itemsets_by_join(
    old_itemsets: List[Tuple[str]], k: int
) -> Iterator[Tuple[str]]:
    """Joins frequent k-1 itemsets to generate k itemsets.
    It assumes the frequent k-1 itemsets are lexicographically ordered.

    Args:
        old_itemsets (List[Tuple[str]]): List of itemsets of length k-1
        k (int): The number of items that must match to join two frequent k-1 itemsets

    Yields:
        Iterator[Tuple[str]]: A candidate k itemset
    """
    total = len(old_itemsets)
    for i in range(total):
        left = old_itemsets[i]
        for j in range(i + 1, total):
            right = old_itemsets[j]
            # Two itemsets join iff they agree on the first k-1 items and the
            # final items are strictly ordered (avoids duplicate candidates).
            if left[:k - 1] == right[:k - 1] and left[k - 1] < right[k - 1]:
                yield left + (right[k - 1],)
def _is_candidate(old_itemsets: List[Tuple[str]], candidate_set: Tuple[str]) -> bool:
    """Checks whether there's any subset contained in the candidate_set, that isn't
    contained within the old_itemsets. If that is the case the candidate set can not
    be a frequent itemset and False is returned.

    Args:
        old_itemsets (List[Tuple[str]]): List of itemsets of length k
        candidate_set (Tuple[str]): Candidate itemset with length k+1

    Returns:
        bool: True if all k-1 element subsets of candidate_set are contained within old_itemsets.
    """
    # Joining two frequent 1-itemsets: every subset is frequent by construction.
    if len(candidate_set) == 2:
        return True
    subsets = (
        candidate_set[:position] + candidate_set[position + 1:]
        for position in range(len(candidate_set))
    )
    return all(subset in old_itemsets for subset in subsets)
def _count_transactions(transactions: DataFrame, tree: HashTree, k: int) -> None:
    """Iterates over all transactions and uses them to traverse the hash tree. If a
    leaf is encountered all itemsets at that leaf are compared against the transaction
    and their count is incremented by 1.

    Args:
        transactions (DataFrame): All transactions
        tree (HashTree): HashTree containing candidate itemsets
        k (int): Length of candidate itemsets
    """
    for index, encoded_row in transactions.iterrows():
        # The boolean row doubles as a column mask; listing the selection
        # yields the item (column) names present in this transaction.
        items_present = list(transactions.loc[index:, list(encoded_row)])
        tree.transaction_counting(items_present, 0, k + 1, {})
def a_close(dataframe: DataFrame, support_threshold: float = 0.005) -> DataFrame:
    """Implementation of the a-close algorithm according to 'Discovering frequent closed itemsets
    for association rules'.

    Args:
        dataframe (DataFrame): All transactions, one-hot encoded, columns are lexicographically sorted.
        support_threshold (float, optional): Minimum support threshold. Defaults to 0.005.

    Returns:
        DataFrame: Generators, their frequent closed itemsets and support
    """
    # Calculate frequent 1-generators
    items = np.array(dataframe.columns)
    generators = [get_frequent_1_itemsets(items, dataframe, support_threshold)]
    current_generators = [
        frequent_1_itemset for frequent_1_itemset in generators[0].keys()
    ]
    # closed_level records the first level at which a redundant generator
    # (same closure as one of its subsets) appeared; 0 means "none seen yet",
    # in which case every generator below is its own closure.
    closed_level = 0
    k = 1
    while len(current_generators) != 0:
        # Build (i+1)-generators by combining frequent (i)-generators and count support
        hash_tree = HashTree()
        for candidate_set in _generate_itemsets_by_join(current_generators, k):
            if _is_candidate(current_generators, candidate_set):
                hash_tree.add_itemset(candidate_set)
        _count_transactions(dataframe, hash_tree, k)
        current_generators = hash_tree.get_frequent_itemsets(
            support_threshold, len(dataframe)
        )
        # Remove generators having the same support as one of their i-subsets
        current_generators, found = _remove_same_closure_as_subset(
            current_generators, generators[k-1])
        closed_level = k if found and closed_level == 0 else closed_level
        generators.append(current_generators)
        current_generators = sorted(current_generators.keys())
        k += 1
    # Calculate closure for all generators at index >= level
    generators_and_closures = {}
    # Generators on levels before closed_level are closed itemsets themselves.
    # NOTE(review): with closed_level == 0 the slice equals [:-1], i.e. all
    # levels except the final empty one — confirm this is the intended
    # behaviour when no redundant generator was ever found.
    for k_generators in generators[:closed_level-1]:
        for k_generator, supp in k_generators.items():
            generators_and_closures[k_generator] = (k_generator, supp)
    if closed_level > 0:
        # Compute the galois closure for the potentially unclosed levels
        # (the last, empty level is excluded by the slice).
        generators_and_closures.update(
            closure(dataframe, generators[closed_level-1:-1]))
    # Generate dataframe from all generators their closed frequent itemsets and support
    df = pd.DataFrame(
        index=[i for i in range(len(generators_and_closures))],
        columns=["generators", "closed_itemsets", "support"],
    )
    i = 0
    for generator, closed in generators_and_closures.items():
        df.loc[i, "generators"] = generator
        df.loc[i, "closed_itemsets"] = closed[0]
        df.loc[i, "support"] = closed[1]
        i += 1
    return df
def _remove_same_closure_as_subset(
    current_generators: Dict[Tuple[str], float], all_generators: Dict[Tuple[str], float]
) -> Tuple[Dict[Tuple[str], float], bool]:
    """Prunes all (i+1)-generators, that have a subset i-generator with the same support.
    This implies their closure is the same and thus the (i+1)-generator is redundant.

    Args:
        current_generators (Dict[Tuple[str], float]): (i+1)-generators
        all_generators (Dict[Tuple[str], float]): i-generators

    Returns:
        Tuple[Dict[Tuple[str], float], bool]: Dictionary with all redundant generators removed and
        a flag indicating, whether a generator happened to be redundant.
    """
    pruned = {}
    found_redundant = False
    for itemset, support in current_generators.items():
        # Equal support of any immediate subset implies an identical closure.
        redundant = any(
            all_generators[itemset[:position] + itemset[position + 1:]] == support
            for position in range(len(itemset))
        )
        if redundant:
            found_redundant = True
        else:
            pruned[itemset] = support
    return pruned, found_redundant
def closure(transactions: DataFrame, unclosed_generators: List[Dict[Tuple[str], float]]) -> Dict[Tuple[str], float]:
    """Calculates the galois closure operator h. It receives a list of potentially unclosed generators and
    updates the closure for each generator by intersecting it with f(o) for every transaction o that
    contains the generator.

    Args:
        transactions (DataFrame): All transactions
        unclosed_generators (List[Dict[Tuple[str], float]]): Generators for i >= level, having at least one unclosed generator

    Returns:
        Dict[Tuple[str], Tuple[str, float]]: The generator and their closure for all itemsets in unclosed_generators.
    """
    candidates = {}
    for level_generators in unclosed_generators:
        for generator, supp in level_generators.items():
            candidates[generator] = [set(), supp]
    for idx, row in transactions.iterrows():
        # `list(row)` acts as a boolean column mask: the selection's column
        # names are exactly the items present in this transaction.
        transaction_items = set(transactions.loc[idx:, list(row)])
        for generator, entry in candidates.items():
            if set(generator).issubset(transaction_items):
                # First covering transaction seeds the closure, later ones
                # narrow it down via intersection.
                entry[0] = set(transaction_items) if not entry[0] else entry[0] & transaction_items
    return {generator: (tuple(sorted(entry[0])), entry[1]) for generator, entry in candidates.items()}
from typing import Dict, Tuple
import numpy as np
import pandas as pd
from pandas import DataFrame
from algs.apriori import _count_transactions, _generate_itemsets_by_join, _is_candidate
from algs.hash_tree import HashTree
from algs.util import get_frequent_1_itemsets
def hclique(dataframe: DataFrame, hconf_threshold: float = 0.5, support_threshold: float = 0.0) -> DataFrame:
    """Implements the hclique-miner algorithm from the 'Mining Strong Affinity Association Patterns in
    Data Sets with Skewed Support Distribution' paper in order to determine all hyperclique patterns.
    (The cross-support property of the h-confidence is not used to prune cross-support itemsets before they
    are generated.)

    Args:
        dataframe (DataFrame): Transactional database
        hconf_threshold (float, optional): Minimum h-confidence threshold. Defaults to 0.5.
        support_threshold (float, optional): Minimum support threshold. Defaults to 0.0.

    Returns:
        DataFrame: All hyperclique patterns, satisfying min support and min h-confidence constraints, with
        their support values.
    """
    items = np.array(dataframe.columns)
    # all_sets accumulates every frequent itemset found across all levels.
    all_sets = get_frequent_1_itemsets(
        items, dataframe, support_threshold)
    # Single-item supports, needed later for the h-confidence denominator.
    frequent_items = {item[0]: support for item, support in all_sets.items()}
    frequent_k_itemsets = [
        frequent_1_itemset for frequent_1_itemset in all_sets.keys()]
    k = 1
    while len(frequent_k_itemsets) != 0:
        # Candidates of size k+1 are counted via a hash tree pass over the data.
        hash_tree = HashTree(max_size=570)
        for candidate_set in _generate_itemsets_by_join(frequent_k_itemsets, k):
            # Prune wrt. antimonotone property of support/h-conf and cross-support upper bound of h-conf
            if _is_candidate(frequent_k_itemsets, candidate_set) and _prune_by_upper_bound(hconf_threshold, candidate_set, frequent_items):
                hash_tree.add_itemset(candidate_set)
        _count_transactions(dataframe, hash_tree, k)
        frequent_k_itemsets = hash_tree.get_frequent_itemsets(
            support_threshold, len(dataframe)
        )
        frequent_k_itemsets = _prune_by_hconf_threshold(
            frequent_k_itemsets, frequent_items, hconf_threshold)
        all_sets.update(frequent_k_itemsets)
        # Join step expects lexicographically sorted itemsets.
        frequent_k_itemsets = sorted(frequent_k_itemsets.keys())
        k += 1
    # Generate dataframe from all frequent itemsets and their support
    df = pd.DataFrame(
        all_sets.items(),
        index=[i for i in range(len(all_sets))],
        columns=["itemsets", "support"],
    )
    return df
def _prune_by_upper_bound(hconf_threshold: float, pattern: Tuple[str], items: Dict[str, float]) -> bool:
    """Prunes the candidate pattern based on the cross-support upper bound of
    the h-confidence: min(supp) / max(supp) over the pattern's items.

    Args:
        hconf_threshold (float): Minimum h-confidence threshold
        pattern (Tuple[str]): Candidate pattern
        items (Dict[str, float]): Frequent 1 items

    Returns:
        bool: True when the pattern is not a cross-support pattern, else False.
    """
    supports = [items.get(item) for item in pattern]
    # Sentinels mirror the fold's start values: supports are fractions in
    # [0, 1], so 1 and -1 never win for a non-empty pattern.
    lowest = min(supports + [1])
    highest = max(supports + [-1])
    return lowest / highest >= hconf_threshold
def _prune_by_hconf_threshold(frequent_k_itemsets: Dict[Tuple[str], float], items: Dict[str, float], hconf_threshold: float) -> Dict[Tuple[str], float]:
    """Removes any patterns whose h-confidence is smaller than the given h-confidence threshold.

    Args:
        frequent_k_itemsets (Dict[Tuple[str], float]): Frequent k itemsets, satisfying the min support constraint
        items (Dict[str, float]): Frequent 1 itemsets
        hconf_threshold (float): h-confidence threshold

    Returns:
        Dict[Tuple[str], float]: Patterns satisfying the threshold.
    """
    result = {}
    for itemset, supp in frequent_k_itemsets.items():
        # h-conf(P) = supp(P) / max_{i in P} supp({i}); find the denominator.
        max_item = -1
        for item in itemset:
            max_item = max(max_item, items.get(item))
        if supp / max_item >= hconf_threshold:
            result[itemset] = supp
    return result | /rule_mining_algs-0.1.1-py3-none-any.whl/algs/hclique.py | 0.93161 | 0.567128 | hclique.py | pypi |
from math import ceil, floor
from typing import Any, Dict, Iterator, Set, Tuple
import numpy as np
import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from pandas import DataFrame
from sklearn.cluster import Birch
def partition_intervals(
    num_intervals: int, attribute: str, db: DataFrame, equi_depth: bool
) -> Tuple[Any, Any]:
    """Discretizes a numerical attribute into num_intervals of equal depth
    (frequency) or equal width.

    Args:
        num_intervals (int): Number of intervals for this attribute
        attribute (str): Name of the attribute
        db (DataFrame): Database
        equi_depth (bool): Equi-depth discretization else equi-width

    Returns:
        Tuple[Any, Any]: Integer labels for every row (adjacent intervals are
        encoded as consecutive integers whose order reflects the interval
        order) and the computed bin edges. Note: for equi-depth binning fewer
        than num_intervals bins may survive duplicates="drop".
    """
    if equi_depth:
        # labels=False yields the integer codes directly, so the binning does
        # not have to be computed twice just to learn how many bins survive
        # duplicates="drop" (the previous implementation called qcut twice).
        return pd.qcut(
            x=db[attribute],
            q=num_intervals,
            labels=False,
            retbins=True,
            duplicates="drop",
        )
    return pd.cut(
        x=db[attribute],
        bins=num_intervals,
        labels=[i for i in range(num_intervals)],
        include_lowest=True,
        retbins=True,
    )
def partition_categorical(attribute: str, db: DataFrame) -> Dict[int, Any]:
    """Maps the given categorical attribute to consecutive integers. Can also
    be used for numerical attributes.

    Args:
        attribute (str): Name of the attribute
        db (DataFrame): Database

    Returns:
        Dict[int, Any]: Mapping from the integer category code back to its
        categorical value
    """
    codes = db[attribute].astype("category").cat.codes
    return dict(zip(codes, db[attribute]))
def discretize_values(
    db: DataFrame, discretization: Dict[str, int], equi_depth: bool,
) -> Tuple[Dict[str, Dict[int, Any]], DataFrame]:
    """Maps the numerical and quantititative attributes to integers as described in 'Mining Quantitative Association
    Rules in Large Relational Tables'. Note: `db` is modified in place and also returned.

    Args:
        db (DataFrame): Original Database
        discretization (Dict[str, int]): Name of the attribute (pandas column name) and the number of intervals
            for numerical attributes or 0 for categorical attributes and numerical attributes (no intervals)
        equi_depth (bool): Equi-depth discretization else equi-width.

    Returns:
        Tuple[Dict[str, Dict[int, Any]], DataFrame]: The mapping from the consecutive integers back to
        the interval / value for each attribute, and the encoded database.
    """
    attribute_mappings = {}
    for attribute, ival in discretization.items():
        if ival == 0:
            # Categorical (or plain numerical) attribute: encode each distinct
            # value as its category code.
            attribute_mappings[attribute] = partition_categorical(
                attribute, db)
            db[attribute].replace(
                to_replace=dict(
                    zip(db[attribute], db[attribute].astype(
                        "category").cat.codes)
                ),
                inplace=True,
            )
        else:
            # Interval attribute: bin into ival intervals, remember the edges.
            x, y = partition_intervals(ival, attribute, db, equi_depth)
            int_val = pd.api.types.is_integer_dtype(db[attribute])
            # For integer columns the float bin edges are rounded inwards so
            # the stored interval boundaries are attainable values.
            attribute_mappings[attribute] = {
                i: (
                    ceil(y[i]) if int_val else y[i],
                    floor(y[i + 1]) if int_val else y[i + 1],
                )
                for i in range(len(y) - 1)
            }
            db[attribute] = x.astype("int")
    return attribute_mappings, db
def static_discretization(db: DataFrame, discretization: Dict[str, int], equi_depth: bool = False) -> DataFrame:
    """Discretizes all attributes in the dataframe, reducing the problem of
    mining quantitative itemsets to mining itemsets over binary data.

    Args:
        db (DataFrame): Dataframe to be transformed
        discretization (Dict[str, int]): Name of the attribute (pandas column name) and the number of intervals
        equi_depth (bool): Equi-depth discretization else equi-width (Defaults to False).

    Returns:
        DataFrame: DataFrame, where all columns correspond to binary attributes
    """
    # Work on a deep copy — discretize_values mutates its argument in place.
    value_maps, encoded = discretize_values(
        db.copy(deep=True), discretization, equi_depth)
    return _static_discretization(encoded, value_maps)
def _static_discretization(
    encoded_db: DataFrame, mapped_vals: Dict[str, Dict[int, Any]]
) -> DataFrame:
    """Discretizes all attributes in the dataframe by one-hot encoding every
    (attribute, value/interval) combination.

    Args:
        encoded_db (DataFrame): Transformed database, where each value /
            interval is represented by an integer
        mapped_vals (Dict[str, Dict[int, Any]]): Stores the information of the
            value transformations for each attribute

    Returns:
        DataFrame: DataFrame, where all columns correspond to binary attributes
    """

    def _label(attribute, code):
        # Intervals render as "attr = lo..hi", plain values as "attr = v".
        val = mapped_vals[attribute][code]
        if type(val) == tuple:
            return f"{attribute} = {val[0]}..{val[1]}"
        return f"{attribute} = {val}"

    rows = [
        [_label(attribute, row[attribute]) for attribute in row.index.array]
        for _, row in encoded_db.iterrows()
    ]
    encoder = TransactionEncoder()
    onehot = encoder.fit_transform(rows)
    return pd.DataFrame(onehot, columns=encoder.columns_)
def cluster_interval_data(db: DataFrame, attr_threshold: Dict[Tuple[str], float], num_clusters: bool = False) -> DataFrame:
    """Clusters interval data, using the birch clustering algorithm as described in
    'Association Rules over Interval Data'. The threshold is the upper bound of the
    radius of subclusters. Further the clusters are described by their smallest bounding box.

    Args:
        db (DataFrame): Dataset, to mine quantitative association rules from
        attr_threshold (Dict[Tuple[str], float]): Maps attribute (sets) to their radius threshold, which
            in turn determines the cluster quality. If num_clusters is set, the determined number of
            clusters is generated instead.
        num_clusters (bool): If set to True the thresholds are interpreted as number of clusters.

    Returns:
        DataFrame: One column for each attribute, value pair of all attributes (after clustering).
        In the case of clusters the values are bounding boxes to represent the cluster.
    """
    data = db.copy(deep=True)
    # Attributes not taking part in clustering are encoded categorically later.
    names = [name for tpl in attr_threshold.keys() for name in tpl]
    discretization = {name: 0 for name in data.columns if name not in names}
    for attributes, radius in attr_threshold.items():
        # Build name of the attribute (can be combined e.g. x,y)
        name = "{" + attributes.__str__()[1:-1].replace("'", "") + "}"
        name = name.replace(",", "") if len(attributes) == 1 else name
        discretization[name] = 0
        attributes = list(attributes)
        # Use birch clustering alg and calculate bounding boxes to represent clusters.
        # When num_clusters is set, `radius` is reinterpreted as the cluster count.
        brc = Birch(n_clusters=int(radius) if num_clusters else None,
                    threshold=0.5 if num_clusters else radius, copy=True)
        data[name] = brc.fit_predict(data[attributes])
        # Per-cluster minima/maxima span the smallest bounding box.
        mins = data.groupby(name).min(numeric_only=True)[
            attributes].to_dict("tight")
        maxs = data.groupby(name).max(numeric_only=True)[
            attributes].to_dict("tight")
        # Map the cluster id to a name representing the cluster
        replace_dict = {}
        idx = 0
        for d1, d2 in zip(mins["data"], maxs["data"]):
            attr_name = d1.__str__() + " x " + d2.__str__()
            replace_dict[idx] = attr_name
            idx += 1
        data[name].replace(replace_dict, inplace=True)
        # The clustered source columns are replaced by the combined column.
        data.drop(labels=attributes, axis=1, inplace=True)
    return static_discretization(data, discretization)
class Item:
    """Represents an item: for a categorical attribute lower == upper, for a
    numerical attribute the pair (lower, upper) encodes an interval with
    lower <= upper.
    """

    def __init__(self, name: str, lower: int, upper: int) -> None:
        self.name = name
        self.lower = lower
        self.upper = upper

    def __lt__(self, __o: object) -> bool:
        # Items order lexicographically by attribute name only.
        return self.name < __o.name

    def __eq__(self, __o: object) -> bool:
        return (self.name, self.lower, self.upper) == (
            __o.name, __o.lower, __o.upper)

    def is_generalization(self, other: object) -> bool:
        # True when self's interval contains other's interval.
        return self.lower <= other.lower and other.upper <= self.upper

    def is_specialization(self, other: object) -> bool:
        # True when other's interval contains self's interval.
        return other.lower <= self.lower and self.upper <= other.upper

    def __sub__(self, __o: object) -> object:
        # Identical boundaries -> categorical item; the difference is __o itself.
        if __o.lower == self.lower and __o.upper == self.upper:
            return __o
        # __o strictly inside self: removing it would split the interval into
        # two non-adjacent pieces, which a single Item cannot represent.
        if self.lower < __o.lower and __o.upper < self.upper:
            return None
        # Shared lower bound, e.g. [5,8] - [5,6] = [6,8] ...
        if self.lower == __o.lower:
            return Item(self.name, __o.upper, self.upper)
        # ... otherwise the upper region is removed, e.g. [5,8] - [7,8] = [5,7].
        return Item(self.name, self.lower, __o.lower)

    def __hash__(self) -> int:
        # Hash by name only; equal items share a name, so this is consistent
        # with __eq__.
        return hash(self.name)

    def __repr__(self) -> str:
        return f"<{self.name}, {self.lower}, {self.upper}>"
def count_support(
    db: DataFrame, items: Dict[Tuple[Item], int], minsupp: float, drop: bool = True
) -> Dict[Tuple[Item], int]:
    """Counts the support for the given itemsets (updates `items` in place).

    Args:
        db (DataFrame): Encoded Database
        items (Dict[Tuple[Item], int]): Candidate itemsets with support count 0
        minsupp (float): minimum support threshold
        drop (bool, optional): Deletes items not having minimal support when set to true. Defaults to True.

    Returns:
        Dict[Tuple[Item], int]: Itemsets with their support
    """
    total = len(db)
    for itemset in items:
        # A row supports the itemset when every item's column value lies
        # inside that item's [lower, upper] range (inclusive on both ends).
        hits = np.column_stack(
            [db[it.name].between(it.lower, it.upper) for it in itemset]
        ).all(axis=1)
        items[itemset] = hits.sum()
    if not drop:
        return items
    return {itemset: supp for itemset, supp in items.items()
            if supp / total >= minsupp}
def find_frequent_items(
    mappings: Dict[str, Dict[int, Any]],
    db: DataFrame,
    discretizations: Dict[str, int],
    min_supp: float,
    max_supp: float,
) -> Dict[Tuple[Item], int]:
    """Generates all frequent items given the encoded database and the mappings.

    Args:
        mappings (Dict[str, Dict[int, Any]]): Attributes to their integer mapping
        db (DataFrame): Encoded Database
        discretizations (Dict[str, int]): Name of attributes to Number intervals
        min_supp (float): Minimum support for frequent itemsets
        max_supp (float): Maximum support for limiting interval merging

    Returns:
        Dict[Tuple[Item], int]: All frequent items
    """
    def merge_intervals(
        itemsets: Dict[Tuple[Item], int], max_upper: int, min_lower: int
    ) -> Dict[Tuple[Item], int]:
        """Obnoxious function to merge adjacent intervals.

        Args:
            itemsets (Dict[Tuple[Item], int]): Quantitative Attributes and their support
            max_upper (int): Max integer of interval to integer mapping
            min_lower (int): Min integer of interval to integer mapping

        Returns:
            Dict[Tuple[Item], int]: All items representing intervals, that satisfy min support
        """
        intervals = {}
        seeds = {}
        # Seed phase: frequent base intervals are kept; any interval whose
        # support is still below max_supp remains a merge candidate.
        for item, supp in itemsets.items():
            norm_supp = supp / len(db)
            if norm_supp >= min_supp:
                intervals[item] = supp
            if norm_supp < max_supp:
                seeds[item] = supp
        while seeds:
            candidates = {}
            for item, supp in seeds.items():
                norm = supp / len(db)
                if norm >= min_supp:
                    intervals[item] = supp
                if norm < max_supp:
                    lower, upper = item[0].lower, item[0].upper
                    # Try to extend the interval one step to the left.
                    if lower > min_lower:
                        it = Item(item[0].name, lower - 1, upper)
                        # NOTE: the loop variable shadows the outer `item`.
                        for item, sup in itemsets.items():
                            if item[0].upper == lower - 1:
                                val = supp + sup
                                if candidates.get((it,)) == None:
                                    candidates[(it,)] = val
                                else:
                                    candidates[(it,)] = max(
                                        candidates[(it,)], val)
                    # Try to extend the interval one step to the right.
                    if upper < max_upper:
                        it = Item(item[0].name, lower, upper + 1)
                        for item, sup in itemsets.items():
                            if item[0].lower == upper + 1:
                                val = supp + sup
                                if candidates.get((it,)) == None:
                                    candidates[(it,)] = val
                                else:
                                    candidates[(it,)] = max(
                                        candidates[(it,)], val)
            seeds = candidates
        return intervals

    frequent_items = {}
    for attribute, num_intervals in discretizations.items():
        # Categorical / numerical attribute -> no intervals
        itemsets = {
            (Item(attribute, val, val),): 0 for val in mappings[attribute].keys()
        }
        # For interval attributes (num_intervals != 0) infrequent base
        # intervals are kept (drop=False) so that merging can combine them.
        itemsets = count_support(db, itemsets, min_supp, num_intervals == 0)
        if num_intervals != 0:
            itemsets = merge_intervals(
                itemsets,
                max(mappings[attribute].keys()),
                min(mappings[attribute].keys()),
            )
        frequent_items.update(itemsets)
    return frequent_items
def _prune_by_r_interest(
    frequent_items: Dict[Tuple[Item], int],
    discretizations: Dict[str, int],
    R: float,
    n: int,
) -> Dict[Tuple[Item], int]:
    """Prunes all quantitative attributes with support/n > 1/R (Lemma 5).

    Args:
        frequent_items (Dict[Tuple[Item], int]): Frequent items
        discretizations (Dict[str, int]): Name of Attributes to num intervals
        R (float): R-Interest
        n (int): Number of entries in the db

    Returns:
        Dict[Tuple[Item], int]: All items whose fractional support does not exceed 1/R
    """
    if R == 0:
        # R == 0 disables interestingness pruning altogether.
        return frequent_items
    limit = 1 / R
    kept = {}
    for item, supp in frequent_items.items():
        # Categorical attributes (0 intervals) are exempt from Lemma 5.
        if discretizations[item[0].name] == 0 or supp / n <= limit:
            kept[item] = supp
    return kept
def get_generalizations_specializations(
    frequent_itemsets: Dict[Tuple[Item], int], itemset: Tuple[Item]
) -> Dict[int, Dict[Tuple[Item], int]]:
    """Determines all generalizations and specializations of the given itemset.

    Args:
        frequent_itemsets (Dict[Tuple[Item], int]): All frequent itemsets.
        itemset (Tuple[Item]): Itemset, whose generalizations and specializations are to be determined.

    Returns:
        Dict[int, Dict[Tuple[Item], int]]: The key 0 maps to all specializations of the itemset and the key 1
        gives all generalizations of the itemset.
    """
    result = {0: {}, 1: {}}
    for items, supp in frequent_itemsets.items():
        if len(items) != len(itemset):  # Attributes(X) != Attributes(X')
            continue
        found_spec = 0
        found_gen = 0
        attrs = True
        for i in range(len(items)):
            # Attributes(X) != Attributes(X')
            if items[i].name != itemset[i].name:
                attrs = False
                break
            if (
                items[i] == itemset[i]
            ):  # Having the same boundaries, implies a categorical attribute
                continue
            # Two distinct categorical values (point intervals) can never be
            # in a generalization/specialization relationship.
            elif items[i].lower == items[i].upper and itemset[i].lower == itemset[i].upper and items[i].lower != itemset[i].lower:
                attrs = False
                break
            elif itemset[i].is_generalization(items[i]):
                found_spec = 1
            elif itemset[i].is_specialization(items[i]):
                found_gen = 1
            # Neither a generalization nor a specialization
            else:
                attrs = False
                break
        # Exactly one direction must be present; mixed or unrelated itemsets
        # are discarded.
        if found_gen + found_spec != 1 or not attrs:
            continue
        elif found_spec:
            result[0][items] = supp
        else:
            result[1][items] = supp
    return result
def _get_subintervals(
    db: DataFrame, specializations: Dict[Tuple[Item], int], itemset: Tuple[Item]
) -> Tuple[Set[Tuple[Item]], Dict[Tuple[Item], int]]:
    """Calculates the difference of an itemset to all its specializations.

    Args:
        db (DataFrame): Transformed Database
        specializations (Dict[Tuple[Item], int]): All specializations of the given itemset
        itemset (Tuple[Item]): Itemset to substract a specialization from

    Returns:
        Tuple[Set[Tuple[Item]], Dict[Tuple[Item], int]]: Itemsets generated from the difference,
        all individual items that were generated from the difference and their support aswell as
        the itemsets themselves.
    """
    new_itemsets = set()  # Holds X-X'
    new_items = {}  # Holds the items that are created by X-X'
    for items in specializations.keys():
        new_itemset = []
        for i in range(len(items)):
            # Item.__sub__ yields None when the difference would split the
            # interval into two non-adjacent parts; such itemsets are skipped.
            sub_interval = itemset[i] - items[i]
            if sub_interval is None:
                break
            else:
                new_items.update(
                    {(sub_interval,): 0}
                )  # We need the support for individual elements
                new_itemset.append(sub_interval)
        # Only complete differences (no component was None) form an itemset.
        if len(new_itemset) == len(itemset):
            new_itemsets.add(tuple(new_itemset))
            new_items.update(
                {tuple(new_itemset): 0}
            )  # We need the support for all X-X' aswell
    # drop=False: keep everything, the callers need the raw counts.
    new_items = count_support(db, new_items, 0.0, False)
    return new_itemsets, new_items
def _is_specialization_interesting(
    specializations: Set[Tuple[Item]],
    generalization: Tuple[Item],
    new_items: Dict[Tuple[Item], int],
    frequent_itemsets: Dict[Tuple[Item], int],
    R: float,
    gen_supp: float,
    n: int,
) -> bool:
    """Determine whether the difference (X-X') from the itemset to any of its specializations
    is r-interesting wrt. the generalization of the itemset.

    Args:
        specializations (Set[Tuple[Item]]): All itemsets of the form: X-X'
        generalization (Tuple[Item]): The generalization of the itemset
        new_items (Dict[Tuple[Item], int]): Items/Itemsets from (X-X') with support information
        frequent_itemsets (Dict[Tuple[Item], int]): All mined frequent itemsets
        R (float): Interest level
        gen_supp (float): Support for the generalization (fractional)
        n (int): Number of transactions in the database

    Returns:
        bool: False if there's any specialization of X' st. X-X' is not r-interesting.
    """
    # No specializations -> nothing can violate r-interestingness.
    if len(specializations) == 0:
        return True
    for specialization in specializations:
        # Expected support under independence: supp(gen) scaled by the
        # per-attribute ratio of the sub-interval to the generalized interval.
        exp_supp = gen_supp
        for i in range(len(specialization)):
            exp_supp *= (
                new_items[(specialization[i],)]
                / frequent_itemsets[(generalization[i],)]
            )
        # Actual fractional support must be at least R times the expectation.
        if (new_items[specialization] / n / exp_supp) < R:
            return False
    return True
def remove_r_uninteresting_itemsets(
    db: DataFrame, frequent_itemsets: Dict[Tuple[Item], int], R: float
) -> Tuple[Dict[Tuple[Item], int], Dict[Tuple[Item], int]]:
    """Uses the definition of R-interestingness of itemsets in the context of
    quantitative association rules to prune itemsets, that do not fullfill it.

    Args:
        db (DataFrame): Transformed Database
        frequent_itemsets (Dict[Tuple[Item], int]): All mined frequent itemsets
        R (float): Interest Level

    Returns:
        Tuple[Dict[Tuple[Item], int], Dict[Tuple[Item], int]]: Position[0]: Frequent and R-interesting itemsets.
        Position[1]: Itemsets that are not R-interesting.
    """
    def _is_r_interesting(generalization: Tuple[Item], itemset: Tuple[Item]) -> bool:
        """Indicates whether the support of the itemset is r times the expected support
        given its generalization.

        Args:
            generalization (Tuple[Item]): Generalization of the itemset
            itemset (Tuple[Item]): Potentially r-interesting itemset

        Returns:
            bool: True if the itemset is r-interesting wrt. to its generalization else False
        """
        n = len(db)
        # Expected support: supp(gen) scaled per attribute by the ratio of the
        # specialized item's support to the generalized item's support.
        exp_supp = frequent_itemsets[generalization] / n
        for i in range(len(generalization)):
            exp_supp *= (
                frequent_itemsets[(itemset[i],)]
                / frequent_itemsets[(generalization[i],)]
            )
        return (frequent_itemsets[itemset] / n / exp_supp) >= R

    n = len(db)
    r_interesting_itemsets = {}
    elements_to_remove = {}
    for item, support in frequent_itemsets.items():
        # partial_order[0]: specializations, partial_order[1]: generalizations.
        partial_order = get_generalizations_specializations(
            frequent_itemsets, item)
        interesting = True
        sub_intervals, sub_items = _get_subintervals(
            db, partial_order[0], item)
        # An itemset survives only if it is r-interesting wrt. every one of
        # its generalizations, including all X-X' differences.
        for gen, supp in partial_order[1].items():
            if not _is_r_interesting(gen, item) or not _is_specialization_interesting(
                sub_intervals, gen, sub_items, frequent_itemsets, R, supp / n, n
            ):
                interesting = False
                elements_to_remove[item] = support
                break
        if interesting:
            r_interesting_itemsets[item] = support
    return r_interesting_itemsets, elements_to_remove
def _generate_itemsets_by_join(
    old_itemsets: Dict[Tuple[Item], int], k: int
) -> Dict[Tuple[Item], int]:
    """Joins frequent k-1 itemsets to generate k itemsets.
    It assumes the frequent k-1 itemsets are lexicographically ordered.

    Args:
        old_itemsets (Dict[Tuple[Item], int]): Frequent k-1 itemsets
        k (int): The number of items that must match to join two frequent k-1 itemsets

    Return:
        Dict[Tuple[Item], int]: Candidate k itemsets with support count 0
    """
    new_candidates = {}
    for left in old_itemsets.keys():
        for right in old_itemsets.keys():
            # Join condition: identical (k-1)-prefix and a strictly greater
            # last item, so every candidate is generated exactly once.
            if left[:k - 1] == right[:k - 1] and left[k - 1] < right[k - 1]:
                tail = right[k - 1]
                new_candidates[
                    left + (Item(tail.name, tail.lower, tail.upper),)
                ] = 0
    return new_candidates
def _downward_closure(old_itemsets: Dict[Tuple[Item], int], candidates: Dict[Tuple[Item], int]) -> Dict[Tuple[Item], int]:
    """Uses the downward closure property of support to prune any k-itemsets
    that have at least one (k-1)-subset which is not frequent.

    Args:
        old_itemsets (Dict[Tuple[Item], int]): Frequent k-1 itemsets
        candidates (Dict[Tuple[Item], int]): Potential k itemsets

    Returns:
        Dict[Tuple[Item], int]: Pruned potential k itemsets
    """
    return {
        candidate: 0
        for candidate in candidates
        # every (k-1)-subset must itself be frequent
        if all(
            candidate[:i] + candidate[i + 1:] in old_itemsets
            for i in range(len(candidate))
        )
    }
def quantitative_itemsets(
    db: DataFrame,
    discretization: Dict[str, int],
    minsupp: float = 0.05,
    maxsupp: float = 0.1,
    R: float = 0.0,
    equi_depth: bool = False,
) -> DataFrame:
    """ Provides an algorithm similar to the one introduced in
    'Mining Quantitative Association Rules in Large Relational Tables'.
    Optimizations for support counting are omitted, however.

    Args:
        db (DataFrame): Data mining context
        discretization (Dict[str, int]): Attributes and how they should be discretized. 0 indicates no
            merging of intervals. Any number greater than 0 will yield the amount of intervals for this attribute, for
            the initial partitioning.
        minsupp (float, optional): Min support threshold. Defaults to 0.05.
        maxsupp (float, optional): Max support threshold for interval merging. Defaults to 0.1.
        R (float, optional): R Interest Level. Defaults to 0.0. If left at 0.0 no R-interestingess pruning
            occurs.
        equi_depth (bool, optional): Equi-depth intervals when True else equi-width intervals. Defaults to False.

    Returns:
        DataFrame: All quantitative itemsets satisfying the given constraints.
    """
    mappings, encoded_db = discretize_values(
        db.copy(deep=True), discretization, equi_depth)
    frequent_items = find_frequent_items(
        mappings, encoded_db, discretization, minsupp, maxsupp
    )
    frequent_items = _prune_by_r_interest(
        frequent_items, discretization, R, len(db))
    # Level-wise apriori search, seeded with the frequent 1-itemsets.
    frequent_k_itemsets = frequent_items.copy()
    k = 1
    to_remove = {}
    while len(frequent_k_itemsets) != 0:
        candidates = _generate_itemsets_by_join(frequent_k_itemsets, k)
        candidates = _downward_closure(frequent_k_itemsets, candidates)
        frequent_k_itemsets = count_support(encoded_db, candidates, minsupp)
        frequent_items.update(frequent_k_itemsets)
        k += 1
    if R != 0:
        frequent_items, to_remove = remove_r_uninteresting_itemsets(
            encoded_db, frequent_items, R)
    # Decode the integer-encoded items back to readable "attr = value" /
    # "attr = lo..hi" strings; pruned itemsets are kept but flagged "ignore".
    itemsets = []
    for itemset, support in {**frequent_items, **to_remove}.items():
        items = []
        for item in itemset:
            vals = mappings[item.name]
            lower = vals[item.lower]
            upper = vals[item.upper]
            item_value = f"{lower[0]}..{upper[1]}" if discretization[item.name] else f"{lower}"
            items.append(f"{item.name} = {item_value}")
        itemsets.append({"itemsets": tuple(
            items), "support": support / len(db), "ignore": itemset in to_remove})
    return pd.DataFrame(itemsets) | /rule_mining_algs-0.1.1-py3-none-any.whl/algs/quantitative.py | 0.942242 | 0.560463 | quantitative.py | pypi |
from typing import Dict, List, Tuple
class HashTree:
    # Hash tree for apriori-style candidate counting: inner nodes dispatch on
    # a hash of one item, leaves hold up to max_size itemsets.

    def __init__(self, depth: int = 0, leaf: bool = True, max_size: int = 57) -> None:
        # children: hash value -> child HashTree (only used on inner nodes)
        self.children = {}
        # itemsets: itemset -> support count (only used on leaves)
        self.itemsets = {}
        self.leaf = leaf
        self.max_size = max_size
        # depth doubles as the index of the item hashed at this node.
        self.depth = depth

    def add_itemset(self, itemset: Tuple[str]) -> None:
        """Adds the given itemset to the hash tree. It is assumed that no duplicates occur
        and no differently sized itemsets are inserted.
        When max_size is exceeded at a leaf node, that node is converted to an inner node,
        unless its already at depth equal to the length of the itemset.

        Args:
            itemset (Tuple[str]): The itemset to add to the tree.
        """
        if (self.leaf and self.max_size > len(self.itemsets)) or self.depth == len(
            itemset
        ):
            self.itemsets[itemset] = 0
        else:
            # Hash to some value to navigate to the child
            if not self.leaf:
                hash_value = self.hash_func(itemset, self.depth)
                if self.children.get(hash_value) == None:
                    self.children[hash_value] = HashTree(
                        self.depth + 1, max_size=self.max_size
                    )
                self.children[hash_value].add_itemset(itemset)
            # Make the leaf an inner node
            else:
                # Redistribute all stored itemsets (plus the new one) into
                # freshly created children.
                self.itemsets[itemset] = 0
                for items in self.itemsets.keys():
                    hash_value = self.hash_func(items, self.depth)
                    if self.children.get(hash_value) == None:
                        self.children[hash_value] = HashTree(
                            self.depth + 1, max_size=self.max_size
                        )
                    self.children[hash_value].add_itemset(items)
                self.itemsets = {}
                self.leaf = False

    def hash_func(self, itemset: Tuple[str], depth: int) -> int:
        """Hash function used for hashing items in itemsets.

        Args:
            itemset (Tuple[str]): The itemset to hash
            depth (int): The position of the item to apply the hash function to

        Returns:
            int: Hash value of the hashed item.
        """
        # Sum of character codes of the selected item, folded into 7 buckets.
        return sum(ord(item) for item in itemset[depth]) % 7

    def transaction_counting(
        self,
        transaction: List[str],
        lower_boundary: int,
        k: int,
        visited: Dict["HashTree", bool],
    ) -> None:
        """Traverses the hash tree, given a transaction. The transaction is
        recursively hashed. Upon encountering a leaf the transaction is matched
        against all stored itemsets. If any of these are a subset of the transaction
        their support count is increase by one.

        Args:
            transaction (List[str]): Transaction to match against candidates
            lower_boundary (int): Index of the item in the transaction that is to be hashed
            k (int): Length of itemsets
            visited (Dict[HashTree]): Stores the already visited leaves of the tree, as to not double down
                on counting the same itemset for one transaction.
        """
        if self.leaf:
            if visited.get(self) != None:
                return
            for itemset in self.itemsets.keys():
                if set(itemset).issubset(transaction):
                    self.itemsets[itemset] += 1
            visited[self] = True
        else:
            # Hash every item that could still start a k-itemset suffix.
            for i in range(lower_boundary, len(transaction) - k + self.depth + 1):
                hash_value = self.hash_func(transaction, i)
                child = self.children.get(hash_value)
                if child:
                    child.transaction_counting(transaction, i + 1, k, visited)

    def get_frequent_itemsets(
        self, min_support: float, transaction_count: int
    ) -> Dict[Tuple[str], float]:
        """Finds all itemsets in the tree, whose count / len(transactions) >= min_support and
        returns them.

        Args:
            min_support (float): Minimum support
            transaction_count (int): Number of transactions in the database

        Returns:
            Dict[Tuple[str], float]: Dictionary containing pairs of itemsets, satisfying the minimum
            support constraint and their support.
        """
        if self.leaf:
            return {
                itemset: support / transaction_count
                for itemset, support in self.itemsets.items()
                if support / transaction_count >= min_support
            }
        else:
            itemsets = {}
            for child_node in self.children.values():
                itemsets.update(
                    child_node.get_frequent_itemsets(
                        min_support, transaction_count)
                )
            return itemsets

    def get_all_itemsets(self) -> Dict[Tuple[str], int]:
        """Returns all stored itemsets and their counts.
        Note: This method should only be used for testing

        Returns:
            Dict[Tuple[str], int]: Dictionary with itemset, count pairs.
        """
        if self.leaf:
            return self.itemsets
        else:
            result = {}
            for child in self.children.values():
                result.update(child.get_all_itemsets())
            return result

    def number_items(self) -> int:
        """Returns the number of itemsets stored in the tree

        Returns:
            int: Number of itemsets
        """
        if self.leaf:
            return len(self.itemsets)
        items = 0
        for child in self.children.values():
            items += child.number_items()
        return items | /rule_mining_algs-0.1.1-py3-none-any.whl/algs/hash_tree.py | 0.935391 | 0.594963 | hash_tree.py | pypi |
from typing import Any, Callable, Dict
from algs.apriori import a_close
from algs.fp_tree import fp_growth
from algs.gar import gar
from algs.gar_plus import gar_plus
from algs.hclique import hclique
from algs.quantitative import quantitative_itemsets
from algs.rule_gen import generate_rules, minimal_non_redundant_rules
import pandas as pd
class NoMiningAlgorithmException(Exception):
    """Raised when no callable itemset- or rule-mining algorithm is supplied."""
    pass
class WrongArgumentException(Exception):
    """Raised when an argument name does not exist on the target callable."""
    pass
class NotAValidCallableException(Exception):
    """Raised when arguments are set for a callable not registered in the model."""
    pass
class Model:
    """Sets up a pipeline to transform the data into a set of association rules.

    The pipeline has three stages: an optional ``transformer`` that
    preprocesses the raw data, an ``itemset_miner`` that extracts frequent
    itemsets, and a ``rule_miner`` that turns itemsets into rules.
    """

    def __init__(
        self, transformer: Callable, itemset_miner: Callable, rule_miner: Callable
    ) -> None:
        """Stores the pipeline stages.

        Args:
            transformer (Callable): Optional preprocessing step; may be None.
            itemset_miner (Callable): Algorithm for mining frequent itemsets.
            rule_miner (Callable): Algorithm for mining rules from itemsets.

        Raises:
            NoMiningAlgorithmException: If itemset_miner or rule_miner is
                missing or not callable.
        """
        if not itemset_miner or not callable(itemset_miner):
            raise NoMiningAlgorithmException(
                "Need to specify an algorithm for mining frequent itemsets."
            )
        if not rule_miner or not callable(rule_miner):
            raise NoMiningAlgorithmException(
                "Need to specify an algorithm for mining rules."
            )
        self.transformer = transformer
        self.itemset_miner = itemset_miner
        self.rule_miner = rule_miner
        # Per-stage keyword arguments, keyed by the stage callable itself.
        if self.transformer:
            self.args = {
                self.transformer: {},
                self.itemset_miner: {},
                self.rule_miner: {},
            }
        else:
            self.args = {self.itemset_miner: {}, self.rule_miner: {}}

    def set_args(self, func: Callable, args: Dict[str, Any]) -> None:
        """Associates with the function which was passed in the constructor
        all parameters.

        Args:
            func (Callable): Function that is executed at some stage of the model
            args (Dict[str, Any]): Dictionary mapping from argument names to values that
                will be passed to the arguments having the same name.

        Raises:
            NotAValidCallableException: If func is not one of the stored stages.
            WrongArgumentException: If a name in args is not a parameter of func.
        """
        if func not in self.args:
            raise NotAValidCallableException(
                "func arg must be a function that's been set in the constructor.")
        # Validate names against the callable's declared positional parameters.
        names = func.__code__.co_varnames[:func.__code__.co_argcount]
        for name in args.keys():
            if name not in names:
                raise WrongArgumentException(
                    f"{func.__name__} does not have an argument named {name}")
        self.args[func] = args

    def run(self, data: pd.DataFrame) -> pd.DataFrame:
        """ Transforms and mines the given data, using the stored functions and arguments.

        Args:
            data (pd.DataFrame): Data to be mined

        Returns:
            pd.DataFrame: Resulting association rules
        """
        if self.transformer:
            data = self.transformer(data, **self.args[self.transformer])
        itemsets = self.itemset_miner(data, **self.args[self.itemset_miner])
        return self.rule_miner(itemsets, **self.args[self.rule_miner])
class StandardMiner(Model):
    """Uses the fp_growth algorithm to mine frequent itemsets.
    """

    def __init__(self, transformer: Callable = None, gen_rules: Callable = generate_rules) -> None:
        super().__init__(transformer, fp_growth, gen_rules)
class HyperCliqueMiner(Model):
    """Uses the hyperclique miner for mining frequent itemsets.
    """

    def __init__(self, transformer: Callable = None, gen_rules: Callable = generate_rules) -> None:
        super().__init__(transformer, hclique, gen_rules)
class QuantitativeMiner(Model):
"""Uses the quantitative miner with dynamic interval boundaries.
"""
def __init__(self, gen_rules: Callable = generate_rules) -> None:
super().__init__(None, quantitative_itemsets, gen_rules)
class MinimalNonRedudantMiner(Model):
"""Determines frequent closed itemsets and then minimal non redundant rules.
"""
def __init__(self, transformer: Callable = None) -> None:
super().__init__(transformer, a_close, minimal_non_redundant_rules)
class GeneticAlgorithmMiner(Model):
"""Uses a genetic algorithm to discover itemsets.
"""
def __init__(self) -> None:
super().__init__(None, gar, generate_rules)
class GarPlusMiner(Model):
"""Uses a the gar-plus algorithm to discover itemsets.
"""
def __init__(self) -> None:
super().__init__(None, gar_plus, lambda x: x) | /rule_mining_algs-0.1.1-py3-none-any.whl/algs/models.py | 0.91366 | 0.32536 | models.py | pypi |
class Rule34Post:
"""
The data structure for images on rule34. By default, all items are none,
they will only be something else if rule34.xxx specifies a value.
if ``initialised`` is False, that means somehow this object wasn't initialised properly, and you should discard it
"""
initialised = False # If this is false, the post data isn't complete for whatever reason, dont use it
# The image's data
height = None # Image dimension height
width = None # Image dimension width
score = None # The image's user determined rating
file_url = None # The image URL
id = None # The id generated by rule34 for the image
tags = None # All the tags associated with this image
parent_id = None # If this post is a child, this will show the ID of its parent
has_children = None # Is this post a parent?
has_comments = None # Are there comments on this post?
has_notes = None # Are there notes on this post?
created_at = None # When the post was posted, funnily enough
change = None # Not sure what this is used for, but all posts have it. If you know, leave an issue telling me
md5 = None # The MD5 hash of the post, i have no idea why its necessary to expose, but rule34.xxx generates it
creator_ID = None # The post author's ID
rating = None # The rating of the post, pretty much always "e", ie Explicit
status = None # Not sure what this is used for, but all posts have it. If you know, leave an issue telling me
source = None # The source of the image, if listed
# SAMPLE VERSION - These are smaller images, saving some data, if necessary
sample_url = None # Sample Image URL
sample_height = None # Sample image dimension height
sample_width = None # Sample image dimension width
# PREVIEW VERSION - A TINY version of the image, suitable for thumbnails
preview_url = None # Preview image URL
preview_height = None # Preview image height
preview_width = None # Preview image width
def parse(self, post):
"""Processes the data returned by rule34 into a more useful object"""
# If for whatever reason an attribute isn't in the data returned by r34, we set it to None
try:
self.height = int(post['@height']) if '@height' in post else None
self.width = int(post['@width']) if '@width' in post else None
except TypeError:
# Occasionally rule34 sends invalid height/width values, this catches that
pass
self.score = int(post['@score']) if '@score' in post else None
self.file_url = str(post['@file_url']) if '@file_url' in post else None
self.id = str(post['@id']) if '@id' in post else None
self.tags = post['@tags'].strip().split(" ") if '@tags' in post else None
self.parent_id = int(post['@parent_ID']) if '@parentID' in post else None
try:
self.has_children = False if post['@has_children'] == "false" else True
except KeyError:
self.has_children = None
self.has_comments = False if post['@has_comments'] == "false" else True
self.has_notes = False if post['@has_notes'] == "false" else True
self.created_at = str(post['@created_at']) if '@created_at' in post else None
self.change = str(post['@change']) if '@change' in post else None
self.md5 = str(post['@md5']) if '@md5' in post else None
self.creator_ID = int(post['@creator_id']) if '@creator_id' in post else None
self.rating = str(post['@rating']) if '@rating' in post else None
self.status = str(post['@status']) if '@status' in post else None
self.source = str(post['@source']) if '@source' in post else None
# SAMPLE VERSION - These are smaller images, saving some data, if necessary
self.sample_url = str(post['@sample_url']) if '@sample_url' in post else None
self.sample_height = int(post['@sample_height']) if '@sample_height' in post else None
self.sample_width = int(post['@sample_width']) if '@sample_width' in post else None
# PREVIEW VERSION - A TINY version of the image, suitable for thumbnails
self.preview_url = str(post['@preview_url']) if '@preview_url' in post else None
self.preview_height = int(post['@preview_height']) if '@preview_height' in post else None
self.preview_width = int(post['@preview_width']) if '@preview_width' in post else None
self.initialised = True | /rule34_new-1.0.4-py3-none-any.whl/rule34/objectClasses.py | 0.503418 | 0.384508 | objectClasses.py | pypi |
class Rule34Post:
"""
The data structure for images on rule34. By default, all items are none,
they will only be something else if rule34.xxx specifies a value.
if ``initialised`` is False, that means somehow this object wasn't initialised properly, and you should discard it
"""
initialised = False # If this is false, the post data isn't complete for whatever reason, dont use it
# The image's data
height = None # Image dimension height
width = None # Image dimension width
score = None # The image's user determined rating
file_url = None # The image URL
id = None # The id generated by rule34 for the image
tags = None # All the tags associated with this image
parent_id = None # If this post is a child, this will show the ID of its parent
has_children = None # Is this post a parent?
has_comments = None # Are there comments on this post?
has_notes = None # Are there notes on this post?
created_at = None # When the post was posted, funnily enough
change = None # Not sure what this is used for, but all posts have it. If you know, leave an issue telling me
md5 = None # The MD5 hash of the post, i have no idea why its necessary to expose, but rule34.xxx generates it
creator_ID = None # The post author's ID
rating = None # The rating of the post, pretty much always "e", ie Explicit
status = None # Not sure what this is used for, but all posts have it. If you know, leave an issue telling me
source = None # The source of the image, if listed
# SAMPLE VERSION - These are smaller images, saving some data, if necessary
sample_url = None # Sample Image URL
sample_height = None # Sample image dimension height
sample_width = None # Sample image dimension width
# PREVIEW VERSION - A TINY version of the image, suitable for thumbnails
preview_url = None # Preview image URL
preview_height = None # Preview image height
preview_width = None # Preview image width
def parse(self, post):
"""Processes the data returned by rule34 into a more useful object"""
# If for whatever reason an attribute isn't in the data returned by r34, we set it to None
self.height = int(post['@height']) if '@height' in post else None
self.width = int(post['@width']) if '@width' in post else None
self.score = int(post['@score']) if '@score' in post else None
self.file_url = str(post['@file_url']) if '@file_url' in post else None
self.id = str(post['@id']) if '@id' in post else None
self.tags = post['@tags'].strip().split(" ") if '@tags' in post else None
self.parent_id = int(post['@parent_ID']) if '@parentID' in post else None
try:
self.has_children = False if post['@has_children'] == "false" else True
except KeyError:
self.has_children = None
self.has_comments = False if post['@has_comments'] == "false" else True
self.has_notes = False if post['@has_notes'] == "false" else True
self.created_at = str(post['@created_at']) if '@created_at' in post else None
self.change = str(post['@change']) if '@change' in post else None
self.md5 = str(post['@md5']) if '@md5' in post else None
self.creator_ID = int(post['@creator_id']) if '@creator_id' in post else None
self.rating = str(post['@rating']) if '@rating' in post else None
self.status = str(post['@status']) if '@status' in post else None
self.source = str(post['@source']) if '@source' in post else None
# SAMPLE VERSION - These are smaller images, saving some data, if necessary
self.sample_url = str(post['@sample_url']) if '@sample_url' in post else None
self.sample_height = int(post['@sample_height']) if '@sample_height' in post else None
self.sample_width = int(post['@sample_width']) if '@sample_width' in post else None
# PREVIEW VERSION - A TINY version of the image, suitable for thumbnails
self.preview_url = str(post['@preview_url']) if '@preview_url' in post else None
self.preview_height = int(post['@preview_height']) if '@preview_height' in post else None
self.preview_width = int(post['@preview_width']) if '@preview_width' in post else None
self.initialised = True | /rule34-1.7.4.tar.gz/rule34-1.7.4/Rule34/objectClasses.py | 0.525369 | 0.384739 | objectClasses.py | pypi |
from ruleau import All, ApiAdapter, OverrideLevel, execute, rule
@rule(rule_id="rul_child", name="Has children")
def has_children(_, payload):
"""
Checks whether the custom has any children.
>>> has_children(None, {"data": {"number_of_children": 0}})
False
>>> has_children(None, {"data": {"number_of_children": 1}})
True
"""
return payload["data"]["number_of_children"] > 0
@rule(rule_id="rul_cap", name="Have sufficient capital", depends_on=[has_children])
def has_sufficient_capital(context_test, payload):
"""
Checks that the client has sufficient capital, considering the number of
children they have.
>>> from ruleau import mock_context
>>> context = mock_context({
... "has_children": True
... })
>>> has_sufficient_capital(context, {"data": {"capital": 10_000}})
False
>>> from ruleau import mock_context
>>> context = mock_context({
... "has_children": False
... })
>>> has_sufficient_capital(context, {"data": {"capital": 10_000}})
True
"""
if context_test.get_result(has_children):
return payload["data"]["capital"] > 12_000
else:
return payload["data"]["capital"] > 8_000
@rule(rule_id="rul_01", name="kyc_threshold", override_level=OverrideLevel.DISABLED)
def kyc_risk_greater_than_threshold(_, payload):
"""
Know Your Customer (KYC) score must be greater than the threshold,
in this case greater than `LOW`
:Override Guidance: kyc_threshold to mark this failure into review
>>> kyc_risk_greater_than_threshold(None, {"data": {"kyc": "HIGH"}})
False
>>> kyc_risk_greater_than_threshold(None, {"data": {"kyc": "LOW"}})
True
"""
return payload["data"]["kyc"] == "LOW"
@rule(rule_id="rul_02", name="fico_score")
def fico_score_greater_than_threshold(_, payload):
"""
FICO score must be greater than 630
:Owner: Penny Farthing
:Override Guidance: Feel free to override in almost any circumstance
>>> fico_score_greater_than_threshold(None, {"data": {"fico_score": 400}})
False
>>> fico_score_greater_than_threshold(None, {"data": {"fico_score": 630}})
False
>>> fico_score_greater_than_threshold(None, {"data": {"fico_score": 650}})
True
"""
return payload["data"]["fico_score"] > 630
@rule(rule_id="rul_ccjs_required", name="CCJS check required")
def ccjs_check_required(_, payload):
return payload["data"]["ccjs_required"]
@rule(rule_id="rul_03", name="no_ccjs", run_if=ccjs_check_required)
def has_no_ccjs(_, payload):
"""
Make sure customer has no county court judgements
>>> has_no_ccjs(None, {"data": {"ccjs": []}})
True
>>> has_no_ccjs(None, {"data": {"ccjs": ["Example CCJ"]}})
False
>>> has_no_ccjs(None, {"data": {"ccjs": [{"example": "CCJ Object"}]}})
False
"""
return len(payload["data"]["ccjs"]) == 0 | /ruleau-0.7.1-py3-none-any.whl/examples/kitchen_sink/lending_rules.py | 0.800497 | 0.292739 | lending_rules.py | pypi |
from lending_rules import (
ccjs_check_required,
fico_score_greater_than_threshold,
has_no_ccjs,
has_sufficient_capital,
kyc_risk_greater_than_threshold,
)
from ruleau import All, ApiAdapter, Process, execute, rule
@rule("rul-101A", "Causes skipped")
def causes_skip(_, __):
return False
@rule("rul_101", "Skipped Rule", run_if=causes_skip)
def skipped_rule(_, __):
return False
@rule("rul-102A", "Causes not skipped")
def causes_no_skip(_, __):
return True
@rule("rul_102", "Not Skipped Rule", run_if=causes_no_skip)
def not_skipped_rule(_, __):
return False
@rule("rul200-c", "Skipped tree bottom")
def skipped_tree_bottom(_, __):
return False
@rule(
"rul200-a",
"Skipped tree 1 skipped",
depends_on=[skipped_tree_bottom],
run_if=causes_skip,
)
def skipped_tree_1_a(_, __):
return False
@rule(
"rul200-b",
"Skipped tree 1 not skipped",
depends_on=[skipped_tree_bottom],
run_if=causes_no_skip,
)
def skipped_tree_1_b(_, __):
return False
@rule("rul200", "Skipped tree 1 Top", depends_on=[skipped_tree_1_a, skipped_tree_1_b])
def skipped_tree_1(_, __):
return False
conditionals = All(
"ID",
"name",
skipped_rule,
causes_no_skip,
skipped_tree_1,
)
will_lend = All(
"WL_01",
"Will Lend",
kyc_risk_greater_than_threshold,
fico_score_greater_than_threshold,
has_no_ccjs,
has_sufficient_capital,
conditionals,
)
if __name__ == "__main__":
api_adapter = ApiAdapter(base_url="http://127.0.0.1:8000")
process = Process.create_process_from_rule(will_lend)
api_adapter.sync_process(process)
result = execute(
will_lend,
{
"data": {
"fico_score": 150,
"ccjs": [],
"kyc": "low",
"number_of_children": 1,
"capital": 10_000,
"ccjs_required": True,
}
},
process,
api_adapter=api_adapter,
case_id="132",
)
print("Result", result.result) | /ruleau-0.7.1-py3-none-any.whl/examples/kitchen_sink/rules.py | 0.526099 | 0.19112 | rules.py | pypi |
from rule_based_block_rules import account_status
from ruleau import Process, execute
if __name__ == "__main__":
execution_result = execute(
account_status,
{
"loc_record": {
"loc_number": 12345,
"current_dnp": 0,
"current_cons_full_pmt": 4,
"expected_payments_passed": 1,
"expected_payments_future": 15,
"total_pending_payment": 1234.51,
"total_balance": 2345.21,
"principal_balance": 1995.85,
"plaid_active_flag": True,
"draw": [
{
"draw_date": "2019-01-01T00:00:00.000Z",
"amount": 5000.01,
"principal_post_draw": 8123.50,
},
{
"draw_date": "2019-06-01T00:00:00.000Z",
"amount": 4000.02,
"principal_post_draw": 5123.50,
},
],
"missed_pmt": [
{
"missed_pmt_cluster": 1,
"payment_date": "2021-01-01T00:00:00.000Z",
},
{
"missed_pmt_cluster": 1,
"payment_date": "2020-12-26T00:00:00.000Z",
},
{
"missed_pmt_cluster": 2,
"payment_date": "2020-11-01T00:00:00.000Z",
},
],
"modification": [
{
"applied": "2020-01-01T00:00:00.000Z",
"suspended": "2020-02-01T00:00:00.000Z",
"mod_cluster": 1,
"reduction_perc": 50,
"instalment_amount": 200.05,
},
{
"applied": "2020-02-01T00:00:00.000Z",
"suspended": "2020-02-05T00:00:00.000Z",
"mod_cluster": 1,
"reduction_perc": 100,
"instalment_amount": 0,
},
{
"applied": "2020-03-01T00:00:00.000Z",
"suspended": "2020-01-01T00:00:00.000Z",
"mod_cluster": 2,
"reduction_perc": 75,
"instalment_amount": 100.03,
},
],
}
},
Process.create_process_from_rule(account_status),
)
for rule_result in execution_result.rule_results:
print(
"{0} - {1} - {2}".format(
rule_result.rule.id.ljust(20),
str(rule_result.result).ljust(5),
rule_result.rule.name,
)
) | /ruleau-0.7.1-py3-none-any.whl/examples/account_status_rules/main.py | 0.482917 | 0.288723 | main.py | pypi |
from datetime import datetime, timedelta
from ruleau import All, rule
@rule(rule_id="MP-001-B1", name="Days Not Paid")
def days_not_paid(_, payload):
    """
    Block if the latest payment is now late (i.e. Days not paid is greater than zero).

    >>> days_not_paid(None, {"loc_record": {"current_dnp": 1}})
    False
    >>> days_not_paid(None, {"loc_record": {"current_dnp": 0}})
    True
    """
    # The rule passes only while the account has zero days-not-paid.
    days_late = payload["loc_record"]["current_dnp"]
    return days_late == 0
@rule(rule_id="MP-001-U1", name="Two Consecutive Payments Made")
def two_consecutive_payments_made(_, payload):
    """
    Unblock if two consecutive, non-reduced payments been made.

    :Override Guidance: Testing out override guidance param.

    >>> two_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 1, "expected_payments_passed": 10}})
    False
    >>> two_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 2, "expected_payments_passed": 10}})
    True
    >>> two_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 0, "expected_payments_passed": 1}})
    False
    >>> two_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 1, "expected_payments_passed": 1}})
    True
    """
    record = payload["loc_record"]
    # Require two consecutive full payments, capped at the number of payment
    # dates the account has actually seen (young accounts cannot yet have two).
    required = min(2, record["expected_payments_passed"])
    return record["current_cons_full_pmt"] >= required
@rule(rule_id="MP-002-B1", name="Two Missed Payments in 90 Days")
def two_missed_payments(_, payload):
    """
    Block if there are two missed payments within the last 90 days.

    :Override Guidance: Test guidance

    >>> from datetime import datetime, timedelta
    >>> fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    >>> days_ago = lambda d: (datetime.today() - timedelta(days=d)).strftime(fmt)
    >>> two_missed_payments(None, {"loc_record": {"missed_pmt": [
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(10)},
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(20)},
    ... ]}})
    False
    >>> two_missed_payments(None, {"loc_record": {"missed_pmt": [
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(10)},
    ...     {"missed_pmt_cluster": 2, "payment_date": days_ago(120)},
    ... ]}})
    True
    """
    # Compute the window cutoff once rather than re-deriving it per payment.
    cutoff = datetime.today() - timedelta(days=90)
    # Collect every missed payment dated inside the 90-day window.
    recent_missed = [
        pmt
        for pmt in payload["loc_record"]["missed_pmt"]
        if datetime.strptime(pmt["payment_date"], "%Y-%m-%dT%H:%M:%S.%fZ") >= cutoff
    ]
    # Pass only while fewer than two payments were missed in the window.
    return len(recent_missed) < 2
@rule(rule_id="MP-002-U1", name="Three Consecutive Payments Made")
def three_consecutive_payments_made(_, payload):
    """
    Unblock if three consecutive, non-reduced payments have been made.

    >>> three_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 2, "expected_payments_passed": 10}})
    False
    >>> three_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 3, "expected_payments_passed": 10}})
    True
    >>> three_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 0, "expected_payments_passed": 1}})
    False
    >>> three_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 2, "expected_payments_passed": 2}})
    True
    """
    record = payload["loc_record"]
    # Require three consecutive full payments, capped at the number of payment
    # dates the account has actually seen (young accounts cannot yet have three).
    required = min(3, record["expected_payments_passed"])
    return record["current_cons_full_pmt"] >= required
@rule(rule_id="MP-003-B1", name="Three Missed Payments in 90 Days")
def three_missed_payments(_, payload):
    """
    Block if there are three missed payments within the last 90 days.

    >>> from datetime import datetime, timedelta
    >>> fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    >>> days_ago = lambda d: (datetime.today() - timedelta(days=d)).strftime(fmt)
    >>> three_missed_payments(None, {"loc_record": {"missed_pmt": [
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(5)},
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(12)},
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(19)},
    ... ]}})
    False
    >>> three_missed_payments(None, {"loc_record": {"missed_pmt": [
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(5)},
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(35)},
    ...     {"missed_pmt_cluster": 2, "payment_date": days_ago(400)},
    ... ]}})
    True
    """
    # Compute the window cutoff once rather than re-deriving it per payment.
    cutoff = datetime.today() - timedelta(days=90)
    # Collect every missed payment dated inside the 90-day window.
    recent_missed = [
        pmt
        for pmt in payload["loc_record"]["missed_pmt"]
        if datetime.strptime(pmt["payment_date"], "%Y-%m-%dT%H:%M:%S.%fZ") >= cutoff
    ]
    # Pass only while fewer than three payments were missed in the window.
    return len(recent_missed) < 3
@rule(rule_id="MP-003-U1", name="Four Consecutive Payments Made")
def four_consecutive_payments_made(_, payload):
    """
    Unblock if four consecutive, non-reduced payments have been made.

    >>> four_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 3, "expected_payments_passed": 10}})
    False
    >>> four_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 4, "expected_payments_passed": 10}})
    True
    >>> four_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 2, "expected_payments_passed": 3}})
    False
    >>> four_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 3, "expected_payments_passed": 3}})
    True
    """
    record = payload["loc_record"]
    # Require four consecutive full payments, capped at the number of payment
    # dates the account has actually seen (young accounts cannot yet have four).
    required = min(4, record["expected_payments_passed"])
    return record["current_cons_full_pmt"] >= required
@rule(rule_id="MP-004-B1", name="More Than Three Missed Payments in 90 Days")
def more_than_three_missed_payments(_, payload):
    """
    Block if there are more than three missed payments within the last 90 days.

    >>> from datetime import datetime, timedelta
    >>> fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    >>> days_ago = lambda d: (datetime.today() - timedelta(days=d)).strftime(fmt)
    >>> more_than_three_missed_payments(None, {"loc_record": {"missed_pmt": [
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(5)},
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(12)},
    ...     {"missed_pmt_cluster": 2, "payment_date": days_ago(30)},
    ...     {"missed_pmt_cluster": 2, "payment_date": days_ago(37)},
    ... ]}})
    False
    >>> more_than_three_missed_payments(None, {"loc_record": {"missed_pmt": [
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(5)},
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(12)},
    ...     {"missed_pmt_cluster": 1, "payment_date": days_ago(19)},
    ...     {"missed_pmt_cluster": 2, "payment_date": days_ago(400)},
    ... ]}})
    True
    """
    # Compute the window cutoff once rather than re-deriving it per payment.
    cutoff = datetime.today() - timedelta(days=90)
    # Collect every missed payment dated inside the 90-day window.
    recent_missed = [
        pmt
        for pmt in payload["loc_record"]["missed_pmt"]
        if datetime.strptime(pmt["payment_date"], "%Y-%m-%dT%H:%M:%S.%fZ") >= cutoff
    ]
    # Pass only while at most three payments were missed in the window.
    return len(recent_missed) < 4
@rule(rule_id="MP-004-U1", name="Six Consecutive Payments Made")
def six_consecutive_payments_made(_, payload):
    """
    Unblock if six consecutive, non-reduced payments have been made.

    >>> six_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 6, "expected_payments_passed": 10}})
    True
    >>> six_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 5, "expected_payments_passed": 10}})
    False
    >>> six_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 5, "expected_payments_passed": 5}})
    True
    >>> six_consecutive_payments_made(None, {"loc_record": {"current_cons_full_pmt": 4, "expected_payments_passed": 5}})
    False
    """
    record = payload["loc_record"]
    # Require six consecutive full payments (the original comment wrongly said
    # "three"), capped at the number of payment dates the account has actually
    # seen so young accounts are not asked for more payments than have elapsed.
    required = min(6, record["expected_payments_passed"])
    return record["current_cons_full_pmt"] >= required
@rule(rule_id="PR-001-B1", name="Two Payment Reductions in 180 Days")
def two_payment_reductions_in_180_days(_, payload):
    """
    Block if there are two or more payment reductions within the last 180 days.

    >>> from datetime import datetime, timedelta
    >>> fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    >>> days_ago = lambda d: (datetime.today() - timedelta(days=d)).strftime(fmt)
    >>> two_payment_reductions_in_180_days(None, {"loc_record": {
    ...     "modification": [
    ...         {"applied": days_ago(30), "reduction_perc": 50},
    ...         {"applied": days_ago(60), "reduction_perc": 75},
    ...     ]
    ... }})
    True
    >>> two_payment_reductions_in_180_days(None, {"loc_record": {
    ...     "modification": [
    ...         {"applied": days_ago(30), "reduction_perc": 50},
    ...         {"applied": days_ago(200), "reduction_perc": 75},
    ...     ]
    ... }})
    False
    """
    # Compute the window cutoff once rather than re-deriving it per modification.
    cutoff = datetime.today() - timedelta(days=180)
    # Collect every genuine reduction (reduction_perc > 0) applied in the window.
    recent_reductions = [
        mod
        for mod in payload["loc_record"]["modification"]
        if mod["reduction_perc"] > 0
        and datetime.strptime(mod["applied"], "%Y-%m-%dT%H:%M:%S.%fZ") >= cutoff
    ]
    # NOTE(review): unlike the MP-* block rules (which return True on "pass"),
    # this returns True when the blocking condition IS met — confirm intended
    # polarity with the ruleau consumers before changing; behavior preserved.
    return len(recent_reductions) >= 2
@rule(rule_id="PR-001-U1", name="50% of Principal Paid")
def fifty_percent_principal_paid(_, payload):
    """
    Unblock if 50% of the principal balance has been paid following the most recent payment.

    >>> fifty_percent_principal_paid(None, {"loc_record": {
    ...     "principal_balance": 5000,
    ...     "draw": [
    ...         {
    ...             "draw_date": "2021-08-01T00:00:00.000Z",
    ...             "amount": 3000.00,
    ...             "principal_post_draw": 12000.00
    ...         },
    ...         {
    ...             "draw_date": "2019-07-01T00:00:00.000Z",
    ...             "amount": 2000.00,
    ...             "principal_post_draw": 15000.00
    ...         }
    ...     ]
    ... }})
    True
    >>> fifty_percent_principal_paid(None, {"loc_record": {
    ...     "principal_balance": 8000,
    ...     "draw": [
    ...         {
    ...             "draw_date": "2021-08-01T00:00:00.000Z",
    ...             "amount": 3000.00,
    ...             "principal_post_draw": 12000.00
    ...         },
    ...         {
    ...             "draw_date": "2019-07-01T00:00:00.000Z",
    ...             "amount": 2000.00,
    ...             "principal_post_draw": 15000.00
    ...         }
    ...     ]
    ... }})
    False
    """
    record = payload["loc_record"]
    draws = record["draw"]
    # The threshold is 50% of the principal right after the latest draw; with
    # no draws on record it stays at zero.
    threshold = draws[0]["principal_post_draw"] * 0.5 if draws else 0
    # Unblocked once the current principal has been paid down to the threshold.
    return record["principal_balance"] <= threshold
@rule(rule_id="PR-002-B1", name="Two Payment Reductions in 90 Days")
def two_payment_reductions_in_90_days(_, payload):
    """
    Block if there are two or more payment reductions within the last 90 days.

    >>> from datetime import datetime, timedelta
    >>> fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
    >>> days_ago = lambda d: (datetime.today() - timedelta(days=d)).strftime(fmt)
    >>> two_payment_reductions_in_90_days(None, {"loc_record": {
    ...     "modification": [
    ...         {"applied": days_ago(10), "reduction_perc": 50},
    ...         {"applied": days_ago(40), "reduction_perc": 75},
    ...     ]
    ... }})
    True
    >>> two_payment_reductions_in_90_days(None, {"loc_record": {
    ...     "modification": [
    ...         {"applied": days_ago(10), "reduction_perc": 50},
    ...         {"applied": days_ago(120), "reduction_perc": 75},
    ...     ]
    ... }})
    False
    """
    # Compute the window cutoff once rather than re-deriving it per modification.
    cutoff = datetime.today() - timedelta(days=90)
    # Collect every genuine reduction (reduction_perc > 0) applied in the window.
    recent_reductions = [
        mod
        for mod in payload["loc_record"]["modification"]
        if mod["reduction_perc"] > 0
        and datetime.strptime(mod["applied"], "%Y-%m-%dT%H:%M:%S.%fZ") >= cutoff
    ]
    # NOTE(review): unlike the MP-* block rules (which return True on "pass"),
    # this returns True when the blocking condition IS met — confirm intended
    # polarity with the ruleau consumers before changing; behavior preserved.
    return len(recent_reductions) >= 2
@rule(rule_id="PR-002-U1", name="75% of Principal Paid")
def seventy_five_percent_principal_paid(_, payload):
    """
    Unblock if 75% of the principal balance has been paid following the most recent payment.

    >>> seventy_five_percent_principal_paid(None, {"loc_record": {
    ...     "principal_balance": 2500,
    ...     "draw": [
    ...         {
    ...             "draw_date": "2021-08-01T00:00:00.000Z",
    ...             "amount": 3000.00,
    ...             "principal_post_draw": 12000.00
    ...         },
    ...         {
    ...             "draw_date": "2019-07-01T00:00:00.000Z",
    ...             "amount": 2000.00,
    ...             "principal_post_draw": 15000.00
    ...         }
    ...     ]
    ... }})
    True
    >>> seventy_five_percent_principal_paid(None, {"loc_record": {
    ...     "principal_balance": 8000,
    ...     "draw": [
    ...         {
    ...             "draw_date": "2021-08-01T00:00:00.000Z",
    ...             "amount": 3000.00,
    ...             "principal_post_draw": 12000.00
    ...         },
    ...         {
    ...             "draw_date": "2019-07-01T00:00:00.000Z",
    ...             "amount": 2000.00,
    ...             "principal_post_draw": 15000.00
    ...         }
    ...     ]
    ... }})
    False
    """
    record = payload["loc_record"]
    draws = record["draw"]
    # Paying 75% of the post-draw principal leaves 25% outstanding, so the
    # threshold is 25% of the principal right after the latest draw (zero when
    # the account has never drawn).
    threshold = draws[0]["principal_post_draw"] * 0.25 if draws else 0
    # Unblocked once the current principal has been paid down to the threshold.
    return record["principal_balance"] <= threshold
@rule(rule_id="PR-003-B1", name="Two Consecutive Payment Reductions")
def two_consecutive_payment_reductions(_, payload):
    """
    Block if there are two consecutive payment reductions.

    >>> two_consecutive_payment_reductions(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "mod_cluster": 1,
    ...             "reduction_perc": 50,
    ...         },
    ...         {
    ...             "mod_cluster": 2,
    ...             "reduction_perc": 75,
    ...         },
    ...         {
    ...             "applied": "2021-06-01T00:00:00.000Z",
    ...             "mod_cluster": 3,
    ...             "reduction_perc": 75,
    ...         }
    ...     ]
    ... }})
    True
    >>> two_consecutive_payment_reductions(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "mod_cluster": 1,
    ...             "reduction_perc": 50,
    ...         },
    ...         {
    ...             "mod_cluster": 1,
    ...             "reduction_perc": 75,
    ...         },
    ...         {
    ...             "mod_cluster": 2,
    ...             "reduction_perc": 75,
    ...         }
    ...     ]
    ... }})
    False
    """
    # Group genuine reductions (reduction_perc > 0) by their consecutive-run
    # cluster id; setdefault replaces the hand-rolled defaultdict pattern and
    # the unnecessary str() key conversion of the original.
    clusters = {}
    for mod in payload["loc_record"]["modification"]:
        if mod["reduction_perc"] > 0:
            clusters.setdefault(mod["mod_cluster"], []).append(mod)
    # Pass only if no cluster contains more than one reduction, i.e. there is
    # no run of two or more consecutive reduced payments.
    return not any(len(mods) > 1 for mods in clusters.values())
@rule(rule_id="PR-003-U2", name="No Reductions Since Latest Draw")
def no_reductions_since_latest_draw(_, payload):
    """
    Unblock if no payment reduction has taken place since the last time a
    draw was made on the account.
    >>> no_reductions_since_latest_draw(None, {"loc_record": {
    ...     "draw": [
    ...         {
    ...             "draw_date": "2021-08-01T00:00:00.000Z",
    ...             "amount": 5000.00,
    ...             "principal_post_draw": 12000.00
    ...         },
    ...         {
    ...             "draw_date": "2019-07-01T00:00:00.000Z",
    ...             "amount": 2000.00,
    ...             "principal_post_draw": 17000.00
    ...         }
    ...     ],
    ...     "modification": [
    ...         {
    ...             "applied": "2021-07-01T00:00:00.000Z",
    ...             "suspended": "2021-07-21T00:00:00.000Z",
    ...             "mod_cluster": 1,
    ...             "reduction_perc": 50,
    ...             "instalment_amount": 200.05
    ...         },
    ...         {
    ...             "applied": "2021-06-01T00:00:00.000Z",
    ...             "suspended": "2021-06-14T00:00:00.000Z",
    ...             "mod_cluster": 2,
    ...             "reduction_perc": 75,
    ...             "instalment_amount": 125.00
    ...         }
    ...     ]
    ... }})
    True
    >>> no_reductions_since_latest_draw(None, {"loc_record": {
    ...     "draw": [
    ...         {
    ...             "draw_date": "2021-08-01T00:00:00.000Z",
    ...             "amount": 5000.00,
    ...             "principal_post_draw": 12000.00
    ...         },
    ...         {
    ...             "draw_date": "2019-07-01T00:00:00.000Z",
    ...             "amount": 2000.00,
    ...             "principal_post_draw": 17000.00
    ...         }
    ...     ],
    ...     "modification": [
    ...         {
    ...             "applied": "2021-08-08T00:00:00.000Z",
    ...             "suspended": "2021-08-25T00:00:00.000Z",
    ...             "mod_cluster": 1,
    ...             "reduction_perc": 50,
    ...             "instalment_amount": 200.05
    ...         },
    ...         {
    ...             "applied": "2021-06-01T00:00:00.000Z",
    ...             "suspended": "2021-06-14T00:00:00.000Z",
    ...             "mod_cluster": 2,
    ...             "reduction_perc": 75,
    ...             "instalment_amount": 125.00
    ...         }
    ...     ]
    ... }})
    False
    """
    # Date of the latest draw (first entry); fall back to an early sentinel
    # when there are no draws.  Zero-padded ISO-8601 timestamps compare
    # correctly as plain strings, so no parsing is needed here.
    draws = payload["loc_record"]["draw"]
    latest_draw_date = draws[0]["draw_date"] if draws else "1900-01-01T00:00:00Z"
    # Fail if any payment reduction was applied after the latest draw.
    return not any(
        mod["reduction_perc"] > 0 and mod["applied"] > latest_draw_date
        for mod in payload["loc_record"]["modification"]
    )
@rule(rule_id="PR-004-B1", name="On Payment Holiday")
def on_payment_holiday(_, payload):
    """
    Block if the account is currently subject to an agreed payment holiday.

    (The examples below assume they are executed during 2021.)

    >>> on_payment_holiday(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "applied": "2021-07-01T00:00:00.000Z",
    ...             "suspended": "2021-12-01T00:00:00.000Z",
    ...             "instalment_amount": 0
    ...         }
    ...     ]
    ... }})
    True
    >>> on_payment_holiday(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "applied": "2021-07-01T00:00:00.000Z",
    ...             "suspended": None,
    ...             "instalment_amount": 0
    ...         }
    ...     ]
    ... }})
    True
    >>> on_payment_holiday(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "applied": "2021-07-01T00:00:00.000Z",
    ...             "suspended": "2021-08-01T00:00:00.000Z",
    ...             "instalment_amount": 0
    ...         }
    ...     ]
    ... }})
    False
    """
    date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
    # Evaluate "now" once so every modification is judged at the same instant.
    now = datetime.today()
    # An active payment holiday is a zero-instalment modification whose
    # applied/suspended window contains "now".  A null 'suspended' date is
    # treated as far-future, i.e. an open-ended holiday.
    return any(
        mod["instalment_amount"] == 0
        and datetime.strptime(mod["applied"], date_format) <= now
        and datetime.strptime(
            mod["suspended"] or "2070-01-01T00:00:00.000Z", date_format
        ) >= now
        for mod in payload["loc_record"]["modification"]
    )
@rule(rule_id="PR-004-S1", name="Had Payment Holiday")
def had_payment_holiday(_, payload):
    """
    Supporting rule to indicate whether an account has previously been on a
    payment holiday.  A pass result indicates the account has previously
    been subject to a payment holiday.

    (The examples below assume they are executed during 2021.)

    >>> had_payment_holiday(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "applied": "2021-07-01T00:00:00.000Z",
    ...             "suspended": "2021-08-01T00:00:00.000Z",
    ...             "instalment_amount": 0
    ...         }
    ...     ]
    ... }})
    True
    >>> had_payment_holiday(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "applied": "2021-07-01T00:00:00.000Z",
    ...             "suspended": None,
    ...             "instalment_amount": 0
    ...         }
    ...     ]
    ... }})
    False
    >>> had_payment_holiday(None, {"loc_record": {
    ...     "modification": [
    ...         {
    ...             "applied": "2021-07-01T00:00:00.000Z",
    ...             "suspended": "2021-12-01T00:00:00.000Z",
    ...             "instalment_amount": 0
    ...         }
    ...     ]
    ... }})
    False
    """
    date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
    # Evaluate "now" once so every modification is judged at the same instant.
    now = datetime.today()
    # Pass if any payment-holiday modification (zero instalment) started in
    # the past and has already been suspended.  A null 'suspended' date is
    # treated as far-future, so still-active holidays do not count as a
    # *previous* holiday.
    return any(
        mod["instalment_amount"] == 0
        and datetime.strptime(mod["applied"], date_format) < now
        and datetime.strptime(
            mod["suspended"] or "2070-01-01T00:00:00.000Z", date_format
        ) < now
        for mod in payload["loc_record"]["modification"]
    )
@rule(
    rule_id="PR-004-U1",
    name="Three Consecutive Payments Since Payment Holiday",
    run_if=had_payment_holiday,
)
def three_consecutive_payments_since_payment_holiday(_, payload):
    """
    Unblock if three consecutive, non-reduced payments have been made since the last payment holiday ended.
    >>> three_consecutive_payments_since_payment_holiday(None, {"loc_record": {"current_cons_full_pmt": 3}})
    True
    >>> three_consecutive_payments_since_payment_holiday(None, {"loc_record": {"current_cons_full_pmt": 2}})
    False
    """
    # Fail if the current number of consecutive full payments is less than 3.
    return payload["loc_record"]["current_cons_full_pmt"] >= 3
# Composite rules MP-001..MP-004: each combines a missed-payment condition
# with the corresponding consecutive-payments recovery rule, grouped under
# the top-level "Missed Payments" (MP) rule set below.
block_on_one_missed_payment = All(
    "MP-001",
    "Block On One Missed Payment",
    days_not_paid,
    two_consecutive_payments_made,
    description="Customer has missed their latest payment.",
)
block_on_two_missed_payments = All(
    "MP-002",
    "Block On Two Missed Payments",
    two_missed_payments,
    three_consecutive_payments_made,
    description="Customer has missed two payments in the last three months.",
)
block_on_three_missed_payments = All(
    "MP-003",
    "Block On Three Missed Payments",
    three_missed_payments,
    four_consecutive_payments_made,
    description="Customer has missed three payments in the last three months.",
)
block_on_more_than_three_missed_payments = All(
    "MP-004",
    "Block On More Than Three Missed Payments",
    more_than_three_missed_payments,
    six_consecutive_payments_made,
    description="Customer has missed more than three payments in the last six months.",
)
missed_payments = All(
    "MP",
    "Missed Payments",
    block_on_one_missed_payment,
    block_on_two_missed_payments,
    block_on_three_missed_payments,
    block_on_more_than_three_missed_payments,
    description="Accounts blocks caused by missing or late payments.",
)
# Composite rules PR-001 and PR-002: payment-reduction blocks that also
# depend on the principal-paid and latest-draw rules defined above.
block_on_two_payment_reductions_in_180_days = All(
    "PR-001",
    "Block on Two Payment Reductions in 180 Days",
    two_payment_reductions_in_180_days,
    fifty_percent_principal_paid,
    no_reductions_since_latest_draw,
    description="There have been two payment reductions in the last 180 days.",
)
block_on_two_payment_reductions_in_90_days = All(
    "PR-002",
    "Block on Two Payment Reductions in 90 Days",
    two_payment_reductions_in_90_days,
    seventy_five_percent_principal_paid,
    no_reductions_since_latest_draw,
    description="There have been two payment reductions in the last 90 days.",
)
block_on_two_consecutive_payment_reductions = All(
    "PR-003",
    "Block on Two Consecutive Payment Reductions",
    two_consecutive_payment_reductions,
    seventy_five_percent_principal_paid,
    no_reductions_since_latest_draw,
    # The previous description was a copy-paste of PR-002's "90 days" text.
    description="There have been two consecutive payment reductions.",
)
block_on_payment_holiday = All(
    "PR-004",
    "Block on Payment Holiday",
    on_payment_holiday,
    three_consecutive_payments_since_payment_holiday,
    description="The account is subject to an agreed payment holiday.",
)
# Top-level groupings: "PR" collects the payment-reduction blocks, and
# "account_status" combines missed payments and payment reductions into the
# overall account verdict (failure means the account should be blocked).
payment_reductions = All(
    "PR",
    "Payment Reductions",
    block_on_two_payment_reductions_in_180_days,
    block_on_two_payment_reductions_in_90_days,
    block_on_two_consecutive_payment_reductions,
    block_on_payment_holiday,
    description="Accounts blocks caused by multiple payment reductions.",
)
account_status = All(
    "account_status",
    "Account Status",
    missed_payments,
    payment_reductions,
    description="Overall status for the account, where failure indicates that the account should be blocked.",
)
# Ten-level chain of nested rules (nest_1 wraps nest_2 wraps ... nest_10),
# used as a nesting stress test per the descriptions below.  The varying
# name lengths exercise rendering of long rule names.
nest_10 = All(
    "nest_10",
    "Nest 10 this name is super super super super super super super super super super long",
    days_not_paid,
    description="Nest stress test",
)
nest_9 = All(
    "nest_9",
    "Nest 9",
    nest_10,
    description="Nest stress test",
)
nest_8 = All(
    "nest_8",
    "Nest 8",
    nest_9,
    description="Nest stress test",
)
nest_7 = All(
    "nest_7",
    "Nest 7 this name is very descriptive and probably too long to be useful",
    nest_8,
    description="Nest stress test",
)
nest_6 = All(
    "nest_6",
    "Nest 6 short name again",
    nest_7,
    description="Nest stress test",
)
nest_5 = All(
    "nest_5",
    "Nest 5 This name is very long, even longer than the last",
    nest_6,
    description="Nest stress test",
)
nest_4 = All(
    "nest_4",
    "Nest 4 an even longer name here to fill space",
    nest_5,
    description="Nest stress test",
)
nest_3 = All(
    "nest_3",
    "Nest 3 middle length name",
    nest_4,
    description="Nest stress test",
)
nest_2 = All(
    "nest_2",
    "Nest 2 short name",
    nest_3,
    description="Nest stress test",
)
nest_1 = All(
    "nest_1",
    "Nest 1",
    nest_2,
    description="Nest stress test",
)
 
# Rule Check
Rule Check (aka rulecheck or source rule check) is a command line system for running custom static analysis rules on C, C++, C#, and Java code. The original use case is checking a code base against coding style or coding standard rules.
Rule Check uses [srcml](https://www.srcml.org/) to parse the source code into XML and then invokes each rule's methods as appropriate to allow the rules to inspect any source code element of interest to the rule. This architecture minimizes duplicate parsing time and allows rule authors to focus on their rule logic instead of the logic needed to parse source code.
Features include:
* Parsing C, C++, C#, and Java source via srcml
* Parsing C and C++ prior to preprocessor execution (thus viewing code as developers do)
* Custom rules
* Groups of rules can be created and published in 'rulepacks'
* Projects can have custom rules within their own code base (no need to publish/install rules)
* Rules can have their own custom settings. Rule check will provide the settings to the rule via its standard config file format.
* Multiple config file inputs
* Projects can use an hierarchical set of configurations allowing organizations to provide rules across projects
* Suppression of errors and warnings without modifying source code via ignore list input
* Suppression of errors and warnings with source code comments
* Standardized output format for all rules
* Specification of sources to be analyzed via glob format or via stdin
___
### Contents
___
* [Installation](#installation)
* [Running and Configuration](#running)
* [Design Choices](#design)
* [Resources](#resources)
To learn how to write your own rules, see [how to create rules](how_to_create_rules.md).
___
### Installation
Ensure Python 3.6 or greater is present on the system (see below) and then run:
```
git clone https://github.com/e-shreve/rulecheck
cd rulecheck
pip install .
```
#### Dependencies
##### Python
Python 3.6 or greater is required.
##### srcml
srcml is a source code to xml parser that can parse C, C++, C Preprocessor, C#, and Java code. The pip install of rulecheck will not
install srcml. Find installation files at https://www.srcml.org/ and install on your system.
Version required: 1.0.0 or greater.
For easiest use, srcml should be on the path. Otherwise, the path to srcml can be provided when starting rulecheck from the command line.
##### lxml
The python xml library lxml is used over the built-in ElementTree library due to speed and additional functionality such as the ability
to obtain the line number of tag from the source XML file. lxml has been available in Wheel install format since 2016
and thus should not present an issue for users. lxml will be installed by pip automatically when installing rulecheck.
___
### <a id="running">Running and Configuration
___
```
rulecheck --help
```
#### Selecting Rules
Rules are selected by specifying one or more rule configuration files on the command line, using the -c or --config option. To specify more than one configuration file, use the config option on the command line once for each configuration file to be read.
Note that rule selection is additive across all configuration files specified. Thus, if config1.json activates ruleA and config2.json activates RuleB then both RuleA and RuleB will be active.
Example of specifying more than one configuration file:
```bash
rulecheck -c config1.json -c config2.json ./src/**/*
```
Rule configuration files are json files, with the following structure:
```JSON
{
"rules": [
{
"name" : "rulepack1.ruleA",
"settings" : {
"opt1" : "dog"
}
},
{
"name" : "rulepack1.ruleB",
"settings" : {
"opt1" : "cat"
}
}
]
}
```
At the top level, an array named "rules" must be provided. Each member of this array is a rule object.
Each rule object must consist of a "name" string. The name may contain '.' characters which are used to differentiate between
collections of rules known as rulepacks.
Optionally, a rule object may include a settings object. The full list of settings supported depends
on the particular rule. However, all rules support the following settings:
- werror: if value evaluates as true, it promotes all WARNINGS to ERRORS
- verbose: if value evaluates as true, the rule may provide additional output on stdout
True values are y, yes, t, true, on and 1; false values are n, no, f, false, off and 0.
Note that rules *may* support being specified multiple times. For example, a rule for finding banned terms or words could support multiple instantiations each with a different word or term specified:
```JSON
{
"rules": [
{
"name" : "rulepack1.bannedword",
"settings" : {
"word" : "master"
}
},
{
"name" : "rulepack1.bannedword",
"settings" : {
"word" : "slave"
}
}
]
}
```
Some rules *may*, however, throw an error if configured more than once. Consult the documentation of a rule for specific usage instructions.
To prevent running the same rule multiple times, rulecheck will not load a rule twice if it has the *exact* same settings. In the following run, rulecheck will only load the bannedword rule twice, despite it being specified three times.
```bash
rulecheck -c config1.json -c config2.json ./src/**/*
```
Where config 1.json contains:
```JSON
{
"rules": [
{
"name" : "rulepack1.bannedword",
"settings" : {
"word" : "slave"
}
}
]
}
```
And config2.json contains:
```JSON
{
"rules": [
{
"name" : "rulepack1.bannedword",
"settings" : {
"word" : "master"
}
},
{
"name" : "rulepack1.bannedword",
"settings" : {
"word" : "slave"
}
}
]
}
```
Rulecheck's ability to load multiple configuration files and combine them supports a hierarchical configuration structure. For example, a company may provide a rule set and standard config at the organization level. A team may then apply additional rules and config file for all of their projects. Finally each project may have its own config file. Instead of needing to combine all three config files into a single set (and thus force updates to each project when a higher level policy is changed), rule check can be given all three config files and it takes care of combining the configurations.
#### Specifying Files to Process
The files to process and/or the paths rulecheck will search to find files to process are provided on the command line as the last parameter (it must be the last parameter.)
The paths are specified in glob format. Recursive searches using '**' are supported.
In addition, the '?' (any character), '*' (any number of characters), and '[]' (character in range) wildcards are supported.
Multiple files and paths to search can be specified by separating them with spaces. If a space is in a path, enclose the glob in quotation marks.
Alternatively, the files or paths to check can be specified via stdin. Specify '-' as the final parameter to have rulecheck read the list in from stdin.
When searching the paths specified, rulecheck will process any file found with one of the following case-sensitive extensions:
.c, .h, .i, .cpp, .CPP, .cp, .hpp, .cxx, .hxx, .cc, .hh, .c++, .h++, .C, .H, .tcc, .ii, .java, .aj, .cs
To change the list of extensions rulecheck will parse when searching paths, use the -x or --extensions command line option.
Note that extensions are case sensitive and .C and .H are by default treated as C++ source files whereas .c and .h are treated as C source files.
To change the language to extension mapping see the --register-ext option.
#### Specifying Where Rule Scripts Are
Rules are encouraged to be installed onto the python path using a concept known as "rulepacks." This is covered later in this document.
However, there are situations where rules may not be installed to the python path. For example, when a rule is under development or when a rule is
created for a single project and is kept in the same revision control system as the source being checked by the rule. For these situations, one or more
paths to rules may be specified on the command line using the -r option. If more than one path is needed, repeat the option on the command line for
each path.
Note that the name of a rule specified in a configuration file may contain part of the path to the rule script itself. For example, if
```JSON
"name" : "rulepack1.ruleA"
```
is in a configuration file, rulecheck will look for a 'rulepack1/ruleA.py' script to load on the path.
#### Using Ignore List Files
A single ignore list file may be provided to rulecheck via the -i or --ignorelist command line option.
Ignorelists are created by running rulecheck on the source with the -g or --generatehashes command line option,
capturing the rule violations to a file and then pruning that list to the list of violations to be ignored.
More information can be found [later in this document](#ignore_lists).
#### Options For Controlling srcml
* '--srcml' to specify the path to the srcml binary. Use this option if srcml is not on the path.
* '--tabs' specifies number of spaces to use when substituting tabs for spaces. This impacts the column numbers reported in rule messages.
* '--register-ext' specifies language to extension mappings used by srcml.
* '--srcml-args' allows for specification of additional options to srcml. Do not specify --tabs or --register-ext options here as they have their own dedicated options described above. This option must be provided within double quotation marks and must start with a leading space.
#### Other Options
* '--Werror' will promote all reported rule warnings to errors.
* '--tabs' specifies number of spaces to use when substituting tabs for spaces. This impacts the column numbers reported in rule messages.
* '-v' for verbose output.
* '--version' prints the version of rulecheck and then exits.
* '--help' prints a short help message and then exits.
___
### Waiving and Ignoring Rule Violations
There are two methods for telling rulecheck to ignore a rule finding for a particular file, line, or element of a file.
The first is to use comments in the source code file to instruct rulecheck on when to disable and reenable a rule.
The second is to use an "ignore list" which allows one to provide this information without modifying the source files.
However, the ignore list method may require additional maintenance as the source code is changed compared to the use of
comments in the source code.
### Source Comment Commands to Rulecheck
A NORC comment is used to have rulecheck ignore violations reported for the same line the comment is on. The NORCNEXTLINE comment will cause rulecheck to ignore violations on the next line.
Both comments must include an open and closed parenthesis containing either the '\*' character or a comma
separated list of rules to be ignored. Use of the '\*' character will cause all rules to be suppressed.
For example:
```C
// Ignore all violations on the line:
void myFunction1(int a); // NORC(*)
// Ignore all violations on the next line:
// NORCNEXTLINE(*)
void myFunction2(int a);
// Specific rules can be ignored:
// NORCNEXTLINE(myrulepack.rule1, myrulepack.rule2)
void myFunction3(int a);
// Comments after the closing parenthesis may contain any text.
// It is good practice to justify the suppression.
void myFunction4(int a); // NORC(myrulepack.function\_name\_prefix): Function name required for backward compatibility.
```
Note that whitespace between NORC/NORCNEXTLINE and the opening parenthesis are not allowed.
### <a id="ignore_lists"></a>Ignore Lists
- [ ] to be written (Feature is implemented.)
___
### Rulepacks
- [ ] to be written
This section will describe the concept of rulepacks and provide a bit of the technical context for how they work (python path).
___
___
### <a id="design"></a>Design Choices
___
rulecheck intentionally does not support modification of the files parsed. Doing so would require rulecheck to
repeatedly run modified files through all rules until no new log messages were produced. However, a modification
made by one rule could trigger another rule to be violated. Thus, the execution might never terminate. In addition,
many coding standard rules that can be automatically fixed deal strictly with whitespace and there are already many
tools and IDEs that support formatting of whitespace to any imaginable standard.
___
### Resources
___
* [srcml](https://www.srcml.org)
* [srcml source](https://github.com/srcML/srcML)
* [lxml](https://lxml.de/)
| /rulecheck-0.6.1.tar.gz/rulecheck-0.6.1/README.md | 0.492188 | 0.954942 | README.md | pypi |
from __future__ import annotations
from random import randint, shuffle
import string
try:
from secrets import choice
except ImportError:
from random import choice
class PasswordGenerator:
    """Random password generator that follows configurable rules.

    Args:
        length (int): Password length, given as one or two positional values:
            - one value: fixed length
            - two values: minimum length, maximum length
        rules (dict[str, int]): Mapping of character pools (strings) to the
            minimum number of characters that must be drawn from each pool.
            Defaults to at least one lowercase letter, one uppercase letter,
            one digit and one punctuation symbol.
        uniques (int): Minimum number of unique characters in the password.
            0 disables the requirement; -1 requires every character to be
            unique (default: 0).

    Raises:
        ValueError: If ``length``, ``rules`` and ``uniques`` are mutually
            inconsistent.
    """

    # Default pool rules: at least one character from each character class.
    # Kept as a class attribute so ``__init__`` avoids a mutable default
    # argument; it is copied before any instance uses it.
    DEFAULT_RULES = {
        string.ascii_lowercase: 1,
        string.ascii_uppercase: 1,
        string.digits: 1,
        string.punctuation: 1,
    }

    def __init__(
            self, *length: int,
            rules: dict[str, int] | None = None,
            uniques: int = 0):
        if rules is None:
            rules = self.DEFAULT_RULES
        if len(length) == 1 and length[0] > 0:
            max_length = length[0]
            min_length = length[0]
        elif len(length) == 2 and 0 < length[0] < length[1]:
            min_length, max_length = length
        else:
            raise ValueError("'length' is wrong")
        if not all(v >= 0 for v in rules.values()):
            raise ValueError("rule counts must be 0 or greater")
        ruled_length = sum(rules.values())
        if ruled_length > min_length:
            raise ValueError("The ruled length exceeds the minimum length")
        # Remaining length range once the per-pool minimums are accounted for.
        self._min = min_length - ruled_length
        self._max = max_length - ruled_length
        self._rules = rules.copy()
        # Combined alphabet used for the freely chosen (non-ruled) characters.
        letters = ''.join(self._rules.keys())
        self._letters = ''.join(set(letters))
        if uniques == -1:
            self._uniques = max_length
        elif uniques <= max_length:
            self._uniques = uniques
        else:
            raise ValueError(
                "'uniques' must be between -1 and password length")
        max_uniques = len(self._letters)
        if not self._uniques <= max_uniques:
            raise ValueError(f"'uniques' must be less than {max_uniques}")

    def generate(self) -> str:
        """Generate a password according to the configured rules.

        Returns:
            str: Generated password.
        """
        # Pad the ruled characters with a random number of characters drawn
        # from the combined alphabet so the total length falls between the
        # configured minimum and maximum.
        # NOTE(review): if ``self._letters`` happens to equal an existing
        # rule key (possible with a single-character pool), that rule's
        # count is silently overwritten -- confirm this is intended.
        self._rules[self._letters] = randint(self._min, self._max)
        while True:
            chosen = [
                choice(pool)
                for pool, count in self._rules.items()
                for _ in range(count)
            ]
            unique_chars = set(chosen)
            # Accept once the uniqueness requirement is met.  The
            # all-distinct shortcut also guarantees termination when the
            # generated length is smaller than ``self._uniques``.
            if (len(unique_chars) == len(chosen)
                    or len(unique_chars) >= self._uniques):
                shuffle(chosen)
                return ''.join(chosen)

    def bulk_generate(self, count: int = 1, unique: bool = False) -> list[str]:
        """Generate multiple passwords according to the configured rules.

        Args:
            count (int): Number of passwords to generate.
            unique (bool): When True, all returned passwords are distinct.
                May loop for a long time if fewer than ``count`` distinct
                passwords exist for the configured rules.

        Returns:
            list[str]: Generated passwords.
        """
        if unique:
            passwords = set()
            while len(passwords) < count:
                passwords.add(self.generate())
            return list(passwords)
        return [self.generate() for _ in range(count)]
# Getting Started

[](https://pepy.tech/project/rules-engine) 
## Description
Simple rules engine inspired by [Martin Fowler's blog post in
2009](https://www.martinfowler.com/bliki/RulesEngine.html) and
[funnel-rules-engine](https://github.com/funnel-io/funnel-rules-engine).
Full Documentation can be found [here](https://fyndiq.github.io/rules-engine/)
## Requirements
python >= 3.6
## How to install
pip install rules-engine
## How to use
```python
from rules_engine import Rule, RulesEngine, when, then
name = "fyndiq"
RulesEngine(Rule(when(name == "fyndiq"),then(True))).run(name)
>>> True
```
## When
Evaluates a condition.
let's check if a value is `None` and raise an exception.
```python
from rules_engine import Rule, RulesEngine, when
obj = None
def cannot_be_none_error():
return "not a string error"
RulesEngine(Rule(when(obj is None), cannot_be_none_error)).run(obj)
>>> 'not a string error'
```
## Then
Evaluates an action.
```python
from rules_engine import Rule, RulesEngine, when
obj = None
RulesEngine(Rule(when(obj is None), then('not a string error'))).run(obj)
>>> 'not a string error'
```
## Not
The `not_` keyword is a logical operator.
The return value will be `True` if the statement(s) are not `True`, otherwise it will return `False`.
```python
from rules_engine import Rule, RulesEngine, not_
def is_missing(obj):
return not obj
obj="Hello"
RulesEngine(Rule(not_(is_missing), then(True))).run(obj)
>>> True
```
## All
Evaluates multiple conditions and if all conditions are `True` the action is executed
- Example:
- We need to check if an object `obj` is not missing and is of type `list`
```python
from rules_engine import Rule, RulesEngine, all_
def is_missing(obj):
return not obj
def is_a_list(obj):
return isinstance(obj, list)
obj = [1,2]
RulesEngine(Rule(all_(not_(is_missing), is_a_list), then(True))).run(obj)
>>> True
```
## Any
Evaluates multiple conditions and if any of the conditions is `True` the action is executed
- Example:
- We need to check if an object `obj` is a `str` or a `list`
```python
from rules_engine import Rule, RulesEngine, any_
def is_a_str(obj):
return isinstance(obj, str)
def is_a_list(obj):
return isinstance(obj, list)
obj = "Hello"
RulesEngine(Rule(any_(is_a_str, is_a_list), then(True))).run(obj)
>>> True
```
## Run/RunAll
### Run
Runs rules sequentially and executes the action for the first passing condition, then exits.
```python
from rules_engine import Rule, RulesEngine, then
obj = None
def is_integer(value):
return isinstance(value, int)
def is_string(value):
return isinstance(value, str)
value=1234
RulesEngine(
Rule(is_integer, then("integer")),
Rule(is_string, then("string")),
).run(value)
>>> "integer"
```
Since the first rule satisfies the conditions the rules engine will go no further
### RunAll
Evaluates all conditions and adds them to a list
```python
from rules_engine import Rule, RulesEngine, then
def is_integer(value):
return isinstance(value, int)
def is_string(value):
return isinstance(value, str)
def is_gr_3_chars(value):
return len(value) > 3
value="Hello"
RulesEngine(
Rule(is_integer, then("integer")),
Rule(is_string, then("string")),
    Rule(is_gr_3_chars, then("greater than 3 characters")),
).run_all(value)
>>> ["string", "greater than 3 characters"]
```
# Full Example
In order for an article to be completed it must have the following rules
1. stock is > 0.
2. image url is present.
3. price exists.
```python
from collections import namedtuple
from typing import Union
from rules_engine import Otherwise, Rule, RulesEngine, then
Article = namedtuple("Article", "title price image_url stock")
article = Article(title="Iphone Case", price=1000, image_url="http://localhost/image", stock=None)
def produce_article_completed_event():
return None
def article_controller(article: Article):
if not article.stock:
return False
if not article.price:
raise ValueError("Article price missing")
if not article.image_url:
raise ValueError("Article image_url missing")
return produce_article_completed_event()
```
To be able to change to rules engine, we need to split the conditions and actions.
Rules engine is pretty simple if the condition is `True`, its corresponding action will be executed.
```python
### Conditions
def no_article_stock(article):
return not article.stock
def no_article_price(article):
return not article.price
def no_article_image_url(article):
return not article.image_url
### Actions
def article_price_missing_error(article):
raise ValueError("Article price missing")
def article_image_missing_error(article):
raise ValueError("Article image_url missing")
### Rules Engine
def article_complete_rules(article):
RulesEngine(
Rule(no_article_stock, then(False)),
Rule(no_article_price, article_price_missing_error),
Rule(no_article_image_url, article_image_missing_error),
Otherwise(produce_article_completed_event()),
).run(article)
```
| /rules-engine-0.2.5.tar.gz/rules-engine-0.2.5/README.md | 0.596198 | 0.907845 | README.md | pypi |
rules
^^^^^
``rules`` is a tiny but powerful app providing object-level permissions to
Django, without requiring a database. At its core, it is a generic framework
for building rule-based systems, similar to `decision trees`_. It can also be
used as a standalone library in other contexts and frameworks.
.. image:: https://img.shields.io/github/workflow/status/dfunckt/django-rules/CI/master
:target: https://github.com/dfunckt/django-rules/actions
.. image:: https://coveralls.io/repos/dfunckt/django-rules/badge.svg
:target: https://coveralls.io/r/dfunckt/django-rules
.. image:: https://img.shields.io/pypi/v/rules.svg
:target: https://pypi.org/project/rules/
.. image:: https://img.shields.io/pypi/pyversions/rules.svg
:target: https://pypi.org/project/rules/
.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
:target: https://github.com/psf/black
.. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
:target: https://github.com/pre-commit/pre-commit
.. _decision trees: http://wikipedia.org/wiki/Decision_tree
Features
========
``rules`` has got you covered. ``rules`` is:
- **Documented**, **tested**, **reliable** and **easy to use**.
- **Versatile**. Decorate callables to build complex graphs of predicates.
Predicates can be any type of callable -- simple functions, lambdas,
methods, callable class objects, partial functions, decorated functions,
anything really.
- **A good Django citizen**. Seamless integration with Django views,
templates and the Admin for testing for object-level permissions.
- **Efficient** and **smart**. No need to mess around with a database to figure
out whether John really wrote that book.
- **Simple**. Dive in the code. You'll need 10 minutes to figure out how it
works.
- **Powerful**. ``rules`` comes complete with advanced features, such as
invocation context and storage for arbitrary data, skipping evaluation of
predicates under specific conditions, logging of evaluated predicates and more!
Table of Contents
=================
- `Requirements`_
- `Upgrading from 2.x`_
- `Upgrading from 1.x`_
- `How to install`_
- `Configuring Django`_
- `Using Rules`_
- `Creating predicates`_
- `Setting up rules`_
- `Combining predicates`_
- `Using Rules with Django`_
- `Permissions`_
- `Permissions in models`_
- `Permissions in views`_
- `Permissions and rules in templates`_
- `Permissions in the Admin`_
- `Permissions in Django Rest Framework`_
- `Advanced features`_
- `Custom rule sets`_
- `Invocation context`_
- `Binding "self"`_
- `Skipping predicates`_
- `Logging predicate evaluation`_
- `Best practices`_
- `API Reference`_
- `Licence`_
Requirements
============
``rules`` requires Python 3.7 or newer. The last version to support Python 2.7
is ``rules`` 2.2. It can optionally integrate with Django, in which case
requires Django 2.2 or newer.
*Note*: At any given moment in time, ``rules`` will maintain support for all
currently supported Django versions, while dropping support for those versions
that reached end-of-life in minor releases. See the `Supported Versions`_
section on Django Project website for the current state and timeline.
.. _Supported Versions: https://www.djangoproject.com/download/#supported-versions
Upgrading from 2.x
==================
There are no significant changes between ``rules`` 2.x and 3.x except dropping
support for Python 2, so before upgrading to 3.x you just need to make sure
you're running a supported Python 3 version.
Upgrading from 1.x
==================
* Support for Python 2.6 and 3.3, and Django versions before 1.11 has been
dropped.
* The ``SkipPredicate`` exception and ``skip()`` method of ``Predicate``,
that were used to signify that a predicate should be skipped, have been
removed. You may return ``None`` from your predicate to achieve this.
* The APIs to replace a rule's predicate have been renamed and their
  behaviour changed. ``replace_rule`` and ``replace_perm`` functions and
  ``replace_rule`` method of ``RuleSet`` have been renamed to ``set_rule``,
  ``set_perm`` and ``RuleSet.set_rule`` respectively. The old behaviour was
  to raise a ``KeyError`` if a rule by the given name did not exist. Since
  version 2.0 this has changed and you can safely use ``set_*`` to set a
  rule's predicate without having to ensure the rule exists first.
How to install
==============
Using pip:
.. code:: bash
$ pip install rules
Manually:
.. code:: bash
$ git clone https://github.com/dfunckt/django-rules.git
$ cd django-rules
$ python setup.py install
Run tests with:
.. code:: bash
$ ./runtests.sh
You may also want to read `Best practices`_ for general advice on how to
use ``rules``.
Configuring Django
------------------
Add ``rules`` to ``INSTALLED_APPS``:
.. code:: python
INSTALLED_APPS = (
# ...
'rules',
)
Add the authentication backend:
.. code:: python
AUTHENTICATION_BACKENDS = (
'rules.permissions.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend',
)
Using Rules
===========
``rules`` is based on the idea that you maintain a dict-like object that maps
string keys used as identifiers of some kind, to callables, called
*predicates*. This dict-like object is actually an instance of ``RuleSet`` and
the predicates are instances of ``Predicate``.
Creating predicates
-------------------
Let's ignore rule sets for a moment and go ahead and define a predicate. The
easiest way is with the ``@predicate`` decorator:
.. code:: python
>>> @rules.predicate
>>> def is_book_author(user, book):
... return book.author == user
...
>>> is_book_author
<Predicate:is_book_author object at 0x10eeaa490>
This predicate will return ``True`` if the book's author is the given user,
``False`` otherwise.
Predicates can be created from any callable that accepts anything from zero to
two positional arguments:
* ``fn(obj, target)``
* ``fn(obj)``
* ``fn()``
This is their generic form. If seen from the perspective of authorization in
Django, the equivalent signatures are:
* ``fn(user, obj)``
* ``fn(user)``
* ``fn()``
Predicates can do pretty much anything with the given arguments, but must
always return ``True`` if the condition they check is true, ``False``
otherwise. ``rules`` comes with several predefined predicates that you may
read about later on in `API Reference`_, that are mostly useful when dealing
with `authorization in Django`_.
Setting up rules
----------------
Let's pretend that we want to let authors edit or delete their books, but not
books written by other authors. So, essentially, what determines whether an
author *can edit* or *can delete* a given book is *whether they are its
author*.
In ``rules``, such requirements are modelled as *rules*. A *rule* is a map of
a unique identifier (eg. "can edit") to a predicate. Rules are grouped
together into a *rule set*. ``rules`` has two predefined rule sets:
* A default rule set storing shared rules.
* Another rule set storing rules that serve as permissions in a Django
context.
So, let's define our first couple of rules, adding them to the shared rule
set. We can use the ``is_book_author`` predicate we defined earlier:
.. code:: python
>>> rules.add_rule('can_edit_book', is_book_author)
>>> rules.add_rule('can_delete_book', is_book_author)
Assuming we've got some data, we can now test our rules:
.. code:: python
>>> from django.contrib.auth.models import User
>>> from books.models import Book
>>> guidetodjango = Book.objects.get(isbn='978-1-4302-1936-1')
>>> guidetodjango.author
<User: adrian>
>>> adrian = User.objects.get(username='adrian')
>>> rules.test_rule('can_edit_book', adrian, guidetodjango)
True
>>> rules.test_rule('can_delete_book', adrian, guidetodjango)
True
Nice... but not awesome.
Combining predicates
--------------------
Predicates by themselves are not so useful -- not more useful than any other
function would be. Predicates, however, can be combined using binary operators
to create more complex ones. Predicates support the following operators:
* ``P1 & P2``: Returns a new predicate that returns ``True`` if *both*
predicates return ``True``, otherwise ``False``. If P1 returns ``False``,
P2 will not be evaluated.
* ``P1 | P2``: Returns a new predicate that returns ``True`` if *any* of the
predicates returns ``True``, otherwise ``False``. If P1 returns ``True``,
P2 will not be evaluated.
* ``P1 ^ P2``: Returns a new predicate that returns ``True`` if one of the
predicates returns ``True`` and the other returns ``False``, otherwise
``False``.
* ``~P``: Returns a new predicate that returns the negated result of the
original predicate.
Suppose the requirement for allowing a user to edit a given book was for them
to be either the book's author, or a member of the "editors" group. Allowing
users to delete a book should still be determined by whether the user is the
book's author.
With ``rules`` that's easy to implement. We'd have to define another
predicate, that would return ``True`` if the given user is a member of the
"editors" group, ``False`` otherwise. The built-in ``is_group_member`` factory
will come in handy:
.. code:: python
>>> is_editor = rules.is_group_member('editors')
>>> is_editor
<Predicate:is_group_member:editors object at 0x10eee1350>
We could combine it with the ``is_book_author`` predicate to create a new one
that checks for either condition:
.. code:: python
>>> is_book_author_or_editor = is_book_author | is_editor
>>> is_book_author_or_editor
<Predicate:(is_book_author | is_group_member:editors) object at 0x10eee1390>
We can now update our ``can_edit_book`` rule:
.. code:: python
>>> rules.set_rule('can_edit_book', is_book_author_or_editor)
>>> rules.test_rule('can_edit_book', adrian, guidetodjango)
True
>>> rules.test_rule('can_delete_book', adrian, guidetodjango)
True
Let's see what happens with another user:
.. code:: python
>>> martin = User.objects.get(username='martin')
>>> list(martin.groups.values_list('name', flat=True))
['editors']
>>> rules.test_rule('can_edit_book', martin, guidetodjango)
True
>>> rules.test_rule('can_delete_book', martin, guidetodjango)
False
Awesome.
So far, we've only used the underlying, generic framework for defining and
testing rules. This layer is not at all specific to Django; it may be used in
any context. There's actually no import of anything Django-related in the
whole app (except in the ``rules.templatetags`` module). ``rules`` however can
integrate tightly with Django to provide authorization.
.. _authorization in Django:
Using Rules with Django
=======================
``rules`` is able to provide object-level permissions in Django. It comes
with an authorization backend and a couple template tags for use in your
templates.
Permissions
-----------
In ``rules``, permissions are a specialised type of rules. You still define
rules by creating and combining predicates. These rules however, must be added
to a permissions-specific rule set that comes with ``rules`` so that they can
be picked up by the ``rules`` authorization backend.
Creating permissions
++++++++++++++++++++
The convention for naming permissions in Django is ``app_label.action_object``,
and we like to adhere to that. Let's add rules for the ``books.change_book``
and ``books.delete_book`` permissions:
.. code:: python
>>> rules.add_perm('books.change_book', is_book_author | is_editor)
>>> rules.add_perm('books.delete_book', is_book_author)
See the difference in the API? ``add_perm`` adds to a permissions-specific
rule set, whereas ``add_rule`` adds to a default shared rule set. It's
important to know however, that these two rule sets are separate, meaning that
adding a rule in one does not make it available to the other.
Checking for permission
+++++++++++++++++++++++
Let's go ahead and check whether ``adrian`` has change permission to the
``guidetodjango`` book:
.. code:: python
>>> adrian.has_perm('books.change_book', guidetodjango)
False
When you call the ``User.has_perm`` method, Django asks each backend in
``settings.AUTHENTICATION_BACKENDS`` whether a user has the given permission
for the object. When queried for object permissions, Django's default
authentication backend always returns ``False``. ``rules`` comes with an
authorization backend, that is able to provide object-level permissions by
looking into the permissions-specific rule set.
Let's add the ``rules`` authorization backend in settings:
.. code:: python
AUTHENTICATION_BACKENDS = (
'rules.permissions.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend',
)
Now, checking again gives ``adrian`` the required permissions:
.. code:: python
>>> adrian.has_perm('books.change_book', guidetodjango)
True
>>> adrian.has_perm('books.delete_book', guidetodjango)
True
>>> martin.has_perm('books.change_book', guidetodjango)
True
>>> martin.has_perm('books.delete_book', guidetodjango)
False
**NOTE:** Calling `has_perm` on a superuser will ALWAYS return `True`.
Permissions in models
---------------------
**NOTE:** The features described in this section work on Python 3+ only.
It is common to have a set of permissions for a model, like what Django offers with
its default model permissions (such as *add*, *change* etc.). When using ``rules``
as the permission checking backend, you can declare object-level permissions for
any model in a similar way, using a new ``Meta`` option.
First, you need to switch your model's base and metaclass to the slightly extended
versions provided in ``rules.contrib.models``. There are several classes and mixins
you can use, depending on whether you're already using a custom base and/or metaclass
for your models or not. The extensions are very slim and don't affect the models'
behavior in any way other than making it register permissions.
* If you're using the stock ``django.db.models.Model`` as base for your models,
simply switch over to ``RulesModel`` and you're good to go.
* If you already have a custom base class adding common functionality to your models,
add ``RulesModelMixin`` to the classes it inherits from and set ``RulesModelBase``
as its metaclass, like so::
from django.db.models import Model
from rules.contrib.models import RulesModelBase, RulesModelMixin
class MyModel(RulesModelMixin, Model, metaclass=RulesModelBase):
...
* If you're using a custom metaclass for your models, you'll already know how to
make it inherit from ``RulesModelBaseMixin`` yourself.
Then, create your models like so, assuming you're using ``RulesModel`` as base
directly::
import rules
from rules.contrib.models import RulesModel
class Book(RulesModel):
class Meta:
rules_permissions = {
"add": rules.is_staff,
"read": rules.is_authenticated,
}
This would be equivalent to the following calls::
rules.add_perm("app_label.add_book", rules.is_staff)
rules.add_perm("app_label.read_book", rules.is_authenticated)
There are methods in ``RulesModelMixin`` that you can overwrite in order to customize
how a model's permissions are registered. See the documented source code for details
if you need this.
Of special interest is the ``get_perm`` classmethod of ``RulesModelMixin``, which can
be used to convert a permission type to the corresponding full permission name. If
you need to query for some type of permission on a given model programmatically,
this is handy::
if user.has_perm(Book.get_perm("read")):
...
Permissions in views
--------------------
``rules`` comes with a set of view decorators to help you enforce
authorization in your views.
Using the function-based view decorator
+++++++++++++++++++++++++++++++++++++++
For function-based views you can use the ``permission_required`` decorator:
.. code:: python
from django.shortcuts import get_object_or_404
from rules.contrib.views import permission_required
from posts.models import Post
def get_post_by_pk(request, post_id):
return get_object_or_404(Post, pk=post_id)
@permission_required('posts.change_post', fn=get_post_by_pk)
def post_update(request, post_id):
# ...
Usage is straight-forward, but there's one thing in the example above that
stands out and this is the ``get_post_by_pk`` function. This function, given
the current request and all arguments passed to the view, is responsible for
fetching and returning the object to check permissions against -- i.e. the
``Post`` instance with PK equal to the given ``post_id`` in the example.
This specific use-case is quite common so, to save you some typing, ``rules``
comes with a generic helper function that you can use to do this declaratively.
The example below is equivalent to the one above:
.. code:: python
from rules.contrib.views import permission_required, objectgetter
from posts.models import Post
@permission_required('posts.change_post', fn=objectgetter(Post, 'post_id'))
def post_update(request, post_id):
# ...
For more information on the decorator and helper function, refer to the
``rules.contrib.views`` module.
Using the class-based view mixin
++++++++++++++++++++++++++++++++
Django includes a set of access mixins that you can use in your class-based
views to enforce authorization. ``rules`` extends this framework to provide
object-level permissions via a mixin, ``PermissionRequiredMixin``.
The following example will automatically test for permission against the
instance returned by the view's ``get_object`` method:
.. code:: python
from django.views.generic.edit import UpdateView
from rules.contrib.views import PermissionRequiredMixin
from posts.models import Post
class PostUpdate(PermissionRequiredMixin, UpdateView):
model = Post
permission_required = 'posts.change_post'
You can customise the object either by overriding ``get_object`` or
``get_permission_object``.
For more information refer to the `Django documentation`_ and the
``rules.contrib.views`` module.
.. _Django documentation: https://docs.djangoproject.com/en/1.9/topics/auth/default/#limiting-access-to-logged-in-users
Checking permission automatically based on view type
++++++++++++++++++++++++++++++++++++++++++++++++++++
If you use the mechanisms provided by ``rules.contrib.models`` to register permissions
for your models as described in `Permissions in models`_, there's another convenient
mixin for class-based views available for you.
``rules.contrib.views.AutoPermissionRequiredMixin`` can recognize the type of view
it's used with and check for the corresponding permission automatically.
This example view would, without any further configuration, automatically check for
the ``"posts.change_post"`` permission, given that the app label is ``"posts"``::
from django.views.generic import UpdateView
from rules.contrib.views import AutoPermissionRequiredMixin
from posts.models import Post
class UpdatePostView(AutoPermissionRequiredMixin, UpdateView):
model = Post
By default, the generic CRUD views from ``django.views.generic`` are mapped to the
native Django permission types (*add*, *change*, *delete* and *view*). However,
the pre-defined mappings can be extended, changed or replaced altogether when
subclassing ``AutoPermissionRequiredMixin``. See the fully documented source code
for details on how to do that properly.
Permissions and rules in templates
----------------------------------
``rules`` comes with two template tags to allow you to test for rules and
permissions in templates.
Add ``rules`` to your ``INSTALLED_APPS``:
.. code:: python
INSTALLED_APPS = (
# ...
'rules',
)
Then, in your template::
{% load rules %}
{% has_perm 'books.change_book' author book as can_edit_book %}
{% if can_edit_book %}
...
{% endif %}
{% test_rule 'has_super_feature' user as has_super_feature %}
{% if has_super_feature %}
...
{% endif %}
Permissions in the Admin
------------------------
If you've set up ``rules`` to be used with permissions in Django, you're almost
set to also use ``rules`` to authorize any add/change/delete actions in the
Admin. The Admin asks for *five* different permissions, depending on action:
- ``<app_label>.add_<modelname>``
- ``<app_label>.view_<modelname>``
- ``<app_label>.change_<modelname>``
- ``<app_label>.delete_<modelname>``
- ``<app_label>``
*Note:* view permission is new in Django v2.1 and should not be added in versions before that.
The first four are obvious. The fifth is the required permission for an app
to be displayed in the Admin's "dashboard". Overriding it does not restrict access to the add,
change or delete views. Here are some rules for our imaginary ``books`` app as an example:
.. code:: python
>>> rules.add_perm('books', rules.always_allow)
>>> rules.add_perm('books.add_book', is_staff)
>>> rules.add_perm('books.view_book', is_staff | has_secret_access_code)
>>> rules.add_perm('books.change_book', is_staff)
>>> rules.add_perm('books.delete_book', is_staff)
Django Admin does not support object-permissions, in the sense that it will
never ask for permission to perform an action *on an object*, only whether a
user is allowed to act on (*any*) instances of a model.
If you'd like to tell Django whether a user has permissions on a specific
object, you'd have to override the following methods of a model's
``ModelAdmin``:
- ``has_view_permission(user, obj=None)``
- ``has_change_permission(user, obj=None)``
- ``has_delete_permission(user, obj=None)``
``rules`` comes with a custom ``ModelAdmin`` subclass,
``rules.contrib.admin.ObjectPermissionsModelAdmin``, that overrides these
methods to pass on the edited model instance to the authorization backends,
thus enabling permissions per object in the Admin:
.. code:: python
# books/admin.py
from django.contrib import admin
from rules.contrib.admin import ObjectPermissionsModelAdmin
from .models import Book
class BookAdmin(ObjectPermissionsModelAdmin):
pass
admin.site.register(Book, BookAdmin)
Now this allows you to specify permissions like this:
.. code:: python
>>> rules.add_perm('books', rules.always_allow)
>>> rules.add_perm('books.add_book', has_author_profile)
>>> rules.add_perm('books.change_book', is_book_author_or_editor)
>>> rules.add_perm('books.delete_book', is_book_author)
To preserve backwards compatibility, Django will ask for either *view* or
*change* permission. For maximum flexibility, ``rules`` behaves subtly
different: ``rules`` will ask for the change permission if and only if no rule
exists for the view permission.
Permissions in Django Rest Framework
------------------------------------
Similar to ``rules.contrib.views.AutoPermissionRequiredMixin``, there is a
``rules.contrib.rest_framework.AutoPermissionViewSetMixin`` for viewsets in Django
Rest Framework. The difference is that it doesn't derive permission from the type
of view but from the API action (*create*, *retrieve* etc.) that's tried to be
performed. Of course, it also requires you to declare your models as described in
`Permissions in models`_.
Here is a possible ``ModelViewSet`` for the ``Post`` model with fully automated CRUD
permission checking::
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ModelViewSet
from rules.contrib.rest_framework import AutoPermissionViewSetMixin
from posts.models import Post
class PostSerializer(ModelSerializer):
class Meta:
model = Post
fields = "__all__"
class PostViewSet(AutoPermissionViewSetMixin, ModelViewSet):
queryset = Post.objects.all()
serializer_class = PostSerializer
By default, the CRUD actions of ``ModelViewSet`` are mapped to the native
Django permission types (*add*, *change*, *delete* and *view*). The ``list``
action has no permission checking enabled. However, the pre-defined mappings
can be extended, changed or replaced altogether when using (or subclassing)
``AutoPermissionViewSetMixin``. Custom API actions defined via the ``@action``
decorator may then be mapped as well. See the fully documented source code for
details on how to properly customize the default behavior.
Advanced features
=================
Custom rule sets
----------------
You may create as many rule sets as you need:
.. code:: python
>>> features = rules.RuleSet()
And manipulate them by adding, removing, querying and testing rules:
.. code:: python
>>> features.rule_exists('has_super_feature')
False
>>> is_special_user = rules.is_group_member('special')
>>> features.add_rule('has_super_feature', is_special_user)
>>> 'has_super_feature' in features
True
>>> features['has_super_feature']
<Predicate:is_group_member:special object at 0x10eeaa500>
>>> features.test_rule('has_super_feature', adrian)
True
>>> features.remove_rule('has_super_feature')
Note however that custom rule sets are *not available* in Django templates --
you need to provide integration yourself.
Invocation context
------------------
A new context is created as a result of invoking ``Predicate.test()`` and is
only valid for the duration of the invocation. A context is a simple ``dict``
that you can use to store arbitrary data, (eg. caching computed values,
setting flags, etc.), that can be used by predicates later on in the chain.
Inside a predicate function it can be used like so:
.. code:: python
>>> @predicate
... def mypred(a, b):
... value = compute_expensive_value(a)
... mypred.context['value'] = value
... return True
Other predicates can later use stored values:
.. code:: python
>>> @predicate
... def myotherpred(a, b):
... value = myotherpred.context.get('value')
... if value is not None:
... return do_something_with_value(value)
... else:
... return do_something_without_value()
``Predicate.context`` provides a single ``args`` attribute that contains the
arguments as given to ``test()`` at the beginning of the invocation.
Binding "self"
--------------
In a predicate's function body, you can refer to the predicate instance itself
by its name, eg. ``is_book_author``. Passing ``bind=True`` as a keyword
argument to the ``predicate`` decorator will let you refer to the predicate
with ``self``, which is more convenient. Binding ``self`` is just syntactic
sugar. As a matter of fact, the following two are equivalent:
.. code:: python
>>> @predicate
... def is_book_author(user, book):
... if is_book_author.context.args:
... return user == book.author
... return False
>>> @predicate(bind=True)
... def is_book_author(self, user, book):
... if self.context.args:
... return user == book.author
... return False
Skipping predicates
-------------------
You may skip evaluation by returning ``None`` from your predicate:
.. code:: python
>>> @predicate(bind=True)
... def is_book_author(self, user, book):
... if len(self.context.args) > 1:
... return user == book.author
... else:
... return None
Returning ``None`` signifies that the predicate need not be evaluated, thus
leaving the predicate result up to that point unchanged.
Logging predicate evaluation
----------------------------
``rules`` can optionally be configured to log debug information as rules are
evaluated to help with debugging your predicates. Messages are sent at the
DEBUG level to the ``'rules'`` logger. The following `dictConfig`_ configures
a console logger (place this in your project's `settings.py` if you're using
`rules` with Django):
.. code:: python
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'rules': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
When this logger is active each individual predicate will have a log message
printed when it is evaluated.
.. _dictConfig: https://docs.python.org/3.6/library/logging.config.html#logging-config-dictschema
Best practices
==============
Before you can test for rules, these rules must be registered with a rule set,
and for this to happen the modules containing your rule definitions must be
imported.
For complex projects with several predicates and rules, it may not be
practical to define all your predicates and rules inside one module. It might
be best to split them among any sub-components of your project. In a Django
context, these sub-components could be the apps for your project.
On the other hand, because importing predicates from all over the place in
order to define rules can lead to circular imports and broken hearts, it's
best to further split predicates and rules in different modules.
``rules`` may optionally be configured to autodiscover ``rules.py`` modules in
your apps and import them at startup. To have ``rules`` do so, just edit your
``INSTALLED_APPS`` setting:
.. code:: python
INSTALLED_APPS = (
# replace 'rules' with:
'rules.apps.AutodiscoverRulesConfig',
)
**Note:** On Python 2, you must also add the following to the top of your
``rules.py`` file, or you'll get import errors trying to import ``rules``
itself:
.. code:: python
from __future__ import absolute_import
API Reference
=============
The core APIs are accessible from the root ``rules`` module. Django-specific
functionality for the Admin and views is available from ``rules.contrib``.
Class ``rules.Predicate``
-------------------------
You create ``Predicate`` instances by passing in a callable:
.. code:: python
>>> def is_book_author(user, book):
... return book.author == user
...
>>> pred = Predicate(is_book_author)
>>> pred
<Predicate:is_book_author object at 0x10eeaa490>
You may optionally provide a different name for the predicate that is used
when inspecting it:
.. code:: python
>>> pred = Predicate(is_book_author, name='another_name')
>>> pred
<Predicate:another_name object at 0x10eeaa490>
Also, you may optionally provide ``bind=True`` in order to be able to access
the predicate instance with ``self``:
.. code:: python
>>> def is_book_author(self, user, book):
... if self.context.args:
... return user == book.author
... return False
...
>>> pred = Predicate(is_book_author, bind=True)
>>> pred
<Predicate:is_book_author object at 0x10eeaa490>
Instance methods
++++++++++++++++
``test(obj=None, target=None)``
Returns the result of calling the passed in callable with zero, one or two
positional arguments, depending on how many it accepts.
Class ``rules.RuleSet``
-----------------------
``RuleSet`` extends Python's built-in `dict`_ type. Therefore, you may create
and use a rule set any way you'd use a dict.
.. _dict: http://docs.python.org/library/stdtypes.html#mapping-types-dict
Instance methods
++++++++++++++++
``add_rule(name, predicate)``
Adds a predicate to the rule set, assigning it to the given rule name.
Raises ``KeyError`` if another rule with that name already exists.
``set_rule(name, predicate)``
Set the rule with the given name, regardless if one already exists.
``remove_rule(name)``
Remove the rule with the given name. Raises ``KeyError`` if a rule with
that name does not exist.
``rule_exists(name)``
Returns ``True`` if a rule with the given name exists, ``False`` otherwise.
``test_rule(name, obj=None, target=None)``
Returns the result of calling ``predicate.test(obj, target)`` where
``predicate`` is the predicate for the rule with the given name. Returns
``False`` if a rule with the given name does not exist.
Decorators
----------
``@predicate``
Decorator that creates a predicate out of any callable:
.. code:: python
>>> @predicate
... def is_book_author(user, book):
... return book.author == user
...
>>> is_book_author
<Predicate:is_book_author object at 0x10eeaa490>
Customising the predicate name:
.. code:: python
>>> @predicate(name='another_name')
... def is_book_author(user, book):
... return book.author == user
...
>>> is_book_author
<Predicate:another_name object at 0x10eeaa490>
Binding ``self``:
.. code:: python
>>> @predicate(bind=True)
... def is_book_author(self, user, book):
... if 'user_has_special_flag' in self.context:
... return self.context['user_has_special_flag']
... return book.author == user
Predefined predicates
---------------------
``always_allow()``, ``always_true()``
Always returns ``True``.
``always_deny()``, ``always_false()``
Always returns ``False``.
``is_authenticated(user)``
Returns the result of calling ``user.is_authenticated()``. Returns
``False`` if the given user does not have an ``is_authenticated`` method.
``is_superuser(user)``
Returns the result of calling ``user.is_superuser``. Returns ``False``
if the given user does not have an ``is_superuser`` property.
``is_staff(user)``
Returns the result of calling ``user.is_staff``. Returns ``False`` if the
given user does not have an ``is_staff`` property.
``is_active(user)``
Returns the result of calling ``user.is_active``. Returns ``False`` if the
given user does not have an ``is_active`` property.
``is_group_member(*groups)``
Factory that creates a new predicate that returns ``True`` if the given
user is a member of *all* the given groups, ``False`` otherwise.
Shortcuts
---------
Managing the shared rule set
++++++++++++++++++++++++++++
``add_rule(name, predicate)``
Adds a rule to the shared rule set. See ``RuleSet.add_rule``.
``set_rule(name, predicate)``
Set the rule with the given name from the shared rule set. See
``RuleSet.set_rule``.
``remove_rule(name)``
Remove a rule from the shared rule set. See ``RuleSet.remove_rule``.
``rule_exists(name)``
Returns whether a rule exists in the shared rule set. See
``RuleSet.rule_exists``.
``test_rule(name, obj=None, target=None)``
Tests the rule with the given name. See ``RuleSet.test_rule``.
Managing the permissions rule set
+++++++++++++++++++++++++++++++++
``add_perm(name, predicate)``
Adds a rule to the permissions rule set. See ``RuleSet.add_rule``.
``set_perm(name, predicate)``
Replace a rule from the permissions rule set. See ``RuleSet.set_rule``.
``remove_perm(name)``
Remove a rule from the permissions rule set. See ``RuleSet.remove_rule``.
``perm_exists(name)``
Returns whether a rule exists in the permissions rule set. See
``RuleSet.rule_exists``.
``has_perm(name, user=None, obj=None)``
Tests the rule with the given name. See ``RuleSet.test_rule``.
Licence
=======
``django-rules`` is distributed under the MIT licence.
Copyright (c) 2014 Akis Kesoglou
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
| /rules-3.3.tar.gz/rules-3.3/README.rst | 0.936198 | 0.689482 | README.rst | pypi |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[INSERT CONTACT EMAIL].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /ruleskit-1.0.125.tar.gz/ruleskit-1.0.125/CODE_OF_CONDUCT.md | 0.574872 | 0.684277 | CODE_OF_CONDUCT.md | pypi |
# Rules protocol
Core smart contracts of the Rules protocol.
- for marketplace contracts, see [marketplace](https://github.com/ruleslabs/marketplace) repository.
- for pack opening contracts, see [pack-opener](https://github.com/ruleslabs/pack-opener) repository.
## Overview
Rules protocol is composed of 4 contracts interacting with each other.
`RulesData`, `RulesCards` and `RulesPacks` are responsible for all the logic and data storage.
`RulesTokens` implements the ERC-1155 standard, and uses other contracts logic to manage its tokens.
### RulesData
`RulesData` is responsible for holding the most basic information; currently its only utility is to store artists' names.
#### @externals
##### `createArtist`:
Create an artists if it does not already exist.
- ###### parameters
- `artist_name: Uint256`: the artist name (must be at most 27 characters long)
### RulesCards
`RulesCard` handles cards, their scarcities, their supply and can be used to stop the production for the given scarcity level of a season.
#### cards, card models, scarcities and seasons
A card is a struct composed like bellow, and the main purpose of this contract is to store cards.
```cairo
struct Card:
member model: CardModel
member serial_number: felt # uint24
end
struct CardModel:
member artist_name: Uint256
member season: felt # uint8
member scarcity: felt # uint8
end
```
As you can see, each card is associated to a card model, itself associated to a season and a scarcity level.
Scarcity levels are used to control the max supply of a card model and exists in the context of a season, which means a scarcity `n` can have a different max supply from one season to another.
For each possible season, it exists by default the scarcity level `0`, the only scarcity level with an infinite max supply.
#### @externals
##### `addScarcityForSeason`:
Add a new scarcity level to a given season.
- ###### parameters
- `season: felt`: the season for which to create the scarcity level.
- `supply: felt`: the max supply of the scarcity level to create.
##### `stopProductionForSeasonAndScarcity`:
Definitively Stop the production of new cards for the scarcity level of a given season.
- ###### parameters
- `season: felt`
- `scarcity: felt`
##### `createCard`
Store card information in a `Uint256`, and use it as a card identifier.
If the card information is invalid, if the provided scarcity does not allow more card creation, or if the card already exists, the transaction will fail.
- ###### parameters
- `card: Card`:
- `model: CardModel`:
- `artist_name: Uint256`: must exist in `RulesData`
- `season: felt`: must be non null and fit in 8 bits
- `scarcity: felt`: must fit in 8 bits, and exist in the given season
- `serial_number: felt`: must be non null and fit in 24 bits
- ###### return value
- `card_id: Uint256` the card identifier
##### `packCardModel`
Increase the packed supply of a card model, in other terms, the quantity of cards in packs. If not enough supply is available for the card model, or if the card model is invalid, the transaction will fail.
- ###### parameters
- `pack_card_model: PackCardModel`:
- `card_model: CardModel`
- `quantity: felt`: the amount of cards to pack
### RulesPacks
#### Packs and common packs
A pack is a list of card models with, optionally, a quantity for each card model, and a number of cards per minted pack. According to the card models quantities and the number of cards per pack, the contract will deduce the pack max supply.
`pack max supply = sum of card models quantities / number of cards per pack`
Given that [cards created with the scarcity level `0` have an unlimited max supply](#cards-card-models-scarcities-and-seasons), it allows to create packs with only card models of scarcity level `0`, and so, to create packs with an unlimited max supply as well.
We are calling these packs **common packs**
##### `createPack`
Create a new pack with a deduced max supply, card models with any valid season and scarcity levels can be provided as long as the available supply of these card models is enough regarding to the pack card models quantities
```cairo
struct PackCardModel:
member card_model: CardModel
member quantity: felt
end
```
- ###### parameters
- `cards_per_pack: felt`
- `pack_card_models: PackCardModel*`
- `metadata: Metadata`: see [metadata section](#metadata)
- ###### return value
- `pack_id: Uint256`: id of the newly created pack. For packs with a limited max supply the nth created pack have the id `Uint256(low=n, high=0)`
##### `createCommonPack`
Create a new common pack, with all the present and future card models of scarcity level `0` of a given season.
- ###### parameters
- `cards_per_pack: felt`
- `season: felt`: must be a valid season and no other common pack of the same season must exist.
- `metadata: Metadata`: see [metadata section](#metadata)
- ###### return value
- `pack_id: Uint256`: id of the newly created pack. For common packs the id is such that `Uint256(low=0, high=season)`
### RulesTokens
`RulesToken` is the protocol's keystone, this ERC-1155 contract handles all the tokens logic.
Rules tokens are indivisible, cards have a max supply of 1 (basically, it's NFTs), and packs have a max supply calculated by the [`RulesPacks`](#rulespacks) contract.
##### `createAndMintCard`
Create a card in [`RulesCards`](#rulescards) and mint its associated token to the recipient address.
- ###### parameters
- `card: Card`: the card to create and mint, it must be unique and not minted yet.
- `metadata: Metadata*`: see [metadata section](#metadata).
- `to: felt`: address to send the newly minted card token.
##### `openPackTo`
Pack opening is the mechanism by which a pack token will be burned and `cards_per_pack` card tokens will be minted.
The transfer of cards to the recipient address is not safe; this is done to avoid a reentrancy attack which could allow a malicious
contract to make the pack opening fail during the transfer acceptance check, if the selected cards do not suit it.
Also, to ensure the impossibility of invalidating an opening transaction in progress, it is important to make sure that the pack
has been moved to a secure pack opening contract. See the pack opener contract at [periphery](https://github.com/ruleslabs/periphery)
for more information.
- ###### parameters
- `to: felt`: address to send the newly minted cards.
- `pack_id: felt`: the id of the pack to open.
- `cards: Card*`: the cards to mint. Like [`createAndMintCard`](#createandmintcard) does, they will be created in
[`RulesCards`](#rulescards) first, then, corresponding card tokens will be minted.
- `metadata: Metadata*`: see [metadata section](#metadata).
### Metadata
### Access Control
## Local development
### Compile contracts
```bash
nile compile src/ruleslabs/contracts/Rules*/Rules*.cairo --directory src
```
### Run tests
```bash
tox tests/test.py
```
### Deploy contracts
```bash
starknet declare artifacts/RulesData.json
starknet declare artifacts/RulesCards.json
starknet declare artifacts/RulesPacks.json
starknet declare artifacts/RulesTokens.json
nile deploy Proxy [RULES_DATA_CLASS_HASH]
nile deploy RulesCards [RULES_CARDS_CLASS_HASH]
nile deploy RulesPacks [RULES_PACKS_CLASS_HASH]
nile deploy RulesTokens [RULES_TOKENS_CLASS_HASH]
```
| /ruleslabs-core-1.0.1.tar.gz/ruleslabs-core-1.0.1/README.md | 0.582847 | 0.947769 | README.md | pypi |
import itertools
import json
import ruly
from ruly_dmn import common
class DMN:
    """Class that contains the DMN implementation.

    Args:
        handler (ruly_dmn.ModelHandler): model handler
        rule_factory_cb (Optional[Callable]): function that creates a rule
            factory - if None, a factory that uses console is used. Signature
            should match the signature of :func:`ruly_dmn.rule_factory_cb`"""

    def __init__(self, handler, rule_factory_cb=None):
        self._handler = handler
        self._knowledge_base = ruly.KnowledgeBase(*handler.rules)
        self._factory_cb = rule_factory_cb
        # Input variables are dependencies that are never produced as an
        # output by any decision.
        # NOTE(review): chain(*dependencies.keys()) unpacks each key; when
        # keys are plain strings (as CamundaModelerHandler produces) this
        # chains their *characters*, not output names - confirm whether keys
        # are meant to be tuples of output names.
        all_outputs = set(itertools.chain(*handler.dependencies.keys()))
        all_inputs = set(itertools.chain(*handler.dependencies.values()))
        self._inputs = all_inputs - all_outputs

    @property
    def inputs(self):
        """Set[str]: input variables for all available decisions"""
        return self._inputs

    def decide(self, inputs, decision):
        """Attempts to solve for decision based on given inputs. May create
        new rules if the factory creates them.

        Args:
            inputs (Dict[str, Any]): name-value pairs of all inputs
            decision (str): name of the decision that should be resolved

        Returns:
            Any: calculated decision

        Raises:
            ruly_dmn.HitPolicyViolation: raised if hit policy violation is
                detected"""
        rules = list(self._knowledge_base.rules)
        if self._factory_cb is None:
            rule_factory = _ConsoleRuleFactory(self._handler)
        else:
            rule_factory = self._factory_cb(self._handler)

        def post_eval_cb(state, output_name, fired_rules):
            # Apply the table's hit policy first, then give the factory a
            # chance to create a new rule for this (state, output) pair.
            fired_rules = _resolve_hit_policy(
                fired_rules, self._handler.hit_policies[output_name])
            new_rule = rule_factory.create_rule(state, fired_rules,
                                                output_name)
            if new_rule is not None and new_rule not in rules:
                # Insert the new rule ahead of the first fired rule so it
                # takes precedence, then abort and restart the evaluation.
                if len(fired_rules) == 0:
                    rules.append(new_rule)
                else:
                    rules.insert(rules.index(fired_rules[0]), new_rule)
                raise _CancelEvaluationException()
            elif len(fired_rules) > 0:
                state = dict(state, **fired_rules[0].consequent)
            return state

        state = None
        rule_count = len(rules)
        rules_changed = False
        while state is None:
            try:
                state = ruly.backward_chain(self._knowledge_base, decision,
                                            post_eval_cb=post_eval_cb,
                                            **inputs)
            except _CancelEvaluationException:
                if len(rules) == rule_count:
                    # Evaluation was cancelled but no rule was added.
                    # NOTE(review): state stays None here, so the final
                    # state[decision] raises TypeError - confirm intended.
                    break
                else:
                    # A rule was added - rebuild the knowledge base and retry.
                    rules_changed = True
                    self._knowledge_base = ruly.KnowledgeBase(*rules)
        if rules_changed:
            # Persist the newly created rules back into the model file.
            self._handler.update(self._knowledge_base)
        return state[decision]
def rule_factory_cb(handler):
    """Placeholder describing the signature expected of rule factory
    callbacks passed to :class:`DMN`.

    Args:
        handler (ruly_dmn.ModelHandler): model handler

    Returns:
        ruly_dmn.RuleFactory: rule factory"""
class HitPolicyViolation(Exception):
    """Raised when the rules that fired for a decision violate the
    decision table's hit policy."""
class _ConsoleRuleFactory(common.RuleFactory):
    """Rule factory that interactively asks the user, over the console,
    whether and how to create new rules."""

    def __init__(self, handler):
        # (input_values, output_name) pairs the user has already declined,
        # so the same question is never asked twice in one session.
        self._rejections = []
        self._handler = handler

    def create_rule(self, state, fired_rules, output_name):
        """Offer the user the chance to create a new rule for output_name.

        Returns the new ruly.Rule, or None if the user declined (now or
        earlier) or the fired rules already use every available input."""
        input_names = self._handler.dependencies[output_name]
        input_values = {name: state[name] for name in input_names
                        if state[name] is not None}
        if (input_values, output_name) in self._rejections:
            return None
        if not self._show_prompts(fired_rules, state, input_names):
            return None
        if len(fired_rules) == 0:
            question = (f'Unable to decide {output_name} for inputs '
                        f'{input_values}, generate new rule?')
        else:
            question = (f'Fired rules for {output_name} did not use all '
                        f'available input decisions - {input_values}, would '
                        f'you like to create a new rule that does?')
        if not self._confirm_creation(question):
            self._rejections.append((input_values, output_name))
            return None
        rule = self._create_prompt(input_values, output_name)
        print('Created rule:', rule)
        return rule

    def _show_prompts(self, fired_rules, state, input_names):
        """Return False when some fired rule already depends on every
        available input, i.e. a new rule could not be more specific."""
        available_inputs = {name for name in input_names
                            if state[name] is not None}
        for rule in fired_rules:
            rule_deps = set(ruly.get_rule_depending_variables(rule))
            if (rule_deps & available_inputs) == available_inputs:
                return False
        return True

    def _confirm_creation(self, question):
        """Ask a yes/no question on the console; the default answer is yes."""
        answer = input(f'{question} (Y/n) ').lower() or 'y'
        while answer not in ('y', 'n'):
            answer = input('Please type y or n. (Y/n)').lower() or 'y'
        if answer == 'n':
            return False
        return True

    def _create_prompt(self, input_values, output_name):
        """Prompt for the rule's output value (JSON) and build the rule.

        The antecedent matches exactly the currently known input values."""
        antecedent = ruly.Expression(
            ruly.Operator.AND, tuple(ruly.EqualsCondition(name, value)
                                     for name, value in input_values.items()
                                     if value is not None))
        print('Please type the expected output values (JSON)')
        print(f'IF {antecedent} THEN')
        assignments = {}
        value = None
        while value is None:
            value_json = input(f'{output_name} = ')
            try:
                value = json.loads(value_json)
            except json.JSONDecodeError:
                # Non-JSON input may simply be an unquoted string literal.
                answer = input(f'JSON parsing failed, is the expected value '
                               f'string "{value_json}"? (Y/n)') or 'y'
                if answer == 'n':
                    continue
                else:
                    value = value_json
                    break
        assignments[output_name] = value
        return ruly.Rule(antecedent, assignments)
class _CancelEvaluationException(Exception):
    """Internal control-flow signal: raised from the post-evaluation
    callback to abort the current backward-chaining run after the rule
    list has changed."""
    pass
def _resolve_hit_policy(fired_rules, hit_policy):
    """Filter the fired rules according to the decision table's hit policy.

    Args:
        fired_rules (List[ruly.Rule]): rules whose antecedents were satisfied
        hit_policy (ruly_dmn.common.HitPolicy): hit policy of the decision

    Returns:
        List[ruly.Rule]: rules remaining after the hit policy is applied

    Raises:
        HitPolicyViolation: if the fired rules violate the hit policy"""
    if hit_policy == common.HitPolicy.UNIQUE:
        if len(fired_rules) > 1:
            raise HitPolicyViolation(f'multiple rules fired for a decision '
                                     f'with unique hit policy: {fired_rules}')
        return fired_rules
    if hit_policy == common.HitPolicy.FIRST:
        return [fired_rules[0]] if len(fired_rules) > 0 else []
    if hit_policy == common.HitPolicy.ANY:
        # Guard the empty case - previously this raised an IndexError when
        # no rules fired under the ANY policy.
        if len(fired_rules) == 0:
            return []
        if not all(r.consequent == fired_rules[0].consequent
                   for r in fired_rules):
            raise HitPolicyViolation(f'rules with different outputs '
                                     f'satisfied, while hit policy is any: '
                                     f'{fired_rules}')
        return [fired_rules[0]]
    # Any other policy (COLLECT-style) keeps every fired rule; previously
    # this implicitly returned None, which crashed the caller.
    return fired_rules
import json
import ruly
import uuid
import xml.etree.ElementTree
from ruly_dmn import common
# Fully-qualified XML tag names of the DMN 1.3 (20191111) model namespace,
# keyed by their local element names.
_tags = {
    'decision': '{https://www.omg.org/spec/DMN/20191111/MODEL/}decision',
    'decisionTable': '{https://www.omg.org/spec/DMN/20191111/MODEL/}'
                     'decisionTable',
    'input': '{https://www.omg.org/spec/DMN/20191111/MODEL/}input',
    'output': '{https://www.omg.org/spec/DMN/20191111/MODEL/}output',
    'rule': '{https://www.omg.org/spec/DMN/20191111/MODEL/}rule',
    'inputExpression': '{https://www.omg.org/spec/DMN/20191111/MODEL/}'
                       'inputExpression',
    'inputValues': '{https://www.omg.org/spec/DMN/20191111/MODEL/}inputValues',
    'text': '{https://www.omg.org/spec/DMN/20191111/MODEL/}text',
    'inputEntry': '{https://www.omg.org/spec/DMN/20191111/MODEL/}inputEntry',
    'outputEntry': '{https://www.omg.org/spec/DMN/20191111/MODEL/}outputEntry'}
class CamundaModelerHandler(common.ModelHandler):
    """Implementation of the handler that expects a Camunda Modeler DMN file

    Args:
        path (pathlib.Path): path to the DMN file
        dump_path (Optional[pathlib.Path]): path where updated DMN files will
            be dumped. Can be the same as path. If None, they aren't dumped
            anywhere"""

    def __init__(self, path, dump_path=None):
        self._dump_path = dump_path
        tree = xml.etree.ElementTree.parse(path)
        root = tree.getroot()
        rules = []
        dependencies = {}
        hit_policies = {}
        rule_ids = []
        for decision in root.findall(_tags['decision']):
            table = decision.find(_tags['decisionTable'])
            inputs = [e.find(_tags['inputExpression']).find(_tags['text']).text
                      for e in table.findall(_tags['input'])]
            outputs = [e.get('name') for e in table.findall(_tags['output'])]
            # NOTE(review): only the first output column of each table is
            # used - confirm multi-output tables are out of scope.
            output_name = outputs[0]
            dependencies[output_name] = inputs
            # Camunda omits the attribute for the default UNIQUE policy.
            hit_policy_str = table.attrib.get('hitPolicy') or 'UNIQUE'
            hit_policies[output_name] = common.HitPolicy[hit_policy_str]
            for rule_element in table.findall(_tags['rule']):
                input_values = [
                    e.find(_tags['text']).text
                    for e in rule_element.findall(_tags['inputEntry'])]
                # Empty cells (None) mean "don't care" and yield no condition.
                antecedent = ruly.Expression(
                    ruly.Operator.AND,
                    tuple(ruly.EqualsCondition(input_name, json.loads(value))
                          for input_name, value in zip(inputs, input_values)
                          if value is not None))
                output_values = [
                    json.loads(e.find(_tags['text']).text)
                    for e in rule_element.findall(_tags['outputEntry'])]
                rule = ruly.Rule(antecedent, {output_name: output_values[0]})
                rules.append(rule)
                rule_ids.append((rule, rule_element.attrib['id']))
        self._tree = tree
        self._dependencies = dependencies
        self._hit_policies = hit_policies
        self._rule_ids = rule_ids
        self._rules = [rule for rule, _ in self._rule_ids]

    @property
    def dependencies(self):
        """Dict[str, List[str]]: input expression names per output name"""
        return self._dependencies

    @property
    def rules(self):
        """List[ruly.Rule]: rules parsed from the DMN decision tables"""
        return self._rules

    @property
    def hit_policies(self):
        """Dict[str, common.HitPolicy]: hit policy per output name"""
        return self._hit_policies

    def update(self, knowledge_base):
        """Write new rules from the knowledge base back into the XML tree.

        Rules already tracked in self._rule_ids keep their elements; new
        rules are inserted so the table preserves the knowledge base order.
        The tree is written to dump_path when one was configured.

        Args:
            knowledge_base (ruly.KnowledgeBase): knowledge base whose rules
                should be reflected in the model"""
        root = self._tree.getroot()
        for decision in root.findall(_tags['decision']):
            table = decision.find(_tags['decisionTable'])
            inputs = [e.find(_tags['inputExpression']).find(_tags['text']).text
                      for e in table.findall(_tags['input'])]
            outputs = [e.get('name') for e in table.findall(_tags['output'])]
            output_name = outputs[0]
            rules = [rule for rule in knowledge_base.rules
                     if set(rule.consequent) == set([output_name])]
            # Element.getchildren() was removed in Python 3.9 - iterating
            # the element directly yields the same child sequence.
            rule_index_elem_iter = ((i, el) for i, el in
                                    enumerate(table)
                                    if el.tag == _tags['rule'])
            elem = None
            for rule in rules:
                if elem is None:
                    try:
                        elem = next(rule_index_elem_iter)
                    except StopIteration:
                        elem = None
                rule_ids = [rule_id for saved_rule, rule_id in self._rule_ids
                            if saved_rule == rule]
                if len(rule_ids) == 0:
                    rule_id = None
                else:
                    rule_id = rule_ids[0]
                if rule_id is not None:
                    # Existing rule - consume its element and move on.
                    elem = None
                    continue
                rule_element = _rule_to_xml_element(rule, inputs, output_name)
                self._rule_ids.append((rule, rule_element.attrib['id']))
                if elem is None:
                    table.append(rule_element)
                else:
                    # Insert the new rule before the current rule element so
                    # it gains priority over it.
                    table.insert(elem[0], rule_element)
        if self._dump_path is not None:
            self._tree.write(self._dump_path)
def _rule_to_xml_element(rule, inputs, output_name):
    """Serialize a ruly rule into a Camunda DMN <rule> XML element.

    Input columns with no matching condition get an empty <inputEntry>,
    which Camunda treats as a "don't care" cell."""
    Element = xml.etree.ElementTree.Element
    rule_element = Element(_tags['rule'],
                           attrib={'id': f'DecisionRule_{uuid.uuid1()}'})
    for input_name in inputs:
        conditions = [c for c in rule.antecedent.children
                      if c.name == input_name]
        entry = Element(_tags['inputEntry'],
                        attrib={'id': f'UnaryTests_{uuid.uuid1()}'})
        text = Element(_tags['text'])
        if len(conditions) == 1:
            text.text = json.dumps(conditions[0].value)
        entry.append(text)
        rule_element.append(entry)
    output_entry = Element(_tags['outputEntry'],
                           attrib={'id': f'LiteralExpression_{uuid.uuid1()}'})
    text = Element(_tags['text'])
    text.text = json.dumps(rule.consequent[output_name])
    output_entry.append(text)
    rule_element.append(output_entry)
    return rule_element
import abc
from collections import namedtuple
import enum
import json
class Rule(namedtuple('Rule', ['antecedent', 'consequent'])):
    """Knowledge base rule.

    Attributes:
        antecedent (Union[ruly.Condition, ruly.Expression]): condition or
            expression that, when it evaluates to True, triggers the
            assignments described by the consequent
        consequent (Dict[str, Any]): mapping of variable names to the
            values assigned when the rule fires
    """

    def __repr__(self):
        assignments = [f'{name} = {value}'
                       for name, value in self.consequent.items()]
        return 'IF {} THEN {}'.format(self.antecedent,
                                      ' AND '.join(assignments))
class Operator(enum.Enum):
    """Logical operators an Expression can apply to its children."""
    AND = 1
class Expression(namedtuple('Expression', ['operator', 'children'])):
    """Logical expression: an aggregation of conditions and sub-expressions.

    Attributes:
        operator (ruly.Operator): operator applied across all children when
            the expression is evaluated
        children (List[Union[ruly.Condition, ruly.Expression]]): list of
            conditions or nested expressions
    """

    def __repr__(self):
        separator = f' {self.operator.name} '
        return separator.join(str(child) for child in self.children)
class Condition(abc.ABC):
    """Abstract base class for a condition that has to be satisfied when an
    expression is evaluated.

    Concrete subclasses expose a ``name`` attribute identifying the
    variable they test."""


class EqualsCondition(namedtuple('EqualsCondition', ['name', 'value']),
                      Condition):
    """Condition satisfied when a variable's value equals ``value``.

    Attributes:
        name (str): variable name
        value (Any): value the variable is compared against
    """

    def __repr__(self):
        return '{} = {}'.format(self.name, json.dumps(self.value))
class Evaluation(namedtuple('Evaluation', ['state', 'unknowns'])):
    """Structure representing an evaluation result

    Attributes:
        state (Dict[str, Any]): all variable values
        unknowns (Set[Unknown]): set of all found unknowns"""
def get_rule_depending_variables(rule):
    """Calculate all variables a rule's antecedent depends on.

    Unlike the previous flat implementation, this walks arbitrarily nested
    expressions (Expression children may themselves be Expressions).

    Args:
        rule (ruly.Rule): rule whose antecedent is inspected

    Returns:
        Set[str]: names of all depending variables"""
    return _collect_depending_variables(rule.antecedent)


def _collect_depending_variables(node):
    """Recursively gather variable names from a condition or expression."""
    if isinstance(node, Condition):
        return {node.name}
    elif isinstance(node, Expression):
        variables = set()
        for child in node.children:
            variables |= _collect_depending_variables(child)
        return variables
from ruly import common
def backward_chain(knowledge_base, output_name, post_eval_cb=None, **kwargs):
    """Evaluates the output using backward chaining

    The algorithm is depth-first-search, if goal variable assignment is
    contained within a rule that has a depending derived variable, this
    variable is solved for using the same function call.

    Args:
        knowledge_base (ruly.KnowledgeBase): knowledge base
        output_name (str): name of the goal variable
        post_eval_cb(Optional[Callable]): callback called after determining
            which rules fired, signature should match :func:`post_eval_cb`.
            Return value is changed state. If `None`, state is changed by
            using assignment of first fired rule's consequent (or not
            changed if no rules fired)
        **kwargs (Dict[str, Any]): names and values of input variables

    Returns:
        Dict[str, Any]: state containing calculated values
    """
    # Start from the provided inputs; every other variable is None (unknown).
    state = {
        name: kwargs.get(name)
        for name in knowledge_base.input_variables.union(
            knowledge_base.derived_variables)}
    if state[output_name] is not None:
        # The goal variable is already known - nothing to derive.
        return state
    fired_rules = []
    for rule in [r for r in knowledge_base.rules
                 if output_name in r.consequent]:
        depending_variables = common.get_rule_depending_variables(rule)
        # Recursively solve for any unknown derived variable the rule needs.
        for depending_variable in [var for var in depending_variables
                                   if state[var] is None]:
            if depending_variable in knowledge_base.input_variables:
                # Unknown plain input - this rule cannot be satisfied.
                break
            eval_state = backward_chain(knowledge_base, depending_variable,
                                        post_eval_cb=post_eval_cb, **state)
            state = dict(state, **eval_state)
            if state[depending_variable] is None:
                # Recursion could not derive the variable - give up on rule.
                break
        if evaluate(state, rule.antecedent):
            if post_eval_cb is None:
                # Without a callback, the first satisfied rule wins.
                return dict(state, **rule.consequent)
            fired_rules.append(rule)
    if post_eval_cb:
        # Let the callback decide how the fired rules change the state.
        return post_eval_cb(state, output_name, fired_rules)
    return state
def post_eval_cb(state, output_name, fired_rules):
    """Placeholder describing the signature expected of post-evaluation
    callbacks passed to :func:`backward_chain`.

    Args:
        state (Dict[str, Any]): state calculated during evaluation
        output_name (str): name of the goal variable
        fired_rules (List[ruly.Rule]): rules whose antecedents were
            satisfied during the evaluation

    Returns:
        Dict[str, Any]: updated state"""
def evaluate(inputs, antecedent):
    """Evaluate the truthiness of a rule antecedent.

    Args:
        inputs (Dict[str, Any]): variable values
        antecedent (Union[ruly.Expression, ruly.Condition]): rule antecedent

    Returns:
        bool: whether the antecedent holds for the given inputs"""
    if isinstance(antecedent, common.Condition):
        value = inputs[antecedent.name]
        return _evaluate_condition(antecedent, value)
    if isinstance(antecedent, common.Expression):
        return _evaluate_expression(antecedent, inputs)
def _evaluate_expression(expression, inputs):
    # Only conjunction is supported: every child has to evaluate to True.
    if expression.operator == common.Operator.AND:
        return all(evaluate(inputs, child) for child in expression.children)
def _evaluate_condition(condition, input_value):
    # Equality is currently the only supported condition type.
    if isinstance(condition, common.EqualsCondition):
        return input_value == condition.value
from rumboot.images.imageFormatBase import ImageFormatBase
class ImageFormatV2(ImageFormatBase):
    """
    This class works with version 2.0 images.

    struct __attribute__((packed)) rumboot_bootheader {
        uint32_t magic; /* 0xb0ldface */
        uint8_t  version;
        uint8_t  reserved;
        uint8_t  chip_id;
        uint8_t  chip_rev;
        uint32_t data_crc32;
        uint32_t datalen;
        uint32_t entry_point[11];
        uint32_t header_crc32;
        const struct rumboot_bootsource *device;
        char data[];
    };
    """
    name = "RumBootV2"
    MAGIC = 0xb01dface
    VERSION = 2
    # Field layout: [size in bytes, key, printf-style format, description]
    format = [
        [4, "magic", "0x%x", "Magic"],
        [1, "version", "0x%x", "Header Version"],
        [1, "reserved", "0x%x", "Reserved"],
        [1, "chip_id", "0x%x", "Chip ID"],
        [1, "chip_rev", "0x%x", "Chip Revision"],
        [4, "data_crc32", "0x%x", "Data CRC32"],
        [4, "data_length", "%d", "Data Length"],
        [4, "entry0", "0x%x", "Entry Point[0]"],
        [4, "entry1", "0x%x", "Entry Point[1]"],
        [4, "entry2", "0x%x", "Entry Point[2]"],
        [4, "entry3", "0x%x", "Entry Point[3]"],
        [4, "entry4", "0x%x", "Entry Point[4]"],
        [4, "entry5", "0x%x", "Entry Point[5]"],
        [4, "entry6", "0x%x", "Entry Point[6]"],
        [4, "entry7", "0x%x", "Entry Point[7]"],
        [4, "entry8", "0x%x", "Entry Point[8]"],
        [4, "entry9", "0x%x", "Entry Point[9]"],
        [4, "header_crc32", "0x%x", "Header CRC32"],
        [4, "bootsource", "0x%x", ""],
    ]
    # NOTE(review): class-level mutable dict is shared across instances
    # unless the base class rebinds self.header - confirm ImageFormatBase
    # assigns a fresh dict per instance.
    header = {}

    def __init__(self, inFile):
        super().__init__(inFile)

    def dump_header(self, raw=False, format=False):
        """Dump header fields, hiding the unused entry points 1-9."""
        for i in range(1, 10):
            self.hide_field("entry" + str(i))
        return super().dump_header(raw, format)

    def check(self):
        """Return True when both the magic and the header version match."""
        if not super().check():
            return False
        return self.header["version"] == self.VERSION

    def wrap(self):
        """Wrap the payload, stamping this format's header version."""
        super().wrap()
        # Use the class constant so wrap() and check() can never disagree
        # about the version (previously a bare literal 2 was written here).
        self.header["version"] = self.VERSION
        self.write_header()
        return True

    def get_chip_id(self):
        """Return the chip ID stored in the header."""
        return self.header['chip_id']

    def get_chip_rev(self):
        """Return the chip revision stored in the header."""
        return self.header['chip_rev']
from rumboot.images.imageFormatBase import ImageFormatBase
import os
class ImageFormatLegacyNM6408(ImageFormatBase):
    """Legacy NM6408 image format: an 8-byte header (magic + data length)
    followed by the payload and a trailing 4-byte CRC32 computed with the
    boot ROM's word-oriented algorithm."""
    MAGIC = 0x12345678
    name = "NM6408 (Legacy)"
    # Field layout: [size in bytes, key, printf-style format, description]
    format = [
        [4, "magic", "0x%x", "Magic"],
        [4, "data_length", "%d", "Data Length"],
    ]

    def __init__(self, inFile):
        super().__init__(inFile)
        self.header_size = self.get_header_length()

    def get_header_length(self):
        # Fixed-size header: 4 bytes magic + 4 bytes data length.
        return 8

    def dump_header(self, raw=False, format=False):
        """Dump header fields plus the trailing data CRC32, validating it."""
        valid = super().dump_header(raw, format)
        # The data CRC32 is stored in the last 4 bytes of the file.
        crc32 = self.read32(-4, os.SEEK_END)
        self.header["data_crc32"] = crc32
        if (crc32 == self.data_crc32):
            hmatch = "Valid"
        else:
            hmatch = "Invalid, expected: 0x{:X}".format(self.data_crc32)
            valid = False
        self.dump_field([4, "data_crc32", "0x%x", "Data CRC32"], True, hmatch)
        return valid

    def read_header(self):
        """Parse the header fields and compute the payload CRC32."""
        offset = 0
        for f in self.format:
            self.header[f[1]] = self.read_element(offset, f[0])
            offset = offset + f[0]
        if self.header['magic'] != self.MAGIC:
            return
        # A stored length of zero means "whole file minus the header".
        if (self.header["data_length"] == 0):
            self.data_length = self.file_size - self.get_header_length()
        else:
            self.data_length = self.header["data_length"]
        # Compute the payload CRC32, excluding the trailing checksum itself.
        self.data_crc32 = self.crc32(self.get_header_length(), self.get_header_length() + self.data_length - 4)

    def fix_length(self):
        """Recompute the data length from the file size and fix checksums."""
        self.header["data_length"] = (self.file_size - self.get_header_length()) & 0xFFFFFF
        self.data_length = self.header["data_length"]
        self.fix_checksums()

    def fix_checksums(self, calc_data = True):
        """Rewrite the header and the trailing payload CRC32."""
        self.header["data_length"] = self.data_length
        crc32 = self.crc32(self.get_header_length(), self.get_header_length() + self.data_length - 4)
        self.write_header()
        self.write32(-4, crc32, os.SEEK_END)

    def get_chip_id(self):
        """Return the chip ID; fixed for this legacy format."""
        return 6

    def get_chip_rev(self):
        """Return the chip revision; fixed for this legacy format."""
        return 1

    def wrap(self):
        """Wrap raw data, reserving the trailing CRC32 slot."""
        self.write32(-4, 0, os.SEEK_END)
        super().wrap()
        return True

    # The boot ROM implements a word-oriented CRC32 variant that differs
    # from the standard byte-wise algorithm, so it is re-implemented here.
    def crc32(self, from_byte, to_byte=-1):
        """Compute the boot ROM's word-wise CRC32 over [from_byte, to_byte)."""
        # NOTE(review): the standard CRC32 below is computed but never used.
        crc32_std = super().crc32(from_byte, to_byte)
        self.fd.seek(from_byte, os.SEEK_SET)
        if (to_byte == -1):
            to_byte = self.file_size
        polynom = 0x04C11DB7
        crc = 0xffffffff
        pos = from_byte
        while (pos < to_byte):
            data = self.read32(pos)
            pos = pos + 4
            crc = crc ^ data
            for j in range(0, 32):
                if (crc & 0x80000000):
                    crc = crc << 1
                    crc = crc ^ polynom
                else:
                    crc = crc << 1
                # Keep the register within 32 bits after each shift.
                crc = crc & 0xffffffff
        crc = crc ^ 0xffffffff
        return crc
from rumboot.ops.base import base
import tqdm
import time
class basic_uploader(base):
    """Handles the initial SPL upload prompts from the boot ROM / u-boot.

    The strings in ``formats`` are matched against terminal output; when
    one fires, ``action`` pushes the image over the configured transport.
    """

    formats = {
        "first_upload" : "boot: host: Hit '{}' for X-Modem upload",
        "first_upload_basis" : "boot: host: Hit 'X' for xmodem upload",
        "upload_uboot": "Trying to boot from UART",
        "uboot_xmodem": "## Ready for binary (xmodem) download to {} at {} bps..."
    }

    def __init__(self, term):
        super().__init__(term)

    def sync(self, syncword, short = False):
        """Repeatedly send *syncword* until the target answers.

        With ``short=True`` the word is written once and we return without
        waiting.  Otherwise, after each write we read a 2-byte probe; two
        consecutive ``C`` bytes mean the target is polling for an x-modem
        transfer, so we are in sync.
        """
        ser = self.term.ser
        # In replay mode there is no live target to synchronize with.
        if self.term.replay:
            return
        while True:
            ser.write(syncword.encode())
            if short:
                break
            # Read exactly one 2-byte probe per write; the unconditional
            # `break` makes this inner loop a single-shot check.
            while True:
                tmp1 = ser.read(1)
                tmp2 = ser.read(1)
                if tmp1 == b"C" and tmp2 == b"C":
                    return
                break

    def action(self, trigger, result):
        """Push the SPL image to the chip's SPL load address.

        Returns 1 on upload failure (error code), True on success.
        """
        # u-boot's UART boot prompt needs no 'X' keypress first.
        if trigger != "upload_uboot" and self.term.xfer.how == "xmodem":
            self.sync("X")
        if not self.term.xfer.push(self.term.chip.spl_address):
            print("Upload failed")
            return 1
        return True
class smart_uploader(basic_uploader):
    """Uploads to an address parsed out of the target's own prompt.

    The prompt supplies the destination address as ``result[0]``.
    """

    formats = {
        "upload" : "UPLOAD to {:x}. 'X' for X-modem, 'E' for EDCL"
    }

    def action(self, trigger, result):
        if (self.term.xfer.how == "xmodem"):
            self.sync("X", True)
        if not self.term.xfer.push(result[0]):
            print("Upload failed")
            return 1
        # For non-xmodem transports (EDCL), acknowledge with 'E' after the push.
        if (self.term.xfer.how != "xmodem"):
            self.sync('E', True)
        return True
class smart_downloader(basic_uploader):
    """Downloads a memory region from the target into a local file.

    The prompt supplies (length, source address, destination name); the
    destination may be remapped via +plusargs.
    """

    formats = {
        "upload" : "DOWNLOAD: {:d} bytes from {:x} to {}. 'X' for X-modem, 'E' for EDCL"
    }

    def action(self, trigger, result):
        arg = result[2]
        # Allow the destination filename to be overridden via +plusargs.
        if arg in self.term.plusargs:
            fl = self.term.plusargs[arg]
        else:
            fl = arg
        # Fix: the output stream used to be leaked; the context manager now
        # guarantees it is flushed and closed even on error.
        with open(fl, 'wb') as stream:
            if (self.term.xfer.how == "xmodem"):
                self.sync("X", True)
            self.term.xfer.recv(stream, result[1], result[0])
            if (self.term.xfer.how != "xmodem"):
                self.sync("E", True)
class tcl_dl(basic_uploader):
    """Raw (non-xmodem) download of a memory region into a local file.

    The prompt supplies (size, source address, destination name).
    """

    formats = {
        "upload" : "DOWNLOAD: {:d} bytes from {:x} to {}. 'R' for RAW"
    }

    def action(self, trigger, result):
        arg = result[2]
        size = result[0]
        # Allow the destination filename to be overridden via +plusargs.
        if arg in self.term.plusargs:
            fl = self.term.plusargs[arg]
        else:
            fl = arg
        # Acknowledge with 'R' to start the raw transfer.
        self.term.write("R".encode())
        self.term.progress_start(f"Downloading file: {fl}", size)
        total = size
        # Fix: open via a context manager so the file is closed even if a
        # read fails mid-transfer (it was previously leaked on error).
        with open(fl, 'wb') as stream:
            while size > 0:
                toread = 4096
                if toread > size:
                    toread = size
                data = self.term.read(toread)
                # Fix: an empty read used to spin this loop forever; bail
                # out instead.  NOTE(review): assumes term.read returning
                # b'' means the link is done/dead -- confirm.
                if not data:
                    break
                stream.write(data)
                size -= len(data)
                self.term.progress_update(total, total - size, len(data))
        self.term.progress_end()
        return True
class runtime(basic_uploader):
    """Uploads a plusarg-named file to an address parsed from the prompt."""

    formats = {
        "runtime" : "UPLOAD: {} to {:x}. 'X' for X-modem, 'E' for EDCL",
    }

    def action(self, trigger, result):
        arg = result[0]
        fl = self.term.plusargs[arg]
        # Fix: context manager closes the file even if the transfer raises
        # (the stream previously leaked on error paths).
        with open(fl, 'rb') as stream:
            if (self.term.xfer.how == "xmodem"):
                self.sync("X", True)
            ret = self.term.xfer.send(result[1], stream, "Uploading")
        if (self.term.xfer.how != "xmodem"):
            self.sync('E', True)
        return ret
class runtime_tcp_ul(basic_uploader):
    """Raw upload of a plusarg-named file (used over TCP-style transports)."""

    formats = {
        "runtime" : "UPLOAD: {} to 0x{:x}. 'R' for raw upload",
    }

    def stream_size(self, stream):
        """Return the number of bytes from the stream's current position to
        its end, without disturbing the position."""
        pos = stream.tell()
        stream.seek(0, 2)
        ln = stream.tell()
        stream.seek(pos)
        return ln - pos

    def action(self, trigger, result):
        # Fix: removed a leftover debug print of (trigger, result).
        arg = result[0]
        fl = self.term.plusargs[arg]
        # Fix: context manager closes the file even if a write fails (the
        # stream previously leaked on error paths).
        with open(fl, 'rb') as stream:
            # Acknowledge with 'R' to start the raw transfer.
            self.term.write('R'.encode())
            self.sync("R", True)
            size = self.stream_size(stream)
            self.term.write(f'UPLOAD SIZE: {size:d} bytes\n'.encode())
            self.term.progress_start(f"Uploading file: {fl}", size)
            pos = 0
            while True:
                data = stream.read(4096)
                if data == b'':
                    break
                self.term.write(data)
                self.term.progress_update(size, pos, len(data))
                pos += len(data)
        self.term.progress_end()
        return True
class incremental(basic_uploader):
    """Chain-loads the next binary each time control returns to the ROM.

    Triggered by the ROM's "Back in rom" message; ``result[0]`` carries
    the exit code of the binary that just ran.
    """

    formats = {
        "incremental_upload": "boot: host: Back in rom, code {}",
    }

    def action(self, trigger, result):
        ret = int(result[0])
        # A non-zero exit code from the previous stage aborts the chain.
        if ret != 0:
            return ret
        if self.term.next_binary(True) == None:
            print("No more files, exiting")
            return ret
        if (self.term.xfer.how == "xmodem"):
            self.sync("X")
        if not self.term.xfer.push(self.term.chip.spl_address):
            print("Upload failed")
            return 1
        return True
class flasher(basic_uploader):
    """Feeds a flash image to the on-target flasher stub."""

    formats = {
        "flash_upload" : "boot: Press '{}' and send me the image"
    }

    def action(self, trigger, result):
        # Select the 128-byte x-modem variant before pushing.
        self.term.xfer.selectTransport("xmodem-128")
        # Fix: removed the unused local `desc` ("Writing image").
        self.sync("X")
        if not self.term.xfer.push(self.term.chip.spl_address):
            print("Upload failed")
            return 1
        return True
import argparse
from distutils.util import strtobool
import rumboot_packimage
import rumboot
from rumboot.ImageFormatDb import ImageFormatDb
class RumbootPackimage:
    """RumbootPackimage tool frontend.

    Currently a stub: the actual CLI logic lives in cli(), and
    construction keeps no state.
    """
    def __init__(self, opts):
        # opts: parsed argparse namespace; intentionally unused for now.
        pass
def cli():
    """Command-line entry point for rumboot-packimage.

    Builds the argument parser, detects (or creates, with --wrap) the
    image format of the given file, then applies the requested operations
    in a fixed order: set fields, (de)compress, relocate, raw data pokes,
    flags, alignment, endianness swaps, checksums, zero padding, and
    finally an optional header dump.  Returns a process exit code
    (0 = success, 1 = failure or invalid checksums).
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description="rumboot-packimage {} - Universal RumBoot Image Manipulation Tool\n".format(rumboot.__version__) +
                                     rumboot.__copyright__)
    parser.add_argument("-f", "--file",
                        help="image file",
                        type=argparse.FileType("r+b"),
                        required=True)
    parser.add_argument("-i", "--info",
                        help="Show information about the image",
                        action="store_true")
    parser.add_argument("-c", "--checksum",
                        help='''This option will modify the file! Calculates valid checksums to the header.
The length is set to cover the full length of file ONLY if it's zero.
''',
                        action="store_true")
    parser.add_argument("-C", "--checksum_fix_length",
                        help="This option will modify the file! The same as --checksum/-c, but always overrides length to covert the full length of file",
                        action="store_true")
    parser.add_argument("-r", "--raw",
                        help="Display raw header field names",
                        action="store_true")
    parser.add_argument('-R', '--relocate',
                        nargs=1, metavar=('relocation'),
                        help='''Tell bootrom to relocate the image at the specified address before executing it. Only RumBootV3 and above
''')
    parser.add_argument('-Z', '--compress',
                        action="store_true",
                        help='''Compress image data with heatshrink algorithm (V3 or above only)
''')
    parser.add_argument('-U', '--decompress',
                        action="store_true",
                        help='''Decompress image data with heatshrink algorithm (V3 or above only)
''')
    parser.add_argument('-z', '--add_zeroes',
                        nargs=1, metavar=('value'),
                        help='''This option will add N bytes of zeroes to the end of the file (after checksummed area).
This is required to avoid propagating 'X' during the next image check during simulation.
Normally, you do not need this option
''')
    parser.add_argument('-a', '--align',
                        nargs=1, metavar=('value'),
                        help='''Pad resulting image size to specified alignment. Remember to add -C to have correct checksums!
''')
    parser.add_argument('-F', '--flag',
                        action="append",
                        nargs=2, metavar=('value'),
                        help='''Set image flag to a desired value. Only RumBootV3 or above
''')
    parser.add_argument('--set-data',
                        action="append",
                        nargs=2, metavar=('offset', 'value'),
                        help='''Sets data at byte 'offset' to value 'offset'
''')
    parser.add_argument('-g', '--get',
                        nargs=1, metavar=('key'),
                        help='''Get a single field from header. Nothing else will be printed.
NOTE: The value will be formatted as hex
''')
    parser.add_argument('-s', '--set',
                        nargs=2, metavar=('key', 'value'),
                        help='''This option will modify the file! Set a header key to specified value.
Use -r flag on an existing image to find out what keys exist.
Use -c to update the checksums
''')
    parser.add_argument('-e', '--reverse-endianness',
                        action="store_true",
                        help='''Use this option to reverse endianness of all headers. This will not touch data.
For testing only
''')
    parser.add_argument('-E', '--reverse-data-endianness',
                        action="store_true",
                        help='''Use this option to reverse endianness of data section. This will not touch header.
This might be required for booting some nmc chips.
''')
    parser.add_argument('-w', '--wrap',
                        nargs=1,
                        help='''Use this option to wrap arbitrary data to V1/V2/V3 images.
''')
    opts = parser.parse_args()

    # Detect the image format of the input file.
    formats = ImageFormatDb("rumboot.images");
    t = formats.guess(opts.file)

    # --wrap: attach a fresh header to raw data (refuses already-wrapped files).
    if opts.wrap:
        if t != False:
            print(f"File {opts.file.name} already wrapped into rumboot format ({t})")
            return 1
        t = formats.wrap(opts.file, opts.wrap[0])
        print(f"Wrapped file {opts.file.name} with {t.name} header")

    # When data_crc32 is forced via --set, skip recomputing it later.
    calc_data = True

    if (t == False):
        if not opts.wrap:
            print("ERROR: Not a valid image, see README.md")
        else:
            print("ERROR: Failed to wrap data into image for some reason")
        return 1

    if opts.set != None:
        t.set(opts.set[0], opts.set[1])
        #FixMe: Hack
        if (opts.set[0] == "data_crc32"):
            calc_data = False
        opts.info = True
        print("Setting " + opts.set[0] + " to " + opts.set[1])
        if not opts.checksum:
            print("WARNING: Add -c option to update checksums!")

    # (De)compression implies a length fix-up, new checksums and a dump.
    if opts.compress:
        if not hasattr(t, "compress"):
            print("ERROR: Image (de)compression is not supported by this image format")
            return 1
        t.compress()
        opts.checksum_fix_length = True
        opts.checksum = True
        opts.info = True

    if opts.decompress:
        if not hasattr(t, "decompress"):
            print("ERROR: Image (de)compression is not supported by this image format")
            return 1
        t.decompress()
        opts.checksum_fix_length = True
        opts.checksum = True
        opts.info = True

    if opts.relocate:
        if not hasattr(t, "relocate"):
            print("ERROR: Relocation is not supported by this image format")
            return 1
        t.relocate(opts.relocate[0])
        opts.info = True
        if not opts.checksum:
            print("WARNING: Add -c option to update checksums!")

    # --get prints a single header field and exits immediately.
    if opts.get:
        print("0x%x" % t.get(opts.get[0]))
        return 0

    if opts.reverse_endianness:
        t.swap_endian()

    # Raw byte pokes into the data section (offset relative to data start).
    if opts.set_data:
        for f in opts.set_data:
            t.write8(t.get_header_length() + int(f[0]), int(f[1]))

    if opts.flag:
        for f in opts.flag:
            if not hasattr(t, "flag"):
                print("ERROR: Image flags are not supported by this image format")
                return 1
            # NOTE(review): distutils.strtobool is deprecated (removed in
            # Python 3.12) -- consider a local replacement.
            t.flag(f[0],bool(strtobool(f[1])))

    if opts.align:
        t.align(opts.align[0])
        opts.checksum_fix_length = True

    if opts.reverse_data_endianness:
        print("Reversing data endianness. I hope you know what you are doing.")
        t.reverse_data_endianness(8)
        opts.checksum_fix_length = True

    if (opts.checksum_fix_length):
        t.fix_length()
        opts.info = True

    if opts.checksum or opts.checksum_fix_length:
        t.fix_checksums(calc_data)
        print("Wrote valid checksums to image header")
        opts.info = True

    if opts.add_zeroes:
        t.add_zeroes(opts.add_zeroes[0])

    if opts.info:
        t.read_header()
        #Return non-zero on invalid data/header crc, except for error-injection cases
        if not t.dump_header(opts.raw) and not opts.set:
            return 1
    return 0
from decimal import Decimal
import requests
from . import exceptions
TIMEOUT = 3
API_HOST = 'https://rumetr.com/api/v1/'
class ApptList(dict):
    """
    Abstract list of flats. Useful for working with a plain list of flats.

    Structure: {complex: {house: {appt_id: {attrs...}}}}.
    """

    def add(self, complex: str, house: str, id, **kwargs):
        """Register flat *id* under *complex*/*house* with its attributes."""
        self._get_house(complex, house)[id] = kwargs

    def _get_complex(self, complex: str) -> dict:
        """Return the per-complex mapping, creating it on first access."""
        return self.setdefault(complex, {})

    def _get_house(self, complex: str, house: str) -> dict:
        """Return the per-house mapping, creating it (and its complex) on demand."""
        return self._get_complex(complex).setdefault(house, {})
class Rumetr:
    """
    The client for the rumetr.com internal database. Use it to update our
    data with your scraper.

    Successful existence checks are memoized per instance, so repeated
    calls for the same developer/complex/house/appt hit the API only once.
    """

    def complex_exists(self, complex: str) -> bool:
        """
        Shortcut to check if complex exists in our database.
        """
        try:
            self.check_complex(complex)
        except exceptions.RumetrComplexNotFound:
            return False
        return True

    def house_exists(self, complex: str, house: str) -> bool:
        """
        Shortcut to check if house exists in our database.
        """
        try:
            self.check_house(complex, house)
        except exceptions.RumetrHouseNotFound:
            return False
        return True

    def appt_exists(self, complex: str, house: str, appt: str) -> bool:
        """
        Shortcut to check if appt exists in our database.
        """
        try:
            self.check_appt(complex, house, appt)
        except exceptions.RumetrApptNotFound:
            return False
        return True

    def __init__(self, auth_key: str, developer: str, api_host=API_HOST):
        self._initialize_cache()
        self.api_host = api_host
        self.developer = developer
        self.headers = {
            'Authorization': 'Token %s' % auth_key,
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }

    def _initialize_cache(self):
        # Per-instance memoization of successful existence checks.
        self._last_checked_developer = None
        self._checked_complexes = set()
        self._checked_houses = set()
        self._checked_appts = set()

    def _format_url(self, endpoint):
        """
        Append the API host and normalize slashes (single separators, a
        trailing slash, and the scheme's '://' restored).
        """
        return (self.api_host + '/%s/' % endpoint).replace('//', '/').replace(':/', '://')

    def post(self, url: str, data: str, expected_status_code=201):
        """
        Do a POST request
        """
        r = requests.post(self._format_url(url), json=data, headers=self.headers, timeout=TIMEOUT)
        self._check_response(r, expected_status_code)
        return r.json()

    def put(self, url: str, data: str, expected_status_code=200):
        """
        Do a PUT request
        """
        r = requests.put(self._format_url(url), json=data, headers=self.headers, timeout=TIMEOUT)
        self._check_response(r, expected_status_code)
        return r.json()

    def get(self, url):
        """
        Do a GET request
        """
        r = requests.get(self._format_url(url), headers=self.headers, timeout=TIMEOUT)
        self._check_response(r, 200)
        return r.json()

    def _check_response(self, response, expected_status_code):
        """Translate HTTP failures into the package's exception hierarchy."""
        if response.status_code == 404:
            raise exceptions.Rumetr404Exception()
        if response.status_code == 403:
            raise exceptions.Rumetr403Exception()
        if response.status_code != expected_status_code:
            raise exceptions.RumetrBadServerResponseException('Got response code %d, expected %d, error: %s' % (response.status_code, expected_status_code, response.text))

    @staticmethod
    def _format_decimal(decimal: str) -> str:
        """Render a decimal string with exactly two fractional digits."""
        rounded = Decimal(decimal).quantize(Decimal('0.01'))
        return str(rounded)

    def check_developer(self) -> bool:
        """
        Check if a given developer exists in the rumetr database
        """
        if self._last_checked_developer == self.developer:
            return True
        try:
            self.get('developers/%s/' % self.developer)
        except exceptions.Rumetr404Exception:
            raise exceptions.RumetrDeveloperNotFound('Bad developer id — rumetr server does not know it. Is it correct?')
        self._last_checked_developer = self.developer
        return True

    def check_complex(self, complex: str) -> bool:
        """
        Check if a given complex exists in the rumetr database
        """
        self.check_developer()
        if complex in self._checked_complexes:
            return True
        try:
            self.get('developers/{developer}/complexes/{complex}/'.format(
                developer=self.developer,
                complex=complex,
            ))
        except exceptions.Rumetr404Exception:
            raise exceptions.RumetrComplexNotFound('Unknown complex — maybe you should create one?')
        self._checked_complexes.add(complex)
        return True

    def check_house(self, complex: str, house: str) -> bool:
        """
        Check if given house exists in the rumetr database
        """
        self.check_complex(complex)
        if '%s__%s' % (complex, house) in self._checked_houses:
            return True
        try:
            self.get('developers/{developer}/complexes/{complex}/houses/{house}/'.format(
                developer=self.developer,
                complex=complex,
                house=house,
            ))
        except exceptions.Rumetr404Exception:
            raise exceptions.RumetrHouseNotFound('Unknown house (complex is known) — may be you should create one?')
        self._checked_houses.add('%s__%s' % (complex, house))
        return True

    def check_appt(self, complex: str, house: str, appt: str) -> bool:
        """
        Check if given appartment exists in the rumetr database
        """
        self.check_house(complex, house)
        if '%s__%s__%s' % (complex, house, appt) in self._checked_appts:
            return True
        try:
            self.get('developers/{developer}/complexes/{complex}/houses/{house}/appts/{appt}'.format(
                developer=self.developer,
                complex=complex,
                house=house,
                appt=appt,
            ))
        except exceptions.Rumetr404Exception:
            raise exceptions.RumetrApptNotFound('Unknown appt (house is known) — may be you should create one?')
        self._checked_appts.add('%s__%s__%s' % (complex, house, appt))
        return True

    def add_complex(self, **kwargs):
        """
        Add a new complex to the rumetr db
        """
        self.check_developer()
        self.post('developers/%s/complexes/' % self.developer, data=kwargs)

    def add_house(self, complex: str, **kwargs):
        """
        Add a new house to the rumetr db
        """
        self.check_complex(complex)
        self.post('developers/{developer}/complexes/{complex}/houses/'.format(developer=self.developer, complex=complex), data=kwargs)

    def add_appt(self, complex: str, house: str, price: str, square: str, **kwargs):
        """
        Add a new appartment to the rumetr db
        """
        self.check_house(complex, house)
        kwargs['price'] = self._format_decimal(price)
        kwargs['square'] = self._format_decimal(square)
        self.post('developers/{developer}/complexes/{complex}/houses/{house}/appts/'.format(
            developer=self.developer,
            complex=complex,
            house=house,
        ), data=kwargs)

    def update_house(self, complex: str, id: str, **kwargs):
        """
        Update the existing house
        """
        self.check_house(complex, id)
        self.put('developers/{developer}/complexes/{complex}/houses/{id}'.format(
            developer=self.developer,
            complex=complex,
            id=id,
        ), data=kwargs)

    def update_appt(self, complex: str, house: str, price: str, square: str, id: str, **kwargs):
        """
        Update existing appartment
        """
        self.check_house(complex, house)
        kwargs['price'] = self._format_decimal(price)
        kwargs['square'] = self._format_decimal(square)
        # Fix: dropped a stray `price=...` kwarg from the format() call --
        # the URL template has no {price}, so str.format silently ignored
        # it while _format_decimal ran a second time for nothing.
        self.put('developers/{developer}/complexes/{complex}/houses/{house}/appts/{id}'.format(
            developer=self.developer,
            complex=complex,
            house=house,
            id=id,
        ), data=kwargs)
import hashlib
import re
from scrapy.spiders import XMLFeedSpider
from rumetr.scrapy.item import ApptItem as Item
class YandexFeedSpider(XMLFeedSpider):
    """Base Spider to parse a yandex-realty XML feed.

    Iterates over ``yandex:offer`` nodes and yields one item per offer.
    Subclasses may override append_feed_data() to enrich items with
    feed-specific fields.
    """

    name = 'spider'
    namespaces = [('yandex', 'http://webmaster.yandex.ru/schemas/feed/realty/2010-06')]
    itertag = 'yandex:offer'
    iterator = 'xml'

    def parse_node(self, response, node):
        """Convert one <offer> node into an item and yield it."""
        item = dict()
        item.update(dict(
            id=self.get_internal_id(node),
            square=self.get_square(node),
            room_count=node.xpath('yandex:rooms/text()')[0].extract(),
            price=node.xpath('yandex:price/yandex:value/text()')[0].extract(),
            house_id=self.get_house_id(node),
            # NOTE(review): house_name reuses get_house_id() (building
            # section / building name) -- confirm this is intended.
            house_name=self.get_house_id(node),
            complex_id=self.get_complex_id(node),
            complex_name=node.xpath('yandex:building-name/text()')[0].extract(),
            complex_url=node.xpath('yandex:url/text()')[0].extract(),
            floor=node.xpath('yandex:floor/text()')[0].extract(),
            addr=self.build_address(node),
            is_studio=self.is_studio(node),
        ))
        item = self.append_feed_data(node, item)
        yield Item(**item)

    def append_feed_data(self, node, item):
        """Hook for subclasses: extend *item* with extra feed data."""
        return item

    def is_studio(self, node):
        # Offers without a <studio> tag are treated as non-studio.
        try:
            return bool(node.xpath('yandex:studio/text()')[0].extract())
        except IndexError:
            return False

    def build_address(self, node):
        """Join region, city and street address, skipping missing/empty parts."""
        try:
            location = node.xpath('yandex:location/yandex:region/text()')[0].extract()
        except IndexError:
            location = None
        city = node.xpath('yandex:location/yandex:locality-name/text()')[0].extract()
        addr = node.xpath('yandex:location/yandex:address/text()')[0].extract()
        return ', '.join(x for x in [location, city, addr] if x is not None and len(x))

    def get_internal_id(self, node):
        # Strip everything except word characters and '+' from the feed id.
        id = node.xpath('@internal-id')[0].extract()
        return re.sub(r'[^\w+]', '', id)

    def get_square(self, node):
        # Normalize the decimal comma to a dot.
        square = node.xpath('yandex:area/yandex:value/text()')[0].extract()
        return square.replace(',', '.')

    def get_house_id(self, node):
        # Prefer the building section; fall back to the building name.
        try:
            return node.xpath('yandex:building-section/text()')[0].extract()
        except IndexError:
            return node.xpath('yandex:building-name/text()')[0].extract()

    def get_complex_id(self, node):
        # Prefer yandex's own building id; otherwise hash the building name
        # to get a stable synthetic id.
        try:
            return node.xpath('yandex:yandex-building-id/text()')[0].extract()
        except IndexError:
            return self._hash(node.xpath('yandex:building-name/text()')[0].extract())

    def _hash(self, input):
        # md5 used as a stable identifier, not for security.
        return hashlib.md5(input.encode('utf-8')).hexdigest()
# rumi
> Not the ones speaking the same language, but the ones sharing the same feeling understand each other. —Rumi
Rumi is a static site translation monitoring tool designed to support the localization (l10n) and internationalization (i18n) of documentation, and to facilitate the long-term maintenance of translated documentation.
Rumi currently supports two workflows for translation monitoring: file-based monitoring and message-based monitoring, both of which are described below.
## File-based Translation Monitoring Workflow
**File-based translation flow exemplified with Hugo site**

### 1. Create reader
```python
reader = FileReader(
repo_path=".",
branch="main",
langs="",
content_paths=["content"],
extensions=[".md"],
pattern="folder/",
src_lang="en",
use_cache=True
)
```
Parameters:
`repo_path`: Path to the repository for translation monitoring.
`branch`: Name of the branch to read the github history from.
`content_paths`: List of paths from the root of the repository to the directory that contains contents for translation, e.g., ["content", "data", "i18n"].
`extensions`: List of extensions of the target files for translation monitoring.
`pattern`: One of two patterns in which the static site repository is organized: "folder/" (contents for each locale are grouped into a folder named after the locale, e.g. en/filename.md, fr/filename.md) or ".lang" (contents for each locale are tagged with the locale name in the filename, e.g. filename.en.md, filename.fr.md)
`langs`: Language codes joint by a white space as specified by the user. If not specified, FileReader will try to get languages from the filenames in the current repository for monitoring.
`src_lang`: Default source language set by user.
`use_cache`: Whether to use cached commit history datastructure.
### 2. Set targets
The target files for translation monitoring are initialized using `content_paths` and `extensions`, and it can also be specified by adding or deleting single filename.
```python
reader.add_target(filename)
reader.del_target(filename)
```
### 3. Calculate commits
```python
commits = reader.parse_history() # Structured commit history
```
### 4. Create reporter
```python
reporter = FileReporter(
repo_path=reader.repo_path,
src_lang=detail_src_lang,
tgt_lang=detail_tgt_lang
)
```
`src_lang`: Language code of the source language (the original language of contents) to be reported. If not specified, all source language will be reported.
`tgt_lang`: Language code of the target language (language to translate contents
into) to be reported. If not specified, all target language will be reported.
### 5. Report stats and details
stats mode: displays the number of files that are Open (not yet translated), Updated (source file has changed since translation), or Completed (source file has been translated into all target languages). E.g.:
```python
stats = reporter.get_stats(commits)
reporter.print_stats(stats)
"""
| Target Language | Total | Open | Updated | Completed |
|-------------------+---------+--------+-----------+-------------|
| fr | 0 | 0 | 0 | 0 |
| en | 1 | 0 | 0 | 1 |
| zh | 1 | 0 | 1 | 0 |
| ja | 1 | 1 | 0 | 0 |
"""
```
detail mode: displays translation work required for each target file together with more details. E.g.:
```python
details = reporter.get_details(commits)
reporter.print_details(details)
"""
| File | Status | Source Language | Word Count | Target Language | Percent Completed | Percent Updated |
|---------+-----------+-----------------+------------+-----------------+-------------------+-----------------|
| file.md | completed | fr | 4 | en | 100.0% | 0% |
| file.md | updated | fr | 4 | zh | 50.0% | 50.0% |
| file.md | open | fr | 4 | ja | 0% | 100.0% |
"""
```
Here `Word Count` reports number of words in the source file. `Percent Completed` is estimated by number of lines in the translation file divided by that in the source file. `Percent Updated` is number of lines inserted in the source file since the latest edit of the translation file.
### 6. Additional resources for the SDE steps
For more about setting up a Hugo site, check out the documentation about [Hugo in multilingual mode](https://gohugo.io/content-management/multilingual/).
## Message-based Translation Monitoring Workflow
**Message-based translation flow exemplified with React App**

### 1. Create reader
```python
reader = MsgReader(
repo_path=".",
branch="main",
content_paths=["content"],
extensions=[".po"],
src_lang="en",
use_cache=True
)
```
### 2. Set targets
```python
reader.add_target(filename)
reader.del_target(filename)
```
### 3. Calculate commits
```python
commits = reader.parse_history()
```
### 4. Create reporter
```python
reporter = MsgReporter()
```
### 5. Report stats and details
stats mode: Print out a summary of the translation.
```python
stats = reporter.get_stats(commits, src_lang)
reporter.print_stats(stats)
"""
| Language | Total | Open | Updated | Completed |
|------------+---------+--------+-----------+-------------|
| en | 2 | 0 | 0 | 0 |
| fr | 2 | 1 | 1 | 0 |
| ja | 2 | 0 | 1 | 1 |
"""
```
detail mode: Print out the details of messages needing translations for each language and provide word count.
```python
details = reporter.get_details(commits, src_lang)
reporter.print_details(details)
"""
----------------------------------------------------------------------
ja Open: 2
msgid1
msgid2
----------------------------------------------------------------------
zh Open: 0
----------------------------------------------------------------------
de Open: 0
----------------------------------------------------------------------
fr Open: 1
msgid1
----------------------------------------------------------------------
en Open: 0
----------------------------------------------------------------------
"""
```
### 6. Rumi Download
Rumi can help you download the new messages from `Lingui Extract` results:
```python
reporter.download_needs(details, lang, path=".")
```
### 7. Rumi Insert Translated
Rumi can also insert the new translations back into the old ones, to support the next `Lingui Compile` step.
```python
reporter.insert_translations("new_translations.txt", "old_messages.po")
```
### 8. Additional Resources for the SDE steps
Here are some additional resources for getting set up with Lingui on your React project:
- UI Dev: Setup Lingui.js
- Installation: [Setup Lingui with React project](https://lingui.js.org/tutorials/setup-react.html)
- Wrap Messages: Wrap UI text message according to [Lingui patterns](https://lingui.js.org/tutorials/react-patterns.html)
- Lingui Extract: `npm run extract` or `yarn extract`
- Lingui Compile: `npm run compile` or `yarn compile`
## Github Action
```yaml
name: Rumi translation monitoring
on: push
jobs:
rumi:
runs-on: ubuntu-latest
steps:
- name: Clone target repository
run: |
git clone [url of the target repository]
- name: Run Action
uses: tl6kk/rumi_action@main # to be changed after rumi publication
with:
which_rumi: "file" # "file" for file-based or "msg" for message-based
repo_path: "path_to_repo"
branch: "main"
content_paths: "content1, content2, content3"
extensions: ".md, .txt"
target_files: "target1, target2, target3"
pattern: "folder/" # "folder/" or ".lang" depending on the setup of file-based project
langs: "en fr zh ja" # You can specify the languages to monitor with language codes
src_lang: "en"
detail_src_lang: ""
detail_tgt_lang: ""
stats_mode: "True"
details_mode: "True"
use_cache: "True"
``` | /rumi-i18n-0.1.3a1.post1.tar.gz/rumi-i18n-0.1.3a1.post1/README.md | 0.426083 | 0.875148 | README.md | pypi |
# Basic Usage
## Overview
Rummage is designed to be easy to pick up. Its interface consists of three tabs: Search, Files, and Content. In the
**Search** tab, a user specifies where they want to search, what they want to search for, and optionally what they want
to replace it with. Search features can be tweaked with various options. The files that get searched can also be
narrowed with patterns and filters.
Rummage uses the default regular expression library ([Re][re]) that comes with Python. It also optionally works with the
3rd party [Regex][regex] library (if installed).
As matches are found, general info about the matches will be displayed in the **Files** and **Content** tabs. You can
double click files to open them in your favorite editor (see [Editor Preferences](./preferences.md#editor) to configure
Rummage for your editor).
Rummage also comes with a simple regular expression tester to test out patterns. It also provides a feature where
patterns can be saved for later and/or frequent use. You can even create chains that will apply a series of saved
searches.
## Running
Once Rummage is installed, you can run it from the command line (assuming your Python scripts/bin folder is in your
system path):
```bash
rummage
```
If you have multiple Python versions installed, you can call Rummage for that specific Python version by appending the
major and minor Python version to the end:
```bash
rummage3.6
```
In some environments, it may make sense to run Rummage with `pythonw` which is mainly for launching GUI scripts
(`pythonw` is not available on Linux). In some environments, it may be required (see
[Running in Anaconda](./installation.md#running-in-anaconda)).
```bash
pythonw -m rummage
```
## Searching & Replacing

Search and replaces are configured in the **Search** tab. The search tab can essentially be broken up into two sections:
text search configuration and file search configuration.
### Configuring Text Search

The first part of the **Search** tab contains mostly text search inputs, with the exception of the very first control,
which is used to configure where to search. The second text box is used to specify what we are searching for in the
content of each file. The last text box specified what we want to replace the found text with. Each text box retains a
limited history of recent inputs that can be accessed via the drop down control to the right.
The replace text box is only needed if you are performing a replace. The search input can also be omitted, and if so,
Rummage will simply return files that match the provided file patterns (covered in
[Configuring File Search](#configuring-file-search)).
Below the text boxes is a collapsible panel that contains the text search options. The options consist of various
checkboxes and controls that enable/disable search and replace features. The available features will vary depending on
which regular expression engine you are using. Each feature is documented in
[Search Options](./search.md#search-options).

Lastly, Rummage provides buttons to launch a [regular expression tester](#regular-expression-tester), dialogs to
[save or load](#saving-and-loading-regular-expressions) frequently used regular expressions, and a dialog to create and
manage [regular expression chains](#search-chains).

### Configuring File Search

The bottom part of the search tab focuses on controlling which files get searched. Various checkboxes and inputs are
available that can narrow the actual files that get searched. You can filter out hidden files, symlinks, files of
specific sizes, or creation/modification dates.
You can also restrict which files get searched by providing wild card patterns (or regular expression if preferred). By
default, the patterns are applied to the base file or folder name. See [File Patterns](./search.md#wildcard) to learn
more about accepted wild card pattern syntax and how to configure optional file pattern features.
/// tip | Hidden Files
Rummage assumes dot files as hidden on all systems. Additionally, on Windows and macOS, it will also look at a
file's filesystem attributes to determine if the system is potentially hiding the file as well.
///
/// new | New 4.4.0
Added symlink following via the **Follow symlinks** toggle.
///
### Results
Once a search or replace is initiated, the results will begin to appear in the **Files** and **Content** tabs. You can
then double click a file to open it in your editor, or right click them to bring up a context menu with additional
options.


/// tip | Column Options
You can hide/show columns by right clicking the list header to get a special context menu. You can then deselect or
select the column(s) you wish to hide/show respectively. You can also reorder the columns if desired.
///
## Regular Expression Tester

Rummage comes with a simple regular expression tester. It has a simple text box to place content to search, and another
text box that will show the final results after the find and replace is applied. Below those text boxes, there are two
text input boxes for the find and replace patterns. Lastly, all search and replace flag options are found under the
pattern input boxes.
To use the tester, simply enter the content to search, set your desired options, and input your find and replace
pattern. As you change your pattern or options, matches will be updated and highlighted, and the result box will be
updated with any replacements.
When you are satisfied with your result, click the `Use` button, and your pattern and settings will be populated in the
main window.
## Saving and Loading Regular Expressions
Regular expressions can be very complex, and sometimes you might want to save them for future use.
When you have a pattern configured that you want to save, simply click the `Save Search` button and a dialog will pop up
asking you to name the search. When done, click the `Save` button on the dialog and your search patterns and options
will be saved.
You'll notice that there are two input boxes. The first requires a unique name (only word characters, underscores, and
hyphens are allowed). The second is an optional comment in case you wish to elaborate on what the pattern is for.
Underneath the inputs will be the actual search settings being saved.

To load a pattern that was saved previously, click the `Load Search` button. You will be presented with a dialog
showing all your saved searches. Highlight the pattern you want to load and click the `Load` button. Your pattern and
options will be populated in the main dialog.
If you wish to edit the name or comment of a search, you can double click the entry or click the "Edit" button.

## Search Chains
There are times you may have a task that requires you to do multiple find and replaces that are all related, but are too
difficult to represent as a single find and replace. This is where search chains can be helpful.
Search chains are essentially a sequence of multiple [saved search and replace patterns](#saving-and-loading-regular-expressions).
You can create a search chain by clicking the `Search Chains` button which will bring up the search chain manager.

Here you can create or delete search chains.

To use search chains, you must put Rummage in "search chain" mode by selecting the check box named `Use search chains`
in the main window. When "search chain" mode is enabled, all controls that don't apply to search chains will be
disabled, and the search box will be replaced with a drop down for selecting existing chains you've already created.
When a search is performed, Rummage will iterate over each file with all the saved searches in the chain.

## Replace plugins
Regular expressions are great, but sometimes regular expressions aren't enough. If you are dealing with a replace task
that requires logic that cannot be represented in a simple replace pattern, you can create a "replace plugin".
Replace plugins are written in Python and are loaded by first selecting the `Use replace plugin` check box in the main
dialog.

Then the main dialog's `Replace with` text box will become the `Replace plugin` text box with an associated file picker.
Here you can point to your replace plugin file.
Replace plugins aren't meant to be full, complex modules that import lots of other relative files. They are meant to be
a single, compact script, but inside that script, you can import anything that is *already* installed in your Python
environment.

### Writing a Plugin
Replace plugins should contain two things:
1. A plugin class derived from the `rummage.lib.rumcore.ReplacePlugin` class.
2. A function called `get_replace` that returns your class.
The plugin class is fairly straight forward and is shown below.
```py3
class ReplacePlugin(object):
"""Rummage replace plugin."""
def __init__(self, file_info, flags):
"""Initialize."""
self.file_info = file_info
self.flags = flags
self.on_init()
def on_init(self):
"""Override this function to add initialization setup."""
def get_flags(self):
"""Get flags."""
return self.flags
def get_file_name(self):
"""Get file name."""
return self.file_info.name
def is_binary(self):
"""Is a binary search."""
return self.file_info.encoding.encode == 'bin'
def is_literal(self):
"""Is a literal search."""
return self.flags & LITERAL
def replace(self, m):
"""Make replacement."""
return m.group(0)
```
`ReplacePlugin`'s `replace` function will receive the parameter `m` which is either a `regex` or `re` match object
(depending on what regular expression engine is selected). The return value must be either a Unicode string or byte
string (for binary files).
The `ReplacePlugin`'s `file_info` property is a named tuple providing information about the current file such as name,
size, creation date, etc.
```py3
class FileInfoRecord(namedtuple('FileInfoRecord', ['id', 'name', 'size', 'modified', 'created', 'encoding'])):
"""A record for tracking file info."""
```
The `ReplacePlugin`'s `flags` property contains only Rummage search related flags (the flags are abstracted at this
level and are converted to the appropriate regular expression flags later). They can also be accessed from
`rummage.lib.rumcore`. The flags are shown below.
```py3
# Common regular expression flags (re|regex)
IGNORECASE = 0x1 # (?i)
DOTALL = 0x2 # (?s)
MULTILINE = 0x4 # (?m)
UNICODE = 0x8 # (?u)
# Regex module flags
ASCII = 0x10 # (?a)
FULLCASE = 0x20 # (?f)
WORD = 0x40 # (?w)
BESTMATCH = 0x80 # (?b)
ENHANCEMATCH = 0x100 # (?e)
REVERSE = 0x200 # (?r)
VERSION0 = 0x400 # (?V0)
VERSION1 = 0x800 # (?V1)
FORMATREPLACE = 0x1000 # Use {1} for groups in replace
POSIX = 0x2000 # (?p)
# Rumcore search related flags
LITERAL = 0x10000 # Literal search
```
/// example | Example Plugin
In the example below, we have a replace plugin that replaces the search result with the name of the file. It is
assumed this is not a binary replace, so a Unicode string is returned.
```py3
from __future__ import unicode_literals
from rummage.lib import rumcore
import os
class TestReplace(rumcore.ReplacePlugin):
"""Replace object."""
def replace(self, m):
"""Replace method."""
name = os.path.basename(self.get_file_name())
return name
def get_replace():
"""Get the replace object."""
return TestReplace
```
///
## Export to CSV or HTML

Rummage allows the exporting of the results to either CSV or HTML. Simply select **File-->Export** and pick either
**CSV** or **HTML**. The HTML output will be styled similar to the GUI interface with the results in tables with
sortable columns.
/// info | Large Result Sets
Really, really large sets of results will probably be best suited for CSV as a browser may have a hard time loading
the entire data set at once.
///
| /rummage-4.18.tar.gz/rummage-4.18/docs/src/markdown/usage.md | 0.71721 | 0.89783 | usage.md | pypi |
# Installation
## Requirements
Rummage, when installed via `pip`, will install all of your required dependencies, but there are a few optional
dependencies. If desired, you can install these dependencies manually, or install them automatically with
[`pip`](#installation_1).
Name | Details
---------------------- | -------
[`regex`][regex] | Regex is a great regular expression engine that adds some nice features such as fuzzy searching, nested char sets, better Unicode support, and more.
[`cchardet`][cchardet] | `cchardet` is high speed universal character encoding detector. Much faster than the default `chardet`.
## Installation
On systems like Windows, installation is pretty straight forward as wheels are provided for all packages in `pip`. On
other systems, there may be some prerequisites. If on Linux, it is recommended to make sure you can install `wxpython`
first. This is due to the fact that installation of that library may require special instructions and will cause the
installation of Rummage to fail if `wxpython` fails due to not having the necessary prerequisites.
/// warning | Prerequisites
- [Linux](#linux-prerequisites)
- [macOS](#macos-prerequisites)
///
Assuming prerequisites are satisfied, installing Rummage is easy.
Install:
```shell-session
$ pip install rummage
```
Install with optional modules.
```shell-session
$ pip install rummage[extras]
```
Upgrade:
```shell-session
$ pip install --upgrade rummage
```
## Linux Prerequisites
Linux is by far the more involved system to install wxPython on, but it is getting easier.
### Recommended
#### Pre-built Wheels
The wxPython project has started providing wheels for certain distros. While not all distros have wheels, this may
be an attractive solution if you run one of the distros that do have pre-built wheels. The one downside is that the
wheels are not available on PyPI. More information on why and details on installation can be found here:
https://www.wxpython.org/pages/downloads/.
Simplified instructions:
1. Find the folder for your distro over at https://extras.wxpython.org/wxPython4/extras/linux/.
2. Use `pip` and the server's location like so.
```shell-session
$ pip install -U -f https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-20.04 wxPython
```
While the wheel should install fine, when you actually run Rummage, you may see some libraries missing. A common one
on Ubuntu is `libSDL` libraries. If you see a complaint about a library not being found or loaded, and you are on
Ubuntu, you can install `apt-file` and search for the package containing the file, then you can install it.
```shell-session
$ sudo apt install apt-file
$ sudo apt-file update
$ apt-file search libSDL2-2.0.so.0
libsdl2-2.0-0: /usr/lib/x86_64-linux-gnu/libSDL2-2.0.so.0
libsdl2-2.0-0: /usr/lib/x86_64-linux-gnu/libSDL2-2.0.so.0.10.0
$ sudo apt install libsdl2-2.0-0
```
#### Pre-build Packages
If you have a recent Linux distro that has a pre-built, installable wxPython package for your version of Python, then
it may make sense to just install the pre-built package via your Linux package manager. The version must meet the
version requirements of the Rummage package you are installing.
### Manual
If you have installed a version of Python on your machine that does not have a pre-built wxPython package, or are using
a distro that does not have a pre-built wheel, you may have to build it.
You can build the package by installing with `pip`, but you may find that it won't build until you get all the
dependencies installed. Once the dependencies for building are in place, you can run pip to install the package.
We do not have updated lists of prerequisites for distros. The last updated list was from Ubuntu 18.04 and Fedora 26.
What you must install may vary depending on what is already installed on your distro out of the box. Also, the version of
each prerequisites may vary from distro to distro or from distro release to distro release.
Usually the requirements deal with `gstreamer`, `gtk`, `libsdl`, etc. Below are some examples, but are most likely out
of date:
/// define
Ubuntu 18.04
-
```shell-session
$ sudo apt-get install python3.6-dev dpkg-dev build-essential libwebkitgtk-dev libjpeg-dev libtiff-dev libsdl1.2-dev libgstreamer-plugins-base1.0-dev libnotify-dev freeglut3 freeglut3-dev libgtk-3-dev libwebkitgtk-3.0-dev
```
Fedora 26
-
```shell-session
$ sudo dnf install gcc-c++ wxGTK-devel gstreamer-devel webkitgtk-devel GConf2-devel gstreamer-plugins-base-devel
```
///
Once dependencies are in place, you can finally install wxPython with pip (`pip install wxpython`). Be patient when
installing wxPython manually as Linux must build the package, and it won't give much in the way of status while it
builds. If it fails and complains about a missing library, you may have to install more dependencies.
For a complete list of dependencies please check wxPython's official documentation on dependencies before installing.
Particularly under [this section][wxpython-prereq]. If they are out of date, please contact the wxPython team for better
instructions.
## macOS Prerequisites
On macOS, Rummage uses either pure Python modules, or modules that provide wheels. What this means is that no C code
compilation is required to install Rummage; therefore, no prior steps are needed. But if you want to install `regex`,
there will be some C code compilation performed by `pip` which will require Xcode to be installed.
1. Download Xcode from the Mac App Store.
2. Navigate to Xcode > Preferences > Downloads tab.
3. Click the button to install the Command Line Tools.
4. Open Terminal (Applications/Terminal) and run `xcode-select --install`. You will be prompted to install the Xcode
Command Line Tools.
| /rummage-4.18.tar.gz/rummage-4.18/docs/src/markdown/installation.md | 0.487307 | 0.809728 | installation.md | pypi |
# Search Features
## Search Options
Rummage supports the default regular expression library ([Re][re]) that comes with Python and the 3rd party
[Regex][regex] library, and though the basic syntax and features are similar between the two, Regex provides many
additional features, some of which causes the syntax to deviate greatly from Re. If you are using Re, you will not see
all the options shown below. Please check out the documentation for whichever engine you have chosen to use in order to learn
more about its specific feature set. This documentation will only briefly cover the features that can be enabled in each
engine.
### Common Options
Both the Re and Regex engine have a couple of shared flags that are exposed in Rummage as checkboxes. These checkboxes
are found directly under the search and replace text boxes.
Toggle | Description
--------------------------- | -----------
Search\ with\ regex | Alters the behavior of `Search for` and `Replace with`. When this is checked, both text boxes require regular expression patterns opposed to literal string.
Search\ case-sensitive | Forces the search to be case-sensitive.
Dot\ matches\ newline | `.` will also match newlines in regular expressions.
Use\ Unicode\ properties | Changes the regular expression behavior of `\w`, `\W`, `\b`, `\B`, `\d`, `\D`, `\s`, `\S`, and Unicode properties (`\p{name}` or `[[:name]]`) to use characters from the Unicode property database instead of ASCII.
Format\ style\ replacements | Replace pattern will use [a string replace format][format-string] for replace. `#!py3 "{1} {1[-2]} {group_name[-3]}"` etc. This is not available for Re without Backrefs, and is limited when using Re with Backrefs. Read more about format mode [here][backrefs-format]. And remember that Rummage normalizes differences in Backrefs' and Regex's handling of back slash escapes in format replace mode.
### Regex Engine Options
If the Regex engine is being used for regular expressions, a couple of extra checkboxes will be available. Regex can be
run in either `VERSION0` or `VERSION1` mode.
`VERSION0` is compatible with Re regular expression patterns and has the extra `fullcase` toggle. `VERSION1` does not
have this toggle as it is enabled by default and can only be disabled inline via a pattern of `(?-f)`. `VERSION1` is not
directly compatible with Re patterns as it adds a number of changes to the syntax allowing for more advanced search
options.
Toggle | Description
--------------------------- | -----------
Best\ fuzzy\ match | If performing a fuzzy match, the *best* fuzzy match will be returned.
Improve\ fuzzy\ fit | Makes fuzzy matching attempt to improve the fit of the next match that it finds.
Unicode\ word\ breaks | Will use proper Unicode word breaks and line separators when Unicode is enabled. See Regex documentation for more info.
Use\ POSIX\ matching | Use the POSIX standard for regular expression, which is to return the leftmost longest match.
Search\ backwards | Search backwards. The result of a reverse search is not necessarily the reverse of a forward search.
Full\ case-folding | Use full case folding. For Regex `V0` only as it is enabled by default for `V1`.
### Rummage Options
Rummage has a couple of flags that are not specific to the regular expression engine.
Toggle | Description
----------------------- | -----------
Boolean\ match | Will check each file up until the first match and will halt searching further. No line context info will be gathered or displayed. Does not apply when performing replaces.
Count\ only | Will just count the number of matches in the file and will not display line context information. This has no effect when applying replaces.
Create\ backups | On replace, files with matches will be backed up before applying the replacements; backup files will have the `.rum-bak` extension.
Force\ <encoding> | Forces all files to be opened with the specified encoding opposed to trying to detect the encoding. Encoding is hard and slow, so this is the preferred method for fast searches. On failure, binary will be used instead.
Use\ chain\ search | Puts Rummage into ["search chain" mode](./usage.md#search-chains). When in "search chain" mode, rummage will only use saved search chains for search and replace.
Use\ replace\ plugin | When enabled, Rummage will use a [replace plugin](./usage.md#replace-plugins) instead of a replace pattern in order to do more advanced replaces.
/// tip | Encoding Guessing
It is always recommended, if you know the encoding, to use `Force encoding` as it will always be the fastest.
Encoding guessing can be slow and not always accurate.
Encoding guessing is performed by `chardet` which is a pure Python library and is, by far, the slowest option. If
you manually install `cChardet`, you will have a much faster guessing experience.
///
## File Patterns

Wildcard patterns are the default for file and folder exclude patterns, but regular expression patterns can be used
instead by selecting the `Regex` checkbox beside the pattern. Wildcard patterns and regular expression patterns will
each be covered separately.
### Wildcard
Rummage uses file patterns with optional folder exclude patterns to filter which files are searched. The default is to
use wild card patterns modeled after `fnmatch` and `glob`. Below is a list of the syntax that is accepted, but not all
features are enabled by default.
If you would prefer regular expression file patterns, please see [Regular Expression](#regular-expression) file
patterns.
- File patterns are case insensitive by default, even for Linux/Unix systems. Case sensitivity can be enabled in
[Preferences](./preferences.md#search).
- Slashes are generally treated as normal characters, but on windows they will be normalized: `/` will become `\\`.
There is no need to explicitly use `\\` in patterns on Windows, but if you do, it will be handled.
- `.` is always matched by `*`, `?`, `[]`, etc. To prevent hidden files from being matched, you should uncheck the
"Include hidden" option.
#### Basic Wildcard syntax
Rummage uses the [`wcmatch`][wcmatch] library to implement a specialized version of [`fnmatch`][wcmatch-fnmatch]
wildcard patterns for file name matching.
Pattern | Meaning
----------------- | -------
`*` | Matches everything.
`?` | Matches any single character.
`[seq]` | Matches any character in seq.
`[!seq]` | Matches any character not in seq.
`[[:alnum:]]` | POSIX style character classes inside sequences. The `C` locale is used for byte strings and Unicode properties for Unicode strings. See [POSIX Character Classes][posix] in `wcmatch`'s documentation for more info.
`\` | Escapes characters. If applied to a meta character, it will be treated as a normal character.
`|` | Multiple patterns can be provided by separating them with `|`.
`-` / `!` | By default, if `-` is found at the start of a pattern, it will match the inverse. This can be changed to use `!` instead in [Preferences](./preferences.md#search).
`\xhh` | By specifying `\x` followed by the hexadecimal byte value, you can specify characters directly.
`\uhhhh` | By specifying `\u` with the four value hexadecimal character value, you can specify Unicode characters directly.
`\Uhhhhhhhh` | By specifying `\U` with the eight value hexadecimal character value, you can specify wide Unicode characters directly.
`\N{name}` | By specifying `\N{name}`, where `name` is a valid Unicode character name, you can specify Unicode characters directly.
`\a` | ASCII Bell (BEL).
`\b` | ASCII Backspace (BS).
`\f` | ASCII Formfeed (FF).
`\n` | ASCII Linefeed (LF).
`\r` | ASCII Carriage Return (CR).
`\t` | ASCII Horizontal Tab (TAB).
`\v` | ASCII Vertical Tab (VT).
/// example | Example Patterns
Used in the `Files which match` box, this would match all Python files of `.py` extensions excluding `__init__.py`:
```
*.py|-__init__.py
```
Used in the `Files which match` box, this would match any file type that is not `.py`.
```
-*.py
```
Used in the `Exclude folders`, this would exclude all folders with `name` followed by a single digit, except `name3`
which will always be included.
```
name[0-9]|-name3
```
Used in the `Exclude folders`, this would exclude all folders except `name3`.
```
-name3
```
If you need to escape `-` or `|`, you can put them in a sequence: `[-|]`. Remember to place `-` at the beginning of
a sequence as `-` is also used to specify character ranges: `[a-z]`.
///
#### Extended Match Syntax
In [Preferences](./preferences.md#search), you can also enable extended match patterns. Extended match patterns allow
you to provide pattern lists to provide more advanced logic.
Pattern | Meaning
----------------- | -------
`?(pattern_list)` | The pattern matches if zero or one occurrences of any of the patterns in the `pattern_list` match the input string. Requires extended match feature to be enabled.
`*(pattern_list)` | The pattern matches if zero or more occurrences of any of the patterns in the `pattern_list` match the input string. Requires extended match feature to be enabled.
`+(pattern_list)` | The pattern matches if one or more occurrences of any of the patterns in the `pattern_list` match the input string. Requires extended match feature to be enabled.
`@(pattern_list)` | The pattern matches if exactly one occurrence of any of the patterns in the `pattern_list` match the input string. Requires extended match feature to be enabled.
`!(pattern_list)` | The pattern matches if the input string cannot be matched with any of the patterns in the `pattern_list`. Requires extended match feature to be enabled.
`{}` | Bash style brace expansions. This is applied to patterns before anything else. Requires brace expansion feature to be enabled.
/// example | Example Extended Match Patterns
For example, if we wanted to match files `this-file.txt` and `that-file.txt`, we could provide the following pattern:
```
@(this|that)-file.txt
```
The `|` contained within an extended match group will not split the pattern. So it is safe to combine with other patterns:
```
@(this|that)-file.txt|*.py
```
///
/// tip | `!` and Extended Match Syntax
If you have changed Rummage to use `!` instead of `-` for exclusion patterns and have enabled extended match
patterns, you must escape `(` at the start of a file if you want the pattern to be recognized as an exclusion
pattern instead of treating it as the start of an extended match pattern (`!(...)`).
///
#### Brace Expansion Syntax
In [Preferences](./preferences.md#search), you can enable Bash style brace expansion.
Brace expansion is applied before anything else. When applied, a pattern will be expanded into multiple patterns. Each
pattern will then be parsed separately.
This is great for specifying complex combinations of patterns: `a{b,{c,d}}` --> `ab ac ad`. For simple patterns, it may
make more sense to use extended match patterns which will only generate a single pattern and be quicker to evaluate:
`@(ab|ac|ad)`.
Be careful with patterns such as `{1..100}` which would generate one hundred patterns that will all get individually
parsed. Sometimes you really need such a pattern, but be mindful that it will be slower as you generate larger sets of
patterns.
Pattern | Meaning
----------------- | -------
`{,}` | Bash style brace expansions. This is applied to patterns before anything else. Requires brace expansion feature to be enabled.
`{n1..n2[..i]}` | Bash style sequences that expands a range of numbers or alphabetic characters by an optional increment.
/// example | Example Brace Expansion
- `a{b,{c,d}}` --> `ab ac ad`
- `{1..3}` --> `1 2 3`
- `{a..d}` --> `a b c d`
- `{2..4..2}` --> `2 4`
- `{a..e..2}` --> `a c e`
///
#### Full Path Matching
In [Preferences](./preferences.md#search), you can enable full path search for either file patterns and/or folder
exclude patterns. This will allow for matching against a full path instead of the base file name. While it is referred
to as "full path", it is still relative to the provided base path.
Assuming you provided a base folder of `/My/base/path` to search, and as Rummage was crawling directories, it needed to
evaluate the file `/My/base/path/some/file.txt`, normally your provided file pattern would match against `file.txt`, but
with full path enabled, you'd match against `some/file.txt` (which is relative portion to your base path). This means
you'd have to use pattern like `*/*.txt` instead of `*.txt`.
When full path matching is enabled for a pattern, slashes are generally treated special. Slashes will not be matched in
`[]`, `*`, `?`, or in extended patterns like `*(...)`. Slashes can be matched by `**` if the "globstar (`**`)" option
is enabled in [Preferences](./preferences.md#search).
When full path matching is not enabled, wildcard patterns use base matching. That is to say, the wildcard patterns are
applied to the base filename instead of the full path. If you enable base matching for full paths in [Preferences](./preferences.md#search),
if a pattern has no slashes, it will perform base matching, and if there are slashes, it will perform a full path match.
This allows you to have the best of both worlds. For instance, the following pattern would match all Markdown files
under the document directory, but would exclude any file in any subdirectory under docs whose name starts with `c`:
`docs/**/*.md|-c*`. Full path is used for the `docs/**/*.md` pattern while base matching is used for `-c*`.
Full path matching can be enabled for both the file pattern box and the folder exclude box. Each can be controlled
separately in [Preferences](./preferences.md#search).
To learn more about full path matching with regular expression, checkout the regular expression [section](#full-path-matching_1).
#### Pattern Limit
Glob style patterns, by default, allow expanding a pattern by splitting on `|` or expanding the pattern with brace
expansion: `a{b,c}` --> `ab ac`. This can turn one pattern into many patterns. The underlying expansion code limits
expansion to `1000` patterns. This limit can be configured in [Preferences](./preferences.md#search). To raise or lower
the limit, simply set the value higher or lower. To disable the limit entirely, set it to `0`.
### Regular Expression
Wildcard patterns are the default for file and folder exclude patterns, but regular expression patterns can be used
instead by selecting the `Regex` checkbox beside the pattern. The regular expression engine set in [Preferences](./preferences.md#search)
is what will be used for file patterns. It will also respect the case sensitivity setting in [Preferences](./preferences.md#search)
for **File/Folder Matching**.
#### Full Path Matching
In [Preferences](./preferences.md#search), you can enable full path search for either file patterns and/or folder
exclude patterns. This will allow for matching against a full path instead of the base file name. While it is referred
to as "full path", it is still relative to the provided base path.
Assuming you provided a base folder to search of `/My/base/path`, and as Rummage was crawling directories, it needed to
evaluate the file `/My/base/path/some/file.txt`, normally your file pattern would match against `file.txt`, but with
full path enabled, you'd match against `some/file.txt`. This means you'd have to use a pattern like `.*/.*.txt` instead
of `.*.txt`.
## Backrefs
Rummage has the option of using a special wrapper called Backrefs. Backrefs can be applied to either Re or Regex. It
adds various back references that are known to some regular expression engines, but not to Python's Re or Regex modules.
The supported back references actually vary depending on whether it is being applied to Re or Regex. For instance,
Backrefs only adds Unicode Properties to Re since Regex already has Unicode properties. To learn more about what
Backrefs adds, read the official [Backrefs documentation][backrefs]. You can enable extended back references in the
[Preferences](./preferences.md#search) dialog.
| /rummage-4.18.tar.gz/rummage-4.18/docs/src/markdown/search.md | 0.4856 | 0.905865 | search.md | pypi |
import copy
import logging
import itertools
import numpy
from scipy.sparse import csc_array
import pandas
import networkx as nx
from pyvis.network import Network
logger = logging.getLogger(__name__)
SIZE=500
NX_OPTIONS_DEFAULT = dict(
height=f'{SIZE}px', width=f'{SIZE}px', bgcolor='#05131e', font_color='white', notebook=True, cdn_resources='in_line'
)
class RumorView(object):
"""
Module to help you explore column value relationships
"""
def __init__(self, df):
"""
Args:
df (pandas.DataFrame): Dataframe you want to analyze column value relationships. All the columns are considered as categorical. If you have numerical columns it is recommended to convert it categorical.
"""
self.n_nodes = 0
self.column_names = df.columns
self.node_names = []
self.index_dict = {}
self._make_index(df)
self.freq_matrix = self._calc_freq_matrix(df)
self.co_matrix = self.freq_matrix.transpose().dot(self.freq_matrix).todense()
self.cprob_df = pandas.DataFrame(
1 / self.co_matrix.diagonal() * self.co_matrix,
index=self.node_names, columns=self.node_names
)
self.node_prob = self.freq_matrix.sum(axis=0) / len(df)
self.lift_df = pandas.DataFrame(
numpy.diag((1 / self.node_prob)).dot(self.cprob_df.values),
index=self.node_names, columns=self.node_names
)
def show_columns(
self, target_column="", n_hops=2, columns=None, min_lift=1.5,
min_size=0.01, physics=False, **nx_options
):
"""
Visualize columns to help you understand which pair would have relationships
Args:
target_column (str): if specified, result contains only nodes within `n_hops` from specified column
n_hops (int): if `target_column` is specified this specifies how far nodes from `target_column` should be displayed.
columns (list[str]): if specified, result contains only specified columns
min_lift (float): minimum lift value to show edge. default=1.5
min_size (float): minimum probability of column value to be considered for lift. default=0.01
physics (bool): whether to use physics model for visualization results. default=False
"""
column_summary = self._create_column_summary(min_size)
if columns is None:
columns = self.column_names
column_summary = column_summary[
(column_summary["left"].isin(columns))&(column_summary["right"].isin(columns))
]
nxg = nx.Graph()
for c in columns:
color = "#fe6708" if c == target_column else "#a7c0f7"
nxg.add_node(c, title=f"{c}\n cardinality={len(self.index_dict[c])}", color=color)
for row_ind, row in column_summary.iterrows():
if row["max(lift)"] >= min_lift:
nxg.add_edge(
row["left"], row["right"], value=row["max(lift)"],
title=f'lift={row["max(lift)"]:.1%}'
)
print(f"showing edges with lift >= {min_lift}")
if target_column:
nxg = nx.generators.ego_graph(nxg, target_column, n_hops)
print(f"showing nodes within {n_hops} edges from {target_column}")
nx_args = _overwrite_dict(NX_OPTIONS_DEFAULT, nx_options)
g = Network(**nx_args)
g.from_nx(nxg)
g.toggle_physics(physics)
return(g.show("column_plot.html"))
    def show_relations(self, c1, c2, min_lift=1.5, show_df=True, **nx_options):
        """
        Visualize value relationship between columns
        Args:
            c1 (str): column for condition
            c2 (str): column for result
            min_lift (float): minimum lift value to show edge. default=1.5
            show_df (bool): whether to output DataFrame of probability calculated
            nx_options: extra keyword options overriding NX_OPTIONS_DEFAULT for pyvis ``Network``.
        """
        nx_args = _overwrite_dict(NX_OPTIONS_DEFAULT, nx_options)
        g = Network(directed=True, **nx_args)
        # Node ids (and display labels) for every value of each column.
        c1_ind = sorted(self.index_dict[c1].values())
        c1_nodes = [self.node_names[c] for c in c1_ind]
        c2_ind = sorted(self.index_dict[c2].values())
        c2_nodes = [self.node_names[c] for c in c2_ind]
        # Lay condition values out on the left, result values on the right.
        for i, (c1_i, c1_node) in enumerate(zip(c1_ind, c1_nodes)):
            g.add_node(
                c1_node, c1_node, title=f"{c1_node}\n size={self.node_prob[c1_i]:.1%}",
                x=-SIZE/2, y=i/len(c1_nodes)*SIZE, value=self.node_prob[c1_i]
            )
        for i, (c2_i, c2_node) in enumerate(zip(c2_ind, c2_nodes)):
            g.add_node(
                c2_node, c2_node, title=f"{c2_node}\n size={self.node_prob[c2_i]:.1%}",
                x=SIZE/2, y=i/len(c2_nodes)*SIZE, value=self.node_prob[c2_i]
            )
        # Draw a directed edge c1-value -> c2-value when its lift is large enough.
        for c1_node in c1_nodes:
            for c2_node in c2_nodes:
                if self.lift_df.loc[c2_node, c1_node] > min_lift:
                    title = f"{c1_node}->{c2_node}\n"
                    title += f"prob={self.cprob_df.loc[c2_node, c1_node]:.1%}\n"
                    title += f"lift={self.lift_df.loc[c2_node, c1_node]:.1%}"
                    g.add_edge(
                        c1_node, c2_node, title=title, value=self.lift_df.loc[c2_node, c1_node]
                    )
        g.toggle_physics(False)
        if show_df:
            display(self.cprob_df.loc[c2_nodes, c1_nodes])
        print(f"showing edges with lift >= {min_lift}")
        return(g.show("relations.html"))
def _make_index(self, df):
for c in df.columns:
self.index_dict[c] = {}
values = sorted(df[c].unique()) # change order based on numeric / categorical
for v in values:
self.index_dict[c][v] = self.n_nodes
self.node_names.append(f"{c}-{v}")
self.n_nodes += 1
def _calc_freq_matrix(self, df):
freq_matrix = csc_array((len(df), self.n_nodes))
for i, ind in enumerate(df.index):
for c in df.columns:
j = self.index_dict[c][df.loc[ind, c]]
freq_matrix[i, j] = 1
return freq_matrix
def _extract_matrix_for_2columns(self, df, c1, c2):
c1_ind = list(self.index_dict[c1].values())
c2_ind = list(self.index_dict[c2].values())
return df.iloc[c1_ind, c2_ind]
def _create_column_summary(self, min_size):
left_list = []
right_list = []
lift_list = []
for c1, c2 in itertools.combinations(self.index_dict.keys(), 2):
c1_ind = sorted(self.index_dict[c1].values())
c2_ind = sorted(self.index_dict[c2].values())
c1_nodes = [self.node_names[c] for c in c1_ind if self.node_prob[c] > min_size]
c2_nodes = [self.node_names[c] for c in c2_ind if self.node_prob[c] > min_size]
lift_df = self.lift_df.loc[c1_nodes, c2_nodes]
max_lift = lift_df.max().max()
left_list.append(c1)
right_list.append(c2)
lift_list.append(max_lift)
return pandas.DataFrame({
"left": left_list,
"right": right_list,
"max(lift)": lift_list
})
def _overwrite_dict(d1, d2):
    """Return a deep copy of ``d1`` with entries from ``d2`` overriding.

    A deep copy is used so callers can mutate the result without touching
    the shared defaults dictionary.
    """
    merged = copy.deepcopy(d1)
    merged.update(d2)
    return merged
import argparse
from typing import List
from run_across_america import (
RunAcrossAmerica,
Team,
Activity,
Goal,
Member,
MemberStats,
User,
)
def main() -> None:
    """Command-line entry point: parse a subcommand and print the result.

    Each subcommand maps one-to-one onto a ``RunAcrossAmerica`` client call;
    every branch prints the returned record(s), one per line.
    """
    parser = argparse.ArgumentParser(
        description="Lookup info from `Run Across America`."
    )
    subparsers = parser.add_subparsers(dest="command")
    user_id_parser = subparsers.add_parser(
        "user_id",
        help="Get the `user_id` for the user code.",
    )
    user_id_parser.add_argument(
        "user_code",
        help="User invitation code emailed after sign-up.",
    )
    user_parser = subparsers.add_parser(
        "user",
        help="Get info about a user.",
    )
    user_parser.add_argument("user_id")
    teams_parser = subparsers.add_parser(
        "teams",
        help="Get all the teams known to a user.",
    )
    teams_parser.add_argument("user_id")
    team_goals_parser = subparsers.add_parser(
        "goals",
        help="Get the distance goal for the specified team.",
    )
    team_goals_parser.add_argument("team_id")
    members_parser = subparsers.add_parser(
        "members",
        help="Get all the members of the specified team.",
    )
    members_parser.add_argument("team_id")
    leaderboard_parser = subparsers.add_parser(
        "leaderboard",
        help="Get all current leaderboard of the specified team.",
    )
    leaderboard_parser.add_argument("team_id")
    feed_parser = subparsers.add_parser(
        "feed",
        help="Get the current feed of the specified team.",
    )
    feed_parser.add_argument("team_id")
    args = parser.parse_args()
    client = RunAcrossAmerica()
    if args.command == "user_id":
        user_id: str = client.user_id(args.user_code)
        print(user_id)
    elif args.command == "user":
        user: User = client.user(args.user_id)
        print(user)
    elif args.command == "teams":
        # "-" is a sentinel user id meaning "list every known team".
        if args.user_id == "-":
            teams: List[Team] = list(client.all_teams())
        else:
            teams: List[Team] = list(client.teams(args.user_id))
        for team in teams:
            print(team)
    elif args.command == "goals":
        goal: Goal = client.goals(args.team_id, include_progress=True)
        print(goal)
    elif args.command == "members":
        members: List[Member] = list(client.members(args.team_id))
        for member in members:
            print(member)
    elif args.command == "leaderboard":
        leaderboard: List[MemberStats] = client.leaderboard(args.team_id)
        for member in leaderboard:
            print(member)
    elif args.command == "feed":
        feed: List[Activity] = list(client.feed(args.team_id))
        for activity in feed:
            print(activity)
    else:
        # No/unknown subcommand: show usage instead of failing silently.
        parser.print_help()


if __name__ == "__main__":
    main()
import os
import pathlib
class DirectoryHelper:
    """Navigate the standard directory structure for BRER runs.

    The layout is ``<top_dir>/mem_<ensemble_num>/<iteration>/<phase>``, e.g.::

        <top_dir>/
        ├── mem_0/
        │   └── 0/
        │       ├── training/
        │       ├── converge_dist/
        │       └── production/
        ├── state.json
        └── syx.tpr

    Parameters
    ----------
    top_dir :
        the path to the directory containing all the ensemble members.
    param_dict :
        a dictionary specifying the ensemble number, the iteration,
        and the phase of the simulation.

    Raises
    ------
    ValueError
        If any of ``ensemble_num``, ``iteration`` or ``phase`` is missing
        from ``param_dict``.
    """

    def __init__(self, top_dir, param_dict):
        self._top_dir = str(top_dir)
        self._required_parameters = ['ensemble_num', 'iteration', 'phase']
        # Fail early rather than at the first get_dir('phase') call.
        for required in self._required_parameters:
            if required not in param_dict:
                raise ValueError('Must define {}'.format(required))
        self._param_dict = param_dict

    def get_dir(self, level: str) -> str:
        """Get the directory for however far down the tree you want to go.

        Parameters
        ----------
        level :
            one of 'top', 'ensemble_num', 'iteration', or 'phase'.

        Returns
        -------
        str
            the path to the specified directory level.

        Raises
        ------
        ValueError
            If ``level`` is not one of the recognized names.
        """
        pdict = self._param_dict
        if level == 'top':
            return self._top_dir
        if level == 'ensemble_num':
            return '{}/mem_{}'.format(self._top_dir, pdict['ensemble_num'])
        if level == 'iteration':
            return '{}/mem_{}/{}'.format(
                self._top_dir, pdict['ensemble_num'], pdict['iteration'])
        if level == 'phase':
            return '{}/mem_{}/{}/{}'.format(
                self._top_dir, pdict['ensemble_num'], pdict['iteration'],
                pdict['phase'])
        # Bug fix: the original interpolated the literal string 'type' here
        # instead of the offending level name.
        raise ValueError('{} is not a valid directory type for BRER '
                         'simulations'.format(level))

    def build_working_dir(self):
        """Create the phase directory of the current BRER state if needed."""
        phase_dir = pathlib.Path(self.get_dir('phase')).absolute()
        os.makedirs(phase_dir, mode=0o755, exist_ok=True)

    def change_dir(self, level):
        """Change to the directory specified by ``level`` and return it.

        Parameters
        ----------
        level : str
            one of 'top', 'ensemble_num', 'iteration', or 'phase'.
        """
        next_dir = self.get_dir(level)
        os.chdir(next_dir)
        return next_dir
import json
import typing
from run_brer.metadata import MetaData
from run_brer.pair_data import PairData
class GeneralParams(MetaData):
    """Parameters shared by every restraint in a single BRER simulation.

    Includes the "Voth" parameters (``tau``, ``A``, ``tolerance``) plus run
    bookkeeping such as the ensemble member, iteration, and phase.

    .. versionadded:: 2.0
        The *end_time* parameter is only available with sufficiently recent
        versions of https://github.com/kassonlab/brer_plugin (late 2.0
        beta). Otherwise, *end_time* will always be 0.0
    """

    def __init__(self):
        super().__init__('general')
        self.set_requirements([
            'A',
            'end_time',
            'ensemble_num',
            'iteration',
            'num_samples',
            'phase',
            'production_time',
            'sample_period',
            'start_time',
            'tau',
            'tolerance',
        ])

    def set_to_defaults(self):
        """Reset every general parameter to its default value."""
        self.set_from_dictionary(self.get_defaults())

    @staticmethod
    def get_defaults():
        """Return the default value of each general parameter."""
        return dict(
            A=50,
            end_time=0.,
            ensemble_num=1,
            iteration=0,
            num_samples=50,
            phase='training',
            production_time=10000,  # 10 ns
            sample_period=100,
            tau=50,
            start_time=0.,
            tolerance=0.25,
        )
class PairParams(MetaData):
    """Parameters specific to a single restrained atom pair."""

    def __init__(self, name):
        super().__init__(name)
        self.set_requirements(['sites', 'logging_filename', 'alpha', 'target'])

    def set_to_defaults(self):
        """Reset the coupling constant and target distance to defaults."""
        self.set(alpha=0., target=3.)

    def load_sites(self, sites: list):
        """Store the restraint's atom ids and derive its log-file name.

        The log file is named after this restraint, e.g. ``"3673_5636.log"``.

        Parameters
        ----------
        sites : list
            A list of the atom ids for a single restraint, e.g.
            ``[3673, 5636]``.
        """
        self.set(sites=sites, logging_filename=f"{self.name}.log")
class RunData:
    """Stores (and manipulates, to a lesser extent) all the metadata for a
    BRER run: the general parameters plus one ``PairParams`` per restraint.
    """

    def __init__(self):
        """The full set of metadata for a single BRER run include both the
        general parameters and the pair-specific parameters."""
        self.general_params = GeneralParams()
        self.general_params.set_to_defaults()
        self.pair_params: typing.MutableMapping[str, PairParams] = {}

    def set(self, name=None, **kwargs):
        """Set either general or pair-specific parameters.

        Parameters
        ----------
        name : str, optional
            restraint name.
            These are the same identifiers that are used in the RunConfig,
            by default None (meaning: general parameters).

        Raises
        ------
        TypeError
            if called without any keyword parameters.
        ValueError
            if you provide a name and try to set a general parameter or
            don't provide a name and try to set a pair-specific parameter.
        """
        if len(kwargs) == 0:
            raise TypeError(
                f'Invalid signature: {self.__class__.__qualname__}.set() called without naming any '
                f'parameters.')
        for key, value in kwargs.items():
            # If a restraint name is not specified, it is assumed that the
            # parameter is a "general" parameter.
            if not name:
                if key in self.general_params.get_requirements():
                    self.general_params.set(key, value)
                else:
                    raise ValueError(
                        'You have not provided a name; this means you are probably trying '
                        'to set a '
                        'general parameter. {} is pair-specific'.format(key))
            else:
                if key in self.pair_params[name].get_requirements():
                    self.pair_params[name].set(key, value)
                else:
                    raise ValueError('{} is not a pair-specific parameter'.format(key)
                                     + ' but you have provided a name.')

    def get(self, key, *, name=None):
        """Get either a general or a pair-specific parameter.

        Parameters
        ----------
        key : str
            the parameter to get.
        name : str
            if getting a pair-specific parameter, specify the restraint
            name. (Default value = None)

        Returns
        -------
        the parameter value.

        Raises
        ------
        ValueError
            if a pair-specific key is requested without a restraint name.
        """
        if key in self.general_params.get_requirements():
            return self.general_params.get(key)
        elif name:
            return self.pair_params[name].get(key)
        else:
            raise ValueError(
                'You have not provided a name, but are trying to get a pair-specific '
                'parameter. '
                'Please provide a pair name')

    def as_dictionary(self):
        """Get the run metadata as a hierarchical dictionary.

        Returns
        -------
        dict
            ``{'general parameters': {...},
               'pair parameters': {pair_name: {...}, ...}}``
        """
        pair_param_dict = {}
        for name in self.pair_params.keys():
            pair_param_dict[name] = self.pair_params[name].get_as_dictionary()
        return {
            'general parameters': self.general_params.get_as_dictionary(),
            'pair parameters': pair_param_dict
        }

    def from_dictionary(self, data: dict):
        """Loads metadata into the class from a dictionary.

        Parameters
        ----------
        data : dict
            RunData metadata as a dictionary (see :meth:`as_dictionary`).
        """
        self.general_params.set_from_dictionary(data['general parameters'])
        for name in data['pair parameters'].keys():
            self.pair_params[name] = PairParams(name)
            self.pair_params[name].set_from_dictionary(data['pair parameters'][name])

    def from_pair_data(self, pd: PairData):
        """Load some of the run metadata from a PairData object. Useful at
        the beginning of a run.

        Parameters
        ----------
        pd : PairData
            object from which metadata are loaded
        """
        name = pd.name
        self.pair_params[name] = PairParams(name)
        self.pair_params[name].load_sites(pd.get('sites'))
        self.pair_params[name].set_to_defaults()

    def clear_pair_data(self):
        """Removes all the pair parameters, replace with empty dict."""
        self.pair_params = {}

    def save_config(self, fnm='state.json'):
        """Saves the run parameters to a log file.

        Parameters
        ----------
        fnm : str, optional
            log file for state parameters, by default 'state.json'
        """
        # Use a context manager so the file handle is closed promptly
        # (the original leaked the handle from open()).
        with open(fnm, 'w') as handle:
            json.dump(self.as_dictionary(), handle)

    def load_config(self, fnm='state.json'):
        """Load state parameters from file.

        Parameters
        ----------
        fnm : str, optional
            log file of state parameters, by default 'state.json'
        """
        with open(fnm) as handle:
            data = json.load(handle)
        self.from_dictionary(data)
import json
import warnings
from abc import ABC
class MetaData(ABC):
    """Named key/value metadata with a declared set of required keys.

    Subclasses call :meth:`set_requirements` once, then use :meth:`set` /
    :meth:`get`. Setting a key that was never declared required emits a
    warning but is still stored.
    """

    def __init__(self, name):
        """Construct a metadata object and give it a descriptive name.

        Parameters
        ----------
        name :
            Give your MetaData class a descriptive name.
        """
        self.__name = name
        self.__required_parameters = ()
        self._metadata = {}

    @property
    def name(self):
        """str: name associating this object with a pair or a particular
        function (e.g. 'training', 'convergence' or 'production')."""
        return self.__name

    @name.setter
    def name(self, name):
        self.__name = name

    def set_requirements(self, list_of_requirements: list):
        """Define the set of required parameters for the class.

        Useful for checking that no parameters are missing before a run.

        Parameters
        ----------
        list_of_requirements : list
            list of required parameter names (strings).
        """
        self.__required_parameters = tuple(list_of_requirements)

    def get_requirements(self):
        """Return the required parameters of the class as a tuple."""
        return self.__required_parameters

    def __store(self, key, value):
        # Shared path for positional and keyword sets: warn on undeclared
        # keys (except the reserved 'name'), then store unconditionally.
        if key not in self.__required_parameters and key != "name":
            warnings.warn(
                f"{key} is not a required parameter of {self.name}: setting anyway")
        self._metadata[key] = value

    def set(self, key=None, value=None, **kwargs):
        """Set parameters of the class, warning about non-required keys.

        You can pass the key,value pairs either as a single positional
        key and value, or as a set of ``**kwargs``.

        Parameters
        ----------
        key : str, optional
            parameter name, by default None
        value : any, optional
            parameter value, by default None
        """
        if key is not None and value is not None:
            self.__store(key, value)
        for kw_key, kw_value in kwargs.items():
            self.__store(kw_key, kw_value)

    def get(self, key):
        """Get the value of a parameter of the class.

        Parameters
        ----------
        key : str
            The name of the parameter you wish to get.

        Returns
        -------
        The value of the parameter associated with the key.
        """
        return self._metadata[key]

    def set_from_dictionary(self, data: dict):
        """Set many parameters at once from a dictionary.

        Parameters
        ----------
        data : dict
            A dictionary of parameter names and values to set.
        """
        self.set(**data)

    def get_as_dictionary(self):
        """Return the current parameters of the class as a dictionary.

        Note: this is the live internal dictionary, not a copy.
        """
        return self._metadata

    def get_missing_keys(self):
        """Return the required parameters that have not been set, in
        declaration order."""
        return [required for required in self.__required_parameters
                if required not in self._metadata]
class MultiMetaData(ABC):
    """A single class that handles multiple MetaData objects (useful when
    restraining multiple atom-atom pairs)."""

    def __init__(self):
        self._metadata_list = []
        self.__names = []

    @property
    def names(self):
        """list: names of all stored MetaData objects, in insertion order.

        Rebuilt lazily from ``_metadata_list`` when empty.

        Raises
        ------
        IndexError
            If no metadata have been loaded.
        """
        if not self.__names:
            if not self._metadata_list:
                raise IndexError('Must import a list of metadata before retrieving names')
            self.__names = [metadata.name for metadata in self._metadata_list]
        return list(self.__names)

    @names.setter
    def names(self, names: list):
        self.__names = list(names)

    def add_metadata(self, metadata: MetaData):
        """Append a new MetaData object (and record its name).

        Parameters
        ----------
        metadata : MetaData
            metadata to append
        """
        self._metadata_list.append(metadata)
        self.__names.append(metadata.name)

    def name_to_id(self, name):
        """Convert the name of one of the MetaData objects to its list index.

        Parameters
        ----------
        name : str
            the name of a MetaData object.

        Returns
        -------
        int
            the index of the object in the internal metadata list.

        Raises
        ------
        IndexError
            If ``name`` is unknown (or no metadata have been loaded).
        """
        # Bug fix: the original only raised IndexError when the name list was
        # empty; an unknown name leaked a ValueError from list.index despite
        # the documented IndexError.
        if name not in self.__names:
            raise IndexError('{} is not a valid name.'.format(name))
        return self.__names.index(name)

    def id_to_name(self, id):
        """Convert a list index to the name of the MetaData object there.

        Parameters
        ----------
        id : int
            The index of the object in the internal metadata list.

        Returns
        -------
        str
            the name of the metadata object
        """
        return self.__names[id]

    def __getitem__(self, item):
        return self._metadata_list[item]

    def get_as_single_dataset(self):
        """Return ``{name: parameter dict}`` for all stored metadata."""
        return {metadata.name: metadata.get_as_dictionary()
                for metadata in self._metadata_list}

    def write_to_json(self, filename='state.json'):
        """Write the full state to a JSON file.

        Parameters
        ----------
        filename : str
            (Default value = 'state.json')
        """
        # Context manager ensures the handle is closed (original leaked it).
        with open(filename, 'w') as handle:
            json.dump(self.get_as_single_dataset(), handle)

    def read_from_json(self, filename='state.json'):
        """Read state from a JSON file, overwriting any pre-existing data.

        Parameters
        ----------
        filename : str
            (Default value = 'state.json')
        """
        self._metadata_list = []
        self.__names = []
        with open(filename, 'r') as handle:
            data = json.load(handle)
        for name, metadata in data.items():
            self.__names.append(name)
            metadata_obj = MetaData(name=name)
            metadata_obj.set_from_dictionary(metadata)
            self._metadata_list.append(metadata_obj)
from pytorch_lightning import Trainer
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.strategies import DDPStrategy
import argparse
import os
from run_crom.simulation import SimulationDataModule
from run_crom.cromnet import CROMnet
from run_crom.callbacks import *
def prepare_Trainer(args):
    """Build a PyTorch Lightning ``Trainer`` with CROM's standard callbacks.

    Sets up checkpointing, learning-rate monitoring, per-epoch timing and a
    custom progress bar. Logs go to ``./outputs/logs/<timestamp>`` and
    weights to ``./outputs/weights/<timestamp>``.

    Parameters
    ----------
    args
        Parsed argparse namespace carrying the Lightning Trainer options
        plus ``gpus`` and ``epo`` (the adaptive epoch schedule).
    """
    output_path = os.getcwd() + '/outputs'
    # One timestamp string shared by the weight dir and the log version.
    time_string = getTime()
    weightdir = output_path + '/weights/' + time_string
    checkpoint_callback = CustomCheckPointCallback(verbose=True, dirpath=weightdir, save_last=True)
    lr_monitor = LearningRateMonitor(logging_interval='step')
    epoch_timer = EpochTimeCallback()
    custom_progress_bar = LitProgressBar()
    callbacks=[lr_monitor, checkpoint_callback, epoch_timer, custom_progress_bar]
    logdir = output_path + '/logs'
    logger = pl_loggers.TensorBoardLogger(save_dir=logdir, name='', version=time_string, log_graph=False)
    # Total epochs = sum of the adaptive schedule; DDP without unused-parameter
    # detection avoids the per-step graph scan overhead.
    trainer = Trainer.from_argparse_args(args, gpus=findEmptyCudaDeviceList(args.gpus), default_root_dir=output_path, callbacks=callbacks, logger=logger, max_epochs= np.sum(args.epo), log_every_n_steps=1, strategy=DDPStrategy(find_unused_parameters=False))
    return trainer
def main():
    """CLI entry point: train a CROMnet or run inference from a checkpoint.

    ``-mode train`` requires ``-d`` (dataset path); ``-mode test`` requires
    ``-m`` (checkpoint path). All PyTorch Lightning Trainer flags are also
    accepted on the command line.
    """
    parser = argparse.ArgumentParser(description='Neural Representation training')
    # Mode for script
    parser.add_argument('-mode', help='train or test',
                        type=str, required=True)
    # Network arguments
    parser.add_argument('-lbl', help='label length',
                        type=int, required=False, default=6)
    parser.add_argument('-scale_mlp', help='scale mlp',
                        type=int, required=False, default=10)
    parser.add_argument('-ks', help='scale mlp',
                        type=int, required=False, default=6)
    parser.add_argument('-strides', help='scale mlp',
                        type=int, required=False, default=4)
    parser.add_argument('-siren_dec', help='use siren - decoder',
                        action='store_true')
    parser.add_argument('-siren_enc', help='use siren - encoder',
                        action='store_true')
    parser.add_argument('-dec_omega_0', help='dec_omega_0',
                        type=float, required=False, default=30)
    parser.add_argument('-enc_omega_0', help='enc_omega_0',
                        type=float, required=False, default=0.3)
    # Network Training arguments
    parser.add_argument('-m', help='path to weight',
                        type=str, required=False)
    parser.add_argument('-d', help='path to the dataset',
                        type=str, required=False)
    parser.add_argument('-verbose', help='verbose',
                        action='store_false')
    parser.add_argument('-initial_lr', help='initial learning rate',
                        type=float, nargs=1, required=False, default=8e-4)
    parser.add_argument('-lr', help='adaptive learning rates',
                        type=float, nargs='*', required=False)
    parser.add_argument('-epo', help='adaptive epoch sizes',
                        type=int, nargs='*', required=False)
    parser.add_argument('-batch_size', help='batch size',
                        type=int, required=False, default=16)
    # Trainer arguments
    parser = Trainer.add_argparse_args(parser)
    args = parser.parse_args()
    trainer = prepare_Trainer(args)
    if args.mode == "train":
        if args.d:
            data_path = args.d
            dm = SimulationDataModule(data_path, args.batch_size, num_workers=64)
            data_format, example_input_array = dm.get_dataFormat()
            preprop_params = dm.get_dataParams()
            # Forward only the CLI flags that CROMnet's constructor accepts.
            network_kwargs = get_validArgs(CROMnet, args)
            net = CROMnet(data_format, preprop_params, example_input_array, **network_kwargs)
        else:
            exit('Enter data path')
        trainer.fit(net, dm)
    elif args.mode == "test":
        if args.m:
            weight_path = args.m
            # loaded_from lets the checkpoint remember its own path.
            net = CROMnet.load_from_checkpoint(weight_path, loaded_from=weight_path)
            dm = SimulationDataModule(net.data_format['data_path'], net.batch_size, num_workers=64)
        else:
            exit('Enter weight path')
        trainer.test(net, dm)


if __name__ == "__main__":
    main()
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.callbacks import LearningRateMonitor, Callback, TQDMProgressBar
from pytorch_lightning.utilities import rank_zero_info
from pytorch_lightning.utilities.rank_zero import rank_zero_only
import time
import warnings
from run_crom.util import *
from run_crom.cromnet import *
from run_crom.simulation import SimulationDataset
class Exporter(object):
    """Exports a trained CROMnet checkpoint to TorchScript artifacts.

    Traces the encoder, decoder and decoder-gradient networks, checks each
    traced module against the eager network, and saves GPU and CPU variants
    (``*_enc.pt``, ``*_dec.pt``, ``*_dec_func_grad.pt`` and their ``_cpu``
    counterparts) next to the checkpoint file.
    """

    def __init__(self, weight_path):
        # Path to the Lightning checkpoint (.ckpt) to export.
        self.weight_path = weight_path

    def export(self, ):
        """Trace, verify and save all TorchScript artifacts."""
        net = CROMnet.load_from_checkpoint(self.weight_path)
        device = findEmptyCudaDevice()
        # tracing
        #print('tracing begin')
        net_enc = net.encoder.to(device)
        net_dec = net.decoder.to(device)
        #data_list = DataList(net.data_format['data_path'], 1.0)
        #dataset = SimulationDataset(net.data_format['data_path'], data_list.data_list)
        #trainloader = DataLoader(dataset, batch_size=1)
        data_batched = net.example_input_array
        encoder_input = data_batched.to(device)
        # Split the example batch into state fields and reference coordinates.
        state = encoder_input[:,:, :net.data_format['o_dim']]
        x0 = encoder_input[:, :, net.data_format['o_dim']:]
        # --- encoder trace: traced output must match eager output ---
        net_enc_jit = net_enc.to_torchscript(method = 'trace', example_inputs = state, check_trace=True, check_tolerance=1e-20)
        xhat = net_enc.forward(state).detach()
        xhat_jit = net_enc_jit.forward(state)
        assert(torch.norm(xhat-xhat_jit)<1e-10)
        #print("encoder trace finished")
        # Broadcast the latent vector to every point and append coordinates
        # to build the decoder input.
        xhat = xhat.expand(xhat.size(0), net.data_format['npoints'], xhat.size(2))
        x = torch.cat((xhat, x0), 2)
        batch_size_local = x.size(0)
        x = x.view(x.size(0)*x.size(1), x.size(2))
        x_original = x
        x = x.detach()
        x.requires_grad_(True)
        # --- decoder trace ---
        q = net_dec(x)
        q = q.view(batch_size_local, -1, q.size(1)).detach()
        net_dec_jit = net_dec.to_torchscript(method = 'trace', example_inputs = x, check_trace=True, check_tolerance=1e-20)
        q_jit = net_dec_jit.forward(x)
        q_jit = q_jit.view(batch_size_local, -1, q_jit.size(1))
        assert(torch.norm(q-q_jit)<1e-10)
        #print("decoder trace finished")
        # Full network (encoder + decoder) must agree with the traced pieces.
        encoder_input = data_batched.to(device)
        output_regular, _, _ = net.forward(encoder_input)
        assert(torch.norm(output_regular-q_jit)<1e-10)
        #print("full network trace finished")
        enc_jit_path = os.path.splitext(self.weight_path)[0]+"_enc.pt"
        dec_jit_path = os.path.splitext(self.weight_path)[0]+"_dec.pt"
        print('decoder torchscript path: ', enc_jit_path)
        torch.jit.save(net_enc_jit, enc_jit_path)
        #net_enc_jit.save(enc_jit_path)
        print('decoder torchscript path: ', dec_jit_path)
        torch.jit.save(net_dec_jit, dec_jit_path)
        #net_dec_jit.save(dec_jit_path)
        # trace grad
        # Verify the functional gradient on a small sample against both the
        # analytical Jacobian and autograd's Jacobian of the full network.
        x = x_original
        num_sample = 10
        x = x[0:num_sample, :]
        net_dec_func_grad = NetDecFuncGrad(net_dec)
        net_dec_func_grad.to(device)
        grad, y = net_dec_func_grad(x)
        grad = grad.clone() # output above comes from inference mode, so we need to clone it to a regular tensor
        y = y.clone()
        grad_gt, y_gt = net_dec.computeJacobianFullAnalytical(x)
        outputs_local, _, decoder_input = net.forward(encoder_input)
        grad_gt_auto = computeJacobian(decoder_input, outputs_local)
        grad_gt_auto = grad_gt_auto.view(grad_gt_auto.size(0)*grad_gt_auto.size(1), grad_gt_auto.size(2), grad_gt_auto.size(3))
        grad_gt_auto = grad_gt_auto[0:num_sample, :, :]
        criterion = nn.MSELoss()
        assert(criterion(grad_gt_auto, grad_gt)<1e-10)
        assert(criterion(grad, grad_gt)<1e-10)
        assert(criterion(y, y_gt)<1e-10)
        # grad, y = net_auto_dec_func_grad(x)
        with torch.jit.optimized_execution(True):
            net_dec_func_grad_jit = net_dec_func_grad.to_torchscript(method = 'trace', example_inputs = x, check_trace=True, check_tolerance=1e-20)
        grad_jit, y_jit = net_dec_func_grad_jit(x)
        assert(torch.norm(grad-grad_jit)<1e-10)
        assert(torch.norm(y-y_jit)<1e-10)
        #print("decoder gradient trace finished")
        dec_func_grad_jit_path = os.path.splitext(self.weight_path)[0]+"_dec_func_grad.pt"
        print('decoder gradient torchscript path: ', dec_func_grad_jit_path)
        net_dec_func_grad_jit.save(dec_func_grad_jit_path)
        # Re-trace the gradient network for CPU deployment.
        net_dec_func_grad.cpu()
        net_dec_func_grad_jit = net_dec_func_grad.to_torchscript(method = 'trace', example_inputs = x, check_trace=True, check_tolerance=1e-20)
        dec_func_grad_jit_path = os.path.splitext(self.weight_path)[0]+"_dec_func_grad_cpu.pt"
        #print('decoder gradient torchscript path (cpu): ', dec_func_grad_jit_path)
        net_dec_func_grad_jit.save(dec_func_grad_jit_path)
        # Round-trip check: reload the saved modules and compare outputs.
        net_enc_jit_load = torch.jit.load(enc_jit_path)
        net_dec_jit_load = torch.jit.load(dec_jit_path)
        encoder_input = data_batched.to(device)
        state = encoder_input[:,:, :net.data_format['o_dim']]
        xhat_jit_load = net_enc_jit_load.forward(state)
        assert(torch.norm(xhat_jit_load-xhat_jit)<1e-10)
        x = x_original
        q_jit_load = net_dec_jit_load.forward(x)
        q_jit_load = q_jit_load.view(batch_size_local, -1, q_jit_load.size(1))
        assert(torch.norm(q_jit_load-q_jit)<1e-10)
        # CPU variants of encoder and decoder.
        net_enc.cpu()
        state = state.cpu()
        net_enc_jit = net_enc.to_torchscript(method = 'trace', example_inputs = state, check_trace=True, check_tolerance=1e-20)
        enc_jit_path = os.path.splitext(self.weight_path)[0]+"_enc_cpu.pt"
        print('encoder torchscript path (cpu): ', enc_jit_path)
        net_enc_jit.save(enc_jit_path)
        net_dec.cpu()
        x = x.cpu()
        net_dec_jit = net_dec.to_torchscript(method = 'trace', example_inputs = x, check_trace=True, check_tolerance=1e-20)
        dec_jit_path = os.path.splitext(self.weight_path)[0]+"_dec_cpu.pt"
        print('decoder torchscript path (cpu): ', dec_jit_path)
        net_dec_jit.save(dec_jit_path)
class CustomCheckPointCallback(ModelCheckpoint):
    """Checkpoint callback that exports TorchScript artifacts after training.

    The "last" checkpoint is named ``<epoch>-<step>``; when training ends,
    it is passed to :class:`Exporter` (rank zero only), with tracing
    warnings suppressed.
    """

    CHECKPOINT_NAME_LAST='{epoch}-{step}'

    @rank_zero_only
    def on_train_end(self, trainer, pl_module):
        super().on_train_end(trainer, pl_module)
        last_path = self.last_model_path
        print("\nmodel path: " + last_path)
        exporter = Exporter(last_path)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            exporter.export()
class EpochTimeCallback(Callback):
    """Logs the wall-clock duration of each training epoch."""

    def on_train_epoch_start(self, trainer, pl_module):
        # Remember when this epoch began.
        self.start_time = time.time()

    def on_train_epoch_end(self, trainer, pl_module):
        elapsed = time.time() - self.start_time
        self.log("epoch_time", elapsed, prog_bar=True)
class LitProgressBar(TQDMProgressBar):
    """Progress bar that hides the logger version number."""

    def get_metrics(self, trainer, model):
        metrics = super().get_metrics(trainer, model)
        # don't show the version number
        metrics.pop("v_num", None)
        return metrics
import numpy as np
from numpy import ndarray
from typing import Optional
from skfem.mesh import Mesh, MeshTri, MeshQuad, MeshTet, MeshHex
from dataclasses import replace
# Elmer element-type code for the interior elements of each supported
# scikit-fem mesh class (the last two digits give the per-element node
# count, e.g. '504' = 4-node tetrahedron).
MESH_TYPE_MAPPING = {
    MeshTet: '504',
    MeshHex: '808',
    MeshTri: '303',
    MeshQuad: '404',
}
# Element-type code for the boundary facets of each mesh class (one
# topological dimension lower, e.g. tetrahedra have 3-node triangle facets).
BOUNDARY_TYPE_MAPPING = {
    MeshTet: '303',
    MeshHex: '404',
    MeshTri: '202',
    MeshQuad: '202',
}
def to_file(mesh: Mesh, path: str):
    """The mesh is written to four files.

    The files are ``<path>/mesh.{header,nodes,elements,boundary}``.

    Parameters
    ----------
    mesh
        The mesh object to export.
    path
        The path of the directory, e.g., `/home/user/case`

    Raises
    ------
    Exception
        If some element belongs to no named subdomain.
    """
    filename = path + ('/' if path[-1] != '/' else '') + 'mesh'
    npts = mesh.nvertices
    nt = mesh.nelements
    nfacets = mesh.nfacets
    mesh_type = type(mesh)
    # build t_id and boundary_id
    # One-based body/boundary tags; -1 marks "not part of any named set".
    t_id = -1 * np.ones(nt, dtype=np.int64)
    boundary_id = -1 * np.ones(nfacets, dtype=np.int64)
    if mesh.subdomains is not None:
        for i, key in enumerate(mesh.subdomains):
            t_id[mesh.subdomains[key]] = i + 1
    if mesh.boundaries is not None:
        for i, key in enumerate(mesh.boundaries):
            boundary_id[mesh.boundaries[key]] = i + 1
    if (t_id == -1).any():
        raise Exception("Each element must be part of some body.")
    if isinstance(mesh, MeshHex):
        # Reorder hexahedron vertices into the node ordering the output
        # format expects (differs from scikit-fem's internal ordering).
        mesh = replace(mesh, t=mesh.t[[1, 5, 3, 0, 4, 7, 6, 2]])
    # filename.header
    # Global counts, then one "(element-type code, count)" line per type.
    with open(filename + '.header', 'w') as handle:
        handle.write("{} {} {}\n".format(npts,
                                         nt,
                                         len(mesh.boundary_facets())))
        handle.write("2\n")
        handle.write("{} {}\n".format(
            MESH_TYPE_MAPPING[mesh_type],
            nt
        ))
        handle.write("{} {}\n".format(
            BOUNDARY_TYPE_MAPPING[mesh_type],
            len(mesh.boundary_facets())
        ))
    # filename.nodes
    # One line per node: id, partition marker (-1), x, y, z (z=0 in 2D).
    with open(filename + '.nodes', 'w') as handle:
        for itr in range(npts):
            handle.write("{} -1 {} {} {}\n".format(
                itr + 1,
                mesh.p[0, itr],
                mesh.p[1, itr],
                mesh.p[2, itr] if mesh.p.shape[0] > 2 else 0.
            ))
    # filename.elements
    # One line per element: id, body tag, element type, 1-based connectivity.
    with open(filename + '.elements', 'w') as handle:
        for itr in range(nt):
            handle.write(("{} {} {}"
                          + (" {}" * mesh.t.shape[0])
                          + "\n").format(
                itr + 1,
                t_id[itr],
                MESH_TYPE_MAPPING[mesh_type],
                *(mesh.t[:, itr] + 1)
            ))
    # filename.boundary
    # One line per boundary facet: id, boundary tag, the two adjacent
    # elements (presumably f2t[1] is -1 on the boundary and so is written
    # as 0 -- verify against skfem's f2t convention), facet type, vertices.
    with open(filename + '.boundary', 'w') as handle:
        for i, ix in enumerate(mesh.boundary_facets()):
            handle.write(("{} {} {} {} {}"
                          + " {}" * mesh.facets.shape[0]
                          + "\n").format(
                i + 1,
                boundary_id[ix],
                mesh.f2t[0, ix] + 1,
                mesh.f2t[1, ix] + 1,
                BOUNDARY_TYPE_MAPPING[mesh_type],
                *(mesh.facets[:, ix] + 1)
            ))
import numpy as np
from .run import run
from skfem import Mesh, MeshTri, MeshTet, MeshQuad, MeshHex
def mesh(arg1=None, arg2=None):
    """Create or load a mesh.

    Parameters
    ----------
    arg1
        Either a filename to load a mesh from, or an array (or list) of
        vertex coordinates.
    arg2
        An array (or list) of element connectivity; omit when loading from
        a file.

    Returns
    -------
    A mesh object whose type is deduced from the array dimensions:
    triangles/quads in 2D, tetrahedra/hexahedra in 3D.

    Raises
    ------
    TypeError
        If a single argument is given and it is not a filename.
    ValueError
        If the point/element dimensions match no supported mesh type.
    """
    if arg2 is None:
        if isinstance(arg1, str):
            return Mesh.load(arg1)
        # BUG FIX: previously fell through and failed later with a confusing
        # assertion on arg2; reject unsupported single-argument calls here.
        raise TypeError("A single argument must be a filename (str).")
    if isinstance(arg1, list) and isinstance(arg2, list):
        arg1 = np.array(arg1, np.float64)
        arg2 = np.array(arg2, np.int64)
    assert isinstance(arg1, np.ndarray)
    assert isinstance(arg2, np.ndarray)
    # accept both (npoints, dim) and (dim, npoints) orderings
    if arg1.shape[0] > arg1.shape[1]:
        arg1 = arg1.T
        arg2 = arg2.T
    if arg1.shape[0] == 2:
        if arg2.shape[0] == 3:
            return MeshTri(arg1, arg2)
        elif arg2.shape[0] == 4:
            return MeshQuad(arg1, arg2)
    elif arg1.shape[0] == 3:
        if arg2.shape[0] == 4:
            return MeshTet(arg1, arg2)
        elif arg2.shape[0] == 8:
            return MeshHex(arg1, arg2)
    # BUG FIX: previously executed ``return m`` with ``m`` unbound here,
    # raising a NameError instead of a useful error message.
    raise ValueError(
        "Unsupported mesh dimensions: points {} x elements {}".format(
            arg1.shape[0], arg2.shape[0]))
def target_boundaries(mesh, *keys):
    """Build an Elmer 'Target Boundaries' directive for the named boundaries.

    The reported indices are the 1-based positions of ``keys`` within the
    mesh's boundary dictionary, matching the numbering used when exporting
    the mesh.
    """
    names = list(mesh.boundaries.keys())
    indices = [str(names.index(key) + 1) for key in keys]
    return "Target Boundaries({}) = {}".format(len(indices), ' '.join(indices))
def targets(mesh):
    """Build Elmer target directives for all named boundaries and subdomains.

    Returns
    -------
    A dictionary with (up to) two keys: ``'boundaries'`` maps each boundary
    name to a ``Target Boundaries`` directive and ``'bodies'`` maps each
    subdomain name to a ``Target Bodies`` directive.  The indices are the
    1-based positions within the respective dictionaries, matching the
    numbering used when exporting the mesh.
    """
    # renamed local (was ``targets``, shadowing this function); enumerate
    # replaces the previous O(n^2) list.index lookup inside the loop
    result = {}
    if mesh.boundaries is not None:
        result['boundaries'] = {
            key: "Target Boundaries({}) = {}".format(1, i + 1)
            for i, key in enumerate(mesh.boundaries)
        }
    if mesh.subdomains is not None:
        result['bodies'] = {
            key: "Target Bodies({}) = {}".format(1, i + 1)
            for i, key in enumerate(mesh.subdomains)
        }
    return result
def plot(mesh, x, edges=False):
    """Plot a scalar field over the mesh using skfem's matplotlib backend.

    Parameters
    ----------
    mesh
        The mesh to plot on.
    x
        The nodal values; flattened if multidimensional.
    edges
        If True, draw the mesh edges underneath the field.
    """
    from skfem.visuals.matplotlib import plot as skfem_plot, draw
    values = x.flatten() if len(x.shape) > 1 else x
    if edges:
        return skfem_plot(mesh, values, ax=draw(mesh), shading='gouraud')
    return skfem_plot(mesh, values, shading='gouraud')
import os
import tarfile
import tempfile
import json
from typing import Optional
import docker
import meshio
def get_container(image: str, tag: str, verbose: bool):
    """Pull and/or start a container that has `ElmerSolver`.

    Parameters
    ----------
    image
        The container image name to use.
    tag
        The image tag to pull.
    verbose
        If True, print the status lines emitted while pulling the image.

    Returns
    -------
    An object representing the container.
    """
    # Talk to the local Docker daemon using the environment's configuration.
    client = docker.from_env()
    # Stream the pull so progress can be reported line by line.
    for line in client.api.pull(image,
                                tag=tag,
                                stream=True,
                                decode=True):
        if verbose:
            if "status" in line:
                print(line["status"])
    # 'sleep infinity' keeps the container alive so that mesh files can be
    # copied in and ElmerSolver executed against it afterwards.
    ctr = client.containers.create("{}:{}".format(image, tag),
                                   command='sleep infinity',
                                   detach=True)
    ctr.start()
    return ctr
def clean_container(ctr):
    """Kill and remove the container.

    Parameters
    ----------
    ctr
        The container object to dispose of.
    """
    ctr.kill()
    ctr.remove()
def write_to_container(ctr,
                       content: str,
                       filename: Optional[str] = None,
                       suffix: str = ".sif") -> str:
    """Write a given string to a file inside the container.

    Parameters
    ----------
    ctr
        The container object to write into.
    content
        The text to write.
    filename
        The name the file should have inside the container.  If None, a
        generated temporary name (with ``suffix``) is used.
    suffix
        Suffix for the generated temporary name when ``filename`` is None.

    Returns
    -------
    The absolute path of the file written inside the container.
    """
    # write string to a temporary file on host
    tmpfile = tempfile.NamedTemporaryFile(suffix=suffix,
                                          mode='w',
                                          delete=False)
    tmpfile.write(content)
    tmpfile.seek(0)
    tmpfile.close()
    if filename is None:
        filename = tmpfile.name
    # create a tar archive; put_archive only accepts tar streams
    tarname = tmpfile.name + ".tar"
    tar = tarfile.open(tarname, mode='w')
    try:
        tar.add(tmpfile.name,
                arcname=os.path.basename(filename))
    finally:
        tar.close()
        os.remove(tmpfile.name)
    # unpack tar contents to container root
    with open(tarname, 'rb') as fh:
        ctr.put_archive("/", fh.read())
    os.remove(tarname)
    # BUG FIX: previously returned the host temp name even when an explicit
    # ``filename`` was given; the file inside the container is named after
    # the tar arcname (``filename``), so return that path instead.
    return "/" + os.path.basename(filename)
def fetch_from_container(ctr, filename: str):
    """Fetch a file from container.

    Parameters
    ----------
    ctr
        The container object to read from.
    filename
        The absolute path of the file inside the container.

    Returns
    -------
    Mesh object from `meshio`.
    """
    basename = os.path.basename(filename)
    # Stream the tar archive produced by the Docker API into a host-side
    # temporary file.
    tmpfile = tempfile.NamedTemporaryFile(suffix=".tar",
                                          mode='wb',
                                          delete=False)
    try:
        bits, _ = ctr.get_archive(filename)
        for chunk in bits:
            tmpfile.write(chunk)
    finally:
        tmpfile.close()
    outdir = tmpfile.name + "_out"
    try:
        # BUG FIX: the archive is now opened inside the protected region so
        # that a failure in tarfile.open no longer raises UnboundLocalError
        # on ``tar`` during cleanup (the old finally referenced ``tar``
        # before it was guaranteed to exist).
        with tarfile.open(tmpfile.name, mode='r') as tar:
            tar.extract(basename, outdir)
    finally:
        os.remove(tmpfile.name)
    extracted = os.path.join(outdir, basename)
    mesh = meshio.read(extracted)
    os.remove(extracted)
    os.rmdir(outdir)
    return mesh
def run(filename,
        sif: str,
        outfile: str,
        verbose: bool,
        image: str = 'ghcr.io/kinnala/elmer',
        tag: str = 'devel-ba15974'):
    """Run the case in Docker.

    Parameters
    ----------
    filename
        The mesh filename.
    sif
        The contents of the solver input file (.sif) as a string.
    outfile
        The name of the result file to fetch from the container root.
    verbose
        If True, echo the copied mesh files and the solver output.
    image
        The container image that provides `ElmerSolver`.
    tag
        The image tag to use.
    """
    ctr = get_container(image, tag, verbose)
    # copy the four Elmer mesh files into the container root
    for ext in ['header', 'nodes', 'elements', 'boundary']:
        with open("{}.{}".format(filename, ext), 'r') as handle:
            _ = write_to_container(ctr,
                                   handle.read(),
                                   filename="mesh.{}".format(ext))
        if verbose:
            cmd = "cat mesh.{}".format(ext)
            print(cmd)
            res = ctr.exec_run(cmd)
            print(res.output.decode('utf-8'))
    # write the .sif file, then point ELMERSOLVER_STARTINFO at it so that
    # ElmerSolver finds it without command-line arguments
    filename = write_to_container(ctr, sif)
    _ = write_to_container(ctr, filename, filename="ELMERSOLVER_STARTINFO")
    res = ctr.exec_run("ElmerSolver",
                       stream=False,
                       demux=False)
    if verbose:
        print(res.output.decode('utf-8'))
    # fetch the solver result and always tear the container down
    mio = fetch_from_container(ctr, "/{}".format(outfile))
    clean_container(ctr)
    return mio
import logging
import math
import signal
import sys
import timeit
import traceback
import memory_profiler
import mock
from six import StringIO
from run_lambda import context as context_module
def run_lambda(handle, event, context=None, timeout_in_seconds=None, patches=None):
    """
    Run the Lambda function ``handle``, with the specified arguments and
    parameters.
    :param function handle: Lambda function to call
    :param dict event: dictionary containing event data
    :param MockLambdaContext context: context object. If not provided, a
        default context object will be used.
    :param int timeout_in_seconds: timeout in seconds. If not provided, the
        function will be called with no timeout
    :param dict patches: dictionary of name-to-value mappings that will be
        patched inside the Lambda function
    :return: value returned by Lambda function
    :rtype: LambdaResult
    """
    if context is None:
        context = context_module.MockLambdaContext.Builder().build()
    # Activate any requested monkey-patches before the handler runs.
    patches_list = [] if patches is None \
        else [mock.patch(name, value) for name, value in patches.items()]
    for patch in patches_list:
        patch.start()
    # Arm a SIGALRM-based timeout (no-op when timeout_in_seconds is None).
    setup_timeout(context, timeout_in_seconds)
    builder = None
    result = None
    try:
        # Builder snapshots start time/memory and redirects stdout/logging;
        # build() must be called on every exit path to restore stdout.
        builder = LambdaCallSummary.Builder(context)
        value = handle(event, context)
        result = LambdaResult(builder.build(), value=value)
    except LambdaTimeout:
        # NOTE(review): if the alarm fires while Builder is still being
        # constructed, ``builder`` is None here — confirm this edge case.
        result = LambdaResult(builder.build(), timed_out=True)
    except Exception as e:
        # Capture the traceback into the simulated CloudWatch log.
        traceback.print_exc(file=builder.log)
        result = LambdaResult(builder.build(), exception=e)
    finally:
        signal.alarm(0)  # disable any pending alarms
        for patch in patches_list:
            patch.stop()
    return result
def setup_timeout(context, timeout_in_seconds=None):
    """Arm a SIGALRM-based timeout and activate the context's clock.

    When ``timeout_in_seconds`` is None no alarm is installed; the context
    is activated either way so its remaining-time accounting starts.
    """
    if timeout_in_seconds is not None:
        def _raise_timeout(signum, frame):
            raise LambdaTimeout()
        signal.signal(signal.SIGALRM, _raise_timeout)
        signal.alarm(timeout_in_seconds)
    context.activate(timeout_in_seconds)
class LambdaTimeout(BaseException):
    """
    Raised by the SIGALRM handler installed in ``setup_timeout`` when a
    simulated Lambda call exceeds its timeout.

    Derives from ``BaseException`` rather than ``Exception`` so that a broad
    ``except Exception`` inside the user's handler cannot swallow it.
    """
    pass
class LambdaResult(object):
    """
    Outcome of a local Lambda invocation: the value returned (if any), the
    exception raised (if any), whether the call timed out, and a summary of
    the call's duration, memory usage, and log output.
    """
    def __init__(self, summary, value=None, timed_out=False, exception=None):
        self._summary = summary
        self._value = value
        self._timed_out = timed_out
        self._exception = exception

    @property
    def summary(self):
        """
        :property: Summary of call to Lambda function
        :rtype: LambdaCallSummary
        """
        return self._summary

    @property
    def value(self):
        """
        :property: The value returned by the call to the Lambda function, or
            ``None`` if no value was returned.
        :rtype: any
        """
        return self._value

    @property
    def timed_out(self):
        """
        :property: Whether the call to the Lambda function timed out
        :rtype: bool
        """
        return self._timed_out

    @property
    def exception(self):
        """
        :property: The exception raised by the call to the Lambda function, or
            ``None`` if no exception was raised
        :rtype: Exception
        """
        return self._exception

    def __str__(self):
        fields = "summary={s}; value={v}; timed_out={t}; exception={e}".format(
            s=self._summary, v=self._value,
            t=self._timed_out, e=repr(self._exception))
        return "{" + fields + "}"

    def display(self, outfile=None):
        """Write a human-readable report of this result to *outfile*."""
        out = sys.stdout if outfile is None else outfile
        if self._timed_out:
            headline = "Timed out"
        elif self._exception is not None:
            headline = "Raised an exception: {}".format(repr(self._exception))
        else:
            headline = "Returned value {}".format(self._value)
        out.write(headline + "\n\n")
        self._summary.display(outfile=out)
class LambdaCallSummary(object):
    """
    Timing, memory, and log capture for a single local Lambda call.
    Instances are produced by the nested ``Builder``.
    """
    def __init__(self, duration_in_millis, max_memory_used_in_mb, log):
        self._duration_in_millis = duration_in_millis
        self._max_memory_used_in_mb = max_memory_used_in_mb
        self._log = log
    @property
    def duration_in_millis(self):
        """
        Duration of call, in milliseconds. This value may vary from the
        duration the call would have taken if actually run in AWS.
        :property: Duration of call, in milliseconds
        :rtype: int
        """
        return self._duration_in_millis
    @property
    def max_memory_used_in_mb(self):
        """
        Maximum amount of memory used during call to Lambda function,
        in megabytes. This value is an estimate of how much memory the call
        would have used if actually run in AWS. We have found that these
        estimates are almost always within 5MB of the amount of memory used by
        corresponding remote calls.
        :property: Maximum amount of memory used during call to Lambda function,
        in megabytes.
        :rtype: int
        """
        return self._max_memory_used_in_mb
    @property
    def log(self):
        """
        :property: The contents of the log for this lambda function.
        :rtype: str
        """
        return self._log
    def __str__(self):
        return "{{duration={d} milliseconds; max_memory={m} MB; log={l}}}"\
            .format(d=self._duration_in_millis,
                    m=self._max_memory_used_in_mb,
                    l=repr(self._log))
    def display(self, outfile=None):
        # Write a human-readable report; defaults to stdout.
        if outfile is None:
            outfile = sys.stdout
        outfile.write("Duration: {} ms\n\n".format(self._duration_in_millis))
        outfile.write("Max memory used: {} MB\n\n"
                      .format(self._max_memory_used_in_mb))
        outfile.write("Log:\n")
        outfile.write(self._log)
    class Builder(object):
        # Captures the runtime environment around a Lambda call: snapshots
        # start time and memory, and redirects stdout and the root logger
        # into an in-memory log that mimics CloudWatch output.  build() MUST
        # be called afterwards to restore sys.stdout.
        def __init__(self, context):
            self._context = context
            self._start_mem = memory_profiler.memory_usage()[0]
            self._log = StringIO()
            self._log.write("START RequestId: {r} Version: {v}\n".format(
                r=context.aws_request_id, v=context.function_version
            ))
            self._start_time = timeit.default_timer()
            self._previous_stdout = sys.stdout
            # NOTE(review): this handler is attached to the root logger and
            # never removed, so repeated calls accumulate handlers — confirm.
            handler = logging.StreamHandler(stream=self._log)
            logging.getLogger().addHandler(handler)
            sys.stdout = self._log
        def build(self):
            # Stop the clock/memory measurement and restore stdout before
            # composing the final REPORT line.
            end_time = timeit.default_timer()
            end_mem = memory_profiler.memory_usage()[0]
            sys.stdout = self._previous_stdout
            self._log.write("END RequestId: {r}\n".format(
                r=self._context.aws_request_id))
            duration_in_millis = int(math.ceil(1000 * (end_time - self._start_time)))
            # The memory overhead of setting up the AWS Lambda environment
            # (when actually run in AWS) is roughly 14 MB
            # NOTE(review): memory_profiler.memory_usage reports MiB, so
            # dividing the delta by 1048576 likely collapses it to ~0 and the
            # result is dominated by the +14 constant — verify the units.
            max_memory_used_in_mb = (end_mem - self._start_mem) / 1048576 + 14
            self._log.write(
                "REPORT RequestId: {r}\tDuration: {d} ms\t"
                "Max Memory Used: {m} MB\n"
                .format(r=self._context.aws_request_id,
                        d=duration_in_millis,
                        m=max_memory_used_in_mb))
            log = self._log.getvalue()
            return LambdaCallSummary(duration_in_millis, max_memory_used_in_mb, log)
        @property
        def log(self):
            # The in-memory log stream; run_lambda writes tracebacks here.
            return self._log
from dataclasses import astuple, dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import yaml
from gql import gql
from run_logger import HasuraLogger
@dataclass
class NewParams:
    """Parameters gathered for a new run from up to three sources."""
    # Parameters read from a yaml config file (None if no config was given).
    config_params: Optional[dict]
    # Parameters issued by a sweep (None if the run is not part of a sweep).
    sweep_params: Optional[dict]
    # Parameters of an existing run to load (None if no load_id was given).
    load_params: Optional[dict]
def get_config_params(config: Union[str, Path]) -> dict:
    """
    Reads a ``yaml`` config file and returns a dictionary of parameters.
    """
    # Path() accepts both str and Path, so a single conversion suffices.
    with Path(config).open() as f:
        return yaml.load(f, yaml.FullLoader)
def get_load_params(load_id: int, logger: HasuraLogger) -> dict:
    """
    Returns the parameters of an existing run.
    :param load_id: The ID of an existing run whose parameters you want to access.
    :param logger: A HasuraLogger object associated with the database where the run is stored.
    """
    query = gql(
        """
    query GetParameters($id: Int!) {
        run_by_pk(id: $id) {
            metadata(path: "parameters")
        }
    }"""
    )
    response = logger.execute(query, variable_values=dict(id=load_id))
    return response["run_by_pk"]["metadata"]
def create_run(
    logger: Optional[HasuraLogger] = None,
    config: Optional[Union[Path, str]] = None,
    charts: Optional[List[dict]] = None,
    metadata: Optional[Dict] = None,
    sweep_id: Optional[int] = None,
    load_id: Optional[int] = None,
) -> NewParams:
    """
    Creates a new run.  Registers the run in the database (via
    :py:meth:`HasuraLogger.create_run <run_logger.hasura_logger.HasuraLogger.create_run>`)
    and returns a ``NewParams`` holding parameters from three sources:

    - a config file (if ``config`` is provided)
    - a sweep (if the run is enrolled via ``sweep_id``)
    - an existing run (if ``load_id`` is provided)

    :param logger: A HasuraLogger object. If ``None``, the run is not registered in the database.
    :param config: A path to a ``yaml`` config file.
    :param charts: A list of charts to be added to the database, associated with this run.
    :param metadata: Any JSON-serializable object to be stored with the run.
    :param sweep_id: The ID of the sweep in which the run is enrolled (if any).
    :param load_id: The ID of an existing run whose parameters you want to access.
    """
    config_params = None if config is None else get_config_params(config)
    sweep_params = None
    load_params = None
    if logger is not None:
        sweep_params = logger.create_run(
            metadata=metadata,
            sweep_id=sweep_id,
            charts=charts if charts is not None else [],
        )
        if load_id is not None:
            load_params = get_load_params(load_id=load_id, logger=logger)
    return NewParams(
        config_params=config_params,
        sweep_params=sweep_params,
        load_params=load_params,
    )
def update_params(
    logger: Optional[HasuraLogger],
    new_params: NewParams,
    name: str,
    **params,
) -> dict:
    """
    Merge ``new_params`` into ``params`` and persist the result.

    Convenience wrapper around
    :py:meth:`HasuraLogger.update_metadata <run_logger.hasura_logger.HasuraLogger.update_metadata>`,
    which appends to the run's stored metadata with the Hasura
    `_append <https://hasura.io/blog/postgres-json-and-jsonb-type-support-on-graphql-41f586e47536/#mutations-append>`_
    operator.

    Precedence, lowest to highest: existing ``params`` (e.g. command-line
    defaults), then config parameters, then sweep parameters, then load
    parameters — so sweep parameters overwrite config parameters, and load
    parameters overwrite sweep parameters.

    Note that this function mutates the metadata stored in the database when
    ``logger`` is not ``None``.

    :param logger: A HasuraLogger associated with the database containing the run to update.
    :param new_params: The new parameters.
    :param name: A name to be given to the run.
    :param params: Existing parameters (e.g. command line defaults).
    :return: Updated parameters.
    """
    sources = [p for p in astuple(new_params) if p is not None]
    for source in sources:
        params.update(source)
    if logger is not None:
        logger.update_metadata(
            dict(parameters=params, run_id=logger.run_id, name=name)
        )
    return params
def initialize(
    graphql_endpoint: Optional[str] = None,
    config: Optional[Union[Path, str]] = None,
    charts: Optional[List[dict]] = None,
    metadata: Optional[Dict] = None,
    name: Optional[str] = None,
    sweep_id: Optional[int] = None,
    load_id: Optional[int] = None,
    **params,
) -> Tuple[dict, Optional[HasuraLogger]]:
    """
    The main function to initialize a run.
    It creates a new run and returns the parameters and a HasuraLogger object, which
    is a handle for accessing the database.
    :param graphql_endpoint: The endpoint of the Hasura GraphQL API, e.g. ``https://server.university.edu:1200/v1/graphql``. If this value is ``None``, the run will not be logged in the database.
    :param config: An optional path to a ``yaml`` config file file containing parameters. See the section on :ref:`Config files` for more details.
    :param charts: A list of `Vega <https://vega.github.io/>`_ or `Vega-Lite <https://vega.github.io/vega-lite/>`_ graphical specifications, to be displayed by `run-visualizer <https://github.com/run-tracker/run-visualizer>`_.
    :param metadata: Any JSON-serializable object to be stored in the database.
    :param name: An optional name to be given to the run.
    :param sweep_id: An optional sweep ID, to enroll this run in a sweep.
    :param load_id: An optional run ID, to load parameters from an existing run.
    :param params: Existing (usually default) parameters provided for the run (and updated by :py:func:`update_params <run_logger.main.update_params>`).
    :return: A tuple of parameters and a HasuraLogger object.
    """
    # NOTE(review): the docstring above says a ``None`` endpoint disables
    # database logging, yet HasuraLogger is constructed unconditionally here
    # — confirm that HasuraLogger tolerates a ``None`` endpoint.
    logger = HasuraLogger(graphql_endpoint)
    # Gather parameters from config file / sweep / loaded run...
    new_params = create_run(
        logger=logger,
        config=config,
        charts=charts,
        metadata=metadata,
        sweep_id=sweep_id,
        load_id=load_id,
    )
    # ...then merge them over the provided defaults and persist the result.
    params = update_params(
        logger=logger,
        new_params=new_params,
        name=name,
        **params,
    )
    return params, logger
import time
from dataclasses import dataclass
from itertools import cycle, islice
from pathlib import Path
from typing import List, Optional
import numpy as np
from gql import Client as GQLClient
from gql import gql
from gql.transport.requests import RequestsHTTPTransport
from run_logger.logger import Logger
from run_logger.params import param_generator, param_sampler
def jsonify(value):
    """
    Convert a value to a JSON-compatible type.
    In addition to standard JSON types, handles
    - ``pathlib.Path`` (converts to ``str``)
    - ``np.nan`` (converts to ``null``)
    - ``np.ndarray`` (converts to ``list``)
    :param value: a ``str``, ``Path``, ``np.ndarray``, ``dict``, or ``Iterable``.
    :return: value converted to JSON-serializable object
    """
    if isinstance(value, str):
        return value
    elif isinstance(value, Path):
        return str(value)
    elif np.isscalar(value):
        # BUG FIX: np.isnan raises TypeError on non-numeric scalars (e.g.
        # bytes or very large ints), which previously crashed jsonify;
        # treat those as plain scalar values instead.
        try:
            if np.isnan(value):
                return None
        except TypeError:
            pass
        try:
            # numpy scalar types expose .item() to unwrap the Python value
            return value.item()
        except AttributeError:
            return value
    elif isinstance(value, np.ndarray):
        return jsonify(value.tolist())
    elif isinstance(value, dict):
        return {jsonify(k): jsonify(v) for k, v in value.items()}
    else:
        # any other iterable becomes a list; non-iterables pass through
        try:
            return [jsonify(v) for v in value]
        except TypeError:
            return value
@dataclass
class Client:
    """Thin wrapper around a gql client that retries failed requests."""
    graphql_endpoint: str

    def __post_init__(self):
        transport = RequestsHTTPTransport(url=self.graphql_endpoint)
        self.client = GQLClient(transport=transport)

    def execute(self, query: str, variable_values: dict):
        """
        Execute *query* against the endpoint, retrying indefinitely on
        failure with exponential backoff (1s, 2s, 4s, ...).
        """
        sleep_time = 1
        while True:
            try:
                return self.client.execute(
                    query,
                    variable_values=jsonify(variable_values),
                )
            except Exception as e:
                # BUG FIX: removed a leftover debugger ``breakpoint()`` that
                # hung unattended runs whenever a request failed.
                print(e)
                time.sleep(sleep_time)
                sleep_time *= 2
@dataclass
class HasuraLogger(Logger):
    """
    HasuraLogger is the main logger class for this library.
    :param graphql_endpoint:
        The endpoint of the Hasura GraphQL API, e.g. ``https://server.university.edu:1200/v1/graphql``.
    :param seed:
        The seed for the random number generator. Used for selecting random parameters
        in conjunction with sweeps. See `sweep-logger <https://github.com/run-tracker/sweep-logger>`_ for details about
        creating sweeps.
    :param debounce_time:
        If your application expects to perform many log operations in rapid succession, debouncing
        collects the log data over the course of this time interval to perform a single large API call,
        instead of several small ones which might jam the server.
    """
    graphql_endpoint: str
    seed: int = 0
    _run_id: Optional[int] = None
    debounce_time: int = 0
    insert_new_run_mutation = gql(
        """
    mutation insert_new_run($metadata: jsonb = {}, $charts: [chart_insert_input!] = []) {
    insert_run_one(object: {charts: {data: $charts}, metadata: $metadata}) {
        id
    }
    }
    """
    )
    add_run_to_sweep_mutation = gql(
        """
    mutation add_run_to_sweep($metadata: jsonb = {}, $sweep_id: Int!, $charts: [chart_insert_input!] = []) {
    insert_run_one(object: {charts: {data: $charts}, metadata: $metadata, sweep_id: $sweep_id}) {
        id
        sweep {
            parameter_choices {
                Key
                choice
            }
        }
    }
    update_sweep(where: {id: {_eq: $sweep_id}}, _inc: {grid_index: 1}) {
        returning {
            grid_index
        }
    }
    }
    """
    )
    update_metadata_mutation = gql(
        """
    mutation update_metadata($metadata: jsonb!, $run_id: Int!) {
    update_run(
        where: {id: {_eq: $run_id}}
        _append: {metadata: $metadata}
    ) {
        affected_rows
    }
    }
    """
    )
    insert_run_logs_mutation = gql(
        """
    mutation insert_run_logs($objects: [run_log_insert_input!]!) {
    insert_run_log(objects: $objects) {
        affected_rows
    }
    }
    """
    )
    insert_run_blobs_mutation = gql(
        """
    mutation insert_run_blobs($objects: [run_blob_insert_input!]!) {
    insert_run_blob(objects: $objects) {
        affected_rows
    }
    }
    """
    )

    def __post_init__(self):
        self.random = np.random.default_rng(seed=self.seed)
        # NOTE(review): main.initialize documents a ``None`` endpoint as "do
        # not log", but this assert rejects None — confirm intended contract.
        assert self.graphql_endpoint is not None
        self.client = Client(graphql_endpoint=self.graphql_endpoint)
        # Buffers used by debouncing; see ``log`` and ``blob``.
        self._log_buffer = []
        self._blob_buffer = []
        self._last_log_time = None
        self._last_blob_time = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUG FIX: previously a no-op, which silently dropped any logs/blobs
        # still held in the debounce buffers when the ``with`` block ended.
        # Flush both buffers so everything recorded is persisted.
        if self._log_buffer:
            self.execute(
                self.insert_run_logs_mutation,
                variable_values=dict(objects=self._log_buffer),
            )
            self._log_buffer = []
        if self._blob_buffer:
            self.execute(
                self.insert_run_blobs_mutation,
                variable_values=dict(objects=self._blob_buffer),
            )
            self._blob_buffer = []

    @property
    def run_id(self):
        # The database ID of the run created by ``create_run`` (None before).
        return self._run_id

    def create_run(
        self,
        metadata: Optional[dict],
        charts: Optional[List[dict]] = None,
        sweep_id: Optional[int] = None,
    ) -> Optional[dict]:
        """
        Creates a new run in the Hasura database.
        :param metadata: Any useful data about the run being created, e.g. git commit, parameters used, etc. ``run-logger`` makes no assumptions about the content of ``metadata``, except that it is JSON-compatible (or convertible to JSON-compatible by :py:func:`jsonify <run_logger.hasura_logger.jsonify>`).
        :param charts: A list of `Vega <https://vega.github.io/>`_ or `Vega-Lite <https://vega.github.io/vega-lite/>`_ graphical specifications, to be displayed by `run-visualizer <https://github.com/run-tracker/run-visualizer>`_.
        :param sweep_id: The ID of the sweep that this run is associated with, if any. See `sweep-logger <https://github.com/run-tracker/sweep-logger>`_ for details about creating sweeps.
        :return: A dictionary of new parameter values assigned by sweep, if run is associated with one
        (otherwise `None`).
        """
        variable_values = dict(metadata=metadata)
        if charts is not None:
            # the ``order`` field fixes the display order in run-visualizer
            variable_values.update(
                charts=[
                    dict(spec=spec, order=order) for order, spec in enumerate(charts)
                ]
            )
        if sweep_id is None:
            mutation = self.insert_new_run_mutation
        else:
            mutation = self.add_run_to_sweep_mutation
            variable_values.update(sweep_id=sweep_id)
        data = self.execute(mutation, variable_values=variable_values)
        insert_run_response = data["insert_run_one"]
        self._run_id = insert_run_response["id"]
        if sweep_id is not None:
            param_choices = {
                d["Key"]: d["choice"]
                for d in insert_run_response["sweep"]["parameter_choices"]
            }
            # grid_index was atomically incremented by the mutation above
            grid_index = data["update_sweep"]["returning"][0]["grid_index"]
            assert param_choices, "No parameter choices found in database"
            for k, v in param_choices.items():
                assert v, f"{k} is empty"
            if grid_index is None:
                # random search
                choice = param_sampler(param_choices, self.random)
            else:
                # grid search: cycle so the index wraps around the grid
                iterator = cycle(param_generator(param_choices))
                choice = next(islice(iterator, grid_index, None))
            return choice

    def update_metadata(self, metadata: dict):
        """
        This will combine given metadata with existing run metadata
        using the Hasura
        `_append <https://hasura.io/blog/postgres-json-and-jsonb-type-support-on-graphql-41f586e47536/#mutations-append>`_
        operator.
        You must call :meth:`HasuraLogger.create_run` before calling this method.
        """
        assert self.run_id is not None, "add_metadata called before create_run"
        self.execute(
            self.update_metadata_mutation,
            variable_values=dict(
                metadata=metadata,
                run_id=self.run_id,
            ),
        )

    def log(self, **log):
        """
        Create a new log object to be added to the `logs` database table.
        This populates the data that `run-visualizer <https://github.com/run-tracker/run-visualizer>`_
        will pass to Vega charts specs.
        Specifically, run-visualizer will insert the array of logs into ``data: values: [...]``.
        You must call :meth:`HasuraLogger.create_run` before calling this method.
        """
        assert self.run_id is not None, "log called before create_run"
        self._log_buffer.append(dict(log=log, run_id=self.run_id))
        # Flush only if the debounce interval has elapsed; otherwise the
        # entry waits in the buffer for the next flush (or __exit__).
        if (
            self._last_log_time is None
            or time.time() - self._last_log_time > self.debounce_time
        ):
            self.execute(
                self.insert_run_logs_mutation,
                variable_values=dict(objects=self._log_buffer),
            )
            self._last_log_time = time.time()
            self._log_buffer = []

    def blob(self, blob: str, metadata: dict):
        """
        Store a blob object in database. "Blobs" typically store large objects
        such as images. `Run-visualizer <https://github.com/run-tracker/run-visualizer>`_
        does not pull blobs from the Hasura database and they will not congest the
        visualizer web interface.
        You must call :py:func:`create_run <run_logger.hasura_logger.create_run>` before calling this method.
        :param blob: This is expected to be a `bytea <https://www.postgresql.org/docs/current/datatype-binary.html#:~:text=The%20bytea%20type%20supports%20two,bytea_output%3B%20the%20default%20is%20hex.>`_ datatype.
        :param metadata: any JSON-compatible metadata to be stored with blob.
        """
        assert self.run_id is not None, "blob called before create_run"
        self._blob_buffer.append(dict(blob=blob, metadata=metadata, run_id=self.run_id))
        # Same debounce strategy as ``log``.
        if (
            self._last_blob_time is None
            or time.time() - self._last_blob_time > self.debounce_time
        ):
            self.execute(
                self.insert_run_blobs_mutation,
                variable_values=dict(objects=self._blob_buffer),
            )
            self._last_blob_time = time.time()
            self._blob_buffer = []

    def execute(self, *args, **kwargs):
        # Delegate to the retrying Client wrapper.
        return self.client.execute(*args, **kwargs)
import re
import logging
import yaml
import marathon.cached as cached
# Module-level logger used to report interpolation failures.
log = logging.getLogger(__name__)
# Matches ``${name}`` placeholders.  BUG FIX: raw string avoids the invalid
# "\$" escape-sequence warning raised by newer Python versions (the compiled
# pattern is byte-identical to the old one).
VAR_REGEX = re.compile(r"\${(.*?)}")
def get_marathon_config():
    """Return the parsed run.yaml, loading it on first use and caching it."""
    if cached.marathon_config:
        return cached.marathon_config
    with open("run.yaml", "r") as f:
        cached.marathon_config = yaml.safe_load(f)
    return cached.marathon_config
def init_marathon_config():
    """Return a template ``run.yaml`` configuration for bootstrapping a project.

    The template contains the global keys (``project``, ``region``,
    ``allow-invoke``) plus three example services demonstrating links,
    per-service settings, and a cron trigger.
    """
    return {
        "project": "your_project",
        "region": "your_default_region",
        "allow-invoke": [
            "user:your_user@domain.com"
        ],
        "service1": {
            "image": "gcr.io/${project}/service1:latest",
            "dir": "apps/service1",
            "authenticated": "false",
            "concurrency": "30",
            "links": [
                "service2"
            ],
        },
        "service2": {
            "image": "gcr.io/${project}/service2:latest",
            "dir": "apps/service2",
            "links": [
                "service3"
            ],
        },
        "service3": {
            "image": "gcr.io/${project}/service3:latest",
            "dir": "apps/service3",
            "cron": {
                "schedule": "0 * * * *",
                "http-method": "get",
            },
        },
    }
def interpolate_var(string):
    """Expand ``${key}`` references in *string* using top-level run.yaml values.

    Keys missing from the config are logged and left unexpanded.
    """
    interpolated_string = str(string)
    for match in set(re.findall(VAR_REGEX, str(string))):
        try:
            match_interpolation = get_marathon_config()[match]
            # BUG FIX: accumulate replacements on the working copy; replacing
            # on the original ``string`` each iteration discarded all but the
            # last variable when several distinct ``${...}`` appeared.
            interpolated_string = interpolated_string.replace(
                "${{{}}}".format(match), match_interpolation)
        except KeyError:
            log.error(f"Failed to interpolate {string} in run.yaml")
    return interpolated_string
def service_iter():
    """Yield the names of services defined in run.yaml, skipping global keys."""
    reserved = ("project", "region", "allow-invoke")
    for name in get_marathon_config().keys():
        if name not in reserved:
            yield name
def service_dependencies():
    """Split services into those with links and those without.

    Returns
    -------
    ``(deps_map, nodeps_list)`` where ``deps_map`` maps a service name to the
    set of services it links to, and ``nodeps_list`` lists services that
    declare no links.
    """
    deps_map = {}
    nodeps_list = []
    conf = get_marathon_config()
    for service in service_iter():
        links = conf[service].get("links")
        if links:
            deps_map[service] = set(links)
        else:
            nodeps_list.append(service)
    return deps_map, nodeps_list
def sanitize_service_name(service):
    """Lowercase *service* and replace underscores with hyphens."""
    return service.replace("_", "-").lower()
def run_regressors(df, target_column):
    '''
    Fit a set of baseline regression models and plot their test RMSE.

    df: Data (dataFrame)
    target_column: Target variable/column name (str)

    1. All regression models are ranked by RMSE (Root Mean Squared Error)
    2. Categorical variables are dummy encoded for regression models except
       for Catboost and LightGBM, which consume the categorical columns
       directly.

    Returns the matplotlib figure containing a horizontal bar per model.
    '''
    import warnings
    warnings.filterwarnings("ignore")
    # removed duplicate LGBMRegressor import and the unused SVR import
    from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.tree import DecisionTreeRegressor
    from xgboost import XGBRegressor
    from lightgbm import LGBMRegressor
    from catboost import CatBoostRegressor
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    import pandas as pd
    import matplotlib.pyplot as plt
    from sklearn.metrics import mean_squared_error
    import numpy as np
    # sklearn-style models that require numeric (dummy-encoded) features
    models = [DecisionTreeRegressor(), KNeighborsRegressor(), RandomForestRegressor(n_estimators=100), AdaBoostRegressor(), GradientBoostingRegressor(), XGBRegressor(objective='reg:squarederror')]
    df = df.dropna()
    X = df.drop(target_column, axis=1)
    y = df[target_column]
    X_dummy = pd.get_dummies(X)
    X_train, X_test, y_train, y_test = train_test_split(X_dummy, y, test_size=0.3, random_state=42)
    result = {}
    for model in models:
        model.fit(X_train, y_train)
        rmse = np.sqrt(mean_squared_error(y_test, model.predict(X_test))).astype(int)
        result[model.__class__.__name__] = rmse
    # Catboost: handles categorical columns natively via cat_features indices
    X_train_c, X_test_c, y_train_c, y_test_c = train_test_split(X, y, test_size=0.3, random_state=42)
    cat_cols = X.select_dtypes(include='object').columns
    cat_indices = [X.columns.get_loc(col) for col in cat_cols]
    cb = CatBoostRegressor(cat_features=cat_indices)
    cb.fit(X_train_c, y_train_c, eval_set=(X_test_c, y_test_c), early_stopping_rounds=5, use_best_model=True, verbose=0)
    result[cb.__class__.__name__] = np.sqrt(mean_squared_error(y_test_c, cb.predict(X_test_c))).astype(int)
    # Light GBM: label-encode the categoricals and declare them explicitly
    lb = LabelEncoder()
    for col in cat_cols:
        X[col] = lb.fit_transform(X[col])
    lgb = LGBMRegressor()
    X_train_l, X_test_l, y_train_l, y_test_l = train_test_split(X, y, test_size=0.3, random_state=42)
    lgb.fit(X_train_l, y_train_l, categorical_feature=cat_cols.tolist())
    result[lgb.__class__.__name__] = np.sqrt(mean_squared_error(y_test_l, lgb.predict(X_test_l))).astype(int)
    # sort descending so the worst (largest RMSE) model sits at the top
    b = pd.DataFrame.from_dict(result, orient='index').reset_index().rename(columns={'index': 'Model', 0: 'RMSE'}).sort_values(by='RMSE', ascending=False)
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.barh(b.Model, b.RMSE, color='red')
    plt.title("Baseline Model Performance")
    for i, v in enumerate(b.RMSE):
        ax.text(v / 2, i, 'RMSE=' + str(v), color='white', va='center', fontweight='bold')
    return fig
def run_classifiers(df, target_column):
    '''
    Fit a set of baseline classification models and plot their test AUC.

    df: Data (dataFrame)
    target_column: Target variable/column name
    1. All classification models are ranked by AUC
    2. Categorical variables are dummy encoded for classification models except for Catboost and LightGBM.

    Returns the matplotlib figure containing a horizontal bar per model.
    '''
    import warnings
    warnings.filterwarnings("ignore")
    from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.svm import SVC
    from xgboost import XGBClassifier
    from lightgbm import LGBMClassifier
    from catboost import CatBoostClassifier
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import LabelEncoder
    import pandas as pd
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_auc_score
    import numpy as np
    # sklearn-style models that require numeric (dummy-encoded) features
    models = [DecisionTreeClassifier(), KNeighborsClassifier(), SVC(gamma='auto', probability=True), RandomForestClassifier(n_estimators=100), AdaBoostClassifier(), GradientBoostingClassifier(), XGBClassifier()]
    df = df.dropna()
    #cat_cols = df.select_dtypes(include='object').columns
    #num_cols = df.select_dtypes(exclude='object').columns
    # Decison Trees, Random Forest, SVM, KNN, Gradient Boosting, XGBoost
    X = df.drop(target_column, axis=1)
    y = df[target_column]
    X_dummy = pd.get_dummies(X)
    X_train, X_test, y_train, y_test = train_test_split(X_dummy, y, test_size=0.3, random_state=42)
    result={}
    for model in models:
        model.fit(X_train, y_train)
        # AUC from the positive-class probability column
        auc = np.round(roc_auc_score(y_test, model.predict_proba(X_test)[:,1]),2)
        result[model.__class__.__name__]=auc
    # Catboost: handles categorical columns natively via cat_features indices
    X_train_c, X_test_c, y_train_c, y_test_c = train_test_split(X, y, test_size=0.3, random_state=42)
    cat_cols = X.select_dtypes(include='object').columns
    cat_indices = [X.columns.get_loc(col) for col in cat_cols]
    cb = CatBoostClassifier(cat_features=cat_indices)
    cb.fit(X_train_c,y_train_c, eval_set=(X_test_c, y_test_c),early_stopping_rounds=10, use_best_model=True, verbose=0 )
    result[cb.__class__.__name__] = np.round(roc_auc_score(y_test_c, cb.predict_proba(X_test_c)[:,1]),2)
    # Light GBM: label-encode the categoricals and declare them explicitly
    lb = LabelEncoder()
    for col in cat_cols:
        X[col] = lb.fit_transform(X[col])
    lgb = LGBMClassifier()
    X_train_l, X_test_l, y_train_l, y_test_l = train_test_split(X, y, test_size=0.3, random_state=42)
    lgb.fit(X_train_l, y_train_l, categorical_feature=cat_cols.tolist())
    result[lgb.__class__.__name__] = np.round(roc_auc_score(y_test_l, lgb.predict_proba(X_test_l)[:,1]),2)
    # sort ascending so the best (largest AUC) model sits at the top
    b = pd.DataFrame.from_dict(result,orient='index').reset_index().rename(columns={'index':'Model',0:'AUC'}).sort_values(by='AUC', ascending=True)
    fig, ax = plt.subplots(figsize=(10,6))
    ax.barh(b.Model, b.AUC)
    plt.title("Model Baseline Performance")
    for i, v in enumerate(b.AUC):
        ax.text(v/2, i, 'AUC='+str(v), color='white', va='center', fontweight='bold')
    return fig
import configparser
import contextlib
import enum
import functools
import logging
import os
import random
import socket
import sys
import time
from os import path
from typing import Dict, Sequence, Optional, Union, KeysView
import grpc
from google.protobuf import duration_pb2
from grpc._channel import _InactiveRpcError
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
import importlib_metadata
__version__ = importlib_metadata.version(__name__)
__license__ = "MIT"
rpc_timeout = 10
class DistributedIterator:
    def __init__(
        self,
        keys: Union[Sequence[Union[str, int]], KeysView, Dict],
        timeout: Union[int, float] = 0,
        shuffle=False,
        version=0,
        chunksize=10,
        random_seed=None,
    ):
        """Enables distributed iteration of unique string keys over network.
        Each key is iterated only once across all processes and machines.
        Progress is disk-backed and persistent between restarts. The intended
        purpose is to avoid computing the same thing twice in distributed
        pipeline jobs in a fault tolerant way.
        Args:
            keys: A list of unique string or integer keys to distribute. For
                now, we only support those two types. If you need to iterate over
                objects, consider serializing (not recommended) or passing URLs or
                filenames instead.
            timeout: Seconds to expiration. `notify_success(key)` must be called
                before the lock expires.
            shuffle: If False, the keys are guaranteed to be "queued" in the
                given order.
            version: An integer suffix appended to the key.
            chunksize: The number of keys to query at a time. Useful in low
                latency scenarios.
            random_seed: A random seed used for shuffling the keys.
        Raises:
            ValueError: If the first key is neither an int nor a str.
        """
        assert keys and isinstance(keys, (KeysView, Dict, Sequence, range))
        self.keys = list(keys)
        # Only the first element's type is inspected; keys are assumed
        # homogeneous.
        if isinstance(self.keys[0], int):
            self.keys = [str(item) for item in self.keys]
        elif isinstance(self.keys[0], str):
            self.keys = list(self.keys)
        else:
            # Bug fix: the original constructed this ValueError but never
            # raised it, silently accepting unsupported key types.
            raise ValueError(f"Unrecognized key type: {type(self.keys[0])}")
        self.shuffle = shuffle
        self.version = version
        self.chunksize = chunksize
        self.expiration_seconds = timeout
        self.default_rpc_timeout = 30
        self.random_seed = random_seed
        # Instance-level RNG so that shuffling is reproducible per seed.
        self.random = random.Random(self.random_seed)

    def __iter__(self):
        self.current_index = 0
        if self.shuffle:
            # Bug fix: use the seeded instance RNG instead of the module-level
            # `random`, which ignored `random_seed`.
            self.random.shuffle(self.keys)
        return self

    def _acquire_next_available(self) -> Optional[str]:
        """Tries to acquire an exclusive lock and returns the corresponding
        string key.
        Other workers will not be able to acquire the same lock until it is
        released or expired.
        Side effects:
            Modifies `self.current_index` and sends a write request to the
            key-value lock server.
        Returns:
            A unique string key that corresponds to the acquired lock.
            None if there is no acquirable lock.
        """
        while True:
            client = lock_service_client()
            begin = self.current_index
            end = self.current_index + self.chunksize
            # Ask the server for up to `chunksize` keys but accept at most one
            # acquired lock per round trip.
            request = distlock_pb2.AcquireManyRequest(
                requests=[
                    distlock_pb2.AcquireLockRequest(
                        lock=make_lock(
                            key=make_versioned_key(key, version=self.version),
                            expiration_seconds=self.expiration_seconds,
                            owner_name=worker_name(),
                            force_ascii=True,
                        )
                    )
                    for key in self.keys[begin:end]
                ],
                max_acquired_locks=1,
            )
            responses: distlock_pb2.AcquireManyResponse = client.AcquireMany(
                request, timeout=self.default_rpc_timeout
            ).responses
            if len(responses) == 0:
                # Exhausted the key list: nothing left to try.
                return None
            self.current_index += len(responses)
            if responses[-1].HasField("acquired_lock"):
                ret = responses[-1].acquired_lock.global_id
                assert ret
                return ret
            # If no lock is acquired within [begin, end), it will try the next
            # range.

    def __next__(self) -> str:
        next_key = _with_retry(
            lambda: self._acquire_next_available(),
            max_retry_count=20,
            sleep_seconds=5,
        )
        if next_key is None:
            raise StopIteration
        return next_key
def distributed_task(key, timeout, version=0, suppress_exception=True):
    """A decorator factory for functions that run only once across all processes
    and machines.
    Can be used to implement a single iteration of DistributedIterator.
    Args:
        key: A unique string id representing the wrapped function.
        timeout: Seconds to key expiration. The wrapped function should exit
            without raising an exception within this time limit.
        version: An integer suffix appended to the key.
        suppress_exception: If True, exceptions will be logged and cause the
            wrapped function to return None. The lock will be released.
    Returns:
        A decorator that invokes the wrapped function, which is prevented from
        running again after a successful run. The wrapper itself returns a
        (Status, result) tuple; result is None when the call was skipped.
    """
    versioned_key = make_versioned_key(key=key, version=version)
    def decorator_distributed_task(func):
        @functools.wraps(func)
        def wrapper_distributed_task(*args, **kwargs):
            def try_lock_callable():
                # Non-forced acquisition of an expiring lock for this key.
                return try_lock(
                    lock=make_lock(
                        key=versioned_key,
                        expiration_seconds=timeout,
                        owner_name=worker_name(),
                        force_ascii=True,
                    ),
                    force=False,
                )
            acquired_lock, existing_lock = _with_retry(
                try_lock_callable,
                max_retry_count=20,
                sleep_seconds=5,
            )
            if acquired_lock is None:
                # Sanity check. One of acquired or existing locks should exist.
                assert existing_lock is not None
                expires_in = existing_lock.expires_in
                # If the existing lock is a permanent lock, assume it was
                # confirmed successful.
                if expires_in.seconds == 0 and expires_in.nanos == 0:
                    ret_status = Status.SKIP_OK
                else:
                    ret_status = Status.SKIP_IN_PROGRESS
                return ret_status, None
            else:
                # We hold the expiring lock; run the wrapped function.
                has_lock = False
                ret = None
                try:
                    ret = func(*args, **kwargs)
                    has_lock = True
                    ret_status = Status.COMPUTE_OK
                except Exception as ex:
                    ret_status = Status.COMPUTE_ERROR
                    if suppress_exception:
                        log.exception(
                            "Exception in function wrapped by "
                            f"@distributed_task. Key: {versioned_key}"
                        )
                    else:
                        # Release before re-raising so others may retry.
                        release_lock_async(versioned_key)
                        raise ex
                if has_lock:
                    # Success: force-convert to a permanent (non-expiring)
                    # lock so the task is never run again.
                    lock, _ = try_lock(
                        make_lock(versioned_key, expiration_seconds=0),
                        force=True,
                    )
                    assert lock is not None
                else:
                    # Suppressed failure: release so others may retry.
                    release_lock_async(versioned_key)
                return ret_status, ret
        return wrapper_distributed_task
    return decorator_distributed_task
class Status(enum.Enum):
    """Outcome of a call wrapped by `distributed_task`."""
    # This worker ran the function and it returned normally.
    COMPUTE_OK = 1
    # This worker ran the function and it raised an exception.
    COMPUTE_ERROR = 2
    # Skipped: the key already holds a permanent (non-expiring) lock,
    # i.e. some worker previously completed the task.
    SKIP_OK = 3
    # Skipped: another worker currently holds an expiring lock on the key.
    SKIP_IN_PROGRESS = 4
def notify_success(key, assert_unique=True):
    """Prevents `key` from being processed by anyone until manually released.
    This function is intended to be called after task completion.
    Args:
        key: A string key to permanently lock.
        assert_unique: If True, raise ValueError when the key was already
            permanently locked, i.e. completed more than once.
    """
    # Force-acquire with expiration_seconds=0, i.e. a permanent lock.
    acquired_lock, existing_lock = try_lock(
        make_lock(key, expiration_seconds=0), force=True
    )
    assert acquired_lock is not None
    # NOTE(review): assumes the server always reports an existing lock here
    # (e.g. the expiring lock held while the task ran) — confirm against the
    # server's AcquireLock semantics.
    assert existing_lock is not None
    if assert_unique:
        expires_in = existing_lock.expires_in
        # A zero Duration marks a permanent lock: someone already completed
        # this key before us.
        if expires_in.seconds == 0 and expires_in.nanos == 0:
            raise ValueError("Completed more than once: {}".format(key))
def notify_failure(key):
    """Releases a lock, removing the key from the database.
    Does nothing if the key does not exist.
    Fire-and-forget: the returned future from the async release is discarded,
    so this returns before the server confirms.
    Args:
        key: A string key to unlock.
    """
    release_lock_async(key)
def make_versioned_key(key: str, version: int) -> str:
    """Makes a new string key by appending a version number. The intended
    purpose is to easily create a new set of keys for when they need to be
    iterated again from scratch.
    Args:
        key: A string key.
        version: An integer suffix appended to the key, or None. Consider the
            tuple (key, version) to be the actual key stored in the database.
    Returns:
        A new string key; the unmodified `key` when version is None or 0.
    """
    assert isinstance(key, str)
    # Bug fix: the original asserted isinstance(version, int) BEFORE the
    # `version is None` check below, so passing None (which the branch
    # clearly intends to support) raised AssertionError instead.
    assert version is None or isinstance(version, int)
    if version is None or version == 0:
        return key
    # Zero-padded to 3 digits; wider versions simply use more digits.
    return f"{key}_v{version:03d}"
def make_duration(seconds: Union[int, float]):
    """Instantiates a Duration protobuf object.
    Args:
        seconds: An integer or floating point value to turn into a Duration,
            in seconds.
    Returns:
        A Duration protobuf object with the whole seconds and the fractional
        part expressed as nanoseconds.
    """
    whole = int(seconds)
    frac_nanos = int((seconds - whole) * 1e9)
    return duration_pb2.Duration(seconds=whole, nanos=frac_nanos)
def make_lock(
    key: str,
    expiration_seconds: Union[int, float] = 0,
    owner_name: Optional[str] = None,
    force_ascii=True,
) -> "distlock_pb2.Lock":
    """Instantiates a Lock protobuf object.
    expiration_seconds=0 means no expiration.
    Args:
        key: A string key to query or store in the database.
            Usually a versioned key. See `make_versioned_key`.
        expiration_seconds: The lock will be acquirable again after this amount
            of time if not refreshed.
        owner_name: Mostly intended for debugging, e.g. to check which worker
            crashed and failed to release the lock. Currently does not affect
            functionality.
        force_ascii: Raises an AssertionError if True and `key` is not
            ascii-encodable.
    Returns:
        A Lock protobuf object.
    """
    if force_ascii:
        assert is_ascii(key), key
    return distlock_pb2.Lock(
        global_id=key,
        # Duration message; 0s/0ns encodes "permanent".
        expires_in=make_duration(expiration_seconds),
        last_owner_name=owner_name,
    )
def search_keys_by_prefix(key_prefix: str, is_expired=None) -> Sequence[str]:
    """Scans the database to find a list of keys that start with `key_prefix`.
    Does not include released keys, since they were removed from the database.
    Args:
        key_prefix: A string prefix to match.
        is_expired: An optional boolean specifying whether we want to list
            expired or unexpired keys.
    Returns:
        A list of matching string keys.
    """
    client = lock_service_client()
    # `end_key` is exclusive. The suffix makes sure it comes after other keys.
    end_key = key_prefix + "\U0010fffe"  # 244, 143, 191, 191
    start_key = key_prefix
    ret = []
    if is_expired is None:
        includes = None
    else:
        # Match every id; filter on expiration state only.
        includes = [
            distlock_pb2.LockMatchExpression(
                global_id_regex=r".*", is_expired=is_expired
            )
        ]
    # Pagination.
    while True:
        request = distlock_pb2.ListLocksRequest(
            start_key=start_key,
            end_key=end_key,
            includes=includes,
        )
        response: distlock_pb2.ListLocksResponse = client.ListLocks(
            request, timeout=rpc_timeout
        )
        if len(response.locks) == 0:
            break
        elif len(response.locks) == 1:
            # Final page: a single lock means no further progress is possible.
            ret.append(response.locks[0].global_id)
            break
        else:
            # The last lock of this page becomes the next page's start_key;
            # it is deliberately excluded from `ret` here to avoid being
            # collected twice (it re-appears as the first item of the next
            # page).
            start_key = response.locks[-1].global_id
            ret.extend([item.global_id for item in response.locks[:-1]])
    return ret
def count_keys_by_prefix(key_prefix: str) -> Dict[str, int]:
    """Scans the database to count keys that start with `key_prefix`.
    Does not include released keys, since they were removed from the database.
    Args:
        key_prefix: A string prefix to match.
    Returns:
        A dict:
            expired -> Number of expired keys.
            in_progress -> Number of keys that may expire eventually.
            success -> Number of keys that do not expire.
        They are mutually exclusive.
    """
    client = lock_service_client()
    # `end_key` is exclusive. The suffix makes sure it comes after other keys.
    end_key = key_prefix + "\U0010fffe"  # 244, 143, 191, 191
    start_key = key_prefix
    request = distlock_pb2.CountLocksRequest(
        start_key=start_key,
        end_key=end_key,
    )
    # Annotation fixed from ListLocksResponse: CountLocks presumably returns a
    # CountLocksResponse carrying the expired/unexpired/no_expiration counts
    # read below — TODO confirm against distlock.proto.
    response: "distlock_pb2.CountLocksResponse" = client.CountLocks(
        request, timeout=rpc_timeout
    )
    ret = {
        "expired": response.expired,
        "in_progress": response.unexpired,
        "success": response.no_expiration,
    }
    return ret
class GlogFormatter(logging.Formatter):
    """Renders log records in the glog text layout:
    `<LEVEL><yyyymmdd> <HH:MM:SS.micros> <pid> <file>:<line>] <message>`.
    Based on https://github.com/benley/python-glog/blob/master/glog.py
    """

    # Single-letter glog level prefixes; unknown levels render as "?".
    LEVEL_MAP = {
        logging.FATAL: "F",
        logging.ERROR: "E",
        logging.WARN: "W",
        logging.INFO: "I",
        logging.DEBUG: "D",
    }

    def __init__(self):
        logging.Formatter.__init__(self)

    def format(self, record):
        letter = self.LEVEL_MAP.get(record.levelno, "?")
        local = time.localtime(record.created)
        micros = (record.created - int(record.created)) * 1e6
        pid = record.process if record.process is not None else "?????"
        body = self._format_message(record)
        rendered = "%c%04d%02d%02d %02d:%02d:%02d.%06d %s %s:%d] %s" % (
            letter,
            local.tm_year,
            local.tm_mon,
            local.tm_mday,
            local.tm_hour,
            local.tm_min,
            local.tm_sec,
            micros,
            pid,
            record.filename,
            record.lineno,
            body,
        )
        # Base-class format() pulls the text via record.getMessage(), so
        # substitute it with the pre-rendered glog line.
        record.getMessage = lambda: rendered
        return logging.Formatter.format(self, record)

    def _format_message(self, record):
        # %-interpolate args into msg; fall back to the raw msg when the
        # template and args do not match.
        try:
            return "%s" % (record.msg % record.args)
        except TypeError:
            return record.msg
def set_logger_level(level: Union[int, str], logger: logging.Logger = None):
    """Apply `level` to a logger and every handler attached to it.

    Defaults to the module-level `log` logger when no logger is given.
    """
    global log
    target = log if logger is None else logger
    for attached in target.handlers:
        attached.setLevel(level)
    target.setLevel(level)
def make_logger(name: str, level: Union[int, str]) -> logging.Logger:
    """Build a non-propagating logger with one glog-formatted stream handler
    set to `level`.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.handlers.clear()
    logger.propagate = False
    stream = logging.StreamHandler()
    stream.setLevel(logging.DEBUG)
    stream.setFormatter(GlogFormatter())
    logger.addHandler(stream)
    # Narrow both the logger and its handler to the requested level.
    set_logger_level(level=level, logger=logger)
    return logger
# Module-level logger used throughout this file; INFO by default
# (adjust with `set_logger_level`).
log = make_logger(name="run_once", level="INFO")
@contextlib.contextmanager
def add_sys_path(p):
    """Context manager that temporarily prepends `p` to ``sys.path``.

    Bug fix: the original removed `p` on exit even when `p` was already on
    ``sys.path`` before entering, silently clobbering pre-existing state.
    Now the entry is removed only if this context manager added it.
    """
    added = p not in sys.path
    if added:
        sys.path.insert(0, p)
    try:
        yield
    finally:
        if added and p in sys.path:
            sys.path.remove(p)
with add_sys_path(path.abspath(path.join(__file__, "../generated/proto"))):
from generated.proto import distlock_pb2
from generated.proto import distlock_pb2_grpc
def worker_name():
    """Identify this worker as '<hostname>_<pid>_<ppid>' (ids zero-padded to
    at least 5 digits) for lock-ownership bookkeeping.
    """
    host = socket.gethostname()
    pid = os.getpid()
    ppid = os.getppid()
    return f"{host}_{pid:05d}_{ppid:05d}"
class RetryTimeoutError(TimeoutError):
    """Raised by `_with_retry` when the retry budget is exhausted."""
    pass
def _with_retry(fn, max_retry_count=20, sleep_seconds=5):
    """Call `fn`, retrying on gRPC channel failures.

    Only `_InactiveRpcError` (the gRPC "server unreachable" error) triggers a
    retry; all other exceptions propagate immediately.

    Raises:
        RetryTimeoutError: After more than `max_retry_count` failed attempts.
    """
    retry_count = 0
    while True:
        try:
            return fn()
        except _InactiveRpcError as ex:
            if retry_count > max_retry_count:
                raise RetryTimeoutError("Max retries exceeded. {}".format(ex))
            retry_count += 1
            # lock_service_client() is expected to be called from `fn()`.
            # Clearing the cached stub forces the next attempt to open a
            # fresh channel to the server.
            _prepare_client.cache_clear()
            log.warning(
                f"Could not connect to server. "
                f"Retrying in {sleep_seconds} seconds"
            )
            time.sleep(sleep_seconds)
def try_lock(
    lock: distlock_pb2.Lock, force=False
) -> "tuple[Optional[distlock_pb2.Lock], Optional[distlock_pb2.Lock]]":
    """Attempt to acquire `lock` on the lock server.

    Annotation fixed: this function returns a 2-tuple, not a single Optional
    lock.

    Args:
        lock: The Lock protobuf to acquire (see `make_lock`).
        force: If True, overwrite any existing lock on the server.

    Returns:
        (acquired_lock, existing_lock); each element is None when the server
        response omits the corresponding field.
    """
    client = lock_service_client()
    request = distlock_pb2.AcquireLockRequest(
        lock=lock,
        overwrite=force,
    )
    response: distlock_pb2.AcquireLockResponse = client.AcquireLock(
        request, timeout=rpc_timeout
    )
    has_acquired_lock = response.HasField("acquired_lock")
    has_existing_lock = response.HasField("existing_lock")
    ret_acquired = response.acquired_lock if has_acquired_lock else None
    ret_existing = response.existing_lock if has_existing_lock else None
    return ret_acquired, ret_existing
def force_lock_async(lock: distlock_pb2.Lock, callback=None):
    """Overwrite `lock` on the server without waiting for the reply.

    Args:
        lock: The Lock protobuf to write (see `make_lock`).
        callback: Optional function invoked with the grpc.Future when the RPC
            completes.

    Returns:
        The grpc.Future for the in-flight AcquireLock RPC.
    """
    client = lock_service_client()
    request = distlock_pb2.AcquireLockRequest(
        lock=lock,
        overwrite=True,
    )
    future: grpc.Future = client.AcquireLock.future(request)
    if callback is not None:
        future.add_done_callback(callback)
    assert isinstance(future, grpc.Future)
    return future
def release_lock_async(key: str, callback=None):
    """Release the lock stored under `key` without waiting for the reply.

    Args:
        key: The string key whose lock should be removed.
        callback: Optional function invoked with the grpc.Future when the RPC
            completes.

    Returns:
        The grpc.Future for the in-flight ReleaseLock RPC.
    """
    assert isinstance(key, str), key
    client = lock_service_client()
    request = distlock_pb2.ReleaseLockRequest(
        lock=make_lock(key=key),
        # The caller does not need the released lock echoed back.
        return_released_lock=False,
    )
    future: grpc.Future = client.ReleaseLock.future(request)
    if callback is not None:
        future.add_done_callback(callback)
    assert isinstance(future, grpc.Future)
    return future
def is_ascii(s):
    """Return True when every character of `s` is ASCII-encodable."""
    return all(ord(ch) < 128 for ch in s)
def at_most_every(_func=None, *, seconds, key):
    """Decorator (factory) that runs the wrapped function at most once per
    `seconds`, across all workers sharing the lock server.

    Usable both as `@at_most_every(seconds=..., key=...)` and, via `_func`,
    when applied without arguments by a wrapper.

    Args:
        seconds: Minimum interval between executions, enforced by an
            expiring lock.
        key: The shared lock key identifying this rate-limited action.
    """
    def decorator_at_most_every(func):
        @functools.wraps(func)
        def wrapper_at_most_every(*args, **kwargs):
            acquired_lock, _ = try_lock(
                lock=distlock_pb2.Lock(
                    global_id=key,
                    # Bug fix: `expires_in` is a Duration message field;
                    # assigning the raw `seconds` number is rejected by
                    # protobuf. Wrap it like `make_lock` does.
                    expires_in=make_duration(seconds),
                    last_owner_name=worker_name(),
                )
            )
            if acquired_lock is not None:
                ret = _with_retry(
                    lambda: func(*args, **kwargs),
                    max_retry_count=20,
                    sleep_seconds=5,
                )
                return ret
            else:
                # Someone ran it within the window; skip and return None.
                return
        return wrapper_at_most_every
    if _func is None:
        return decorator_at_most_every
    else:
        return decorator_at_most_every(_func)
def lock_service_client(address: str = None, port: int = None):
    """Return a (cached) gRPC stub for the lock server.

    Address and port default to the [DEFAULT] section of ~/.run_once.ini,
    falling back to 127.0.0.1:22113 when the file or keys are absent.

    Args:
        address: Optional server address overriding the config file.
        port: Optional server port overriding the config file.

    Returns:
        A LockManagerServiceStub (cached per (address, port) by
        `_prepare_client`).
    """
    config = configparser.ConfigParser(
        defaults={"address": "127.0.0.1", "port": "22113"}
    )
    # Missing file is fine: ConfigParser.read ignores unreadable paths and
    # the defaults above apply.
    config.read(path.expanduser("~/.run_once.ini"))
    if address is None:
        address = config["DEFAULT"]["address"]
    if port is None:
        port = int(config["DEFAULT"]["port"])
    assert isinstance(address, str), address
    assert isinstance(port, int), port
    ret = _prepare_client(address=address, port=port)
    return ret
@functools.lru_cache(1)
def _prepare_client(address: str, port: int):
    """Create a gRPC stub for `address:port`.

    Cached (size 1) so the channel is reused across calls; `_with_retry`
    clears this cache to force a reconnect after connection errors.
    """
    hostname = f"{address}:{port}"
    channel = grpc.insecure_channel(
        hostname,
        # Keepalive pings so half-open connections are detected promptly.
        options=(
            ("grpc.keepalive_time_ms", 10000),
            ("grpc.keepalive_timeout_ms", 5000),
            ("grpc.keepalive_permit_without_calls", True),
            ("grpc.http2.bdp_probe", True),
        ),
    )
    client = distlock_pb2_grpc.LockManagerServiceStub(channel)
    return client
def _run_server():
    """The installer will generate a script that runs this function.
    Runs the precompiled `distlock` server binary, forwarding this process's
    command-line arguments, and blocks until it exits.
    Returns:
        Return code of the executable.
    """
    executable = path.abspath(
        path.join(__file__, "../cmake-build-release/distlock")
    )
    if not path.isfile(executable):
        raise FileNotFoundError(
            "Precompiled binary not found: {}".format(executable)
        )
    import subprocess
    import shlex
    # Arguments are shlex-quoted, so building a shell command line from
    # sys.argv is injection-safe here.
    args = " ".join(map(shlex.quote, sys.argv[1:]))
    command = f"{executable} {args}"
    log.info(f"Running command: {command}")
    return subprocess.Popen(command, shell=True).wait()
import ast
from collections import defaultdict
import csv
from datetime import datetime, timezone
from itertools import tee
from pathlib import Path
from typing import Callable, Iterable, TypeVar
import uuid
import pandas as pd
from run_one.util.config import Config
# TypeVar for the generic `pairwise` helper below.
_T = TypeVar('_T')
# Process-wide UUID prefix and counter used by `generate_random_id` to build
# unique, monotonically-numbered identifiers.
uid = str(uuid.uuid1())
counter = 0
class Action:
    """One parsed matrix row plus the fields extracted out of it."""

    def __init__(self, row: dict, extra_data: dict) -> None:
        self.row = row
        self.extra_data = extra_data

    def __repr__(self) -> str:
        return f'row={self.row}\nextra_data={self.extra_data}'

    def regenerate_time_fields(self, time_fields: list[str], time_function: Callable):
        """Replace the values of `time_fields` in the row with fresh values
        from `time_function`, mapping equal original values to the same fresh
        value. Fields equal to '*' (wildcard) or absent are left untouched.
        """
        fresh: dict = {}
        for name in time_fields:
            if name in self.row and self.row[name] != '*':
                original = self.row[name]
                # time_function() is invoked once per matched field (matching
                # the eager-evaluation semantics of dict.setdefault), but only
                # the first value generated for each original is kept.
                candidate = time_function()
                if original not in fresh:
                    fresh[original] = candidate
                self.row[name] = fresh[original]
def generate_random_id(value: str = ''):
    """Generate an identifier.

    With an empty `value`, returns a fresh UUID1 string. Otherwise returns a
    string built from a prefix of the module-level `uid` followed by the
    module-level `counter` (incremented on every call), sized to match
    `len(value)` when the prefix is long enough.
    """
    global counter
    counter += 1
    if not value:
        return str(uuid.uuid1())
    # NOTE(review): when len(str(counter)) >= len(value) the slice bound goes
    # to zero or negative and the result is just the counter digits —
    # presumably input ids are long enough in practice; confirm with callers.
    return f'{uid[:len(value) - len(str(counter))]}{counter}'
def generate_time(time_format: str = '%Y-%m-%dT%H:%M:%S.%fZ'):
    """Render the current UTC time with `time_format` (ISO-8601-like with
    microseconds by default).
    """
    now_utc = datetime.now(timezone.utc)
    return now_utc.strftime(time_format)
def read_csv_matrix(filepath: str,
                    config: Config,
                    id_function: Callable = generate_random_id) -> dict[str, dict[str, list[Action]]]:
    """
    Read matrix file(s)
    :param str filepath: path to matrix file or directory with matrices
    :param config: configuration class instance
    :param id_function: function to transform ID-like fields; pass None to
        keep original ids unchanged
    :return: matrices_data: collection of parsed matrices data: matrix file name to test cases data
        (test case name to list of its actions)

    Side effect: writes one `id_mapping_<matrix>.csv` file (old id -> new id)
    into the current working directory per matrix processed.
    """
    path = Path(filepath)
    if path.is_dir():
        matrices = sorted(path.glob('*.csv'))
    elif path.is_file():
        matrices = [path]
    else:
        raise FileNotFoundError('No matrices were found at specified path. Check `matrix_file` config parameter.')
    result = {}
    for matrix in matrices:
        # Everything is read as string so id/time fields keep their exact form.
        file = pd.read_csv(matrix, dtype=str)
        data = defaultdict(list)
        test_case_name = ''
        # Maps original id -> regenerated id, reset at every TEST_CASE_END so
        # ids are shared only within a single test case.
        test_case_transformed_ids = {}
        id_mapping = {}
        for index, row in file.iterrows():
            seq = row.get('Seq')
            if seq == 'TEST_CASE_START':
                test_case_name = row.get('CaseName')
                continue
            elif seq == 'TEST_CASE_END':
                test_case_transformed_ids.clear()
                continue
            action = row.get('Action')
            if action in config.processed_actions:
                # Trim the row to the configured shape: drop NaNs, pull out
                # extracted fields, drop/rename per config.
                row.dropna(inplace=True)
                extracted_fields = row.get([field for field in config.fields_to_extract if field in row], [])
                row.drop(config.fields_to_drop + config.fields_to_extract, errors='ignore', inplace=True)
                row.rename(config.field_mapping, inplace=True)
                for field in config.nested_fields:
                    # '*' is a wildcard value and is never parsed.
                    if field in row and row[field] != '*':
                        row[field] = ast.literal_eval(row[field])
                if id_function is not None:
                    for field in config.regenerate_id_fields:
                        if field in row and row[field] != '*':
                            field_value = row[field]
                            if field_value.startswith('!='):
                                # Negation syntax: regenerate the id after the
                                # '!=' prefix but keep the prefix itself.
                                key = field_value[2:]
                                row[field] = f'!={test_case_transformed_ids.setdefault(key, id_function(key))}'
                            else:
                                row[field] = test_case_transformed_ids.setdefault(field_value, id_function(field_value))
                data[test_case_name].append(Action(row.to_dict(), extracted_fields.to_dict()))
                id_mapping.update(test_case_transformed_ids)
        matrix_name = matrix.stem
        result[matrix_name] = data
        # Persist the id translation table for this matrix for traceability.
        with open(f'id_mapping_{matrix_name}.csv', 'w', newline='') as id_mapping_file:
            csv_writer = csv.DictWriter(id_mapping_file, fieldnames=['old', 'new'])
            csv_writer.writeheader()
            csv_writer.writerows({'old': k, 'new': v} for k, v in id_mapping.items())
    return result
def pairwise(iterable: "Iterable[_T]") -> "Iterable[tuple[_T, _T]]":
    """
    Return successive overlapping pairs taken from the input iterable.
    pairwise('ABCDEFG') --> AB BC CD DE EF FG

    Annotation fixed: the elements are 2-tuples, not 1-tuples as the original
    `tuple[_T]` claimed. Annotations are quoted so they are not evaluated at
    definition time.
    """
    a, b = tee(iterable)
    # Advance the second copy by one; harmless on empty input.
    next(b, None)
    return zip(a, b)
class Encode(object):
    """
    Encode all columns where the values are categorical.
    Parameters
    ----------
    strategy: string, optional (default='OneHotEncoder')
        available options: 'OneHotEncoder' and 'LabelEncoder'
        Values of each column will be encoded based on the choosen strategy.
    dataframe: pandas dataframe, optional
        the original (pre-encoding) dataframe; used by `inverse_transform`
        to recover the categorical columns.
    """
    def __init__(self, strategy= 'OneHotEncoder',
                 dataframe= pd.DataFrame()):
        # NOTE(review): the default dataframe is a mutable default argument
        # (evaluated once, shared across instances); kept as-is for interface
        # compatibility.
        self.strategy = strategy
        self.dataframe = dataframe
    def find_columns(df):
        """
        Find the columns for encoding.
        Note: defined without `self`; call as `Encode.find_columns(df)`.
        Parameters
        ----------
        df: pandas dataframe
            input dataframe
        Returns
        -------
        A list of column names with object (categorical) dtype
        """
        # Bug fix: the original returned the undefined name `encode_columns`,
        # raising NameError on every call.
        encode_columns = list(df.select_dtypes(include=['object']).columns)
        return encode_columns
    def transform(self, df):
        """
        Encode the categorical (object-dtype) columns of `df` according to
        `self.strategy`. Rows with NaN in any categorical column are dropped
        first.
        Parameters
        ----------
        df: pandas dataframe
            input dataframe
        Returns
        -------
        transformed dataframe (None for an unrecognized strategy)
        """
        df_object = list(df.select_dtypes(include=['object']))
        df = df.dropna(how = 'any', subset = df_object)
        if self.strategy == 'OneHotEncoder':
            categorical_data = df.select_dtypes(include=['object'])
            categorical_columns = list(categorical_data)
            df = pd.get_dummies(df, columns = categorical_columns)
            return df
        elif self.strategy == 'LabelEncoder':
            categorical_data = df.select_dtypes(include=['object'])
            le = preprocessing.LabelEncoder()
            x_encoded = categorical_data.apply(le.fit_transform)
            encoded_data = pd.DataFrame(x_encoded, columns=categorical_data.columns)
            # Updates the original DataFrame with the encoded column values.
            df.update(encoded_data)
            return df
    def inverse_transform(self, df_dummies):
        """
        Reverse a one-hot encoding, reconstructing the categorical columns
        from the dummy columns using `self.dataframe` as the reference for
        column names. Only implemented for the 'OneHotEncoder' strategy.
        """
        if self.strategy == 'OneHotEncoder':
            # Original Dataframe
            df = self.dataframe.copy()
            # Numerical Columns
            numerical_columns = list(df.select_dtypes(include=['float64', 'int64']))
            # Categorical Columns List (Original)
            categorical_columns_org = list(df.select_dtypes(include=['object']))
            # Categorical Columns List (Dummies)
            categorical_columns_dum = list(df_dummies)
            # Map each original column name to its dummy column names.
            # NOTE(review): substring matching on `name + '_'` can mis-group
            # columns whose names are prefixes of one another — confirm the
            # schema rules this out.
            column_groups = {}
            for column_org in categorical_columns_org:
                temp =[]
                for column_dum in categorical_columns_dum:
                    if column_org+'_' in column_dum:
                        temp.append(column_dum)
                column_groups[column_org] = temp
            # Rebuild each categorical column from the active dummy per row.
            df_new = pd.DataFrame()
            for column_org in categorical_columns_org:
                df_temp = df_dummies[column_groups[column_org]]
                x = df_temp.stack()
                x= pd.Series(pd.Categorical(x[x!=0].index.get_level_values(1))).tolist()
                df_new[column_org] = x
            for columns in numerical_columns:
                df_new[columns] = df[columns]
            df_new = df_new.reset_index(drop= True)
            return df_new
class Outliers(object):
    """
    Remove all rows where the values of a certain column are outside an
    specified standard deviation from mean/median.
    Parameters
    ----------
    m: float, optional (default=2.0)
        the outlier threshold with respect to the standard deviation
        (docstring fixed: it previously claimed a default of 3.0, while the
        code default is 2.0).
    strategy: string, optional (default='median')
        available options: 'mean' and 'median'
        Values of each column will be compared to the 'mean' or 'median' of that column.
    Attributes
    ----------
    removed_rows_: numpy array of indices that have been removed
    Notes
    -----
    We highly recommend you to remove constant columns first and then remove outliers.
    """
    def __init__(self, m=2.0, strategy='median'):
        self.m = m
        self.strategy = strategy
    def fit_transform(self, df):
        """
        fit the method to the input dataframe and change it accordingly
        Parameters
        ----------
        df: pandas dataframe
            input dataframe
        Returns
        -------
        transformed dataframe
        Raises
        ------
        ValueError: for an unrecognized strategy (previously this surfaced as
        a confusing NameError).
        """
        # Separate "object" columns; outlier detection runs on numerics only
        # and the object columns are re-attached afterwards.
        df_object = df.select_dtypes(include=['object'])
        df = df.select_dtypes(include=['float64', 'int64'])
        if self.strategy == 'mean':
            center = df.mean()
        elif self.strategy == 'median':
            center = df.median()
        else:
            raise ValueError(
                "strategy must be 'mean' or 'median', got {!r}".format(self.strategy)
            )
        # Keep rows where every numeric column is within m population
        # standard deviations (ddof=0) of the center.
        mask = ((df - center).abs() <= self.m * df.std(ddof=0)).T.all()
        df = df.loc[mask, :]
        self.removed_rows_ = np.array(mask[~mask].index)
        # Re-attach the object columns for the surviving rows.
        kept = list(df.index)
        df_object = df_object.loc[kept]
        df = pd.merge(df, df_object, left_index=True, right_index=True)
        return df
    def transform(self, df):
        """
        find and remove rows/indices that are in the removed_rows_ attribute of the previous fit_transform method
        Parameters
        ----------
        df: pandas dataframe
            input dataframe
        Returns
        -------
        transformed dataframe
        """
        # Bug fix: pandas >= 2.0 rejects a positional axis argument to
        # DataFrame.drop; use the keyword form.
        df = df.drop(self.removed_rows_, axis=0)
        return df
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.