id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
167,465 | import pandas as pd
import plotly.graph_objects as go
from greykite.common.constants import ACTUAL_COL
from greykite.common.constants import ANOMALY_COL
from greykite.common.constants import END_TIME_COL
from greykite.common.constants import PREDICTED_ANOMALY_COL
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import START_TIME_COL
from greykite.common.constants import TIME_COL
from greykite.common.features.adjust_anomalous_data import label_anomalies_multi_metric
from greykite.common.viz.colors_utils import get_distinct_colors
from greykite.common.viz.timeseries_plotting import plot_forecast_vs_actual
# Default column names used by the plotting utilities below.
# NOTE(review): these rebind the names imported from
# `greykite.common.constants` above — confirm the shadowing is intentional.
TIME_COL = "ts"  # timestamp column
ACTUAL_COL = "actual"  # observed values
PREDICTED_COL = "forecast"  # model predictions
ANOMALY_COL = "is_anomaly"  # labeled anomalies (boolean)
PREDICTED_ANOMALY_COL = "is_anomaly_predicted"  # predicted anomalies (boolean)
def plot_forecast_vs_actual(
        df,
        time_col=cst.TIME_COL,
        actual_col=cst.ACTUAL_COL,
        predicted_col=cst.PREDICTED_COL,
        predicted_lower_col=cst.PREDICTED_LOWER_COL,
        predicted_upper_col=cst.PREDICTED_UPPER_COL,
        xlabel=cst.TIME_COL,
        ylabel=cst.VALUE_COL,
        train_end_date=None,
        title=None,
        showlegend=True,
        actual_mode="lines+markers",
        actual_points_color="rgba(250, 43, 20, 0.7)",  # red
        actual_points_size=2.0,
        actual_color_opacity=1.0,
        forecast_curve_color="rgba(0, 90, 181, 0.7)",  # blue
        forecast_curve_dash="solid",
        ci_band_color="rgba(0, 90, 181, 0.15)",  # light blue
        ci_boundary_curve_color="rgba(0, 90, 181, 0.5)",  # light blue
        ci_boundary_curve_width=0.0,  # no line
        vertical_line_color="rgba(100, 100, 100, 0.9)",  # gray with opacity of 0.9
        vertical_line_width=1.0):
    """Plots forecast with prediction intervals, against actuals.

    Adapted from plotly user guide:
    https://plot.ly/python/v3/continuous-error-bars/#basic-continuous-error-bars

    Parameters
    ----------
    df : `pandas.DataFrame`
        Timestamp, predicted, and actual values.
    time_col : `str`, default `~greykite.common.constants.TIME_COL`
        Column in df with timestamp (x-axis).
    actual_col : `str`, default `~greykite.common.constants.ACTUAL_COL`
        Column in df with actual values.
    predicted_col : `str`, default `~greykite.common.constants.PREDICTED_COL`
        Column in df with predicted values.
    predicted_lower_col : `str` or None, default `~greykite.common.constants.PREDICTED_LOWER_COL`
        Column in df with predicted lower bound.
    predicted_upper_col : `str` or None, default `~greykite.common.constants.PREDICTED_UPPER_COL`
        Column in df with predicted upper bound.
    xlabel : `str`, default `~greykite.common.constants.TIME_COL`
        x-axis label.
    ylabel : `str`, default `~greykite.common.constants.VALUE_COL`
        y-axis label.
    train_end_date : `datetime.datetime` or None, default None
        Train end date.
        Must be a value in ``df[time_col]``.
    title : `str` or None, default None
        Plot title.
    showlegend : `bool`, default True
        Whether to show a plot legend.
    actual_mode : `str`, default "lines+markers"
        How to show the actuals.
        Options: ``markers``, ``lines``, ``lines+markers``.
    actual_points_color : `str`, default "rgba(250, 43, 20, 0.7)" (red)
        Color of actual line/marker.
    actual_points_size : `float`, default 2.0
        Size of actual markers.
        Only used if "markers" is in ``actual_mode``.
    actual_color_opacity : `float` or None, default 1.0
        Opacity of actual values points.
    forecast_curve_color : `str`, default "rgba(0, 90, 181, 0.7)" (blue)
        Color of forecasted values.
    forecast_curve_dash : `str`, default "solid"
        'dash' property of forecast ``scatter.line``.
        One of: ``['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']``
        or a string containing a dash length list in pixels or percentages
        (e.g. ``'5px 10px 2px 2px'``, ``'5, 10, 2, 2'``, ``'10% 20% 40%'``)
    ci_band_color : `str`, default "rgba(0, 90, 181, 0.15)" (light blue)
        Fill color of the prediction bands.
    ci_boundary_curve_color : `str`, default "rgba(0, 90, 181, 0.5)" (light blue)
        Color of the prediction upper/lower lines.
    ci_boundary_curve_width : `float`, default 0.0
        Width of the prediction upper/lower lines.
        Default 0.0 (hidden).
    vertical_line_color : `str`, default "rgba(100, 100, 100, 0.9)"
        Color of the vertical line indicating train end date.
        Default is gray with opacity of 0.9.
    vertical_line_width : `float`, default 1.0
        Width of the vertical line indicating train end date.

    Returns
    -------
    fig : `plotly.graph_objects.Figure`
        Plotly figure of forecast against actuals, with prediction
        intervals if available.
        Can show, convert to HTML, update::

            # show figure
            fig.show()
            # get HTML string, write to file
            fig.to_html(include_plotlyjs=False, full_html=True)
            fig.write_html("figure.html", include_plotlyjs=False, full_html=True)
            # customize layout (https://plot.ly/python/v3/user-guide/)
            update_layout = dict(
                yaxis=dict(title="new ylabel"),
                title_text="new title",
                title_x=0.5,
                title_font_size=30)
            fig.update_layout(update_layout)
    """
    if title is None:
        title = "Forecast vs Actual"
    # `train_end_date` may be scalar or list-like; every value must be present.
    if train_end_date is not None and not all(pd.Series(train_end_date).isin(df[time_col])):
        raise Exception(
            f"train_end_date {train_end_date} is not found in df['{time_col}']")
    fill_dict = {
        "mode": "lines",
        "fillcolor": ci_band_color,
        "fill": "tonexty"
    }
    data = []
    if predicted_lower_col is not None:
        lower_bound = go.Scatter(
            name="Lower Bound",
            x=df[time_col],
            y=df[predicted_lower_col],
            mode="lines",
            line=dict(
                width=ci_boundary_curve_width,
                color=ci_boundary_curve_color),
            legendgroup="interval"  # show/hide with the upper bound
        )
        data.append(lower_bound)
    # plotly fills between current and previous element in `data`.
    # The upper bound only fills down if the lower bound trace exists.
    upper_fill_dict = fill_dict if predicted_lower_col is not None else {}
    if predicted_upper_col is not None:
        upper_bound = go.Scatter(
            name="Upper Bound",
            x=df[time_col],
            y=df[predicted_upper_col],
            line=dict(
                width=ci_boundary_curve_width,
                color=ci_boundary_curve_color),
            legendgroup="interval",  # show/hide with the lower bound
            **upper_fill_dict)
        data.append(upper_bound)
    actual_params = {}
    if "lines" in actual_mode:
        actual_params.update(line=dict(color=actual_points_color))
    if "markers" in actual_mode:
        actual_params.update(marker=dict(color=actual_points_color, size=actual_points_size))
    actual = go.Scatter(
        name="Actual",
        x=df[time_col],
        y=df[actual_col],
        mode=actual_mode,
        opacity=actual_color_opacity,
        **actual_params
    )
    data.append(actual)
    # If `predicted_lower_col` and `predicted_upper_col`, then the full range
    # has been filled in. If only one of them, then fill in between that line
    # and forecast.
    forecast_fill_dict = fill_dict if (predicted_lower_col is None) != (predicted_upper_col is None) else {}
    forecast = go.Scatter(
        name="Forecast",
        x=df[time_col],
        y=df[predicted_col],
        line=dict(
            color=forecast_curve_color,
            dash=forecast_curve_dash),
        **forecast_fill_dict)
    data.append(forecast)
    layout = go.Layout(
        xaxis=dict(title=xlabel),
        yaxis=dict(title=ylabel),
        title=title,
        title_x=0.5,
        showlegend=showlegend,
        # legend order from top to bottom: Actual, Forecast, Upper Bound, Lower Bound
        legend={'traceorder': 'reversed'}
    )
    fig = go.Figure(data=data, layout=layout)
    # (A no-op `fig.update()` call was removed here.)
    # Adds a vertical line and annotation to separate training and testing phases.
    if train_end_date is not None:
        new_layout = dict(
            # add vertical line
            shapes=[dict(
                type="line",
                xref="x",
                yref="paper",  # y-reference is assigned to the plot paper [0,1]
                x0=train_end_date,
                y0=0,
                x1=train_end_date,
                y1=1,
                line=dict(
                    color=vertical_line_color,
                    width=vertical_line_width)
            )],
            # add text annotation
            annotations=[dict(
                xref="x",
                x=train_end_date,
                yref="paper",
                y=.97,
                text="Train End Date",
                showarrow=True,
                arrowhead=0,
                ax=-60,
                ay=0
            )]
        )
        fig.update_layout(new_layout)
    return fig
The provided code snippet includes necessary dependencies for implementing the `plot_anomalies_over_forecast_vs_actual` function. Write a Python function `def plot_anomalies_over_forecast_vs_actual( df, time_col=TIME_COL, actual_col=ACTUAL_COL, predicted_col=PREDICTED_COL, predicted_anomaly_col=PREDICTED_ANOMALY_COL, anomaly_col=ANOMALY_COL, marker_opacity=1, predicted_anomaly_marker_color="rgba(0, 90, 181, 0.9)", anomaly_marker_color="rgba(250, 43, 20, 0.7)", **kwargs)` to solve the following problem:
Utility function which overlayes the predicted anomalies or anomalies on the forecast vs actual plot. The function calls the internal function `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual` and then adds markers on top. Parameters ---------- df : `pandas.DataFrame` The input dataframe. time_col : `str`, default `~greykite.common.constants.TIME_COL` Column in ``df`` with timestamp (x-axis). actual_col : `str`, default `~greykite.common.constants.ACTUAL_COL` Column in ``df`` with actual values. predicted_col : `str`, default `~greykite.common.constants.PREDICTED_COL` Column in ``df`` with predicted values. predicted_anomaly_col : `str` or None, default `~greykite.common.constants.PREDICTED_ANOMALY_COL` Column in ``df`` with predicted anomaly labels (boolean) in the time series. `True` denotes a predicted anomaly. anomaly_col : `str` or None, default `~greykite.common.constants.ANOMALY_COL` Column in ``df`` with anomaly labels (boolean) in the time series. `True` denotes an anomaly. marker_opacity : `float`, default 0.5 The opacity of the marker colors. predicted_anomaly_marker_color : `str`, default "green" The color of the marker(s) for the predicted anomalies. anomaly_marker_color : `str`, default "red" The color of the marker(s) for the anomalies. **kwargs Additional arguments on how to decorate your plot. The keyword arguments are passed to `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual`. Returns ------- fig : `plotly.graph_objs._figure.Figure` Plot figure.
Here is the function:
def plot_anomalies_over_forecast_vs_actual(
        df,
        time_col=TIME_COL,
        actual_col=ACTUAL_COL,
        predicted_col=PREDICTED_COL,
        predicted_anomaly_col=PREDICTED_ANOMALY_COL,
        anomaly_col=ANOMALY_COL,
        marker_opacity=1,
        predicted_anomaly_marker_color="rgba(0, 90, 181, 0.9)",
        anomaly_marker_color="rgba(250, 43, 20, 0.7)",
        **kwargs):
    """Utility function which overlays the predicted anomalies or anomalies on the
    forecast vs actual plot.

    The function calls the internal function
    `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual`
    and then adds markers on top.

    Parameters
    ----------
    df : `pandas.DataFrame`
        The input dataframe.
    time_col : `str`, default `~greykite.common.constants.TIME_COL`
        Column in ``df`` with timestamp (x-axis).
    actual_col : `str`, default `~greykite.common.constants.ACTUAL_COL`
        Column in ``df`` with actual values.
    predicted_col : `str`, default `~greykite.common.constants.PREDICTED_COL`
        Column in ``df`` with predicted values.
    predicted_anomaly_col : `str` or None, default `~greykite.common.constants.PREDICTED_ANOMALY_COL`
        Column in ``df`` with predicted anomaly labels (boolean) in the time series.
        `True` denotes a predicted anomaly. If None, predicted anomalies are not plotted.
    anomaly_col : `str` or None, default `~greykite.common.constants.ANOMALY_COL`
        Column in ``df`` with anomaly labels (boolean) in the time series.
        `True` denotes an anomaly. If None, anomalies are not plotted.
    marker_opacity : `float`, default 1
        The opacity of the marker colors.
    predicted_anomaly_marker_color : `str`, default "rgba(0, 90, 181, 0.9)" (blue)
        The color of the marker(s) for the predicted anomalies.
    anomaly_marker_color : `str`, default "rgba(250, 43, 20, 0.7)" (red)
        The color of the marker(s) for the anomalies.
    **kwargs
        Additional arguments on how to decorate your plot. The keyword arguments are
        passed to `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual`.

    Returns
    -------
    fig : `plotly.graph_objs._figure.Figure`
        Plot figure.
    """
    fig = plot_forecast_vs_actual(
        df=df,
        time_col=time_col,
        actual_col=actual_col,
        predicted_col=predicted_col,
        **kwargs)
    # NOTE(review): `val is True` matches only the Python `True` singleton;
    # numpy bools (`np.True_`) or integer 1 labels would NOT be marked —
    # confirm the label columns hold native Python bools.
    if anomaly_col is not None:
        # Square red markers on the actual values that are labeled anomalies.
        fig.add_trace(go.Scatter(
            x=df.loc[df[anomaly_col].apply(lambda val: val is True), time_col],
            y=df.loc[df[anomaly_col].apply(lambda val: val is True), actual_col],
            mode="markers",
            marker_size=10,
            marker_symbol="square",
            marker=go.scatter.Marker(color=anomaly_marker_color),
            name=anomaly_col.title(),
            showlegend=True,
            opacity=marker_opacity))
    if predicted_anomaly_col is not None:
        # Smaller diamond blue markers on the actual values predicted as anomalies.
        fig.add_trace(go.Scatter(
            x=df.loc[df[predicted_anomaly_col].apply(lambda val: val is True), time_col],
            y=df.loc[df[predicted_anomaly_col].apply(lambda val: val is True), actual_col],
            mode="markers",
            marker_size=7,
            marker_symbol="diamond",
            marker=go.scatter.Marker(color=predicted_anomaly_marker_color),
            name=predicted_anomaly_col.title(),
            showlegend=True,
            opacity=marker_opacity))
    return fig
167,466 | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from plotly.subplots import make_subplots
from greykite.common import constants as cst
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import update_dictionary
from greykite.common.viz.colors_utils import get_color_palette
from greykite.common.viz.colors_utils import get_distinct_colors
def plot_multivariate(
        df,
        x_col,
        y_col_style_dict="plotly",
        default_color="rgba(0, 145, 202, 1.0)",
        xlabel=None,
        ylabel=cst.VALUE_COL,
        title=None,
        showlegend=True):
    """Plots one or more columns of ``df`` as lines against ``x_col``.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Data frame containing ``x_col`` and the columns named by the keys
        of ``y_col_style_dict``.
    x_col : `str`
        Column plotted on the x-axis.
    y_col_style_dict : `dict` [`str`, `dict` or None] or "plotly" or "auto" or "auto-fill", default "plotly"
        Which column(s) to plot on the y-axis and how to style them.
        As a dictionary, each key is a column in ``df`` and each value is
        either None (default line labeled by the column name) or a dict of
        kwargs for `plotly.graph_objects.Scatter`
        (https://plotly.com/python/reference/#scatter).
        As a string, every column besides ``x_col`` is plotted:

        - "plotly": default plotly styling.
        - "auto": lines colored ``default_color``, ordered by mean value (ascending).
        - "auto-fill": like "auto", with fill between consecutive lines.
    default_color : `str`, default "rgba(0, 145, 202, 1.0)" (blue)
        Line color used by the "auto" and "auto-fill" modes.
    xlabel : `str` or None, default None
        x-axis label; falls back to ``x_col``.
    ylabel : `str` or None, default ``VALUE_COL``
        y-axis label.
    title : `str` or None, default None
        Plot title; derived from the axis labels when None.
    showlegend : `bool`, default True
        Whether to show the legend.

    Returns
    -------
    fig : `plotly.graph_objects.Figure`
        Interactive plotly graph of the selected columns against ``x_col``.
    """
    if xlabel is None:
        xlabel = x_col
    if title is None and ylabel is not None:
        title = f"{ylabel} vs {xlabel}"
    plain_style = {"line": {"color": default_color}}
    if y_col_style_dict == "plotly":
        # Default plotly styling for every non-x column.
        y_col_style_dict = {col: None for col in df.columns if col != x_col}
    elif y_col_style_dict in ["auto", "auto-fill"]:
        # Order columns by their mean value, low to high.
        ordered_cols = list(df.drop(columns=x_col).mean().sort_values().index)
        if y_col_style_dict == "auto":
            # Uniformly colored lines.
            y_col_style_dict = {col: plain_style for col in ordered_cols}
        elif y_col_style_dict == "auto-fill":
            # Uniformly colored lines, each filled down to the previous one.
            y_col_style_dict = {ordered_cols[0]: plain_style}
            for col in ordered_cols[1:]:
                y_col_style_dict[col] = {
                    "line": {"color": default_color},
                    "fill": "tonexty"
                }
    base_style = dict(mode="lines")
    data = []
    for column, style_dict in y_col_style_dict.items():
        # Label each trace by its column name unless the caller overrides it;
        # caller-provided keys win over (or remove) the defaults.
        labeled_style = update_dictionary(base_style, overwrite_dict={"name": column})
        labeled_style = update_dictionary(labeled_style, overwrite_dict=style_dict)
        data.append(go.Scatter(
            x=df[x_col],
            y=df[column],
            **labeled_style))
    layout = go.Layout(
        xaxis=dict(title=xlabel),
        yaxis=dict(title=ylabel),
        title=title,
        title_x=0.5,
        showlegend=showlegend,
        legend={'traceorder': 'reversed'}  # Matches the order of ``y_col_style_dict`` (bottom to top)
    )
    return go.Figure(data=data, layout=layout)
def update_dictionary(default_dict, overwrite_dict=None, allow_unknown_keys=True):
    """Merges ``default_dict`` and ``overwrite_dict`` into a new dictionary.

    On conflicting keys the value from ``overwrite_dict`` wins; keys present
    only in ``default_dict`` keep their default values. Neither input is modified.

    Parameters
    ----------
    default_dict : `dict`
        Dictionary of default values.
    overwrite_dict : `dict` or None, optional, default None
        User-provided dictionary that overrides the defaults.
    allow_unknown_keys : `bool`, optional, default True
        If False, every key of ``overwrite_dict`` must already exist in
        ``default_dict``.

    Raises
    ------
    ValueError
        If ``allow_unknown_keys`` is False and ``overwrite_dict`` has keys
        that are not in ``default_dict``.

    Returns
    -------
    updated_dict : `dict`
        ``default_dict`` updated with the entries of ``overwrite_dict``.
    """
    overwrite_dict = {} if overwrite_dict is None else overwrite_dict
    if not allow_unknown_keys:
        extra_keys = overwrite_dict.keys() - default_dict.keys()
        if extra_keys:
            raise ValueError(f"Unexpected key(s) found: {extra_keys}. "
                             f"The valid keys are: {default_dict.keys()}")
    merged = dict(default_dict)
    merged.update(overwrite_dict)
    return merged
def get_color_palette(num, colors=DEFAULT_PLOTLY_COLORS):
    """Builds a palette of ``num`` RGB colors from ``colors``.

    If ``colors`` has a single entry, it is repeated ``num`` times.
    If it has at least ``num`` entries, the first ``num`` are returned.
    Otherwise ``num`` colors are interpolated between the first and last
    entries of ``colors``.

    Parameters
    ----------
    num : `int`
        Number of colors required.
    colors : [`str`, `list` [`str`]], default ``DEFAULT_PLOTLY_COLORS``
        Which colors to use to build the color palette.
        This can be a list of RGB colors or a `str` from ``PLOTLY_SCALES``.

    Returns
    -------
    color_palette : List
        A list of ``num`` RGB colors.
    """
    # Raises if `colors` is not valid RGB input.
    validate_colors(colors, colortype="rgb")
    if len(colors) == 1:
        return colors * num
    if num <= len(colors):
        return colors[:num]
    # Not enough colors: interpolate between the two endpoints.
    return n_colors(
        colors[0],
        colors[-1],
        num,
        colortype="rgb")
The provided code snippet includes necessary dependencies for implementing the `plot_multivariate_grouped` function. Write a Python function `def plot_multivariate_grouped( df, x_col, y_col_style_dict, grouping_x_col, grouping_x_col_values, grouping_y_col_style_dict, colors=DEFAULT_PLOTLY_COLORS, xlabel=None, ylabel=cst.VALUE_COL, title=None, showlegend=True)` to solve the following problem:
Plots multiple lines against the same x-axis values. The lines can partially share the x-axis values. See parameter descriptions for a running example. Parameters ---------- df : `pandas.DataFrame` Data frame with ``x_col`` and columns named by the keys in ``y_col_style_dict``, ``grouping_x_col``, ``grouping_y_col_style_dict``. For example:: df = pd.DataFrame({ time: [dt(2018, 1, 1), dt(2018, 1, 2), dt(2018, 1, 3)], "y1": [8.5, 2.0, 3.0], "y2": [1.4, 2.1, 3.4], "y3": [4.2, 3.1, 3.0], "y4": [0, 1, 2], "y5": [10, 9, 8], "group": [1, 2, 1], }) This will be our running example. x_col: `str` Which column to plot on the x-axis. "time" in our example. y_col_style_dict: `dict` [`str`, `dict` or None] The column(s) to plot on the y-axis, and how to style them. These columns are plotted against the complete x-axis. - key : `str` column name in ``df`` - value : `dict` or None Optional styling options, passed as kwargs to `go.Scatter`. If None, uses the default: line labeled by the column name. If line color is not given, it is added according to ``colors``. See reference page for `plotly.graph_objects.Scatter` for options (e.g. color, mode, width/size, opacity). https://plotly.com/python/reference/#scatter. For example:: y_col_style_dict={ "y1": { "name": "y1_name", "legendgroup": "one", "mode": "markers", "line": None # Remove line params since we use mode="markers" }, "y2": None, } The function will add a line color to "y1" and "y2" based on the ``colors`` parameter. It will also add a name to "y2", since none was given. The "name" of "y1" will be preserved. The output ``fig`` will have one line each for each of "y1" and "y2", each plot against the entire "time" column. grouping_x_col: `str` Which column to use to group columns in ``grouping_y_col_style_dict``. "group" in our example. grouping_x_col_values: `list` [`int`] or None Which values to use for grouping. If None, uses all the unique values in ``df`` [``grouping_x_col``]. 
In our example, specifying ``grouping_x_col_values == [1, 2]`` would plot separate lines corresponding to ``group==1`` and ``group==2``. grouping_y_col_style_dict: `dict` [`str`, `dict` or None] The column(s) to plot on the y-axis, and how to style them. These columns are plotted against partial x-axis. For each ``grouping_x_col_values`` an element in this dictionary produces one line. - key : `str` column name in ``df`` - value : `dict` or None Optional styling options, passed as kwargs to `go.Scatter`. If None, uses the default: line labeled by the ``grouping_x_col_values``, ``grouping_x_col`` and column name. If a name is given, it is augmented with the ``grouping_x_col_values``. If line color is not given, it is added according to ``colors``. All the lines sharing same ``grouping_x_col_values`` have the same color. See reference page for `plotly.graph_objects.Scatter` for options (e.g. color, mode, width/size, opacity). https://plotly.com/python/reference/#scatter. For example:: grouping_y_col_style_dict={ "y3": { "line": { "color": "blue" } }, "y4": { "name": "y4_name", "line": { "width": 2, "dash": "dot" } }, "y5": None, } The function will add a line color to "y4" and "y5" based on the ``colors`` parameter. The line color of "y3" will be "blue" as specified. We also preserve the given line properties of "y4". ` The function adds a name to "y3" and "y5", since none was given. The given "name" of "y4" will be augmented with ``grouping_x_col_values``. Each element of ``grouping_y_col_style_dict`` gets one line for each ``grouping_x_col_values``. In our example, there will be 2 lines corresponding to "y3", named "1_y3" and "2_y3". "1_y3" is plotted against "time = [dt(2018, 1, 1), dt(2018, 1, 3)]", corresponding to ``group==1``. "2_y3" is plotted against "time = [dt(2018, 1, 2)", corresponding to ``group==2``. colors: [`str`, `list` [`str`]], default ``DEFAULT_PLOTLY_COLORS`` Which colors to use to build a color palette for plotting. 
This can be a list of RGB colors or a `str` from ``PLOTLY_SCALES``. Required number of colors equals sum of the length of ``y_col_style_dict`` and length of ``grouping_x_col_values``. See `~greykite.common.viz.colors_utils.get_color_palette` for details. xlabel : `str` or None, default None x-axis label. If None, default is ``x_col``. ylabel : `str` or None, default ``VALUE_COL`` y-axis label title : `str` or None, default None Plot title. If None, default is based on axis labels. showlegend : `bool`, default True Whether to show the legend. Returns ------- fig : `plotly.graph_objects.Figure` Interactive plotly graph of one or more columns in ``df`` against ``x_col``. See `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual` return value for how to plot the figure and add customization.
Here is the function:
def plot_multivariate_grouped(
        df,
        x_col,
        y_col_style_dict,
        grouping_x_col,
        grouping_x_col_values,
        grouping_y_col_style_dict,
        colors=DEFAULT_PLOTLY_COLORS,
        xlabel=None,
        ylabel=cst.VALUE_COL,
        title=None,
        showlegend=True):
    """Plots multiple lines against the same x-axis values. The lines can
    partially share the x-axis values.

    See parameter descriptions for a running example.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Data frame with ``x_col`` and columns named by the keys in ``y_col_style_dict``,
        ``grouping_x_col``, ``grouping_y_col_style_dict``.
        For example::

            df = pd.DataFrame({
                time: [dt(2018, 1, 1),
                       dt(2018, 1, 2),
                       dt(2018, 1, 3)],
                "y1": [8.5, 2.0, 3.0],
                "y2": [1.4, 2.1, 3.4],
                "y3": [4.2, 3.1, 3.0],
                "y4": [0, 1, 2],
                "y5": [10, 9, 8],
                "group": [1, 2, 1],
            })

        This will be our running example.
    x_col: `str`
        Which column to plot on the x-axis.
        "time" in our example.
    y_col_style_dict: `dict` [`str`, `dict` or None]
        The column(s) to plot on the y-axis, and how to style them.
        These columns are plotted against the complete x-axis.

        - key : `str`
            column name in ``df``
        - value : `dict` or None
            Optional styling options, passed as kwargs to `go.Scatter`.
            If None, uses the default: line labeled by the column name.
            If line color is not given, it is added according to ``colors``.
            See reference page for `plotly.graph_objects.Scatter` for options
            (e.g. color, mode, width/size, opacity).
            https://plotly.com/python/reference/#scatter.

        For example::

            y_col_style_dict={
                "y1": {
                    "name": "y1_name",
                    "legendgroup": "one",
                    "mode": "markers",
                    "line": None  # Remove line params since we use mode="markers"
                },
                "y2": None,
            }

        The function will add a line color to "y1" and "y2" based on the ``colors`` parameter.
        It will also add a name to "y2", since none was given. The "name" of "y1" will be preserved.
        The output ``fig`` will have one line each for each of "y1" and "y2", each plot against
        the entire "time" column.
    grouping_x_col: `str`
        Which column to use to group columns in ``grouping_y_col_style_dict``.
        "group" in our example.
    grouping_x_col_values: `list` [`int`] or None
        Which values to use for grouping. If None, uses all the unique values in
        ``df`` [``grouping_x_col``].
        In our example, specifying ``grouping_x_col_values == [1, 2]`` would plot
        separate lines corresponding to ``group==1`` and ``group==2``.
    grouping_y_col_style_dict: `dict` [`str`, `dict` or None]
        The column(s) to plot on the y-axis, and how to style them.
        These columns are plotted against partial x-axis.
        For each ``grouping_x_col_values`` an element in this dictionary produces
        one line.

        - key : `str`
            column name in ``df``
        - value : `dict` or None
            Optional styling options, passed as kwargs to `go.Scatter`.
            If None, uses the default: line labeled by the ``grouping_x_col_values``,
            ``grouping_x_col`` and column name.
            If a name is given, it is augmented with the ``grouping_x_col_values``.
            If line color is not given, it is added according to ``colors``.
            All the lines sharing same ``grouping_x_col_values`` have the same color.
            See reference page for `plotly.graph_objects.Scatter` for options
            (e.g. color, mode, width/size, opacity).
            https://plotly.com/python/reference/#scatter.

        For example::

            grouping_y_col_style_dict={
                "y3": {
                    "line": {
                        "color": "blue"
                    }
                },
                "y4": {
                    "name": "y4_name",
                    "line": {
                        "width": 2,
                        "dash": "dot"
                    }
                },
                "y5": None,
            }

        The function will add a line color to "y4" and "y5" based on the ``colors`` parameter.
        The line color of "y3" will be "blue" as specified. We also preserve the given line
        properties of "y4".
        The function adds a name to "y3" and "y5", since none was given. The given "name" of "y4"
        will be augmented with ``grouping_x_col_values``.
        Each element of ``grouping_y_col_style_dict`` gets one line for each ``grouping_x_col_values``.
        In our example, there will be 2 lines corresponding to "y3", named "1_y3" and "2_y3".
        "1_y3" is plotted against "time = [dt(2018, 1, 1), dt(2018, 1, 3)]", corresponding to ``group==1``.
        "2_y3" is plotted against "time = [dt(2018, 1, 2)]", corresponding to ``group==2``.
    colors: [`str`, `list` [`str`]], default ``DEFAULT_PLOTLY_COLORS``
        Which colors to use to build a color palette for plotting.
        This can be a list of RGB colors or a `str` from ``PLOTLY_SCALES``.
        Required number of colors equals sum of the length of ``y_col_style_dict``
        and length of ``grouping_x_col_values``.
        See `~greykite.common.viz.colors_utils.get_color_palette` for details.
    xlabel : `str` or None, default None
        x-axis label. If None, default is ``x_col``.
    ylabel : `str` or None, default ``VALUE_COL``
        y-axis label
    title : `str` or None, default None
        Plot title. If None, default is based on axis labels.
    showlegend : `bool`, default True
        Whether to show the legend.

    Returns
    -------
    fig : `plotly.graph_objects.Figure`
        Interactive plotly graph of one or more columns
        in ``df`` against ``x_col``.
        See `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual`
        return value for how to plot the figure and add customization.
    """
    available_grouping_x_col_values = np.unique(df[grouping_x_col])
    if grouping_x_col_values is None:
        grouping_x_col_values = available_grouping_x_col_values
    else:
        # Every requested grouping value must actually occur in the data.
        missing_grouping_x_col_values = set(grouping_x_col_values) - set(available_grouping_x_col_values)
        if len(missing_grouping_x_col_values) > 0:
            raise ValueError(f"Following 'grouping_x_col_values' are missing in '{grouping_x_col}' column: "
                             f"{missing_grouping_x_col_values}")
    # Chooses the color palette: one color per ungrouped column, plus one color
    # per grouping value (shared by all grouped columns with that value).
    n_color = len(y_col_style_dict) + len(grouping_x_col_values)
    color_palette = get_color_palette(num=n_color, colors=colors)
    # Updates colors for y_col_style_dict if it is not specified.
    # NOTE(review): this writes back into the caller's ``y_col_style_dict``
    # (in-place mutation of the input dict) — confirm this side effect is intended.
    for color_num, (column, style_dict) in enumerate(y_col_style_dict.items()):
        if style_dict is None:
            style_dict = {}
        default_color = {"color": color_palette[color_num]}
        style_dict["line"] = update_dictionary(default_color, overwrite_dict=style_dict.get("line"))
        y_col_style_dict[column] = style_dict
    # Standardizes dataset for the next figure:
    # one row per x value, sorted, so the full-axis lines are well defined.
    df_standardized = df.copy().drop_duplicates(subset=[x_col]).sort_values(by=x_col)
    # This figure plots the whole xaxis vs yaxis values
    fig = plot_multivariate(
        df=df_standardized,
        x_col=x_col,
        y_col_style_dict=y_col_style_dict,
        xlabel=xlabel,
        ylabel=ylabel,
        title=title,
        showlegend=showlegend)
    data = fig.data
    layout = fig.layout
    # These figures plot the sliced xaxis vs yaxis values.
    # `enumerate` starts at len(y_col_style_dict) so the grouped lines continue
    # the color palette where the ungrouped lines left off.
    for color_num, grouping_x_col_value in enumerate(grouping_x_col_values, len(y_col_style_dict)):
        default_color = {"color": color_palette[color_num]}
        sliced_y_col_style_dict = grouping_y_col_style_dict.copy()
        for column, style_dict in sliced_y_col_style_dict.items():
            # Updates colors if it is not specified
            if style_dict is None:
                style_dict = {}
            line_dict = update_dictionary(default_color, overwrite_dict=style_dict.get("line"))
            # Augments names with grouping_x_col_value
            name = style_dict.get("name")
            if name is None:
                updated_name = f"{grouping_x_col_value}_{grouping_x_col}_{column}"
            else:
                updated_name = f"{grouping_x_col_value}_{name}"
            overwrite_dict = {
                "name": updated_name,
                "line": line_dict
            }
            style_dict = update_dictionary(style_dict, overwrite_dict=overwrite_dict)
            sliced_y_col_style_dict[column] = style_dict
        # Only the rows belonging to this grouping value.
        df_sliced = df[df[grouping_x_col] == grouping_x_col_value]
        fig = plot_multivariate(
            df=df_sliced,
            x_col=x_col,
            y_col_style_dict=sliced_y_col_style_dict)
        data = data + fig.data
    # Combine all traces under the layout of the full-axis figure.
    fig = go.Figure(data=data, layout=layout)
    return fig
In our example, specifying ``grouping_x_col_values == [1, 2]`` would plot separate lines corresponding to ``group==1`` and ``group==2``. grouping_y_col_style_dict: `dict` [`str`, `dict` or None] The column(s) to plot on the y-axis, and how to style them. These columns are plotted against partial x-axis. For each ``grouping_x_col_values`` an element in this dictionary produces one line. - key : `str` column name in ``df`` - value : `dict` or None Optional styling options, passed as kwargs to `go.Scatter`. If None, uses the default: line labeled by the ``grouping_x_col_values``, ``grouping_x_col`` and column name. If a name is given, it is augmented with the ``grouping_x_col_values``. If line color is not given, it is added according to ``colors``. All the lines sharing same ``grouping_x_col_values`` have the same color. See reference page for `plotly.graph_objects.Scatter` for options (e.g. color, mode, width/size, opacity). https://plotly.com/python/reference/#scatter. For example:: grouping_y_col_style_dict={ "y3": { "line": { "color": "blue" } }, "y4": { "name": "y4_name", "line": { "width": 2, "dash": "dot" } }, "y5": None, } The function will add a line color to "y4" and "y5" based on the ``colors`` parameter. The line color of "y3" will be "blue" as specified. We also preserve the given line properties of "y4". ` The function adds a name to "y3" and "y5", since none was given. The given "name" of "y4" will be augmented with ``grouping_x_col_values``. Each element of ``grouping_y_col_style_dict`` gets one line for each ``grouping_x_col_values``. In our example, there will be 2 lines corresponding to "y3", named "1_y3" and "2_y3". "1_y3" is plotted against "time = [dt(2018, 1, 1), dt(2018, 1, 3)]", corresponding to ``group==1``. "2_y3" is plotted against "time = [dt(2018, 1, 2)", corresponding to ``group==2``. colors: [`str`, `list` [`str`]], default ``DEFAULT_PLOTLY_COLORS`` Which colors to use to build a color palette for plotting. 
This can be a list of RGB colors or a `str` from ``PLOTLY_SCALES``. Required number of colors equals sum of the length of ``y_col_style_dict`` and length of ``grouping_x_col_values``. See `~greykite.common.viz.colors_utils.get_color_palette` for details. xlabel : `str` or None, default None x-axis label. If None, default is ``x_col``. ylabel : `str` or None, default ``VALUE_COL`` y-axis label title : `str` or None, default None Plot title. If None, default is based on axis labels. showlegend : `bool`, default True Whether to show the legend. Returns ------- fig : `plotly.graph_objects.Figure` Interactive plotly graph of one or more columns in ``df`` against ``x_col``. See `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual` return value for how to plot the figure and add customization. |
167,467 | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from plotly.subplots import make_subplots
from greykite.common import constants as cst
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import update_dictionary
from greykite.common.viz.colors_utils import get_color_palette
from greykite.common.viz.colors_utils import get_distinct_colors
def plot_multivariate(
        df,
        x_col,
        y_col_style_dict="plotly",
        default_color="rgba(0, 145, 202, 1.0)",
        xlabel=None,
        ylabel=cst.VALUE_COL,
        title=None,
        showlegend=True):
    """Plots one or more columns of ``df`` against ``x_col`` on a shared x-axis.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Data frame containing ``x_col`` and the columns named by the keys
        in ``y_col_style_dict``.
    x_col: `str`
        Which column to plot on the x-axis.
    y_col_style_dict: `dict` [`str`, `dict` or None] or "plotly" or "auto" or "auto-fill", default "plotly"
        The column(s) to plot on the y-axis, and how to style them.
        If a dictionary:

        - key : `str`
            column name in ``df``
        - value : `dict` or None
            Optional styling options, passed as kwargs to `go.Scatter`.
            If None, uses the default: line labeled by the column name.
            See reference page for `plotly.graph_objects.Scatter` for options
            (e.g. color, mode, width/size, opacity).
            https://plotly.com/python/reference/#scatter.

        If a string, plots every column in ``df`` besides ``x_col``:

        - "plotly": default plotly line styling
        - "auto": lines colored ``default_color``, ordered by mean value (ascending)
        - "auto-fill": like "auto", but also fills the area between consecutive lines
    default_color: `str`, default "rgba(0, 145, 202, 1.0)" (blue)
        Line color used by the "auto" and "auto-fill" modes.
    xlabel : `str` or None, default None
        x-axis label. If None, default is ``x_col``.
    ylabel : `str` or None, default ``VALUE_COL``
        y-axis label
    title : `str` or None, default None
        Plot title. If None, default is based on axis labels.
    showlegend : `bool`, default True
        Whether to show the legend.

    Returns
    -------
    fig : `plotly.graph_objects.Figure`
        Interactive plotly graph of one or more columns
        in ``df`` against ``x_col``.
    """
    xlabel = x_col if xlabel is None else xlabel
    if title is None and ylabel is not None:
        title = f"{ylabel} vs {xlabel}"
    auto_style = {"line": {"color": default_color}}
    if y_col_style_dict == "plotly":
        # Every non-x column, with plotly's default styling
        y_col_style_dict = {col: None for col in df.columns if col != x_col}
    elif y_col_style_dict in ["auto", "auto-fill"]:
        # Orders columns by their mean value, lowest first
        column_order = list(df.drop(columns=x_col).mean().sort_values().index)
        if y_col_style_dict == "auto":
            # All lines share `default_color`
            y_col_style_dict = {col: auto_style for col in column_order}
        else:
            # "auto-fill": lowest line is plain; each later line fills
            # down to the line plotted just before it.
            y_col_style_dict = {column_order[0]: auto_style}
            y_col_style_dict.update({
                col: {
                    "line": {"color": default_color},
                    "fill": "tonexty"
                } for col in column_order[1:]
            })
    base_style = dict(mode="lines")
    traces = []
    for col_name, user_style in y_col_style_dict.items():
        # The trace is labeled by the column name unless the user overrides
        # "name"; any user-provided key replaces (or removes, if None) the default.
        labeled_style = update_dictionary(base_style, overwrite_dict={"name": col_name})
        merged_style = update_dictionary(labeled_style, overwrite_dict=user_style)
        traces.append(go.Scatter(
            x=df[x_col],
            y=df[col_name],
            **merged_style))
    layout = go.Layout(
        xaxis=dict(title=xlabel),
        yaxis=dict(title=ylabel),
        title=title,
        title_x=0.5,
        showlegend=showlegend,
        legend={'traceorder': 'reversed'}  # Matches the order of ``y_col_style_dict`` (bottom to top)
    )
    return go.Figure(data=traces, layout=layout)
The provided code snippet includes necessary dependencies for implementing the `plot_univariate` function. Write a Python function `def plot_univariate( df, x_col, y_col, xlabel=None, ylabel=None, title=None, color="rgb(32, 149, 212)", # light blue showlegend=True)` to solve the following problem:
Simple plot of univariate timeseries. Parameters ---------- df : `pandas.DataFrame` Data frame with ``x_col`` and ``y_col`` x_col: `str` x-axis column name, usually the time column y_col: `str` y-axis column name, the value the plot xlabel : `str` or None, default None x-axis label ylabel : `str` or None, default None y-axis label title : `str` or None, default None Plot title. If None, default is based on axis labels. color : `str`, default "rgb(32, 149, 212)" (light blue) Line color showlegend : `bool`, default True Whether to show the legend Returns ------- fig : `plotly.graph_objects.Figure` Interactive plotly graph of the value against time. See `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual` return value for how to plot the figure and add customization. See Also -------- `~greykite.common.viz.timeseries_plotting.plot_multivariate` Provides more styling options. Also consider using plotly's `go.Scatter` and `go.Layout` directly.
Here is the function:
def plot_univariate(
        df,
        x_col,
        y_col,
        xlabel=None,
        ylabel=None,
        title=None,
        color="rgb(32, 149, 212)",  # light blue
        showlegend=True):
    """Simple plot of a univariate timeseries.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Data frame with ``x_col`` and ``y_col``
    x_col: `str`
        x-axis column name, usually the time column
    y_col: `str`
        y-axis column name, the column to plot
    xlabel : `str` or None, default None
        x-axis label
    ylabel : `str` or None, default None
        y-axis label
    title : `str` or None, default None
        Plot title. If None, default is based on axis labels.
    color : `str`, default "rgb(32, 149, 212)" (light blue)
        Line color
    showlegend : `bool`, default True
        Whether to show the legend

    Returns
    -------
    fig : `plotly.graph_objects.Figure`
        Interactive plotly graph of the value against time.

    See Also
    --------
    `~greykite.common.viz.timeseries_plotting.plot_multivariate`
        Provides more styling options. Also consider using plotly's
        `go.Scatter` and `go.Layout` directly.
    """
    # Axis labels default to the corresponding column names
    xlabel = x_col if xlabel is None else xlabel
    ylabel = y_col if ylabel is None else ylabel
    # A single slightly-transparent line in the requested color,
    # labeled by the y column name.
    line_style = dict(
        name=y_col,
        mode="lines",
        line=dict(
            color=color
        ),
        opacity=0.8
    )
    return plot_multivariate(
        df,
        x_col,
        {y_col: line_style},
        xlabel=xlabel,
        ylabel=ylabel,
        title=title,
        showlegend=showlegend,
    )
167,468 | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from plotly.subplots import make_subplots
from greykite.common import constants as cst
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import update_dictionary
from greykite.common.viz.colors_utils import get_color_palette
from greykite.common.viz.colors_utils import get_distinct_colors
def split_range_into_groups(
        n,
        group_size,
        which_group_complete="last"):
    """Assigns each of ``n`` consecutive elements to an adjacent group
    of ``group_size`` elements, returning each element's group number.

    Group numbers start at 0 and increase. Can be used to generate
    groups for sliding window aggregation.

    :param n: int
        total number of elements to partition
    :param group_size: int
        number of elements per group
    :param which_group_complete: str
        When ``n % group_size > 0``, exactly one group is short.
        If "first", the first group is full if possible and the last
        group may be incomplete. If "last" (default), the last group
        is full if possible and the first group may be incomplete.
    :return: np.array of length n
        values correspond to the element's group number

    Examples:

    >>> split_range_into_groups(10, 3, "last")
    array([0., 1., 1., 1., 2., 2., 2., 3., 3., 3.])
    >>> split_range_into_groups(10, 4, "first")
    array([0., 0., 0., 0., 1., 1., 1., 1., 2., 2.])
    >>> split_range_into_groups(10, 5, "last")
    array([0., 0., 0., 0., 0., 1., 1., 1., 1., 1.])
    >>> split_range_into_groups(10, 12, "last")
    array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
    """
    # Shifting the element positions before integer-dividing by the group
    # size controls which end absorbs the incomplete group.
    if which_group_complete.lower() == "first":
        shift = 0
    else:
        # Python's modulo keeps (-n) % group_size in [0, group_size),
        # so the shift is 0 whenever n divides evenly.
        shift = (-n) % group_size
    positions = np.arange(shift, n + shift)
    return np.floor(positions / group_size)
def build_time_features_df(
        dt,
        conti_year_origin,
        add_dst_info=True):
    """This function gets a datetime-like vector and creates new columns containing temporal
    features useful for time series analysis and forecasting e.g. year, week of year, etc.

    Parameters
    ----------
    dt : array-like (1-dimensional)
        A vector of datetime-like values
    conti_year_origin : `float`
        The origin used for creating continuous time which is in years unit.
    add_dst_info : `bool`, default True
        Determines if daylight saving columns for US and Europe should be added.

    Returns
    -------
    time_features_df : `pandas.DataFrame`
        Dataframe with the following time features.
        * "datetime": `datetime.datetime` object, a combination of date and a time
        * "date": `datetime.date` object, date with the format (year, month, day)
        * "year": integer, year of the date e.g. 2018
        * "year_length": integer, number of days in the year e.g. 365 or 366
        * "quarter": integer, quarter of the date, 1, 2, 3, 4
        * "quarter_start": `pandas.DatetimeIndex`, date of beginning of the current quarter
        * "quarter_length": integer, number of days in the quarter, 90/91 for Q1, 91 for Q2, 92 for Q3 and Q4
        * "month": integer, month of the year, January=1, February=2, ..., December=12
        * "month_length": integer, number of days in the month, 28/ 29/ 30/ 31
        * "woy": integer, ISO 8601 week of the year where a week starts from Monday, 1, 2, ..., 53
        * "doy": integer, ordinal day of the year, 1, 2, ..., year_length
        * "doq": integer, ordinal day of the quarter, 1, 2, ..., quarter_length
        * "dom": integer, ordinal day of the month, 1, 2, ..., month_length
        * "dow": integer, day of the week, Monday=1, Tuesday=2, ..., Sunday=7
        * "str_dow": string, day of the week as a string e.g. "1-Mon", "2-Tue", ..., "7-Sun"
        * "str_doy": string, day of the year e.g. "2020-03-20" for March 20, 2020
        * "hour": integer, discrete hours of the datetime, 0, 1, ..., 23
        * "minute": integer, minutes of the datetime, 0, 1, ..., 59
        * "second": integer, seconds of the datetime, 0, 1, ..., 3599
        * "year_month": string, (year, month) e.g. "2020-03" for March 2020
        * "year_woy": string, (year, week of year) e.g. "2020_42" for 42nd week of 2020
        * "month_dom": string, (month, day of month) e.g. "02/20" for February 20th
        * "year_woy_dow": string, (year, week of year, day of week) e.g. "2020_03_6" for Saturday of 3rd week in 2020
        * "woy_dow": string, (week of year, day of week) e.g. "03_6" for Saturday of 3rd week
        * "dow_hr": string, (day of week, hour) e.g. "4_09" for 9am on Thursday
        * "dow_hr_min": string, (day of week, hour, minute) e.g. "4_09_10" for 9:10am on Thursday
        * "tod": float, time of day, continuous, 0.0 to 24.0
        * "tow": float, time of week, continuous, 0.0 to 7.0
        * "tom": float, standardized time of month, continuous, 0.0 to 1.0
        * "toq": float, time of quarter, continuous, 0.0 to 1.0
        * "toy": float, standardized time of year, continuous, 0.0 to 1.0
        * "conti_year": float, year in continuous time, eg 2018.5 means middle of the year 2018
        * "is_weekend": boolean, weekend indicator, True for weekend, else False
        * "dow_grouped": string, Monday-Thursday=1234-MTuWTh, Friday=5-Fri, Saturday=6-Sat, Sunday=7-Sun
        * "ct1": float, linear growth based on conti_year_origin, -infinity to infinity
        * "ct2": float, signed quadratic growth, -infinity to infinity
        * "ct3": float, signed cubic growth, -infinity to infinity
        * "ct_sqrt": float, signed square root growth, -infinity to infinity
        * "ct_root3": float, signed cubic root growth, -infinity to infinity
        * "us_dst": bool, determines if the time is inside the daylight saving time of US
          This column is only generated if ``add_dst_info=True``
        * "eu_dst": bool, determines if the time is inside the daylight saving time of Europe. This column is only generated if ``add_dst_info=True``

    Raises
    ------
    ValueError
        If ``dt`` is empty.
    """
    # Normalizes the input to a DatetimeIndex so the vectorized datetime
    # accessors (.year, .quarter, .strftime, ...) below are available.
    dt = pd.DatetimeIndex(dt)
    if len(dt) == 0:
        raise ValueError("Length of dt cannot be zero.")
    # basic time features
    date = dt.date
    year = dt.year
    # Adding the boolean leap-year mask to 365.0 yields a float array
    # with 366.0 on leap years.
    year_length = (365.0 + dt.is_leap_year)
    quarter = dt.quarter
    month = dt.month
    month_length = dt.days_in_month
    # finds first day of quarter by building "YYYY-MM-01" strings
    # (quarter q starts in month 3q-2: Jan, Apr, Jul, Oct)
    quarter_start = pd.DatetimeIndex(
        dt.year.map(str) + "-" + (3 * quarter - 2).map(int).map(str) + "-01")
    next_quarter_start = dt + pd.tseries.offsets.QuarterBegin(startingMonth=1)
    quarter_length = (next_quarter_start - quarter_start).days
    # finds offset from first day of quarter (rounds down to nearest day)
    doq = ((dt - quarter_start) / pd.to_timedelta("1D") + 1).astype(int)
    # week of year, "woy", follows ISO 8601:
    # - Week 01 is the week with the year's first Thursday in it.
    # - A week begins with Monday and ends with Sunday.
    # So the week number of the week that overlaps both years, is 1, 52, or 53,
    # depending on whether it has more days in the previous year or new year.
    # - e.g. Jan 1st, 2018 is Monday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 1, 2]
    # - e.g. Jan 1st, 2019 is Tuesday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 2, 2]
    # - e.g. Jan 1st, 2020 is Wednesday. woy of first 8 days = [1, 1, 1, 1, 1, 2, 2, 2]
    # - e.g. Jan 1st, 2015 is Thursday. woy of first 8 days = [1, 1, 1, 1, 2, 2, 2, 2]
    # - e.g. Jan 1st, 2021 is Friday. woy of first 8 days = [53, 53, 53, 1, 1, 1, 1, 1]
    # - e.g. Jan 1st, 2022 is Saturday. woy of first 8 days = [52, 52, 1, 1, 1, 1, 1, 1]
    # - e.g. Jan 1st, 2023 is Sunday. woy of first 8 days = [52, 1, 1, 1, 1, 1, 1, 1]
    woy = dt.strftime("%V").astype(int)
    doy = dt.dayofyear
    dom = dt.day
    dow = dt.strftime("%u").astype(int)  # ISO day of week, Monday=1 ... Sunday=7
    str_dow = dt.strftime("%u-%a")  # e.g. 1-Mon, 2-Tue, ..., 7-Sun
    hour = dt.hour
    minute = dt.minute
    second = dt.second
    # grouped time feature
    year_quarter = dt.strftime("%Y-") + quarter.astype(str)  # e.g. 2020-1 for March 2020
    str_doy = dt.strftime("%Y-%m-%d")  # e.g. 2020-03-20 for March 20, 2020
    year_month = dt.strftime("%Y-%m")  # e.g. 2020-03 for March 2020
    month_dom = dt.strftime("%m/%d")  # e.g. 02/20 for February 20th
    year_woy = dt.strftime("%Y_%V")  # e.g. 2020_42 for 42nd week of 2020
    year_woy_dow = dt.strftime("%Y_%V_%u")  # e.g. 2020_03_6 for Saturday of 3rd week in 2020
    # NOTE(review): woy_dow uses %W (Monday-based, zero-indexed week) while
    # year_woy_dow above uses %V (ISO week) — confirm this difference is intentional.
    woy_dow = dt.strftime("%W_%u")  # e.g. 03_6 for Saturday of 3rd week
    dow_hr = dt.strftime("%u_%H")  # e.g. 4_09 for 9am on Thursday
    dow_hr_min = dt.strftime("%u_%H_%M")  # e.g. 4_09_10 for 9:10am on Thursday
    # iso features https://en.wikipedia.org/wiki/ISO_week_date
    # Uses `pd.Index` to avoid overriding the indices in the output df.
    year_iso = pd.Index(dt.isocalendar()["year"])
    year_woy_iso = pd.Index(year_iso.astype(str) + "_" + dt.strftime("%V"))
    year_woy_dow_iso = pd.Index(year_woy_iso + "_" + dt.isocalendar()["day"].astype(str))
    # derived time features
    # Continuous positions within day/week/month/quarter:
    # tod in [0, 24), tow in [0, 7), tom and toq normalized to [0, 1).
    tod = hour + (minute / 60.0) + (second / 3600.0)
    tow = dow - 1 + (tod / 24.0)
    tom = (dom - 1 + (tod / 24.0)) / month_length
    toq = (doq - 1 + (tod / 24.0)) / quarter_length
    # time of year, continuous, 0.0 to 1.0. e.g. Jan 1, 12 am = 0/365, Jan 2, 12 am = 1/365, ...
    # To handle leap years, Feb 28 = 58/365 - 59/365, Feb 29 = 59/365, Mar 1 = 59/365 - 60/365
    # offset term is nonzero only in leap years
    # doy_offset reduces doy by 1 from from Mar 1st (doy > 60)
    doy_offset = (year_length == 366) * 1.0 * (doy > 60)
    # tod_offset sets tod to 0 on Feb 29th (doy == 60)
    tod_offset = 1 - (year_length == 366) * 1.0 * (doy == 60)
    toy = (doy - 1 - doy_offset + (tod / 24.0) * tod_offset) / 365.0
    # year of date in continuous time, eg 2018.5 means middle of year 2018
    # this is useful for modeling features that do not care about leap year e.g. environmental variables
    conti_year = year + (doy - 1 + (tod / 24.0)) / year_length
    is_weekend = pd.Series(dow).apply(lambda x: x in [6, 7]).values  # weekend indicator
    # categorical var with levels (Mon-Thu, Fri, Sat, Sun), could help when training data are sparse.
    dow_grouped = pd.Series(str_dow).apply(
        lambda x: "1234-MTuWTh" if (x in ["1-Mon", "2-Tue", "3-Wed", "4-Thu"]) else x).values
    # growth terms
    # signed_pow is a sibling helper not shown here; presumably a
    # sign-preserving power (handles negative ct1 for fractional exponents)
    # — TODO confirm against its definition.
    ct1 = conti_year - conti_year_origin
    ct2 = signed_pow(ct1, 2)
    ct3 = signed_pow(ct1, 3)
    ct_sqrt = signed_pow(ct1, 1/2)
    ct_root3 = signed_pow(ct1, 1/3)
    # All keys must be added to constants.
    features_dict = {
        cst.TimeFeaturesEnum.datetime.value: dt,
        cst.TimeFeaturesEnum.date.value: date,
        cst.TimeFeaturesEnum.year.value: year,
        cst.TimeFeaturesEnum.year_length.value: year_length,
        cst.TimeFeaturesEnum.quarter.value: quarter,
        cst.TimeFeaturesEnum.quarter_start.value: quarter_start,
        cst.TimeFeaturesEnum.quarter_length.value: quarter_length,
        cst.TimeFeaturesEnum.month.value: month,
        cst.TimeFeaturesEnum.month_length.value: month_length,
        cst.TimeFeaturesEnum.woy.value: woy,
        cst.TimeFeaturesEnum.doy.value: doy,
        cst.TimeFeaturesEnum.doq.value: doq,
        cst.TimeFeaturesEnum.dom.value: dom,
        cst.TimeFeaturesEnum.dow.value: dow,
        cst.TimeFeaturesEnum.str_dow.value: str_dow,
        cst.TimeFeaturesEnum.str_doy.value: str_doy,
        cst.TimeFeaturesEnum.hour.value: hour,
        cst.TimeFeaturesEnum.minute.value: minute,
        cst.TimeFeaturesEnum.second.value: second,
        cst.TimeFeaturesEnum.year_quarter.value: year_quarter,
        cst.TimeFeaturesEnum.year_month.value: year_month,
        cst.TimeFeaturesEnum.year_woy.value: year_woy,
        cst.TimeFeaturesEnum.month_dom.value: month_dom,
        cst.TimeFeaturesEnum.year_woy_dow.value: year_woy_dow,
        cst.TimeFeaturesEnum.woy_dow.value: woy_dow,
        cst.TimeFeaturesEnum.dow_hr.value: dow_hr,
        cst.TimeFeaturesEnum.dow_hr_min.value: dow_hr_min,
        cst.TimeFeaturesEnum.year_iso.value: year_iso,
        cst.TimeFeaturesEnum.year_woy_iso.value: year_woy_iso,
        cst.TimeFeaturesEnum.year_woy_dow_iso.value: year_woy_dow_iso,
        cst.TimeFeaturesEnum.tod.value: tod,
        cst.TimeFeaturesEnum.tow.value: tow,
        cst.TimeFeaturesEnum.tom.value: tom,
        cst.TimeFeaturesEnum.toq.value: toq,
        cst.TimeFeaturesEnum.toy.value: toy,
        cst.TimeFeaturesEnum.conti_year.value: conti_year,
        cst.TimeFeaturesEnum.is_weekend.value: is_weekend,
        cst.TimeFeaturesEnum.dow_grouped.value: dow_grouped,
        cst.TimeFeaturesEnum.ct1.value: ct1,
        cst.TimeFeaturesEnum.ct2.value: ct2,
        cst.TimeFeaturesEnum.ct3.value: ct3,
        cst.TimeFeaturesEnum.ct_sqrt.value: ct_sqrt,
        cst.TimeFeaturesEnum.ct_root3.value: ct_root3,
    }
    df = pd.DataFrame(features_dict)
    if add_dst_info:
        # is_dst_fcn (defined elsewhere in this module) presumably returns a
        # vectorized DST indicator for the given timezone — TODO confirm.
        df[cst.TimeFeaturesEnum.us_dst.value] = is_dst_fcn("US/Pacific")(
            df[cst.TimeFeaturesEnum.datetime.value])
        df[cst.TimeFeaturesEnum.eu_dst.value] = is_dst_fcn("Europe/London")(
            df[cst.TimeFeaturesEnum.datetime.value])
    return df
The provided code snippet includes necessary dependencies for implementing the `add_groupby_column` function. Write a Python function `def add_groupby_column( df, time_col, groupby_time_feature=None, groupby_sliding_window_size=None, groupby_custom_column=None)` to solve the following problem:
Extracts a column to group by from ``df``. Exactly one of ``groupby_time_feature``, ``groupby_sliding_window_size``, `groupby_custom_column` must be provided. Parameters ---------- df : 'pandas.DataFrame` Contains the univariate time series / forecast time_col : `str` The name of the time column of the univariate time series / forecast groupby_time_feature : `str` or None, optional If provided, groups by a column generated by `~greykite.common.features.timeseries_features.build_time_features_df`. See that function for valid values. groupby_sliding_window_size : `int` or None, optional If provided, sequentially partitions data into groups of size ``groupby_sliding_window_size``. groupby_custom_column : `pandas.Series` or None, optional If provided, groups by this column value. Should be same length as the ``df``. Returns ------- result : `dict` Dictionary with two items: * ``"df"`` : `pandas.DataFrame` ``df`` with a grouping column added. The column can be used to group rows together. * ``"groupby_col"`` : `str` The name of the groupby column added to ``df``. The column name depends on the grouping method: - ``groupby_time_feature`` for ``groupby_time_feature`` - ``{cst.TIME_COL}_downsample`` for ``groupby_sliding_window_size`` - ``groupby_custom_column.name`` for ``groupby_custom_column``.
Here is the function:
def add_groupby_column(
        df,
        time_col,
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=None):
    """Extracts a column to group by from ``df``.

    Exactly one of ``groupby_time_feature``, ``groupby_sliding_window_size``,
    ``groupby_custom_column`` must be provided.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Contains the univariate time series / forecast
    time_col : `str`
        The name of the time column of the univariate time series / forecast
    groupby_time_feature : `str` or None, optional
        If provided, groups by a column generated by
        `~greykite.common.features.timeseries_features.build_time_features_df`.
        See that function for valid values.
    groupby_sliding_window_size : `int` or None, optional
        If provided, sequentially partitions data into groups of size
        ``groupby_sliding_window_size``.
    groupby_custom_column : `pandas.Series` or None, optional
        If provided, groups by this column value.
        Should be same length as the ``df``.

    Returns
    -------
    result : `dict`
        Dictionary with two items:

        * ``"df"`` : `pandas.DataFrame`
            ``df`` with a grouping column added.
            The column can be used to group rows together.
        * ``"groupby_col"`` : `str`
            The name of the groupby column added to ``df``.
            The column name depends on the grouping method:

            - ``groupby_time_feature`` for ``groupby_time_feature``
            - ``{time_col}_downsample`` for ``groupby_sliding_window_size``
            - ``groupby_custom_column.name`` for ``groupby_custom_column``
              (or ``"groups"`` if the Series has no name).

    Raises
    ------
    ValueError
        If not exactly one of the three grouping parameters is provided.
    """
    # Copies to avoid mutating the caller's frame. ``dt`` is rebuilt from raw
    # values so it has a fresh RangeIndex, making the lookups below well-defined.
    df = df.copy()
    dt = pd.Series(df[time_col].values)
    # Exactly one grouping method must be requested.
    num_provided = sum(
        param is not None
        for param in (groupby_time_feature, groupby_sliding_window_size, groupby_custom_column))
    if num_provided != 1:
        # Fix: the message previously referred to a nonexistent parameter
        # ("groupby_rolling_window_size") and was missing a space before "must".
        raise ValueError(
            "Exactly one of (groupby_time_feature, groupby_sliding_window_size, "
            "groupby_custom_column) must be specified")
    if groupby_time_feature is not None:
        # Group by a value derived from the time column
        time_features = build_time_features_df(dt, conti_year_origin=min(dt).year)
        groups = time_features[groupby_time_feature]
        groups.name = groupby_time_feature
    elif groupby_sliding_window_size is not None:
        # Group by sliding window for evaluation over time
        index_dates = split_range_into_groups(
            n=df.shape[0],
            group_size=groupby_sliding_window_size,
            which_group_complete="last")  # ensures the last group is complete (first group may be partial)
        # ``split_range_into_groups`` returns floats; cast to int so the
        # lookup uses exact integer labels (float labels are deprecated).
        first_date_index = (index_dates * groupby_sliding_window_size).astype(int)
        groups = dt[first_date_index]  # uses first date in each group as grouping value
        groups.name = f"{time_col}_downsample"
    else:
        # Group by custom column
        groups = groupby_custom_column
    groups_col_name = groups.name if groups.name is not None else "groups"
    df[groups_col_name] = groups.values
    if df.index.name in df.columns:
        # Removes ambiguity in case the index name is the same as the newly
        # added column (or an existing column).
        df.index.name = None
    return {
        "df": df,
        "groupby_col": groups_col_name
    }
167,469 | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from plotly.subplots import make_subplots
from greykite.common import constants as cst
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import update_dictionary
from greykite.common.viz.colors_utils import get_color_palette
from greykite.common.viz.colors_utils import get_distinct_colors
The provided code snippet includes necessary dependencies for implementing the `grouping_evaluation` function. Write a Python function `def grouping_evaluation( df, groupby_col, grouping_func, grouping_func_name)` to solve the following problem:
Groups ``df`` and evaluates a function on each group. The function takes a `pandas.DataFrame` and returns a scalar. Parameters ---------- df : `pandas.DataFrame` Input data. For example, univariate time series, or forecast result. Contains ``groupby_col`` and columns to apply ``grouping_func`` on. groupby_col : `str` Column name in ``df`` to group by. grouping_func : `callable` Function that is applied to each group via `pandas.groupBy.apply`. Signature (grp: `pandas.DataFrame`) -> aggregated value: `float`. grouping_func_name : `str` What to call the output column generated by ``grouping_func``. Returns ------- grouped_df : `pandas.DataFrame` Dataframe with ``grouping_func`` evaluated on each level of ``df[groupby_col]``. Contains two columns: - ``groupby_col``: The groupby value - ``grouping_func_name``: The output of ``grouping_func`` on the group
Here is the function:
def grouping_evaluation(
        df,
        groupby_col,
        grouping_func,
        grouping_func_name):
    """Applies an aggregation function to each group of ``df``.

    ``grouping_func`` receives one group at a time (a `pandas.DataFrame`)
    and must return a scalar.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Input data, e.g. a univariate time series or forecast result.
        Must contain ``groupby_col`` and the columns used by ``grouping_func``.
    groupby_col : `str`
        Name of the column in ``df`` to group by.
    grouping_func : `callable`
        Applied to each group via `pandas.core.groupby.DataFrameGroupBy.apply`.
        Signature (grp: `pandas.DataFrame`) -> aggregated value: `float`.
    grouping_func_name : `str`
        Name for the output column produced by ``grouping_func``.

    Returns
    -------
    grouped_df : `pandas.DataFrame`
        One row per level of ``df[groupby_col]``, with two columns:

        - ``groupby_col``: the group value
        - ``grouping_func_name``: the output of ``grouping_func`` on that group
    """
    # `apply` yields a Series indexed by group; `reset_index` turns the group
    # labels into a column and leaves the values in a column named 0.
    aggregated = df.groupby(groupby_col).apply(grouping_func)
    grouped_df = aggregated.reset_index()
    # Renames the default integer column produced by `reset_index`.
    grouped_df = grouped_df.rename(columns={0: grouping_func_name})
    return grouped_df
167,470 | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from plotly.subplots import make_subplots
from greykite.common import constants as cst
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import update_dictionary
from greykite.common.viz.colors_utils import get_color_palette
from greykite.common.viz.colors_utils import get_distinct_colors
class LoggingLevelEnum(Enum):
    """Valid types of logging levels available to use.

    Member values mirror the numeric severity levels of the standard
    `logging` module, so ``level.value`` can be passed directly to
    ``logger.log``.
    """
    CRITICAL = 50
    ERROR = 40
    WARNING = 30
    INFO = 20
    DEBUG = 10
    NOTSET = 0
def log_message(message, level=LoggingLevelEnum.INFO):
    """Logs ``message`` at the requested severity.

    Parameters
    ----------
    message : `any`
        The message to be added to logger.
    level : `Enum`
        One of the levels in the `~greykite.common.enums.LoggingLevelEnum`.

    Raises
    ------
    ValueError
        If ``level`` is not a member of `LoggingLevelEnum`.
    """
    # Validates by member name so only levels declared on the enum are accepted.
    valid_level_names = list(LoggingLevelEnum.__members__)
    if level.name not in valid_level_names:
        raise ValueError(f"{level} not found, it must be a member of the LoggingLevelEnum class.")
    logger.log(level.value, message)
The provided code snippet includes necessary dependencies for implementing the `flexible_grouping_evaluation` function. Write a Python function `def flexible_grouping_evaluation( df, map_func_dict=None, groupby_col=None, agg_kwargs=None, extend_col_names=True, unpack_list=True, list_names_dict=None)` to solve the following problem:
Flexible aggregation. Generates additional columns for evaluation via ``map_func_dict``, groups by ``groupby_col``, then aggregates according to ``agg_kwargs``. This function calls `pandas.DataFrame.apply` and `pandas.core.groupby.DataFrameGroupBy.agg` internally. Parameters ---------- df : `pandas.DataFrame` DataFrame to transform / aggregate map_func_dict : `dict` [`str`, `callable`] or None, default None Row-wise transformation functions to create new columns. If None, no new columns are added. key: new column name value: row-wise function to apply to ``df`` to generate the column value. Signature (row: `pandas.DataFrame`) -> transformed value: `float`. For example:: map_func_dict = { "residual": lambda row: row["predicted"] - row["actual"], "squared_error": lambda row: (row["predicted"] - row["actual"])**2 } groupby_col : `str` or None, default None Which column to group by. Can be in ``df`` or generated by ``map_func_dict``. If None, no grouping or aggregation is done. agg_kwargs : `dict` or None, default None Passed as keyword args to `pandas.core.groupby.DataFrameGroupBy.aggregate` after creating new columns and grouping by ``groupby_col``. Must be provided if ``groupby_col is not None``. To fully customize output column names, pass a dictionary as shown below. For example:: # Example 1, named aggregation to explicitly name output columns. # Assume ``df`` contains ``abs_percent_err``, ``abs_err`` columns. # Output columns are "MedAPE", "MAPE", "MAE", etc. in a single level index. 
from functools import partial agg_kwargs = { # output column name: (column to aggregate, aggregation function) "MedAPE": pd.NamedAgg(column="abs_percent_err", aggfunc=np.nanmedian), "MAPE": pd.NamedAgg(column="abs_percent_err", aggfunc=np.nanmean), "MAE": pd.NamedAgg(column="abs_err", aggfunc=np.nanmean), "q95_abs_err": pd.NamedAgg(column="abs_err", aggfunc=partial(np.nanquantile, q=0.95)), "q05_abs_err": pd.NamedAgg(column="abs_err", aggfunc=partial(np.nanquantile, q=0.05)), } # Example 2, multi-level aggregation using `func` parameter # to `pandas.core.groupby.DataFrameGroupBy.aggregate`. # Assume ``df`` contains ``y1``, ``y2`` columns. agg_kwargs = { "func": { "y1": [np.nanmedian, np.nanmean], "y2": [np.nanmedian, np.nanmax], } } # `extend_col_names` controls the output column names extend_col_names = True # output columns are "y1_nanmean", "y1_nanmedian", "y2_nanmean", "y2_nanmax" extend_col_names = False # output columns are "nanmean", "nanmedian", "nanmean", "nanmax" extend_col_names : `bool` or None, default True How to flatten index after aggregation. In some cases, the column index after aggregation is a multi-index. This parameter controls how to flatten an index with 2 levels to 1 level. - If None, the index is not flattened. - If True, column name is a composite: ``{index0}_{index1}`` Use this option if index1 is not unique. - If False, column name is simply ``{index1}`` Ignored if the ColumnIndex after aggregation has only one level (e.g. if named aggregation is used in ``agg_kwargs``). unpack_list : `bool`, default True Whether to unpack (flatten) columns that contain list/tuple after aggregation, to create one column per element of the list/tuple. If True, ``list_names_dict`` can be used to rename the unpacked columns. list_names_dict : `dict` [`str`, `list` [`str`]] or None, default None If ``unpack_list`` is True, this dictionary can optionally be used to rename the unpacked columns. - Key = column name after aggregation, before upacking. E.g. 
``{index0}_{index1}`` or ``{index1}`` depending on ``extend_col_names``. - Value = list of names to use for the unpacked columns. Length must match the length of the lists contained in the column. If a particular list/tuple column is not found in this dictionary, appends 0, 1, 2, ..., n-1 to the original column name, where n = list length. For example, if the column contains a tuple of length 4 corresponding to quantiles 0.1, 0.25, 0.75, 0.9, then the following would be appropriate:: aggfunc = lambda grp: partial(np.nanquantile, q=[0.1, 0.25, 0.75, 0.9])(grp).tolist() agg_kwargs = { "value_Q": pd.NamedAgg(column="value", aggfunc=aggfunc) } list_names_dict = { # the key is the name of the unpacked column "value_Q" : ["Q0.10", "Q0.25", "Q0.75", "Q0.90"] } # Output columns are "Q0.10", "Q0.25", "Q0.75", "Q0.90" # In this example, if list_names_dict=None, the default output column names # would be: "value_Q0", "value_Q1", "value_Q2", "value_Q3" Returns ------- df_transformed : `pandas.DataFrame` df after transformation and optional aggregation. If ``groupby_col`` is None, returns ``df`` with additional columns as the keys in ``map_func_dict``. Otherwise, ``df`` is grouped by ``groupby_col`` and this becomes the index. Columns are determined by ``agg_kwargs`` and ``extend_col_names``.
Here is the function:
def flexible_grouping_evaluation(
        df,
        map_func_dict=None,
        groupby_col=None,
        agg_kwargs=None,
        extend_col_names=True,
        unpack_list=True,
        list_names_dict=None):
    """Flexible aggregation. Generates additional columns for evaluation via
    ``map_func_dict``, groups by ``groupby_col``, then aggregates according
    to ``agg_kwargs``.
    This function calls `pandas.DataFrame.apply` and
    `pandas.core.groupby.DataFrameGroupBy.agg` internally.
    Parameters
    ----------
    df : `pandas.DataFrame`
        DataFrame to transform / aggregate
    map_func_dict : `dict` [`str`, `callable`] or None, default None
        Row-wise transformation functions to create new columns.
        If None, no new columns are added.
        key: new column name
        value: row-wise function to apply to ``df`` to generate the column value.
        Signature (row: `pandas.DataFrame`) -> transformed value: `float`.
        For example::
            map_func_dict = {
                "residual": lambda row: row["predicted"] - row["actual"],
                "squared_error": lambda row: (row["predicted"] - row["actual"])**2
            }
    groupby_col : `str` or None, default None
        Which column to group by.
        Can be in ``df`` or generated by ``map_func_dict``.
        If None, no grouping or aggregation is done.
    agg_kwargs : `dict` or None, default None
        Passed as keyword args to `pandas.core.groupby.DataFrameGroupBy.aggregate` after creating
        new columns and grouping by ``groupby_col``. Must be provided if ``groupby_col is not None``.
        To fully customize output column names, pass a dictionary as shown below.
        For example::
            # Example 1, named aggregation to explicitly name output columns.
            # Assume ``df`` contains ``abs_percent_err``, ``abs_err`` columns.
            # Output columns are "MedAPE", "MAPE", "MAE", etc. in a single level index.
            from functools import partial
            agg_kwargs = {
                # output column name: (column to aggregate, aggregation function)
                "MedAPE": pd.NamedAgg(column="abs_percent_err", aggfunc=np.nanmedian),
                "MAPE": pd.NamedAgg(column="abs_percent_err", aggfunc=np.nanmean),
                "MAE": pd.NamedAgg(column="abs_err", aggfunc=np.nanmean),
                "q95_abs_err": pd.NamedAgg(column="abs_err", aggfunc=partial(np.nanquantile, q=0.95)),
                "q05_abs_err": pd.NamedAgg(column="abs_err", aggfunc=partial(np.nanquantile, q=0.05)),
            }
            # Example 2, multi-level aggregation using `func` parameter
            # to `pandas.core.groupby.DataFrameGroupBy.aggregate`.
            # Assume ``df`` contains ``y1``, ``y2`` columns.
            agg_kwargs = {
                "func": {
                    "y1": [np.nanmedian, np.nanmean],
                    "y2": [np.nanmedian, np.nanmax],
                }
            }
            # `extend_col_names` controls the output column names
            extend_col_names = True  # output columns are "y1_nanmean", "y1_nanmedian", "y2_nanmean", "y2_nanmax"
            extend_col_names = False  # output columns are "nanmean", "nanmedian", "nanmean", "nanmax"
    extend_col_names : `bool` or None, default True
        How to flatten index after aggregation.
        In some cases, the column index after aggregation is a multi-index.
        This parameter controls how to flatten an index with 2 levels to 1 level.
        - If None, the index is not flattened.
        - If True, column name is a composite: ``{index0}_{index1}``
        Use this option if index1 is not unique.
        - If False, column name is simply ``{index1}``
        Ignored if the ColumnIndex after aggregation has only one level (e.g.
        if named aggregation is used in ``agg_kwargs``).
    unpack_list : `bool`, default True
        Whether to unpack (flatten) columns that contain list/tuple after aggregation,
        to create one column per element of the list/tuple.
        If True, ``list_names_dict`` can be used to rename the unpacked columns.
    list_names_dict : `dict` [`str`, `list` [`str`]] or None, default None
        If ``unpack_list`` is True, this dictionary can optionally be
        used to rename the unpacked columns.
        - Key = column name after aggregation, before unpacking.
        E.g. ``{index0}_{index1}`` or ``{index1}`` depending on ``extend_col_names``.
        - Value = list of names to use for the unpacked columns. Length must match
        the length of the lists contained in the column.
        If a particular list/tuple column is not found in this dictionary, appends
        0, 1, 2, ..., n-1 to the original column name, where n = list length.
        For example, if the column contains a tuple of length 4 corresponding to
        quantiles 0.1, 0.25, 0.75, 0.9, then the following would be appropriate::
            aggfunc = lambda grp: partial(np.nanquantile, q=[0.1, 0.25, 0.75, 0.9])(grp).tolist()
            agg_kwargs = {
                "value_Q": pd.NamedAgg(column="value", aggfunc=aggfunc)
            }
            list_names_dict = {
                # the key is the name of the unpacked column
                "value_Q" : ["Q0.10", "Q0.25", "Q0.75", "Q0.90"]
            }
            # Output columns are "Q0.10", "Q0.25", "Q0.75", "Q0.90"
            # In this example, if list_names_dict=None, the default output column names
            # would be: "value_Q0", "value_Q1", "value_Q2", "value_Q3"
    Returns
    -------
    df_transformed : `pandas.DataFrame`
        df after transformation and optional aggregation.
        If ``groupby_col`` is None, returns ``df`` with additional columns as the keys in ``map_func_dict``.
        Otherwise, ``df`` is grouped by ``groupby_col`` and this becomes the index. Columns
        are determined by ``agg_kwargs`` and ``extend_col_names``.
    """
    # Uses `is not None` (not truthiness) so falsy-but-valid column names
    # (e.g. 0 or "") are validated consistently with the grouping check below.
    if groupby_col is not None and not agg_kwargs:
        raise ValueError("Must specify `agg_kwargs` if grouping is requested via `groupby_col`.")
    if agg_kwargs and groupby_col is None:
        log_message(f"`agg_kwargs` is ignored because `groupby_col` is None. "
                    f"Specify `groupby_col` to allow aggregation.", LoggingLevelEnum.WARNING)

    df = df.copy()  # avoids mutating the caller's frame when adding columns
    if map_func_dict is not None:
        for col_name, func in map_func_dict.items():
            df[col_name] = df.apply(func, axis=1)

    if groupby_col is not None:
        groups = df.groupby(groupby_col)
        with warnings.catch_warnings():
            # Ignores pandas FutureWarning. Use NamedAgg in pandas 0.25.+
            warnings.filterwarnings(
                "ignore",
                message="using a dict with renaming is deprecated",
                category=FutureWarning)
            df_transformed = groups.agg(**agg_kwargs)
        if extend_col_names is not None and df_transformed.columns.nlevels > 1:
            # Flattens multi-level column index
            if extend_col_names:
                # By concatenating names
                df_transformed.columns = ["_".join(col).strip("_") for col in df_transformed.columns]
            else:
                # By using level 1 names
                df_transformed.columns = list(df_transformed.columns.get_level_values(1))
                if np.any(df_transformed.columns.duplicated()):
                    warnings.warn("Column names are not unique. Use `extend_col_names=True` "
                                  "to uniquely identify every column.")
    else:
        # No grouping is requested
        df_transformed = df

    if unpack_list and df_transformed.shape[0] > 0:
        # Identifies the columns that contain list elements.
        # NOTE(review): only the first row is inspected — assumes list-valued
        # columns are list-valued in every row.
        which_list_cols = df_transformed.iloc[0].apply(lambda x: isinstance(x, (list, tuple)))
        list_cols = list(which_list_cols[which_list_cols].index)
        for col in list_cols:
            if isinstance(df_transformed[col], pd.DataFrame):
                warnings.warn(f"Skipping list unpacking for `{col}`. There are multiple columns "
                              f"with this name. Make sure column names are unique to enable unpacking.")
                continue
            # Unpacks the column, creating one column for each list entry
            list_df = pd.DataFrame(df_transformed[col].to_list())
            n_cols = list_df.shape[1]
            # Adds column names
            if list_names_dict is not None and col in list_names_dict:
                found_length = len(list_names_dict[col])
                if found_length != n_cols:
                    raise ValueError(
                        f"list_names_dict['{col}'] has length {found_length}, "
                        f"but there are {n_cols} columns to name. Example row(s):\n"
                        f"{list_df.head(2)}")
                # Direct assignment; the f-string indirection via `.get` was redundant.
                list_df.columns = list(list_names_dict[col])
            else:
                list_df.columns = [f"{col}{i}" for i in range(n_cols)]
            # replaces original column with new ones
            list_df.index = df_transformed.index
            del df_transformed[col]
            df_transformed = pd.concat([df_transformed, list_df], axis=1)

        if list_names_dict:
            unused_names = sorted(list(set(list_names_dict.keys()) - set(list_cols)))
            if len(unused_names) > 0:
                warnings.warn("These names from `list_names_dict` are not used, because the "
                              "column (key) is not found in the dataframe after aggregation:\n"
                              f"{unused_names}.\nAvailable columns are:\n"
                              f"{list_cols}.")
    return df_transformed
from functools import partial agg_kwargs = { # output column name: (column to aggregate, aggregation function) "MedAPE": pd.NamedAgg(column="abs_percent_err", aggfunc=np.nanmedian), "MAPE": pd.NamedAgg(column="abs_percent_err", aggfunc=np.nanmean), "MAE": pd.NamedAgg(column="abs_err", aggfunc=np.nanmean), "q95_abs_err": pd.NamedAgg(column="abs_err", aggfunc=partial(np.nanquantile, q=0.95)), "q05_abs_err": pd.NamedAgg(column="abs_err", aggfunc=partial(np.nanquantile, q=0.05)), } # Example 2, multi-level aggregation using `func` parameter # to `pandas.core.groupby.DataFrameGroupBy.aggregate`. # Assume ``df`` contains ``y1``, ``y2`` columns. agg_kwargs = { "func": { "y1": [np.nanmedian, np.nanmean], "y2": [np.nanmedian, np.nanmax], } } # `extend_col_names` controls the output column names extend_col_names = True # output columns are "y1_nanmean", "y1_nanmedian", "y2_nanmean", "y2_nanmax" extend_col_names = False # output columns are "nanmean", "nanmedian", "nanmean", "nanmax" extend_col_names : `bool` or None, default True How to flatten index after aggregation. In some cases, the column index after aggregation is a multi-index. This parameter controls how to flatten an index with 2 levels to 1 level. - If None, the index is not flattened. - If True, column name is a composite: ``{index0}_{index1}`` Use this option if index1 is not unique. - If False, column name is simply ``{index1}`` Ignored if the ColumnIndex after aggregation has only one level (e.g. if named aggregation is used in ``agg_kwargs``). unpack_list : `bool`, default True Whether to unpack (flatten) columns that contain list/tuple after aggregation, to create one column per element of the list/tuple. If True, ``list_names_dict`` can be used to rename the unpacked columns. list_names_dict : `dict` [`str`, `list` [`str`]] or None, default None If ``unpack_list`` is True, this dictionary can optionally be used to rename the unpacked columns. - Key = column name after aggregation, before upacking. E.g. 
``{index0}_{index1}`` or ``{index1}`` depending on ``extend_col_names``. - Value = list of names to use for the unpacked columns. Length must match the length of the lists contained in the column. If a particular list/tuple column is not found in this dictionary, appends 0, 1, 2, ..., n-1 to the original column name, where n = list length. For example, if the column contains a tuple of length 4 corresponding to quantiles 0.1, 0.25, 0.75, 0.9, then the following would be appropriate:: aggfunc = lambda grp: partial(np.nanquantile, q=[0.1, 0.25, 0.75, 0.9])(grp).tolist() agg_kwargs = { "value_Q": pd.NamedAgg(column="value", aggfunc=aggfunc) } list_names_dict = { # the key is the name of the unpacked column "value_Q" : ["Q0.10", "Q0.25", "Q0.75", "Q0.90"] } # Output columns are "Q0.10", "Q0.25", "Q0.75", "Q0.90" # In this example, if list_names_dict=None, the default output column names # would be: "value_Q0", "value_Q1", "value_Q2", "value_Q3" Returns ------- df_transformed : `pandas.DataFrame` df after transformation and optional aggregation. If ``groupby_col`` is None, returns ``df`` with additional columns as the keys in ``map_func_dict``. Otherwise, ``df`` is grouped by ``groupby_col`` and this becomes the index. Columns are determined by ``agg_kwargs`` and ``extend_col_names``. |
167,471 | import warnings
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
from plotly.subplots import make_subplots
from greykite.common import constants as cst
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import update_dictionary
from greykite.common.viz.colors_utils import get_color_palette
from greykite.common.viz.colors_utils import get_distinct_colors
def get_distinct_colors(
        num_colors,
        opacity=0.95):
    """Returns ``num_colors`` maximally distinguishable colors.

    Chooses among the "tab10", "tab20" and "viridis" color maps
    depending on how many colors are requested.
    See above color pallettes here:
    https://matplotlib.org/stable/tutorials/colors/colormaps.html

    Parameters
    ----------
    num_colors : `int`
        The number of colors needed. At most 256.
    opacity : `float`, default 0.95
        The opacity of the color, a number between 0 and 1.

    Returns
    -------
    colors : `list` [`str`]
        A list of string colors in RGB(A) format.

    Raises
    ------
    ValueError
        If ``opacity`` is outside [0, 1] or ``num_colors`` exceeds 256.
    """
    if opacity < 0 or opacity > 1:
        raise ValueError("Opacity must be between 0 and 1.")
    if num_colors <= 10:
        base_colors = get_cmap("tab10").colors
    elif num_colors <= 20:
        base_colors = get_cmap("tab20").colors
    elif num_colors <= 256:
        # Keeps only the RGB channels (":3") to drop the colormap's own alpha.
        base_colors = get_cmap(name="viridis")(np.linspace(0, 1, num_colors))[:, :3]
    else:
        raise ValueError("The maximum number of colors is 256.")
    # Formats each color as "rgba(r, g, b, opacity)" with 0-255 channels.
    rgba_strings = [
        f"rgba{int(c[0] * 255), int(c[1] * 255), int(c[2] * 255), opacity}"
        for c in base_colors
    ]
    return rgba_strings[:num_colors]
The provided code snippet includes necessary dependencies for implementing the `plot_dual_axis_figure` function. Write a Python function `def plot_dual_axis_figure( df, x_col, y_left_col, y_right_col, grouping_col=None, xlabel=None, ylabel_left=None, ylabel_right=None, title=None, y_left_linestyle="solid", y_right_linestyle="dash", opacity=0.9, axis_font_size=18, title_font_size=20, x_range=None, y_left_range=None, y_right_range=None, x_tick_format=None, y_left_tick_format=None, y_right_tick_format=None, x_hover_format=None, y_left_hover_format=None, y_right_hover_format=None, group_color_dict=None)` to solve the following problem:
Generic function to plot a dual y-axis plot. The x-axis is specified by ``x_col``. The left and right y-axes are specified by ``y_left_col`` and ``y_right_col`` respectively. If ``grouping_col`` is specified, then multiple pairs of curves are drawn, one for each level in ``grouping_col``. Parameters ---------- df : `pandas.DataFrame` The input dataframe. Must contain the columns ``x_col``, ``y_left_col`` and ``y_right_col``. If ``grouping_col`` is not None, it must also contain the ``grouping_col`` column. For example, the dataframe could look like this. +-----------+----------------+-----------------+------------------+ | ``x_col`` | ``y_left_col`` | ``y_right_col`` | ``grouping_col`` | +===========+================+=================+==================+ | 1.10 | 20.12 | 0.21 | "A" | +-----------+----------------+-----------------+------------------+ | 1.40 | 40.31 | 0.43 | "A" | +-----------+----------------+-----------------+------------------+ | 1.23 | 63.21 | NaN | "B" | +-----------+----------------+-----------------+------------------+ | 1.54 | 10.31 | 0.12 | "B" | +-----------+----------------+-----------------+------------------+ | ... | ... | ... | ... | +-----------+----------------+-----------------+------------------+ x_col : `str` The column name of the column in ``df`` to be used for the x-axis. y_left_col : `str` The column name of the column in ``df`` to be used for the left y-axis. y_right_col : `str` The column name of the column in ``df`` to be used for the right y-axis. grouping_col : `str` or None, default None Name of the grouping column in ``df`` to be used for overlaying curves for each level in ``grouping_col``. xlabel : `str` or None, default None Name for the x-axis label. If it is `None`, then it is set to be ``x_col``. ylabel_left : `str` or None, default None Name for the left y-axis label. If it is `None`, then it is set to be ``y_left_col``. ylabel_right : `str` or None, default None Name for the right y-axis label. 
If it is `None`, then it is set to be ``y_right_col``. title : `str` or None, default None The title for the plot. y_left_linestyle : `str`, default "solid" Line style for the left y-axis curve. y_right_linestyle : `str`, default "dash" Line style for the right y-axis curve. opacity : `float`, default 0.9 The opacity of the colors. This has to be a number between 0 and 1. axis_font_size : `int`, default 18 The size of the axis fonts. title_font_size : `int`, default 20 The size of the title fonts. x_range : `list` or None, default None Range of the x-axis. y_left_range : `list` or None, default None Range of the left y-axis. y_right_range : `list` or None, default None Range of the right y-axis. x_tick_format : `str` or None, default None Format of the ticks on the x-axis. y_left_tick_format : `str` or None, default None Format of the ticks on the left y-axis. y_right_tick_format : `str` or None, default None Format of the ticks on the right y-axis. x_hover_format : `str` or None, default None Format of the values when hovering for the x-axis. y_left_hover_format : `str` or None, default None Format of the values when hovering for the left y-axis. y_right_hover_format : `str` or None, default None Format of the values when hovering for the right y-axis. group_color_dict : `dict` [`str`, `str`] or None, default None. Dictionary with a mapping from levels within the ``grouping_col`` and a specified color. The keys are the levels in ``grouping_col`` and the values are a specified color. If ``group_color_dict`` is `None`, the colors are generated using the function `greykite.common.viz.colors_utils.get_distinct_colors`. Returns ------- fig : `plotly.graph_objects.Figure` Dual y-axes plot.
Here is the function:
def plot_dual_axis_figure(
        df,
        x_col,
        y_left_col,
        y_right_col,
        grouping_col=None,
        xlabel=None,
        ylabel_left=None,
        ylabel_right=None,
        title=None,
        y_left_linestyle="solid",
        y_right_linestyle="dash",
        opacity=0.9,
        axis_font_size=18,
        title_font_size=20,
        x_range=None,
        y_left_range=None,
        y_right_range=None,
        x_tick_format=None,
        y_left_tick_format=None,
        y_right_tick_format=None,
        x_hover_format=None,
        y_left_hover_format=None,
        y_right_hover_format=None,
        group_color_dict=None):
    """Generic function to plot a dual y-axis plot.

    The x-axis is specified by ``x_col``. The left and right y-axes are
    specified by ``y_left_col`` and ``y_right_col`` respectively.
    If ``grouping_col`` is specified, then multiple pairs of curves are drawn,
    one for each level in ``grouping_col``; the left/right curves of a level
    share one color and one legend group.

    Parameters
    ----------
    df : `pandas.DataFrame`
        The input dataframe. Must contain the columns ``x_col``, ``y_left_col``
        and ``y_right_col``. If ``grouping_col`` is not None, it must also
        contain the ``grouping_col`` column.
    x_col : `str`
        The column name of the column in ``df`` to be used for the x-axis.
    y_left_col : `str`
        The column name of the column in ``df`` to be used for the left y-axis.
    y_right_col : `str`
        The column name of the column in ``df`` to be used for the right y-axis.
    grouping_col : `str` or None, default None
        Name of the grouping column in ``df`` to be used for overlaying curves
        for each level in ``grouping_col``.
    xlabel : `str` or None, default None
        Name for the x-axis label. If it is `None`, then it is set to ``x_col``.
    ylabel_left : `str` or None, default None
        Name for the left y-axis label. If it is `None`, then it is set to ``y_left_col``.
    ylabel_right : `str` or None, default None
        Name for the right y-axis label. If it is `None`, then it is set to ``y_right_col``.
    title : `str` or None, default None
        The title for the plot.
    y_left_linestyle : `str`, default "solid"
        Line style for the left y-axis curve.
    y_right_linestyle : `str`, default "dash"
        Line style for the right y-axis curve.
    opacity : `float`, default 0.9
        The opacity of the colors. This has to be a number between 0 and 1.
    axis_font_size : `int`, default 18
        The size of the axis fonts.
    title_font_size : `int`, default 20
        The size of the title fonts.
    x_range : `list` or None, default None
        Range of the x-axis.
    y_left_range : `list` or None, default None
        Range of the left y-axis.
    y_right_range : `list` or None, default None
        Range of the right y-axis.
    x_tick_format : `str` or None, default None
        Format of the ticks on the x-axis.
    y_left_tick_format : `str` or None, default None
        Format of the ticks on the left y-axis.
    y_right_tick_format : `str` or None, default None
        Format of the ticks on the right y-axis.
    x_hover_format : `str` or None, default None
        Format of the values when hovering for the x-axis.
    y_left_hover_format : `str` or None, default None
        Format of the values when hovering for the left y-axis.
    y_right_hover_format : `str` or None, default None
        Format of the values when hovering for the right y-axis.
    group_color_dict : `dict` [`str`, `str`] or None, default None
        Dictionary mapping each level within ``grouping_col`` to a specified color.
        If `None`, the colors are generated using
        `greykite.common.viz.colors_utils.get_distinct_colors`.

    Returns
    -------
    fig : `plotly.graph_objects.Figure`
        Dual y-axes plot.

    Raises
    ------
    ValueError
        If any of ``x_col``, ``y_left_col``, ``y_right_col`` is missing from ``df``.
    """
    if any([col not in df.columns for col in [x_col, y_left_col, y_right_col]]):
        raise ValueError(f"`df` must contain the columns: '{x_col}', '{y_left_col}' and '{y_right_col}'!")
    # If no custom labels are given, we simply use the names of the passed columns.
    if xlabel is None:
        xlabel = x_col
    if ylabel_left is None:
        ylabel_left = y_left_col
    if ylabel_right is None:
        ylabel_right = y_right_col
    # Stores the data for the left and right curves.
    y_left_data = []
    y_right_data = []
    # Creates the curve(s).
    if grouping_col is None:  # No `grouping_col`
        # In this case, only one color is needed.
        color = get_distinct_colors(num_colors=1, opacity=opacity)[0]
        df = df.reset_index(drop=True).sort_values(x_col)
        # Left lines.
        line_left = go.Scatter(
            name=ylabel_left,
            x=df[x_col].tolist(),
            y=df[y_left_col].tolist(),
            showlegend=True,
            line=dict(
                dash=y_left_linestyle,
                color=color))
        y_left_data.append(line_left)
        # Right lines.
        line_right = go.Scatter(
            name=ylabel_right,
            x=df[x_col].tolist(),
            y=df[y_right_col].tolist(),
            showlegend=True,
            line=dict(
                dash=y_right_linestyle,
                color=color))
        y_right_data.append(line_right)
    else:  # `grouping_col` is not None.
        # Gets the levels for the specified `grouping_col`.
        # Maps level -> row index labels for that level.
        levels = df.groupby(grouping_col).groups
        # Assigns colors to levels if not specified.
        if group_color_dict is None:
            color_list = get_distinct_colors(
                num_colors=len(levels),
                opacity=opacity)
            group_color_dict = {level: color_list[i] for i, level in enumerate(levels.keys())}
        # Generates curves for each level.
        for level, indices in levels.items():
            df_subset = df.loc[indices].reset_index(drop=True).sort_values(x_col)
            # Left lines.
            # The left/right traces of a level share a `legendgroup` so they
            # toggle together in the legend.
            line_left = go.Scatter(
                name=ylabel_left,
                legendgroup=f"{grouping_col} = {level}",
                legendgrouptitle_text=f"{grouping_col} = {level}",
                x=df_subset[x_col].tolist(),
                y=df_subset[y_left_col].tolist(),
                showlegend=True,
                line=dict(
                    dash=y_left_linestyle,
                    color=group_color_dict[level]))
            y_left_data.append(line_left)
            # Right lines.
            line_right = go.Scatter(
                name=ylabel_right,
                legendgroup=f"{grouping_col} = {level}",
                legendgrouptitle_text=f"{grouping_col} = {level}",
                x=df_subset[x_col].tolist(),
                y=df_subset[y_right_col].tolist(),
                showlegend=True,
                line=dict(
                    dash=y_right_linestyle,
                    color=group_color_dict[level]))
            y_right_data.append(line_right)
    # Plots every pair of traces on a shared x-axis with two y-axes.
    fig = make_subplots(specs=[[{"secondary_y": True}]])
    for line_left, line_right in zip(y_left_data, y_right_data):
        fig.add_trace(line_left, secondary_y=False)
        fig.add_trace(line_right, secondary_y=True)
    # Updates figure layout.
    fig.update_layout(
        title_text=title,
        titlefont=dict(size=title_font_size),
        autosize=False,
        width=1000,
        height=800,
        hovermode="x")
    # Updates x-axis.
    # NOTE: the original code had a stray trailing comma after this call,
    # which turned the statement into a throwaway 1-tuple; removed.
    fig.update_xaxes(
        title=xlabel,
        titlefont=dict(size=axis_font_size),
        range=x_range,
        tickfont_size=axis_font_size,
        tickformat=x_tick_format,
        hoverformat=x_hover_format)
    # Updates the left y-axis.
    fig.update_yaxes(
        title_text=ylabel_left,
        secondary_y=False,
        titlefont=dict(size=axis_font_size),
        range=y_left_range,
        tickfont_size=axis_font_size,
        tickformat=y_left_tick_format,
        hoverformat=y_left_hover_format)
    # Updates the right y-axis.
    fig.update_yaxes(
        title_text=ylabel_right,
        secondary_y=True,
        titlefont=dict(size=axis_font_size),
        range=y_right_range,
        tickfont_size=axis_font_size,
        tickformat=y_right_tick_format,
        hoverformat=y_right_hover_format)
    return fig
If it is `None`, then it is set to be ``y_right_col``. title : `str` or None, default None The title for the plot. y_left_linestyle : `str`, default "solid" Line style for the left y-axis curve. y_right_linestyle : `str`, default "dash" Line style for the right y-axis curve. opacity : `float`, default 0.9 The opacity of the colors. This has to be a number between 0 and 1. axis_font_size : `int`, default 18 The size of the axis fonts. title_font_size : `int`, default 20 The size of the title fonts. x_range : `list` or None, default None Range of the x-axis. y_left_range : `list` or None, default None Range of the left y-axis. y_right_range : `list` or None, default None Range of the right y-axis. x_tick_format : `str` or None, default None Format of the ticks on the x-axis. y_left_tick_format : `str` or None, default None Format of the ticks on the left y-axis. y_right_tick_format : `str` or None, default None Format of the ticks on the right y-axis. x_hover_format : `str` or None, default None Format of the values when hovering for the x-axis. y_left_hover_format : `str` or None, default None Format of the values when hovering for the left y-axis. y_right_hover_format : `str` or None, default None Format of the values when hovering for the right y-axis. group_color_dict : `dict` [`str`, `str`] or None, default None. Dictionary with a mapping from levels within the ``grouping_col`` and a specified color. The keys are the levels in ``grouping_col`` and the values are a specified color. If ``group_color_dict`` is `None`, the colors are generated using the function `greykite.common.viz.colors_utils.get_distinct_colors`. Returns ------- fig : `plotly.graph_objects.Figure` Dual y-axes plot. |
167,472 | import warnings
import numpy as np
import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `gen_moving_timeseries_forecast` function. Write a Python function `def gen_moving_timeseries_forecast( df, time_col, value_col, train_forecast_func, train_move_ahead, forecast_horizon, min_training_end_point=None, min_training_end_timestamp=None, max_forecast_end_point=None, max_forecast_end_timestamp=None, regressor_cols=None, keep_cols=None, # extra cols in df which we want to keep from the raw data forecast_keep_cols=None, # extra cols we want to keep from the forecast result **model_params)` to solve the following problem:
Applies a forecast function (`train_forecast_func`) to many derived timeseries from `df` which are moving windows of `df`. For each derived series a model is trained and forecast is generated. It returns a `compare_df` to compare actuals and forecasts. Parameters ---------- df : `pandas.DataFrame` A data frame which includes the timestamp column as well as the value column. The time column is assumed to be in increasing order ( timestamps increase). time_col : `str` The column name in ``df`` representing time for the time series data. The time column can be anything that can be parsed by pandas DatetimeIndex. value_col: `str` The column name which has the value of interest to be forecasted. train_forecast_func : `func` A function with this signature:: train_forecast_func( df, time_col, value_col, forecast_horizon, new_external_regressor_df=None) This function is required to return a dictionary which has at minimum this item "fut_df": pd.DataFrame which includes the forecasts in the column ``value_col`` train_move_ahead : `int` The number of steps moving forward for each window This can be set to 1 often to get the maximum number of validations However other numbers can be used e.g. if computation is an issue forecast_horizon : `int` The number of forecasts needed min_training_end_point : `int` or None, default None The minimum number of training time points min_training_end_timestamp : `str` or None, default None The minimum timestamp to be used. If this is not None, ``min_training_end_point`` will be overwritten. max_forecast_end_point : `int` or None, default None The end point to be forecasted. The input ``df`` will be limited to this point. max_forecast_end_timestamp : `str` or None, default None The last timestamp allowed to be forecasted. If this is not None, ``max_forecast_end_point`` will be overwritten. regressor_cols : `list` [`str`] or None, default None If regressors are to be used, they are listed here. 
keep_cols : `list` [`str`] or None, default None Extra columns in ``df`` which we want to keep forecast_keep_cols : `list` [`str`] or None, default None Extra columns in the forecast result (dataframe) which we want to keep Return : `dict` ---------- A dictionary with following items: - "compare_df": `pd.DataFrame` A dataframe which includes (a) actual true values (observed) given in "y_true" column; (b) forecasted values given in "y_hat"; (c) horizon given in a column "horizon" which determines the number of points into the future for that forecast; (d) training end point given in ``training_end_point`` column - "max_possible_validation_num" : `int` Maximum possible number of validations - "validation_num" : `int` Number of validations used
Here is the function:
def gen_moving_timeseries_forecast(
        df,
        time_col,
        value_col,
        train_forecast_func,
        train_move_ahead,
        forecast_horizon,
        min_training_end_point=None,
        min_training_end_timestamp=None,
        max_forecast_end_point=None,
        max_forecast_end_timestamp=None,
        regressor_cols=None,
        keep_cols=None,  # extra cols in df which we want to keep from the raw data
        forecast_keep_cols=None,  # extra cols we want to keep from the forecast result
        **model_params):
    """Applies a forecast function (``train_forecast_func``) to many derived
    timeseries from ``df`` which are moving windows of ``df``. For each derived
    series a model is trained and a forecast is generated.
    It returns a ``compare_df`` to compare actuals and forecasts.

    Parameters
    ----------
    df : `pandas.DataFrame`
        A data frame which includes the timestamp column as well as the value
        column. The time column is assumed to be in increasing order
        (timestamps increase). A default integer (range) index is assumed,
        since rows are located by integer position below.
    time_col : `str`
        The column name in ``df`` representing time for the time series data.
        The time column can be anything that can be parsed by pandas DatetimeIndex.
    value_col : `str`
        The column name which has the value of interest to be forecasted.
    train_forecast_func : callable
        A function with this signature::

            train_forecast_func(
                df,
                time_col,
                value_col,
                forecast_horizon,
                new_external_regressor_df=None)

        This function is required to return a dictionary which has at minimum
        the item "fut_df": a `pandas.DataFrame` which includes the forecasts in
        the column ``value_col``.
    train_move_ahead : `int`
        The number of steps moving forward for each window.
        This can be set to 1 to get the maximum number of validations;
        larger values reduce computation.
    forecast_horizon : `int`
        The number of forecasts needed per window.
    min_training_end_point : `int` or None, default None
        The minimum number of training time points. Must be resolvable
        (either directly or via ``min_training_end_timestamp``).
    min_training_end_timestamp : `str` or None, default None
        The minimum timestamp to be used.
        If this is not None, ``min_training_end_point`` will be overwritten.
    max_forecast_end_point : `int` or None, default None
        The end point to be forecasted. The input ``df`` will be limited
        to this point.
    max_forecast_end_timestamp : `str` or None, default None
        The last timestamp allowed to be forecasted.
        If this is not None, ``max_forecast_end_point`` will be overwritten.
    regressor_cols : `list` [`str`] or None, default None
        If regressors are to be used, they are listed here.
    keep_cols : `list` [`str`] or None, default None
        Extra columns in ``df`` which we want to keep.
    forecast_keep_cols : `list` [`str`] or None, default None
        Extra columns in the forecast result (dataframe) which we want to keep.

    Returns
    -------
    result : `dict`
        A dictionary with the following items:

        - "compare_df" : `pandas.DataFrame`
            A dataframe which includes
            (a) actual true values (observed) given in the "y_true" column;
            (b) forecasted values given in "y_hat";
            (c) horizon given in a column "horizon" which determines the number
            of points into the future for that forecast;
            (d) training end point given in the "training_end_point" column.
        - "max_possible_validation_num" : `int`
            Maximum possible number of validations.
        - "validation_num" : `float`
            ``max_possible_validation_num / train_move_ahead``. May be
            fractional when ``train_move_ahead`` does not evenly divide.

    Raises
    ------
    ValueError
        If no valid train/test split exists for the given parameters.
    """
    if max_forecast_end_timestamp is not None:
        # Positional index of the last row with timestamp <= the max timestamp.
        max_forecast_end_point = max(
            np.where(df[time_col] <= max_forecast_end_timestamp)[0])
    if min_training_end_timestamp is not None:
        # Positional index of the first row with timestamp >= the min timestamp.
        min_training_end_point = min(
            np.where(df[time_col] >= min_training_end_timestamp)[0])
    if max_forecast_end_point is not None:
        # NOTE(review): slice end is exclusive, so the row at
        # ``max_forecast_end_point`` itself is dropped -- confirm intended.
        df = df[:max_forecast_end_point]
    n = df.shape[0]
    if (n - forecast_horizon) <= min_training_end_point:
        raise ValueError("No reasonable train test period is found for validation")
    # Maximum possible validation number for this set.
    max_possible_validation_num = n - forecast_horizon - min_training_end_point
    # Actual validation number (a ratio; may be fractional).
    validation_num = max_possible_validation_num / train_move_ahead
    # Positional training end points, stepping by ``train_move_ahead``.
    training_end_times = np.arange(
        min_training_end_point,
        n - forecast_horizon,
        train_move_ahead)

    def get_compare_df_row(m):
        """Calculates comparison df with actuals and forecasted for the given
        horizon, using the training data up to (but excluding) row ``m``.

        Parameters
        ----------
        m : `int`
            Last row (exclusive) of data to be used for training.

        Returns
        -------
        compare_df0 : `pandas.DataFrame`
            A pandas dataframe with ``forecast_horizon`` rows containing
            observed values ("y_true"), forecasted values ("y_hat"),
            "horizon" and "training_end_point" columns.
        """
        train_df = df[:m]
        # Assumes a default range index so labels m..m+horizon-1 select
        # the next ``forecast_horizon`` rows.
        test_df = df.loc[range(m, m + forecast_horizon), :].reset_index(drop=True)
        if regressor_cols is not None:
            # Future regressor values are taken from the test period.
            new_external_regressor_df = test_df[regressor_cols]
            obtained_forecast = train_forecast_func(
                df=train_df,
                value_col=value_col,
                time_col=time_col,
                forecast_horizon=forecast_horizon,
                new_external_regressor_df=new_external_regressor_df,
                **model_params)
        else:
            obtained_forecast = train_forecast_func(
                df=train_df,
                value_col=value_col,
                time_col=time_col,
                forecast_horizon=forecast_horizon,
                **model_params)
        fut_df = obtained_forecast["fut_df"]
        fut_df = fut_df.reset_index(drop=True)
        y_hat = fut_df[value_col].values
        y_true = test_df[value_col]
        timestamps = test_df[time_col]
        compare_df0 = pd.DataFrame({
            time_col: timestamps,
            "y_hat": y_hat,
            "y_true": y_true})
        if keep_cols is not None:
            compare_df0 = pd.concat(
                [compare_df0, test_df[keep_cols]],
                axis=1)
        if forecast_keep_cols is not None:
            compare_df0 = pd.concat(
                [compare_df0, fut_df[forecast_keep_cols]],
                axis=1)
        compare_df0["horizon"] = range(1, forecast_horizon + 1)
        compare_df0["training_end_point"] = m
        return compare_df0

    # Runs the function for all m and stores the results dataframes for each m.
    compare_df_list = [get_compare_df_row(m) for m in training_end_times]
    # Concats all the dataframes.
    compare_df = pd.concat(compare_df_list, axis=0)
    na_df = compare_df[compare_df.isnull().any(axis=1)]
    if na_df.shape[0] > 0:
        warnings.warn("NA was generated in compare_df.")
    return {
        "compare_df": compare_df,
        "max_possible_validation_num": max_possible_validation_num,
        "validation_num": validation_num}
keep_cols : `list` [`str`] or None, default None Extra columns in ``df`` which we want to keep forecast_keep_cols : `list` [`str`] or None, default None Extra columns in the forecast result (dataframe) which we want to keep Return : `dict` ---------- A dictionary with following items: - "compare_df": `pd.DataFrame` A dataframe which includes (a) actual true values (observed) given in "y_true" column; (b) forecasted values given in "y_hat"; (c) horizon given in a column "horizon" which determines the number of points into the future for that forecast; (d) training end point given in ``training_end_point`` column - "max_possible_validation_num" : `int` Maximum possible number of validations - "validation_num" : `int` Number of validations used
167,473 | import copy
import dataclasses
import functools
import math
import re
import warnings
from dataclasses import field
from typing import List
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal
The provided code snippet includes necessary dependencies for implementing the `unique_dict_in_list` function. Write a Python function `def unique_dict_in_list(array)` to solve the following problem:
Returns the unique dictionaries in the input list, preserving the original order. Replaces ``unique_elements_in_list`` because `dict` is not hashable. Parameters ---------- array: `List` [`dict`] List of dictionaries. Returns ------- unique_array : `List` [`dict`] Unique dictionaries in `array`, preserving the order of first appearance.
Here is the function:
def unique_dict_in_list(array):
    """Returns the unique dictionaries in the input list,
    preserving the original order. Replaces ``unique_elements_in_list``
    because `dict` is not hashable.

    Parameters
    ----------
    array : `list` [`dict`] or None
        List of dictionaries. A falsy input (`None` or empty list) is
        returned unchanged.

    Returns
    -------
    unique_array : `list` [`dict`]
        Unique dictionaries in ``array``, preserving the order of first appearance.
    """
    if not array:
        return array
    result = []
    # Brute-force O(n^2) scan: `set` cannot be used because dictionaries
    # (and any values containing lists/dicts) are unhashable.
    # List membership (`in`) compares with `==`, which is exactly the
    # equality check needed here.
    for item in array:
        if item not in result:
            result.append(item)
    return result
167,474 | import copy
import dataclasses
import functools
import math
import re
import warnings
from dataclasses import field
from typing import List
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal
def dictionary_values_to_lists(hyperparameter_dict, hyperparameters_list_type=None):
    """Returns a copy of ``hyperparameter_dict`` whose values are all
    lists, distributions with an ``rvs`` method, or None, so the result
    can be used directly as a hyperparameter grid.

    Values that are not already in one of those forms are wrapped in a
    singleton list.

    Parameters
    ----------
    hyperparameter_dict : `dict` [`str`, `any`]
        Dictionary of hyperparameters.
    hyperparameters_list_type : `set` [`str`] or `dict` [`str`, `list`] or None, optional, default None
        Keys whose values are themselves expected to be lists (or another
        accepted scalar). For those keys, a flat list such as ``["US", "UK"]``
        must be nested as ``[["US", "UK"]]``.

        * If a set, the only accepted non-list value is None.
        * If a dict, each key maps to its list of accepted non-list values,
          e.g. ``{"regressor_cols": [None, "auto"]}``.

    Raises
    ------
    ValueError
        If a key is listed in ``hyperparameters_list_type`` but its value is
        neither a list/tuple nor one of the accepted values.

    Returns
    -------
    hyperparameter_grid : `dict` [`str`, `list` [`any`] or distribution with ``rvs`` method]
        A dictionary suitable to pass as ``param_distributions`` to
        `sklearn.model_selection.RandomizedSearchCV` for grid search.
    """
    grid = hyperparameter_dict.copy()
    if hyperparameters_list_type is None:
        hyperparameters_list_type = {}
    for key, val in hyperparameter_dict.items():
        val_is_sequence = isinstance(val, (list, tuple))
        if key in hyperparameters_list_type:
            # Determine which non-list values are acceptable for this key.
            if isinstance(hyperparameters_list_type, dict):
                accepted = hyperparameters_list_type.get(key, [None])
            else:
                accepted = [None]
            if val_is_sequence:
                # Keep as-is only when every element is itself a list/tuple
                # or an accepted scalar; otherwise nest the whole value.
                elements_ok = all(
                    isinstance(elem, (list, tuple)) or elem in accepted
                    for elem in val)
                if len(val) == 0 or not elements_ok:
                    grid[key] = [val]
            elif val in accepted:
                # An accepted scalar (e.g. None or "auto"): nest it.
                grid[key] = [val]
            else:
                raise ValueError(
                    f"The value for {key} must be a list, tuple, or one of {accepted}, "
                    f"found {val}.")
        elif not val_is_sequence and not hasattr(val, "rvs"):
            # Ordinary key: lists/tuples and distributions (objects with an
            # `rvs` sampling method) pass through; everything else is nested.
            grid[key] = [val]
    return grid
The provided code snippet includes necessary dependencies for implementing the `dictionaries_values_to_lists` function. Write a Python function `def dictionaries_values_to_lists(hyperparameter_dicts, hyperparameters_list_type=None)` to solve the following problem:
Calls `~greykite.common.utils.python_utils.dictionary_values_to_lists` on the provided dictionary or on each item in a list of dictionaries. ``dictionary_values_to_lists`` returns a copy whose values are either lists, distributions with a ``rvs`` method, or None. Parameters ---------- hyperparameter_dicts : `dict` [`str`, `any`] or `list` [`dict` [`str`, `any`]] Dictionary of hyperparameters, or list of such dictionaries hyperparameters_list_type : `set` [`str`] or `dict` [`str`, `list`] or None, optional, default None Hyperparameters that must be a `list` or other recognized value. e.g. ``regressor_cols`` is `list` or None, ``holiday_lookup_countries`` is `list` or "auto" or None. Thus, a flat list must become nested. E.g. ["US", "UK"] must be converted to [["US", "UK"]]. Specifically, the values in ``hyperparameter_dict`` must be of type `list` [`list` or other accepted value]. * If a set, other accepted value = [None] * If a dict, other accepted value is specified by the key's value, a list of valid options. For example, to allow `list` or "auto" or None, use ``hyperparameters_list_type={"regressor_cols": [None, "auto"]}``. For example, ``hyperparameters_list_type={"regressor_cols": [None]}`` is equivalent to ``hyperparameters_list_type={"regressor_cols"}`` using the set specification. Returns ------- hyperparameter_grid : `dict` [`str`, `list` [`any`], or distribution with ``rvs`` method] or `list` [`dict] A dictionary or list of dictionaries suitable to pass as ``param_distributions`` to `sklearn.model_selection.RandomizedSearchCV` for grid search. As explained in `sklearn.model_selection.RandomizedSearchCV`: Dictionary with parameters names (string) as keys and distributions or lists of parameters to try. Distributions must provide a ``rvs`` method for sampling (such as those from scipy.stats.distributions). If a list is given, it is sampled uniformly. See Also -------- `~greykite.common.utils.python_utils.dictionary_values_to_lists`
Here is the function:
def dictionaries_values_to_lists(hyperparameter_dicts, hyperparameters_list_type=None):
    """Applies `~greykite.common.utils.python_utils.dictionary_values_to_lists`
    to ``hyperparameter_dicts``: either to a single dictionary, or to each
    dictionary in a list of dictionaries.

    ``dictionary_values_to_lists`` returns a copy whose values are
    either lists, distributions with a ``rvs`` method, or None.

    Parameters
    ----------
    hyperparameter_dicts : `dict` [`str`, `any`] or `list` [`dict` [`str`, `any`]]
        Dictionary of hyperparameters, or list of such dictionaries.
    hyperparameters_list_type : `set` [`str`] or `dict` [`str`, `list`] or None, optional, default None
        Hyperparameters whose values must be a `list` or other recognized value,
        e.g. ``regressor_cols`` is `list` or None, ``holiday_lookup_countries``
        is `list` or "auto" or None. A flat list must therefore become nested:
        ["US", "UK"] is converted to [["US", "UK"]].

        * If a set, the other accepted value is [None].
        * If a dict, the key's value lists the accepted alternatives. For example,
          to allow `list` or "auto" or None, use
          ``hyperparameters_list_type={"regressor_cols": [None, "auto"]}``.
          ``hyperparameters_list_type={"regressor_cols": [None]}`` is equivalent
          to the set form ``hyperparameters_list_type={"regressor_cols"}``.

    Returns
    -------
    hyperparameter_grid : `dict` [`str`, `list` [`any`] or distribution with ``rvs`` method] or `list` [`dict`]
        A dictionary or list of dictionaries suitable to pass as
        ``param_distributions`` to `sklearn.model_selection.RandomizedSearchCV`
        for grid search: parameter names map to distributions or lists of
        parameters to try. Distributions must provide a ``rvs`` method for
        sampling; a list is sampled uniformly.

    See Also
    --------
    `~greykite.common.utils.python_utils.dictionary_values_to_lists`
    """
    def convert(single_dict):
        # One-dictionary conversion, sharing the list-type specification.
        return dictionary_values_to_lists(
            single_dict,
            hyperparameters_list_type=hyperparameters_list_type)

    if isinstance(hyperparameter_dicts, (list, tuple)):
        return [convert(hyperparameter_dict)
                for hyperparameter_dict in hyperparameter_dicts]
    return convert(hyperparameter_dicts)
See Also -------- `~greykite.common.utils.python_utils.dictionary_values_to_lists` |
167,475 | import copy
import dataclasses
import functools
import math
import re
import warnings
from dataclasses import field
from typing import List
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal
The provided code snippet includes necessary dependencies for implementing the `flatten_list` function. Write a Python function `def flatten_list(array)` to solve the following problem:
Flattens an array by removing 1 level of nesting. Parameters ---------- array : `list` [`list`] List of lists. Returns ------- flat_arr : `list` Removes one level of nesting from the array. [[4], [3, 2], [1, [0]]] becomes [4, 3, 2, 1, [0]].
Here is the function:
def flatten_list(array):
    """Removes exactly one level of nesting from ``array``.

    Parameters
    ----------
    array : `list` [`list`]
        List of lists.

    Returns
    -------
    flat_arr : `list`
        ``array`` with one level of nesting removed, e.g.
        [[4], [3, 2], [1, [0]]] becomes [4, 3, 2, 1, [0]].
    """
    flat_arr = []
    for sublist in array:
        flat_arr.extend(sublist)
    return flat_arr
167,476 | import copy
import dataclasses
import functools
import math
import re
import warnings
from dataclasses import field
from typing import List
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal
The provided code snippet includes necessary dependencies for implementing the `reorder_columns` function. Write a Python function `def reorder_columns(df, order_dict=None)` to solve the following problem:
Orders columns according to ``order_dict``. Can be used to order columns according to hierarchical constraints. Consider the tree where a parent is the sum of its children. Let a node's label be its BFS traversal order, with the root as 0. Use ``order_dict`` to map column names to these node labels, to get the dataframe in BFS traversal order, matching the structure of the tree. Parameters ---------- df : `pandas.DataFrame` Input data frame. order_dict : `dict` [`str`, `float`] or None How to order the columns. The key is the column name, the value is its position. Columns are returned in ascending order by value from left to right. Only column specified by ``order_dict`` are included in the output. If None, returns the original ``df``. Returns ------- reordered_df : `pandas.DataFrame` ``df`` with the selected columns reordered.
Here is the function:
def reorder_columns(df, order_dict=None):
    """Selects and orders columns according to ``order_dict``.

    Can be used to order columns according to hierarchical constraints.
    Consider the tree where a parent is the sum of its children, and label
    each node by its BFS traversal order with the root as 0. Mapping column
    names to these labels via ``order_dict`` returns the dataframe in BFS
    traversal order, matching the structure of the tree.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Input data frame.
    order_dict : `dict` [`str`, `float`] or None
        Maps column name to its position. Columns appear left to right in
        ascending order of position, and only columns named in ``order_dict``
        are kept. If None, ``df`` is returned unchanged.

    Returns
    -------
    reordered_df : `pandas.DataFrame`
        ``df`` restricted to the selected columns, reordered.
    """
    if order_dict is None:
        return df
    # Sort column names by their assigned position (stable for ties).
    ordered_cols = sorted(order_dict, key=order_dict.get)
    return df[ordered_cols]
167,477 | import copy
import dataclasses
import functools
import math
import re
import warnings
from dataclasses import field
from typing import List
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal
The provided code snippet includes necessary dependencies for implementing the `apply_func_to_columns` function. Write a Python function `def apply_func_to_columns(row_func, cols)` to solve the following problem:
Returns a function that applies ``row_func`` to the selected ``cols``. Helper function for `~greykite.framework.output.univariate_forecast.UnivariateForecast.autocomplete_map_func_dict`. Parameters ---------- row_func : callable A function. cols : `list` [`str` or `int`] Names of the columns (or dictionary keys, list indices) to pass to ``row_func``. Returns ------- new_func : callable Takes ``row`` and returns the result of ``row_func`` applied to the selected values ``row[col]``.
Here is the function:
def apply_func_to_columns(row_func, cols):
    """Returns a function that applies ``row_func`` to the selected ``cols``.

    Helper function for
    `~greykite.framework.output.univariate_forecast.UnivariateForecast.autocomplete_map_func_dict`.

    Parameters
    ----------
    row_func : callable
        A function.
    cols : `list` [`str` or `int`]
        Names of the columns (or dictionary keys, list indices)
        to pass to ``row_func``.

    Returns
    -------
    new_func : callable
        Takes ``row`` and returns ``row_func`` applied to the
        selected values ``row[col]``, in the order given by ``cols``.
    """
    def new_func(row):
        # Pull out the requested entries positionally and unpack them.
        selected = [row[col] for col in cols]
        return row_func(*selected)
    return new_func
167,478 | import copy
import dataclasses
import functools
import math
import re
import warnings
from dataclasses import field
from typing import List
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal
The provided code snippet includes necessary dependencies for implementing the `mutable_field` function. Write a Python function `def mutable_field(mutable_default_value) -> dataclasses.field` to solve the following problem:
Can be used to set the default value in a dataclass to a mutable value. Provides a factory function that returns a copy of the provided argument. Parameters ---------- mutable_default_value : Any The default value to use for the field. Returns ------- field : `dataclasses.field` Set the default value to this value. Examples -------- >>> from dataclasses import dataclass >>> from typing import List >>> from greykite.common.python_utils import mutable_field >>> @dataclass >>> class D: >>> x: List = mutable_field([1, 2, 3]) >>> >>> assert D().x is not D().x >>> assert D().x == [1, 2, 3]
Here is the function:
def mutable_field(mutable_default_value) -> dataclasses.field:
    """Can be used to set the default value in a dataclass to a mutable value.

    Provides a factory function that returns a deep copy of the provided
    argument, so each instance gets its own independent copy.

    Parameters
    ----------
    mutable_default_value : Any
        The default value to use for the field.

    Returns
    -------
    field : `dataclasses.field`
        Set the default value to this value.

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> from typing import List
    >>> from greykite.common.python_utils import mutable_field
    >>> @dataclass
    >>> class D:
    >>>     x: List = mutable_field([1, 2, 3])
    >>>
    >>> assert D().x is not D().x
    >>> assert D().x == [1, 2, 3]
    """
    def make_default():
        # Fresh deep copy per call, so instances never share state.
        return copy.deepcopy(mutable_default_value)
    return field(default_factory=make_default)
167,479 | import copy
import dataclasses
import functools
import math
import re
import warnings
from dataclasses import field
from typing import List
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal
The provided code snippet includes necessary dependencies for implementing the `ignore_warnings` function. Write a Python function `def ignore_warnings(category)` to solve the following problem:
Returns a decorator to ignore all warnings in the specified category. Parameters ---------- category : class Any warning that is a subclass of this category is ignored. Returns ------- decorator_ignore : function A decorator that ignores all warnings in the category.
Here is the function:
def ignore_warnings(category):
    """Returns a decorator to ignore all warnings in the specified category.

    Parameters
    ----------
    category : class
        Any warning that is a subclass of this category is ignored.

    Returns
    -------
    decorator_ignore : function
        A decorator that ignores all warnings in the category.
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # Suppress only the requested category while `func` runs;
            # the global warning filter state is restored on exit.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category)
                return func(*args, **kwargs)
        return wrapped
    return decorate
167,480 | import math
from datetime import timedelta
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.enums import SimpleTimeFrequencyEnum
from greykite.common.enums import TimeEnum
from greykite.common.features.timeseries_features import get_default_origin_for_time_vars
from greykite.common.time_properties import get_canonical_data
from greykite.common.time_properties import min_gap_in_seconds
def get_default_horizon_from_period(period, num_observations=None):
    """Returns default forecast horizon based on input data period and num_observations

    :param period: float
        Period of each observation (i.e. average time between observations, in seconds)
    :param num_observations: Optional[int]
        Number of observations for training
    :return: int
        default number of periods to forecast
    """
    freq_default = get_simple_time_frequency_from_period(period).value.default_horizon
    if num_observations is None:
        return freq_default
    # Caps the horizon so there is at least twice as much training data
    # as forecast horizon; whichever limit is smaller wins.
    return min(freq_default, num_observations // 2)
def get_simple_time_frequency_from_period(period):
    """Returns SimpleTimeFrequencyEnum based on input data period

    :param period: float
        Period of each observation (i.e. average time between observations, in seconds)
    :return: SimpleTimeFrequencyEnum
        SimpleTimeFrequencyEnum is used to define default values for horizon, seasonality, etc.
        (but original data frequency is not modified)
    """
    # Each multiplier adds a small buffer over the nominal period so minor
    # irregularities (daylight savings, leap years, 31-day months) do not
    # bump the data into the next coarser frequency.
    thresholds = (
        (SimpleTimeFrequencyEnum.MINUTE, 10.05),  # <= 10 minutes is considered minute-level
        (SimpleTimeFrequencyEnum.HOUR, 6.05),     # <= 6 hours is considered hourly
        (SimpleTimeFrequencyEnum.DAY, 2.05),      # <= 2 days is considered daily
        (SimpleTimeFrequencyEnum.WEEK, 2.05),     # <= 2 weeks is considered weekly
        (SimpleTimeFrequencyEnum.MONTH, 2.05),    # <= 2 months is considered monthly
        (SimpleTimeFrequencyEnum.YEAR, 1.01),     # <= 1 year is considered yearly
    )
    for candidate, multiplier in thresholds:
        if period <= candidate.value.seconds_per_observation * multiplier:
            return candidate
    return SimpleTimeFrequencyEnum.MULTIYEAR
TIME_COL = "ts"  # Internal standard name for the time column.
VALUE_COL = "y"  # Internal standard name for the value column.
class TimeEnum(Enum):
    """Time constants"""
    # Period lengths in days.
    ONE_WEEK_IN_DAYS = 7
    ONE_MONTH_IN_DAYS = 30
    ONE_QUARTER_IN_DAYS = 90
    ONE_YEAR_IN_DAYS = 365
    # Approximate number of seconds corresponding to each period.
    # May vary for leap year, daylight savings, etc.
    ONE_MINUTE_IN_SECONDS = 60
    ONE_HOUR_IN_SECONDS = 60 * 60
    ONE_DAY_IN_SECONDS = 24 * 60 * 60
    ONE_WEEK_IN_SECONDS = 7 * 24 * 60 * 60
    ONE_MONTH_IN_SECONDS = 30 * 24 * 60 * 60
    ONE_QUARTER_IN_SECONDS = 90 * 24 * 60 * 60
    ONE_YEAR_IN_SECONDS = 365 * 24 * 60 * 60
def get_default_origin_for_time_vars(df, time_col):
    """Sets default value for origin_for_time_vars

    Parameters
    ----------
    df : `pandas.DataFrame`
        Training data. A data frame which includes the timestamp and value columns
    time_col : `str`
        The column name in `df` representing time for the time series data.

    Returns
    -------
    dt_continuous_time : `float`
        The time origin used to create continuous variables for time
    """
    # The first timestamp in the data defines the continuous-time origin.
    first_date = pd.to_datetime(df[time_col].iloc[0])
    return convert_date_to_continuous_time(first_date)
def min_gap_in_seconds(df, time_col):
    """Returns the smallest gap between observations in df[time_col].

    Assumes df[time_col] is sorted in ascending order without duplicates.

    :param df: pd.DataFrame
        input timeseries
    :param time_col: str
        time column name in `df`
    :return: float
        minimum gap between observations, in seconds
    """
    n_rows = df.shape[0]
    if n_rows < 2:
        raise ValueError(f"Must provide at least two data points. Found {n_rows}.")
    times = pd.to_datetime(df[time_col])
    smallest_gap = (times - times.shift()).min()
    # Whole-second conversion; the sub-second component is intentionally dropped.
    return smallest_gap.days * 24 * 3600 + smallest_gap.seconds
def get_canonical_data(
        df: pd.DataFrame,
        time_col: str = TIME_COL,
        value_col: str = VALUE_COL,
        freq: str = None,
        date_format: str = None,
        tz: str = None,
        train_end_date: Optional[Union[str, datetime.datetime]] = None,
        regressor_cols: List[str] = None,
        lagged_regressor_cols: List[str] = None,
        anomaly_info: Optional[Union[Dict, List[Dict]]] = None):
    """Loads data to internal representation. Parses date column,
    sets timezone aware index.
    Checks for irregularities and raises an error if input is invalid.
    Adjusts for anomalies according to ``anomaly_info``.
    Parameters
    ----------
    df : `pandas.DataFrame`
        Input timeseries. A data frame which includes the timestamp column
        as well as the value column.
    time_col : `str`
        The column name in ``df`` representing time for the time series data.
        The time column can be anything that can be parsed by pandas DatetimeIndex.
    value_col: `str`
        The column name which has the value of interest to be forecasted.
    freq : `str` or None, default None
        Timeseries frequency, DateOffset alias, If None automatically inferred.
    date_format : `str` or None, default None
        strftime format to parse time column, eg ``%m/%d/%Y``.
        Note that ``%f`` will parse all the way up to nanoseconds.
        If None (recommended), inferred by `pandas.to_datetime`.
    tz : `str` or pytz.timezone object or None, default None
        Passed to `pandas.tz_localize` to localize the timestamp.
    train_end_date : `str` or `datetime.datetime` or None, default None
        Last date to use for fitting the model. Forecasts are generated after this date.
        If None, it is set to the minimum of ``self.last_date_for_val`` and
        ``self.last_date_for_reg``.
    regressor_cols: `list` [`str`] or None, default None
        A list of regressor columns used in the training and prediction DataFrames.
        If None, no regressor columns are used.
        Regressor columns that are unavailable in ``df`` are dropped.
    lagged_regressor_cols: `list` [`str`] or None, default None
        A list of additional columns needed for lagged regressors in the training and prediction DataFrames.
        This list can have overlap with ``regressor_cols``.
        If None, no additional columns are added to the DataFrame.
        Lagged regressor columns that are unavailable in ``df`` are dropped.
    anomaly_info : `dict` or `list` [`dict`] or None, default None
        Anomaly adjustment info. Anomalies in ``df``
        are corrected before any forecasting is done.
        If None, no adjustments are made.
        A dictionary containing the parameters to
        `~greykite.common.features.adjust_anomalous_data.adjust_anomalous_data`.
        See that function for details.
        The possible keys are:
            ``"value_col"`` : `str`
                The name of the column in ``df`` to adjust. You may adjust the value
                to forecast as well as any numeric regressors.
            ``"anomaly_df"`` : `pandas.DataFrame`
                Adjustments to correct the anomalies.
            ``"start_time_col"``: `str`, default START_TIME_COL
                Start date column in ``anomaly_df``.
            ``"end_time_col"``: `str`, default END_TIME_COL
                End date column in ``anomaly_df``.
            ``"adjustment_delta_col"``: `str` or None, default None
                Impact column in ``anomaly_df``.
            ``"filter_by_dict"``: `dict` or None, default None
                Used to filter ``anomaly_df`` to the relevant anomalies for
                the ``value_col`` in this dictionary.
                Key specifies the column name, value specifies the filter value.
            ``"filter_by_value_col"``: `str` or None, default None
                Adds ``{filter_by_value_col: value_col}`` to ``filter_by_dict``
                if not None, for the ``value_col`` in this dictionary.
            ``"adjustment_method"`` : `str` ("add" or "subtract"), default "add"
                How to make the adjustment, if ``adjustment_delta_col`` is provided.
        Accepts a list of such dictionaries to adjust multiple columns in ``df``.
    Returns
    -------
    canonical_data_dict : `dict`
        Dictionary containing the dataset in canonical form, and information such as
        train end date. Keys:
            ``"df"`` : `pandas.DataFrame`
                Data frame containing timestamp and value, with standardized column names for internal use
                (TIME_COL, VALUE_COL). Rows are sorted by time index, and missing gaps between dates are filled
                in so that dates are spaced at regular intervals. Values are adjusted for anomalies
                according to ``anomaly_info``.
                The index can be timezone aware (but TIME_COL is not).
            ``"df_before_adjustment"`` : `pandas.DataFrame` or None
                ``df`` before adjustment by ``anomaly_info``.
                If ``anomaly_info`` is None, this is None.
            ``"fit_df"`` : `pandas.DataFrame`
                A subset of the returned ``df``, with data up until ``train_end_date``.
            ``"freq"`` : `pandas.DataFrame`
                timeseries frequency, inferred if not provided
            ``"time_stats"`` : `dict`
                Information about the time column:
                    ``"gaps"``: missing_dates
                    ``"added_timepoints"``: added_timepoints
                    ``"dropped_timepoints"``: dropped_timepoints
            ``"regressor_cols"`` : `list` [`str`]
                A list of regressor columns.
            ``"lagged_regressor_cols"`` : `list` [`str`]
                A list of lagged regressor columns.
            ``"fit_cols"`` : `list` [`str`]
                Names of time column, value column, regressor columns, and lagged regressor columns.
            ``"train_end_date"`` : `datetime.datetime`
                Last date or timestamp for training. It is always less than or equal to
                minimum non-null values of ``last_date_for_val`` and ``last_date_for_reg``.
            ``"last_date_for_val"`` : `datetime.datetime`
                Date or timestamp corresponding to last non-null value in ``df[value_col]``.
            ``"last_date_for_reg"`` : `datetime.datetime` or None
                Date or timestamp corresponding to last non-null value in ``df[regressor_cols]``.
                If ``regressor_cols`` is None, ``last_date_for_reg`` is None.
            ``"last_date_for_lag_reg"`` : `datetime.datetime` or None
                Date or timestamp corresponding to last non-null value in ``df[lagged_regressor_cols]``.
                If ``lagged_regressor_cols`` is None, ``last_date_for_lag_reg`` is None.
    """
    # Validates presence of required columns and minimum data size.
    if time_col not in df.columns:
        raise ValueError(f"{time_col} column is not in input data")
    if value_col not in df.columns:
        raise ValueError(f"{value_col} column is not in input data")
    if df.shape[0] <= 2:
        raise ValueError(
            f"Time series has < 3 observations. More data are needed for forecasting.")
    # Standardizes the time column name.
    # `value_col` is standardized after anomalies are adjusted.
    df_standardized = df.rename({
        time_col: TIME_COL,
    }, axis=1)
    # NOTE(review): `infer_datetime_format` is deprecated in pandas >= 2.0 — confirm supported pandas versions.
    df_standardized[TIME_COL] = pd.to_datetime(
        df_standardized[TIME_COL],
        format=date_format,
        infer_datetime_format=True)
    # Drops data points from duplicate time stamps
    df_standardized.drop_duplicates(
        subset=[TIME_COL],
        keep='first',
        inplace=True)
    if df.shape[0] > df_standardized.shape[0]:
        warnings.warn(
            f"Duplicate timestamps have been removed.",
            UserWarning)
    df = df_standardized.sort_values(by=TIME_COL)
    # Infers data frequency
    inferred_freq = infer_freq(df, TIME_COL)
    if freq is None:
        freq = inferred_freq
    elif inferred_freq is not None and freq != inferred_freq:
        warnings.warn(
            f"Provided frequency '{freq}' does not match inferred frequency '{inferred_freq}'."
            f" Using '{freq}'.", UserWarning)  # NB: with missing data, it's better to provide freq
    # Handles gaps in time series
    missing_dates = find_missing_dates(df[TIME_COL])
    df, added_timepoints, dropped_timepoints = fill_missing_dates(
        df,
        time_col=TIME_COL,
        freq=freq)
    time_stats = {
        "gaps": missing_dates,
        "added_timepoints": added_timepoints,
        "dropped_timepoints": dropped_timepoints
    }
    # Creates index with localized timestamp
    df.index = df[TIME_COL]
    df.index.name = None
    if tz is not None:
        df = df.tz_localize(tz)
    # Replaces infinity values in `value_col` by `np.nan`
    df[value_col].replace([np.inf, -np.inf], np.nan, inplace=True)
    # Saves values before adjustment.
    df_original_value_col = df.copy()
    # Standardizes `value_col` name.
    df.rename({
        value_col: VALUE_COL
    }, axis=1, inplace=True)
    # Finds date of last available value.
    # - `last_date_for_val` is the last timestamp with non-null values in `VALUE_COL`.
    # - `last_date_for_reg` is the last timestamp with non-null values in `regressor_cols`.
    # - `max_train_end_date` is inferred as the minimum of the above two.
    # `max_train_end_date` will be used to determine `train_end_date` when the latter is not provided.
    last_date_available = df[TIME_COL].max()
    last_date_for_val = df[df[VALUE_COL].notnull()][TIME_COL].max()
    last_date_for_reg = None
    if regressor_cols:
        # Keeps only the regressor columns present in `df`; warns about the rest.
        available_regressor_cols = [col for col in df.columns if col not in [TIME_COL, VALUE_COL]]
        cols_not_selected = set(regressor_cols) - set(available_regressor_cols)
        regressor_cols = [col for col in regressor_cols if col in available_regressor_cols]
        if cols_not_selected:
            warnings.warn(f"The following columns are not available to use as "
                          f"regressors: {sorted(cols_not_selected)}")
        last_date_for_reg = df[df[regressor_cols].notnull().any(axis=1)][TIME_COL].max()
        max_train_end_date = min(last_date_for_val, last_date_for_reg)
    else:
        regressor_cols = []
        max_train_end_date = last_date_for_val
    # Chooses appropriate `train_end_date`.
    # Case 1: if not provided, the last timestamp with a non-null value (`max_train_end_date`) is used.
    # Case 2: if it is out of the range of the data, raises an error since it should not be allowed.
    # Case 3: otherwise, we respect the user's input `train_end_date`. NAs are kept and can be imputed in the pipeline.
    train_end_date = pd.to_datetime(train_end_date)
    if train_end_date is None:
        train_end_date = max_train_end_date
        warnings.warn(
            f"`train_end_date` is not provided, or {value_col} column of the provided time series contains "
            f"null values at the end, or the input `train_end_date` is beyond the last timestamp available. "
            f"Setting `train_end_date` to the last timestamp with a non-null value ({train_end_date}).",
            UserWarning)
    elif train_end_date > last_date_available:
        # TODO: replace the warning with a `ValueError` and bump the version since it changes the behavior.
        train_end_date = max_train_end_date
        warnings.warn(
            f"{value_col} column of the provided time series contains "
            f"null values at the end, or the input `train_end_date` is beyond the last timestamp available. "
            f"Setting `train_end_date` to the last timestamp with a non-null value ({train_end_date}).",
            UserWarning)
    elif train_end_date > max_train_end_date:
        # Does not modify the user-input `train_end_date`, but raises a warning.
        warnings.warn(
            f"{value_col} column of the provided time series contains trailing NAs. "
            f"These NA values will be imputed in the pipeline.",
            UserWarning)
    df_before_adjustment = None
    if anomaly_info is not None:
        # Saves values before adjustment.
        df_before_adjustment = df_original_value_col.copy()
        # Adjusts columns in df (e.g. `value_col`, `regressor_cols`)
        # using the anomaly info. One dictionary of parameters
        # for `adjust_anomalous_data` is provided for each column to adjust.
        if not isinstance(anomaly_info, (list, tuple)):
            anomaly_info = [anomaly_info]
        for single_anomaly_info in anomaly_info:
            adjusted_df_dict = adjust_anomalous_data(
                df=df_original_value_col,
                time_col=TIME_COL,
                **single_anomaly_info)
            # `self.df` with values for single_anomaly_info["value_col"] adjusted.
            df_original_value_col = adjusted_df_dict["adjusted_df"]
        # Standardizes `value_col` name.
        df_before_adjustment.rename({
            value_col: VALUE_COL
        }, axis=1, inplace=True)
    # Standardizes `value_col` name.
    df = df_original_value_col.rename({
        value_col: VALUE_COL
    }, axis=1, inplace=False)
    # Processes lagged regressors.
    last_date_for_lag_reg = None
    if lagged_regressor_cols:
        # Keeps only the lagged regressor columns present in `df`; warns about the rest.
        available_regressor_cols = [col for col in df.columns if col not in [TIME_COL, VALUE_COL]]
        cols_not_selected = set(lagged_regressor_cols) - set(available_regressor_cols)
        lagged_regressor_cols = [col for col in lagged_regressor_cols if col in available_regressor_cols]
        if cols_not_selected:
            warnings.warn(f"The following columns are not available to use as "
                          f"lagged regressors: {sorted(cols_not_selected)}")
        last_date_for_lag_reg = df[df[lagged_regressor_cols].notnull().any(axis=1)][TIME_COL].max()
    else:
        lagged_regressor_cols = []
    # Fit columns: lagged regressor columns not already in `regressor_cols` are appended.
    extra_reg_cols = [col for col in df.columns if col not in regressor_cols and col in lagged_regressor_cols]
    fit_cols = [TIME_COL, VALUE_COL] + regressor_cols + extra_reg_cols
    fit_df = df[df[TIME_COL] <= train_end_date][fit_cols]
    return {
        "df": df,
        "df_before_adjustment": df_before_adjustment,
        "fit_df": fit_df,
        "freq": freq,
        "time_stats": time_stats,
        "regressor_cols": regressor_cols,
        "lagged_regressor_cols": lagged_regressor_cols,
        "fit_cols": fit_cols,
        "train_end_date": train_end_date,
        "last_date_for_val": last_date_for_val,
        "last_date_for_reg": last_date_for_reg,
        "last_date_for_lag_reg": last_date_for_lag_reg,
    }
The provided code snippet includes necessary dependencies for implementing the `get_forecast_time_properties` function. Write a Python function `def get_forecast_time_properties( df, time_col=TIME_COL, value_col=VALUE_COL, freq=None, date_format=None, regressor_cols=None, lagged_regressor_cols=None, train_end_date=None, forecast_horizon=None)` to solve the following problem:
Returns the number of training points in `df`, the start year, and prediction end year Parameters ---------- df : `pandas.DataFrame` with columns [``time_col``, ``value_col``] Univariate timeseries data to forecast time_col : `str`, default ``TIME_COL`` in constants.py Name of timestamp column in df value_col : `str`, default ``VALUE_COL`` in constants.py Name of value column in df (the values to forecast) freq : `str` or None, default None Frequency of input data. Used to generate future dates for prediction. Frequency strings can have multiples, e.g. '5H'. See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases for a list of frequency aliases. If None, inferred by `~greykite.common.time_properties.infer_freq`. Provide this parameter if ``df`` has missing timepoints. date_format : `str` or None, default None strftime format to parse time column, eg ``%m/%d/%Y``. Note that ``%f`` will parse all the way up to nanoseconds. If None (recommended), inferred by `pandas.to_datetime`. regressor_cols : `list` [`str`] or None, optional, default None A list of regressor columns used in the training and prediction DataFrames. If None, no regressor columns are used. Regressor columns that are unavailable in ``df`` are dropped. lagged_regressor_cols : `list` [`str`] or None, optional, default None A list of lagged regressor columns used in the training and prediction DataFrames. If None, no lagged regressor columns are used. Lagged regressor columns that are unavailable in ``df`` are dropped. train_end_date : `datetime.datetime`, optional, default None Last date to use for fitting the model. Forecasts are generated after this date. If None, it is set to the last date with a non-null value in ``value_col`` of ``df``. forecast_horizon : `int` or None, default None Number of periods to forecast into the future. 
Must be > 0 If None, default is determined from input data frequency Returns ------- time_properties : `dict` [`str`, `any`] Time properties dictionary with keys: ``"period"`` : `int` Period of each observation (i.e. minimum time between observations, in seconds). ``"simple_freq"`` : `SimpleTimeFrequencyEnum` ``SimpleTimeFrequencyEnum`` member corresponding to data frequency. ``"num_training_points"`` : `int` Number of observations for training. ``"num_training_days"`` : `int` Number of days for training. ``"days_per_observation"``: `float` The time frequency in day units. ``"forecast_horizon"``: `int` The number of time intervals for which forecast is needed. ``"forecast_horizon_in_timedelta"``: `datetime.timedelta` The forecast horizon length in timedelta units. ``"forecast_horizon_in_days"``: `float` The forecast horizon length in day units. ``"start_year"`` : `int` Start year of the training period. ``"end_year"`` : `int` End year of the forecast period. ``"origin_for_time_vars"`` : `float` Continuous time representation of the first date in ``df``.
Here is the function:
def get_forecast_time_properties(
        df,
        time_col=TIME_COL,
        value_col=VALUE_COL,
        freq=None,
        date_format=None,
        regressor_cols=None,
        lagged_regressor_cols=None,
        train_end_date=None,
        forecast_horizon=None):
    """Computes time-related properties of the training data and the forecast period:
    number of training points/days, observation period, start year, and the year
    in which the forecast period ends.

    Parameters
    ----------
    df : `pandas.DataFrame` with columns [``time_col``, ``value_col``]
        Univariate timeseries data to forecast.
    time_col : `str`, default ``TIME_COL`` in constants.py
        Name of the timestamp column in ``df``.
    value_col : `str`, default ``VALUE_COL`` in constants.py
        Name of the value column in ``df`` (the values to forecast).
    freq : `str` or None, default None
        Frequency of the input data, e.g. "D", "5H". See
        https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
        for a list of frequency aliases.
        If None, inferred by `~greykite.common.time_properties.infer_freq`.
        Provide this parameter if ``df`` has missing timepoints.
    date_format : `str` or None, default None
        strftime format to parse the time column, e.g. ``%m/%d/%Y``.
        Note that ``%f`` parses all the way up to nanoseconds.
        If None (recommended), inferred by `pandas.to_datetime`.
    regressor_cols : `list` [`str`] or None, optional, default None
        Regressor columns used in the training and prediction DataFrames.
        If None, no regressor columns are used.
        Regressor columns unavailable in ``df`` are dropped.
    lagged_regressor_cols : `list` [`str`] or None, optional, default None
        Lagged regressor columns used in the training and prediction DataFrames.
        If None, no lagged regressor columns are used.
        Lagged regressor columns unavailable in ``df`` are dropped.
    train_end_date : `datetime.datetime`, optional, default None
        Last date to use for fitting the model. Forecasts are generated
        after this date. If None, it is set to the last date with a
        non-null value in ``value_col`` of ``df``.
    forecast_horizon : `int` or None, default None
        Number of periods to forecast into the future. Must be > 0.
        If None, a default is determined from the input data frequency.

    Returns
    -------
    time_properties : `dict` [`str`, `any`]
        Time properties dictionary with keys:

            ``"period"`` : `int`
                Period of each observation (i.e. minimum time between
                observations, in seconds).
            ``"simple_freq"`` : `SimpleTimeFrequencyEnum`
                ``SimpleTimeFrequencyEnum`` member corresponding to data frequency.
            ``"num_training_points"`` : `int`
                Number of observations for training.
            ``"num_training_days"`` : `int`
                Number of days for training.
            ``"days_per_observation"`` : `float`
                The time frequency in day units.
            ``"forecast_horizon"`` : `int`
                The number of time intervals for which forecast is needed.
            ``"forecast_horizon_in_timedelta"`` : `datetime.timedelta`
                The forecast horizon length in timedelta units.
            ``"forecast_horizon_in_days"`` : `float`
                The forecast horizon length in day units.
            ``"start_year"`` : `int`
                Start year of the training period.
            ``"end_year"`` : `int`
                End year of the forecast period.
            ``"origin_for_time_vars"`` : `float`
                Continuous time representation of the first date in ``df``.
    """
    if regressor_cols is None:
        regressor_cols = []
    # Canonicalizes the input: time column parsed to datetime, data cut off
    # at ``train_end_date``. ``fit_df`` is the portion available for fitting.
    canonical_data_dict = get_canonical_data(
        df=df,
        time_col=time_col,
        value_col=value_col,
        freq=freq,
        date_format=date_format,
        train_end_date=train_end_date,
        regressor_cols=regressor_cols,
        lagged_regressor_cols=lagged_regressor_cols)
    fit_df = canonical_data_dict["fit_df"]

    # Basic properties of the training window
    train_start = fit_df[TIME_COL].min()
    train_end = fit_df[TIME_COL].max()
    num_training_points = fit_df.shape[0]
    start_year = train_start.year
    origin_for_time_vars = get_default_origin_for_time_vars(fit_df, TIME_COL)

    # Observation period (in seconds) and the matching frequency enum
    period = min_gap_in_seconds(df=fit_df, time_col=TIME_COL)
    simple_freq = get_simple_time_frequency_from_period(period)
    days_per_observation = period / TimeEnum.ONE_DAY_IN_SECONDS.value

    # Fractional number of days in the training set. One extra ``period``
    # is added so the final observation's interval is counted.
    train_span = train_end - train_start
    num_training_days = (
        train_span.days
        + (train_span.seconds + period) / TimeEnum.ONE_DAY_IN_SECONDS.value)

    # Forecast horizon, expressed as a number of periods
    if forecast_horizon is None:
        # expected to be kept in sync with default value set in ``get_default_time_parameters``
        forecast_horizon = get_default_horizon_from_period(
            period=period,
            num_observations=num_training_points)
    forecast_horizon_in_days = forecast_horizon * days_per_observation
    forecast_horizon_in_timedelta = timedelta(days=forecast_horizon_in_days)

    # Calendar year in which the forecast period ends
    future_end = train_end + timedelta(days=math.ceil(forecast_horizon_in_days))
    end_year = future_end.year

    return {
        "period": period,
        "simple_freq": simple_freq,
        "num_training_points": num_training_points,
        "num_training_days": num_training_days,
        "days_per_observation": days_per_observation,
        "forecast_horizon": forecast_horizon,
        "forecast_horizon_in_timedelta": forecast_horizon_in_timedelta,
        "forecast_horizon_in_days": forecast_horizon_in_days,
        "start_year": start_year,
        "end_year": end_year,
        "origin_for_time_vars": origin_for_time_vars
    }
Must be > 0 If None, default is determined from input data frequency Returns ------- time_properties : `dict` [`str`, `any`] Time properties dictionary with keys: ``"period"`` : `int` Period of each observation (i.e. minimum time between observations, in seconds). ``"simple_freq"`` : `SimpleTimeFrequencyEnum` ``SimpleTimeFrequencyEnum`` member corresponding to data frequency. ``"num_training_points"`` : `int` Number of observations for training. ``"num_training_days"`` : `int` Number of days for training. ``"days_per_observation"``: `float` The time frequency in day units. ``"forecast_horizon"``: `int` The number of time intervals for which forecast is needed. ``"forecast_horizon_in_timedelta"``: `datetime.timedelta` The forecast horizon length in timedelta units. ``"forecast_horizon_in_days"``: `float` The forecast horizon length in day units. ``"start_year"`` : `int` Start year of the training period. ``"end_year"`` : `int` End year of the forecast period. ``"origin_for_time_vars"`` : `float` Continuous time representation of the first date in ``df``. |
167,481 | import dataclasses
import functools
from typing import Dict
from typing import Optional
import numpy as np
import pandas as pd
from greykite.algo.forecast.silverkite.forecast_silverkite import SilverkiteForecast
from greykite.common.constants import TimeFeaturesEnum
from greykite.common.features.timeseries_lags import build_autoreg_df_multi
from greykite.common.python_utils import dictionaries_values_to_lists
from greykite.common.python_utils import unique_in_list
from greykite.common.python_utils import update_dictionaries
from greykite.common.python_utils import update_dictionary
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.base_template import BaseTemplate
from greykite.sklearn.estimator.base_forecast_estimator import BaseForecastEstimator
from greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator
def unique_in_list(array, ignored_elements=()):
"""Returns unique elements in ``array``, removing
all levels of nesting if found.
Parameters
----------
array : `list` [`any`] or None
List of items, with arbitrary level of nesting.
ignored_elements : `tuple`, default ()
Elements not to include in the output
Returns
-------
unique_elements : `list`
Unique elements in array, ignoring up to
`level` levels of nesting.
Elements that are `None` are removed from the output.
"""
unique_elements = set()
if array is not None:
for item in array:
if isinstance(item, (list, tuple)):
unique_in_item = unique_in_list(item, ignored_elements=ignored_elements)
unique_in_item = set(unique_in_item) if unique_in_item is not None else {}
unique_elements.update(unique_in_item)
elif item not in ignored_elements:
unique_elements.add(item)
return list(unique_elements) if unique_elements else None
The provided code snippet includes necessary dependencies for implementing the `get_extra_pred_cols` function. Write a Python function `def get_extra_pred_cols(model_components=None)` to solve the following problem:
Gets extra predictor columns from the model components for :func:`~greykite.framework.templates.silverkite_templates.silverkite_template`. Parameters ---------- model_components : :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` or None, default None Configuration of model growth, seasonality, events, etc. See :func:`~greykite.framework.templates.silverkite_templates.silverkite_template` for details. Returns ------- extra_pred_cols : `list` [`str`] All extra predictor columns used in any hyperparameter set requested by ``model_components.custom["extra_pred_cols]``. Regressors are included in this list. None if there are no extra predictor columns.
Here is the function:
def get_extra_pred_cols(model_components=None):
"""Gets extra predictor columns from the model components for
:func:`~greykite.framework.templates.silverkite_templates.silverkite_template`.
Parameters
----------
model_components : :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` or None, default None
Configuration of model growth, seasonality, events, etc.
See :func:`~greykite.framework.templates.silverkite_templates.silverkite_template`
for details.
Returns
-------
extra_pred_cols : `list` [`str`]
All extra predictor columns used in any hyperparameter set
requested by ``model_components.custom["extra_pred_cols]``.
Regressors are included in this list.
None if there are no extra predictor columns.
"""
if model_components is not None and model_components.custom is not None:
# ``extra_pred_cols`` is a list of strings to initialize
# SilverkiteEstimator.extra_pred_cols, or a list of
# such lists.
extra_pred_cols = model_components.custom.get("extra_pred_cols", [])
else:
extra_pred_cols = []
return unique_in_list(
array=extra_pred_cols,
ignored_elements=(None,)) | Gets extra predictor columns from the model components for :func:`~greykite.framework.templates.silverkite_templates.silverkite_template`. Parameters ---------- model_components : :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` or None, default None Configuration of model growth, seasonality, events, etc. See :func:`~greykite.framework.templates.silverkite_templates.silverkite_template` for details. Returns ------- extra_pred_cols : `list` [`str`] All extra predictor columns used in any hyperparameter set requested by ``model_components.custom["extra_pred_cols]``. Regressors are included in this list. None if there are no extra predictor columns. |
167,482 | import dataclasses
import functools
from typing import Dict
from typing import Optional
import numpy as np
import pandas as pd
from greykite.algo.forecast.silverkite.forecast_silverkite import SilverkiteForecast
from greykite.common.constants import TimeFeaturesEnum
from greykite.common.features.timeseries_lags import build_autoreg_df_multi
from greykite.common.python_utils import dictionaries_values_to_lists
from greykite.common.python_utils import unique_in_list
from greykite.common.python_utils import update_dictionaries
from greykite.common.python_utils import update_dictionary
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.base_template import BaseTemplate
from greykite.sklearn.estimator.base_forecast_estimator import BaseForecastEstimator
from greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator
class SilverkiteForecast():
def __init__(
self,
constants: SilverkiteSeasonalityEnumMixin = default_silverkite_constant):
self._silverkite_seasonality_enum: Type[SilverkiteSeasonalityEnum] = constants.get_silverkite_seasonality_enum()
def forecast(
self,
df,
time_col,
value_col,
freq=None,
origin_for_time_vars=None,
extra_pred_cols=None,
drop_pred_cols=None,
explicit_pred_cols=None,
train_test_thresh=None,
training_fraction=0.9, # This is for internal ML models validation. The final returned model will be trained on all data.
fit_algorithm="linear",
fit_algorithm_params=None,
daily_event_df_dict=None,
daily_event_neighbor_impact=None,
daily_event_shifted_effect=None,
fs_components_df=pd.DataFrame({
"name": [
TimeFeaturesEnum.tod.value,
TimeFeaturesEnum.tow.value,
TimeFeaturesEnum.toy.value],
"period": [24.0, 7.0, 1.0],
"order": [3, 3, 5],
"seas_names": ["daily", "weekly", "yearly"]}),
autoreg_dict=None,
past_df=None,
lagged_regressor_dict=None,
changepoints_dict=None,
seasonality_changepoints_dict=None,
changepoint_detector=None,
min_admissible_value=None,
max_admissible_value=None,
uncertainty_dict=None,
normalize_method=None,
adjust_anomalous_dict=None,
impute_dict=None,
regression_weight_col=None,
forecast_horizon=None,
simulation_based=False,
simulation_num=10,
fast_simulation=False,
remove_intercept=False):
"""A function for forecasting.
It captures growth, seasonality, holidays and other patterns.
See "Capturing the time-dependence in the precipitation process for
weather risk assessment" as a reference:
https://link.springer.com/article/10.1007/s00477-016-1285-8
Parameters
----------
df : `pandas.DataFrame`
A data frame which includes the timestamp column
as well as the value column.
time_col : `str`
The column name in ``df`` representing time for the time series data.
The time column can be anything that can be parsed by pandas DatetimeIndex.
value_col: `str`
The column name which has the value of interest to be forecasted.
freq: `str`, optional, default None
The intended timeseries frequency, DateOffset alias.
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
If None automatically inferred. This frequency will be passed through
this function as a part of the trained model and used at predict time
if needed.
If data include missing timestamps, and frequency is monthly/annual,
user should pass this parameter, as it cannot be inferred.
origin_for_time_vars : `float`, optional, default None
The time origin used to create continuous variables for time.
If None, uses the first record in ``df``.
extra_pred_cols : `list` of `str`, default None
Names of the extra predictor columns.
If None, uses ["ct1"], a simple linear growth term.
It can leverage regressors included in ``df`` and those generated
by the other parameters. The following effects will not be modeled
unless specified in ``extra_pred_cols``:
- included in ``df``: e.g. macro-economic factors, related timeseries
- from `~greykite.common.features.timeseries_features.build_time_features_df`:
e.g. ct1, ct_sqrt, dow, ...
- from ``daily_event_df_dict``: e.g. "events_India", ...
The columns corresponding to the following parameters are included
in the model without specification in ``extra_pred_cols``.
``extra_pred_cols`` can be used to add interactions with these terms.
changepoints_dict: e.g. changepoint0, changepoint1, ...
fs_components_df: e.g. sin2_dow, cos4_dow_weekly
autoreg_dict: e.g. x_lag1, x_avglag_2_3_4, y_avglag_1_to_5
If a regressor is passed in ``df``, it needs to be provided to
the associated predict function:
``predict_silverkite``: via ``fut_df`` or ``new_external_regressor_df``
``silverkite.predict_n(_no_sim``: via ``new_external_regressor_df``
drop_pred_cols : `list` [`str`] or None, default None
Names of predictor columns to be dropped from the final model.
Ignored if None
explicit_pred_cols : `list` [`str`] or None, default None
Names of the explicit predictor columns which will be
the only variables in the final model. Note that this overwrites
the generated predictors in the model and may include new
terms not appearing in the predictors (e.g. interaction terms).
Ignored if None
train_test_thresh : `datetime.datetime`, optional
e.g. datetime.datetime(2019, 6, 30)
The threshold for training and testing split.
Note that the final returned model is trained using all data.
If None, training split is based on ``training_fraction``
training_fraction : `float`, optional
The fraction of data used for training (0.0 to 1.0)
Used only if ``train_test_thresh`` is None.
If this is also None or 1.0, then we skip testing
and train on the entire dataset.
fit_algorithm : `str`, optional, default "linear"
The type of predictive model used in fitting.
See `~greykite.algo.common.ml_models.fit_model_via_design_matrix`
for available options and their parameters.
fit_algorithm_params : `dict` or None, optional, default None
Parameters passed to the requested fit_algorithm.
If None, uses the defaults in `~greykite.algo.common.ml_models.fit_model_via_design_matrix`.
daily_event_df_dict : `dict` or None, optional, default None
A dictionary of data frames, each representing events data for the corresponding key.
The DataFrame has two columns:
- The first column contains event dates. Must be in a format
recognized by `pandas.to_datetime`. Must be at daily
frequency for proper join. It is joined against the time
in ``df``, converted to a day:
``pd.to_datetime(pd.DatetimeIndex(df[time_col]).date)``.
- the second column contains the event label for each date
The column order is important; column names are ignored.
The event dates must span their occurrences in both the training
and future prediction period.
During modeling, each key in the dictionary is mapped to a categorical variable
named ``f"{EVENT_PREFIX}_{key}"``, whose value at each timestamp is specified
by the corresponding DataFrame.
For example, to manually specify a yearly event on September 1
during a training/forecast period that spans 2020-2022::
daily_event_df_dict = {
"custom_event": pd.DataFrame({
"date": ["2020-09-01", "2021-09-01", "2022-09-01"],
"label": ["is_event", "is_event", "is_event"]
})
}
It's possible to specify multiple events in the same df. Two events,
``"sep"`` and ``"oct"`` are specified below for 2020-2021::
daily_event_df_dict = {
"custom_event": pd.DataFrame({
"date": ["2020-09-01", "2020-10-01", "2021-09-01", "2021-10-01"],
"event_name": ["sep", "oct", "sep", "oct"]
})
}
Use multiple keys if two events may fall on the same date. These events
must be in separate DataFrames::
daily_event_df_dict = {
"fixed_event": pd.DataFrame({
"date": ["2020-09-01", "2021-09-01", "2022-09-01"],
"event_name": "fixed_event"
}),
"moving_event": pd.DataFrame({
"date": ["2020-09-01", "2021-08-28", "2022-09-03"],
"event_name": "moving_event"
}),
}
The multiple event specification can be used even if events never overlap. An
equivalent specification to the second example::
daily_event_df_dict = {
"sep": pd.DataFrame({
"date": ["2020-09-01", "2021-09-01"],
"event_name": "is_event"
}),
"oct": pd.DataFrame({
"date": ["2020-10-01", "2021-10-01"],
"event_name": "is_event"
}),
}
.. note::
The events you want to use must be specified in ``extra_pred_cols``.
These take the form: ``f"{EVENT_PREFIX}_{key}"``, where
`~greykite.common.constants.EVENT_PREFIX` is the constant.
Do not use `~greykite.common.constants.EVENT_DEFAULT`
in the second column. This is reserved to indicate dates that do not
correspond to an event.
daily_event_neighbor_impact : `int`, `list` [`int`], callable or None, default None
The impact of neighboring timestamps of the events in ``event_df_dict``.
This is for daily events so the units below are all in days.
For example, if the data is weekly ("W-SUN") and an event is daily,
it may not exactly fall on the weekly date.
But you can specify for New Year's day on 1/1, it affects all dates
in the week, e.g. 12/31, 1/1, ..., 1/6, then it will be mapped to the weekly date.
In this case you may want to map a daily event's date to a few dates,
and can specify
``neighbor_impact=lambda x: [x-timedelta(days=x.isocalendar()[2]-1) + timedelta(days=i) for i in range(7)]``.
Another example is that the data is rolling 7 day daily data,
thus a holiday may affect the t, t+1, ..., t+6 dates.
You can specify ``neighbor_impact=7``.
If input is `int`, the mapping is t, t+1, ..., t+neighbor_impact-1.
If input is `list`, the mapping is [t+x for x in neighbor_impact].
If input is a function, it maps each daily event's date to a list of dates.
daily_event_shifted_effect : `list` [`str`] or None, default None
Additional neighbor events based on given events.
For example, passing ["-1D", "7D"] will add extra daily events which are 1 day before
and 7 days after the given events.
Offset format is {d}{freq} with any integer plus a frequency string.
Must be parsable by pandas ``to_offset``.
The new events' names will be the current events' names with suffix "{offset}_before" or "{offset}_after".
For example, if we have an event named "US_Christmas Day",
a "7D" shift will have name "US_Christmas Day_7D_after".
This is useful when you expect an offset of the current holidays also has impact on the
time series, or you want to interact the lagged terms with autoregression.
If ``daily_event_neighbor_impact`` is also specified, this will be applied after adding neighboring days.
fs_components_df : `pandas.DataFrame` or None, optional
A dataframe with information about fourier series generation.
Must contain columns with following names:
"name": name of the timeseries feature e.g. "tod", "tow" etc.
"period": Period of the fourier series, optional, default 1.0
"order": Order of the fourier series, optional, default 1.0
"seas_names": season names corresponding to the name
(e.g. "daily", "weekly" etc.), optional.
Default includes daily, weekly , yearly seasonality.
autoreg_dict : `dict` or `str` or None, optional, default `None`
If a `dict`: A dictionary with arguments for `~greykite.common.features.timeseries_lags.build_autoreg_df`.
That function's parameter ``value_col`` is inferred from the input of
current function ``self.forecast``. Other keys are:
``"lag_dict"`` : `dict` or None
``"agg_lag_dict"`` : `dict` or None
``"series_na_fill_func"`` : callable
If a `str`: The string will represent a method and a dictionary will be
constructed using that `str`.
Currently only implemented method is "auto" which uses
`~greykite.algo.forecast.silverkite.SilverkiteForecast.__get_default_autoreg_dict`
to create a dictionary.
See more details for above parameters in
`~greykite.common.features.timeseries_lags.build_autoreg_df`.
past_df : `pandas.DataFrame` or None, default None
The past df used for building autoregression features.
This is not necessarily needed since imputation is possible.
However, it is recommended to provide ``past_df`` for more accurate
autoregression features and faster training (by skipping imputation).
The columns are:
time_col : `pandas.Timestamp` or `str`
The timestamps.
value_col : `float`
The past values.
addition_regressor_cols : `float`
Any additional regressors.
Note that this ``past_df`` is assumed to immediately precede ``df`` without gaps,
otherwise an error will be raised.
lagged_regressor_dict : `dict` or None, default None
A dictionary with arguments for `~greykite.common.features.timeseries_lags.build_autoreg_df_multi`.
The keys of the dictionary are the target lagged regressor column names.
It can leverage the regressors included in ``df``.
The value of each key is either a `dict` or `str`.
If `dict`, it has the following keys:
``"lag_dict"`` : `dict` or None
``"agg_lag_dict"`` : `dict` or None
``"series_na_fill_func"`` : callable
If `str`, it represents a method and a dictionary will be constructed using that `str`.
Currently the only implemented method is "auto" which uses
`~greykite.algo.forecast.silverkite.SilverkiteForecast.__get_default_lagged_regressor_dict`
to create a dictionary for each lagged regressor.
An example::
lagged_regressor_dict = {
"regressor1": {
"lag_dict": {"orders": [1, 2, 3]},
"agg_lag_dict": {
"orders_list": [[7, 7 * 2, 7 * 3]],
"interval_list": [(8, 7 * 2)]},
"series_na_fill_func": lambda s: s.bfill().ffill()},
"regressor2": "auto"}
Check the docstring of `~greykite.common.features.timeseries_lags.build_autoreg_df_multi`
for more details for each argument.
changepoints_dict : `dict` or None, optional, default None
Specifies the changepoint configuration.
"method": `str`
The method to locate changepoints.
Valid options:
- "uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
- "custom". Places changepoints at the specified dates.
- "auto". Automatically detects change points. For configuration, see
`~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector.find_trend_changepoints`
Additional keys to provide parameters for each particular method are described below.
"continuous_time_col": `str`, optional
Column to apply ``growth_func`` to, to generate changepoint features
Typically, this should match the growth term in the model
"growth_func": Optional[func]
Growth function (scalar -> scalar). Changepoint features are created
by applying ``growth_func`` to ``continuous_time_col`` with offsets.
If None, uses identity function to use ``continuous_time_col`` directly
as growth term
If changepoints_dict["method"] == "uniform", this other key is required:
``"n_changepoints"``: int
number of changepoints to evenly space across training period
If changepoints_dict["method"] == "custom", this other key is required:
``"dates"``: Iterable[Union[int, float, str, datetime]]
Changepoint dates. Must be parsable by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset.
If changepoints_dict["method"] == "auto", the keys that matches the parameters in
`~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector.find_trend_changepoints`,
except ``df``, ``time_col`` and ``value_col``, are optional.
Extra keys also include "dates", "combine_changepoint_min_distance" and "keep_detected" to specify
additional custom trend changepoints. These three parameters correspond to the three parameters
"custom_changepoint_dates", "min_distance" and "keep_detected" in
`~greykite.algo.changepoint.adalasso.changepoints_utils.combine_detected_and_custom_trend_changepoints`.
seasonality_changepoints_dict : `dict` or None, default `None`
The parameter dictionary for seasonality change point detection. Parameters are in
`~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector.find_seasonality_changepoints`.
Note ``df``, ``time_col``, ``value_col`` and ``trend_changepoints`` are auto populated,
and do not need to be provided.
changepoint_detector : `ChangepointDetector` or `None`, default `None`
The ChangepointDetector class
:class:`~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector`.
This is specifically for
`~greykite.algo.forecast.silverkite.forecast_simple_silverkite.forecast_simple_silverkite`
to pass the ChangepointDetector class for plotting purposes, in case that users use
``forecast_simple_silverkite`` with ``changepoints_dict["method"] == "auto"``. The
trend change point detection has to be run there to include possible interaction terms,
so we need to pass the detection result from there to include in the output.
min_admissible_value : `float` or None, optional, default `None`
The minimum admissible value to return during prediction.
If None, no limit is applied.
max_admissible_value : `float` or None, optional, default `None`
The maximum admissible value to return during prediction.
If None, no limit is applied.
uncertainty_dict : `dict` or None, optional, default `None`
How to fit the uncertainty model. A dictionary with keys:
``"uncertainty_method"`` : `str`
The title of the method.
Only "simple_conditional_residuals" is implemented
in ``fit_ml_model`` which calculates CIs using residuals
``"params"`` : `dict`
A dictionary of parameters needed for
the requested ``uncertainty_method``. For example, for
``uncertainty_method="simple_conditional_residuals"``, see
parameters of `~greykite.algo.uncertainty.conditional.conf_interval.conf_interval`:
``"conditional_cols"``
``"quantiles"``
``"quantile_estimation_method"``
``"sample_size_thresh"``
``"small_sample_size_method"``
``"small_sample_size_quantile"``
If None, no uncertainty intervals are calculated.
normalize_method : `str` or None, default None
If a string is provided, it will be used as the normalization method
in `~greykite.common.features.normalize.normalize_df`, passed via
the argument ``method``.
Available options are: "zero_to_one", "statistical", "minus_half_to_half", "zero_at_origin".
If None, no normalization will be performed.
See that function for more details.
adjust_anomalous_dict : `dict` or None, default None
If not None, a dictionary with following items:
- "func" : `callable`
A function to perform adjustment of anomalous data with following
signature::
adjust_anomalous_dict["func"](
df=df,
time_col=time_col,
value_col=value_col,
**params) ->
{"adjusted_df": adjusted_df, ...}
- "params" : `dict`
The extra parameters to be passed to the function above.
impute_dict : `dict` or None, default None
If not None, a dictionary with following items:
- "func" : `callable`
A function to perform imputations with following
signature::
impute_dict["func"](
df=df,
value_col=value_col,
**impute_dict["params"] ->
{"df": imputed_df, ...}
- "params" : `dict`
The extra parameters to be passed to the function above.
regression_weight_col : `str` or None, default None
The column name for the weights to be used in weighted regression version
of applicable machine-learning models.
forecast_horizon : `int` or None, default None
The number of periods for which forecast is needed.
Note that this is only used in deciding what parameters should be
used for certain components e.g. autoregression, if automatic methods
are requested. While, the prediction time forecast horizon could be different
from this variable, ideally they should be the same.
simulation_based : `bool`, default False
Boolean to specify if the future predictions are to be using simulations
or not.
Note that this is only used in deciding what parameters should be
used for certain components e.g. autoregression, if automatic methods
are requested. However, the auto-settings and the prediction settings
regarding using simulations should match.
simulation_num : `int`, default 10
The number of simulations for when simulations are used for generating
forecasts and prediction intervals.
fast_simulation: `bool`, default False
Determines whether fast simulations are to be used. This only impacts models
which include auto-regression. This method will only generate one simulation
without any error being added and then add the error using the volatility
model. The advantage is a major boost in speed during inference and the
disadvantage is potentially less accurate prediction intervals.
remove_intercept : `bool`, default False
Whether to remove explicit and implicit intercepts.
By default, `patsy` will make the design matrix always full rank.
It will always include an intercept term unless we specify "-1" or "+0".
However, if there are categorical variables, even we specify "-1" or "+0",
it will include an implicit intercept by adding all levels of a categorical
variable into the design matrix.
Sometimes we don't want this to happen.
Setting this parameter to True will remove both explicit and implicit intercepts.
Returns
-------
trained_model : `dict`
A dictionary that includes the fitted model from the function
:func:`~greykite.algo.common.ml_models.fit_ml_model_with_evaluation`.
The keys are:
df_dropna: `pandas.DataFrame`
The ``df`` with NAs dropped.
df: `pandas.DataFrame`
The original ``df``.
num_training_points: `int`
The number of training points.
features_df: `pandas.DataFrame`
The ``df`` with augmented time features.
min_timestamp: `pandas.Timestamp`
The minimum timestamp in data.
max_timestamp: `pandas.Timestamp`
The maximum timestamp in data.
freq: `str`
The data frequency.
inferred_freq: `str`
The data frequency inferred from data.
inferred_freq_in_secs : `float`
The data frequency inferred from data in seconds.
inferred_freq_in_days: `float`
The data frequency inferred from data in days.
time_col: `str`
The time column name.
value_col: `str`
The value column name.
origin_for_time_vars: `float`
The first time stamp converted to a float number.
fs_components_df: `pandas.DataFrame`
The dataframe that specifies the seasonality Fourier configuration.
autoreg_dict: `dict`
The dictionary that specifies the autoregression configuration.
lagged_regressor_dict: `dict`
The dictionary that specifies the lagged regressors configuration.
lagged_regressor_cols: `list` [`str`]
List of regressor column names used for lagged regressor
normalize_method: `str`
The normalization method.
See the function input parameter ``normalize_method``.
daily_event_df_dict: `dict`
The dictionary that specifies daily events configuration.
changepoints_dict: `dict`
The dictionary that specifies changepoints configuration.
changepoint_values: `list` [`float`]
The list of changepoints in continuous time values.
normalized_changepoint_values : `list` [`float`]
The list of changepoints in continuous time values normalized to 0 to 1.
continuous_time_col: `str`
The continuous time column name in ``features_df``.
growth_func: `func`
The growth function used in changepoints, None is linear function.
fs_func: `func`
The function used to generate Fourier series for seasonality.
has_autoreg_structure: `bool`
Whether the model has autoregression structure.
autoreg_func: `func`
The function to generate autoregression columns.
min_lag_order: `int`
Minimal lag order in autoregression.
max_lag_order: `int`
Maximal lag order in autoregression.
has_lagged_regressor_structure: `bool`
Whether the model has lagged regressor structure.
lagged_regressor_func: `func`
The function to generate lagged regressor columns.
min_lagged_regressor_order: `int`
Minimal lag order in lagged regressors.
max_lagged_regressor_order: `int`
Maximal lag order in lagged regressors.
uncertainty_dict: `dict`
The dictionary that specifies uncertainty model configuration.
pred_cols: `list` [`str`]
List of predictor names.
last_date_for_fit: `str` or `pandas.Timestamp`
The last timestamp used for fitting.
trend_changepoint_dates: `list` [`pandas.Timestamp`]
List of trend changepoints.
changepoint_detector: `class`
The `ChangepointDetector` class used to detect trend changepoints.
seasonality_changepoint_dates: `list` [`pandas.Timestamp`]
List of seasonality changepoints.
seasonality_changepoint_result: `dict`
The seasonality changepoint detection results.
fit_algorithm: `str`
The algorithm used to fit the model.
fit_algorithm_params: `dict`
The dictionary of parameters for ``fit_algorithm``.
adjust_anomalous_info: `dict`
A dictionary that has anomaly adjustment results.
impute_info: `dict`
A dictionary that has the imputation results.
forecast_horizon: `int`
The forecast horizon in steps.
forecast_horizon_in_days: `float`
The forecast horizon in days.
forecast_horizon_in_timedelta: `datetime.timedelta`
The forecast horizon in timedelta.
simulation_based: `bool`
Whether to use simulation in prediction with autoregression terms.
simulation_num : `int`, default 10
The number of simulations for when simulations are used for generating
forecasts and prediction intervals.
train_df : `pandas.DataFrame`
The past dataframe used to generate AR terms.
It includes the concatenation of ``past_df`` and ``df`` if ``past_df`` is provided,
otherwise it is the ``df`` itself.
drop_intercept_col : `str` or None
The intercept column, explicit or implicit, to be dropped.
"""
df = df.copy()
df[time_col] = pd.to_datetime(df[time_col])
num_training_points = df.shape[0]
adjust_anomalous_info = None
if simulation_num is not None:
assert simulation_num > 0, "simulation number must be a natural number"
if past_df is not None:
if past_df.shape[0] == 0:
past_df = None
else:
past_df = past_df.copy()
past_df[time_col] = pd.to_datetime(past_df[time_col])
past_df = past_df.sort_values(by=time_col)
# Adjusts anomalies if requested
if adjust_anomalous_dict is not None:
adjust_anomalous_info = adjust_anomalous_dict["func"](
df=df,
time_col=time_col,
value_col=value_col,
**adjust_anomalous_dict["params"])
df = adjust_anomalous_info["adjusted_df"]
impute_info = None
if impute_dict is not None:
impute_info = impute_dict["func"](
df=df,
value_col=value_col,
**impute_dict["params"])
df = impute_info["df"]
# Calculates time properties of the series
# We include these properties in the returned `trained_model` object
time_stats = describe_timeseries(df, time_col=time_col)
max_timestamp = time_stats["max_timestamp"]
min_timestamp = time_stats["min_timestamp"]
# This infers a constant length freq (in seconds units or days) from data.
# Note that in some cases e.g. monthly or annual data the real frequency can
# be non-constant in length.
# This inferred freq is of `pandas._libs.tslibs.timedeltas.Timedelta` type.
inferred_freq = time_stats["freq_in_timedelta"]
inferred_freq_in_secs = time_stats["freq_in_secs"]
inferred_freq_in_days = time_stats["freq_in_days"]
# However in some use cases user might provide more complex
# freq to `predict_n` functions.
# As an example `freq='W-SUN'` which can be passed by user.
# If such freq is not passed, we can attempt to infer it.
# Note that if there are data with gaps, this freq cannot be inferred.
# E.g. if hourly data only include 9am-5pm.
if freq is None:
freq = pd.infer_freq(df[time_col])
# Calculates forecast horizon (as a number of observations)
if forecast_horizon is None:
# expected to be kept in sync with default value set in ``get_default_time_parameters``
forecast_horizon = get_default_horizon_from_period(
period=inferred_freq_in_secs,
num_observations=num_training_points)
forecast_horizon_in_timedelta = inferred_freq * forecast_horizon
forecast_horizon_in_days = inferred_freq_in_days * forecast_horizon
if extra_pred_cols is None:
extra_pred_cols = [TimeFeaturesEnum.ct1.value] # linear in time
# Makes sure the ``train_test_thresh`` is within the data
last_time_available = max(df[time_col])
if train_test_thresh is not None and train_test_thresh >= last_time_available:
raise ValueError(
f"Input timestamp for the parameter 'train_test_threshold' "
f"({train_test_thresh}) exceeds the maximum available timestamp "
f"of the time series ({last_time_available})."
f"Please pass a value within the range.")
# Sets default origin so that "ct1" feature from `build_time_features_df`
# Starts at 0 on train start date
if origin_for_time_vars is None:
origin_for_time_vars = get_default_origin_for_time_vars(df, time_col)
# Updates `changepoints_dict`, unchanged if not "method" == "auto"
changepoints_dict, changepoint_detector_class = get_changepoints_dict(
df=df,
time_col=time_col,
value_col=value_col,
changepoints_dict=changepoints_dict)
if changepoint_detector_class is None:
# Handles the case that user uses `forecast_simple_silverkite` with automatic
# trend change point detection. In that case, the `changepoints_dict` is already
# transformed to "method" = "custom", thus no changepoint detector is returned
# by `get_changepoints_dict`, so we need the original `ChangepointDetector` class
# to include in the output for plotting purpose.
changepoint_detector_class = changepoint_detector
# Defines trend changepoints.
# `df` contains all dates in the training period, including those
# where `value_col` is np.nan and therefore not used in training
# by `fit_ml_model_with_evaluation`.
# Thus, when changepoint "method" = "uniform", all dates are used to uniformly
# place the changepoints. When changepoint "method" = "auto", only dates without
# missing values are used to place potential changepoints, after resampling
# according to `resample_freq`. Seasonality changepoints are also placed using
# resampled dates after excluding the missing values.
trend_changepoint_dates = get_changepoint_dates_from_changepoints_dict(
changepoints_dict=changepoints_dict,
df=df,
time_col=time_col
)
changepoints = get_changepoint_features_and_values_from_config(
df=df,
time_col=time_col,
changepoints_dict=changepoints_dict,
origin_for_time_vars=origin_for_time_vars)
# Checks the provided `extra_pred_cols`. If it contains a feature involving a changepoint,
# the changepoint must be valid
keep_extra_pred_cols = []
for col in extra_pred_cols:
if CHANGEPOINT_COL_PREFIX in col:
for changepoint_col in changepoints["changepoint_cols"]:
if changepoint_col in col:
keep_extra_pred_cols.append(col)
break
else:
keep_extra_pred_cols.append(col)
if len(keep_extra_pred_cols) < len(extra_pred_cols):
removed_pred_cols = set(extra_pred_cols) - set(keep_extra_pred_cols)
extra_pred_cols = keep_extra_pred_cols
warnings.warn(f"The following features in extra_pred_cols are removed for this"
f" training set: {removed_pred_cols}. This is possible if running backtest"
f" or cross validation, but you are fitting on the entire training set,"
f" double check `extra_pred_cols` and other configuration.")
changepoint_values = changepoints["changepoint_values"]
continuous_time_col = changepoints["continuous_time_col"]
changepoint_cols = changepoints["changepoint_cols"]
growth_func = changepoints["growth_func"]
# Adds fourier series for seasonality
# Initializes fourier series function with None
# and alters if fourier components are input
fs_func = None
fs_cols = []
if fs_components_df is not None:
fs_components_df = fs_components_df[fs_components_df["order"] != 0]
fs_components_df = fs_components_df.reset_index()
if len(fs_components_df.index) > 0:
fs_func = fourier_series_multi_fcn(
col_names=fs_components_df["name"], # looks for corresponding column name in input df
periods=fs_components_df.get("period"),
orders=fs_components_df.get("order"),
seas_names=fs_components_df.get("seas_names")
)
# Determines fourier series column names for use in "build_features"
fs_cols = get_fourier_feature_col_names(
df=df,
time_col=time_col,
fs_func=fs_func,
conti_year_origin=origin_for_time_vars
)
# Removes fs_cols with perfect or almost perfect collinearity for OLS.
# For example, yearly seasonality with order 4 and quarterly seasonality with order 1, and etc.
if fit_algorithm in ["linear", "statsmodels_wls", "statsmodels_gls"]:
# Removes fourier columns with perfect or almost perfect collinearity.
fs_cols = self.__remove_fourier_col_with_collinearity(
fs_cols)
# Also removes these terms from interactions.
extra_pred_cols = self.__remove_fourier_col_with_collinearity_and_interaction(
extra_pred_cols, fs_cols)
# Adds seasonality change point features
seasonality_changepoint_result = None
seasonality_changepoints = None
seasonality_changepoint_cols = []
if seasonality_changepoints_dict is not None:
seasonality_changepoint_result = get_seasonality_changepoints(
df=df,
time_col=time_col,
value_col=value_col,
trend_changepoint_dates=trend_changepoint_dates,
seasonality_changepoints_dict=seasonality_changepoints_dict
)
seasonality_changepoints = seasonality_changepoint_result["seasonality_changepoints"]
seasonality_available = list(set([x.split("_")[-1] for x in fs_cols]))
seasonality_changepoint_cols = get_seasonality_changepoint_df_cols(
df=df,
time_col=time_col,
seasonality_changepoints=seasonality_changepoints,
seasonality_components_df=seasonality_changepoint_result["seasonality_components_df"],
include_original_block=False,
include_components=seasonality_available
)
features_df = self.__build_silverkite_features(
df=df,
time_col=time_col,
origin_for_time_vars=origin_for_time_vars,
daily_event_df_dict=daily_event_df_dict,
daily_event_neighbor_impact=daily_event_neighbor_impact,
daily_event_shifted_effect=daily_event_shifted_effect,
changepoint_values=changepoint_values,
continuous_time_col=continuous_time_col,
growth_func=growth_func,
fs_func=fs_func,
seasonality_changepoint_result=seasonality_changepoint_result,
changepoint_dates=trend_changepoint_dates)
# Adds autoregression columns to feature matrix
autoreg_func = None
lag_col_names = []
agg_lag_col_names = []
min_lag_order = None
max_lag_order = None
if autoreg_dict is not None and isinstance(autoreg_dict, str):
if autoreg_dict.lower() == "auto":
autoreg_info = self.__get_default_autoreg_dict(
freq_in_days=inferred_freq_in_days,
forecast_horizon=forecast_horizon,
simulation_based=simulation_based)
autoreg_dict = autoreg_info["autoreg_dict"]
else:
raise ValueError(f"The method {autoreg_dict} is not implemented.")
has_autoreg_structure = False
if autoreg_dict is not None:
has_autoreg_structure = True
autoreg_components = build_autoreg_df(
value_col=value_col,
**autoreg_dict)
autoreg_func = autoreg_components["build_lags_func"]
lag_col_names = autoreg_components["lag_col_names"]
agg_lag_col_names = autoreg_components["agg_lag_col_names"]
min_lag_order = autoreg_components["min_order"]
max_lag_order = autoreg_components["max_order"]
if autoreg_func is not None:
if past_df is not None:
# Fills in the gaps for imputation.
expected_last_timestamp = df[time_col].min() - to_offset(freq)
if past_df[time_col].iloc[-1] < expected_last_timestamp: # ``past_df`` is already sorted.
# If ``past_df`` and ``df`` have gap in between, adds the last timestamp before ``df``.
# Then the rest will be filled with NA.
log_message(
message="There is gaps between ``past_df`` and ``df``. "
"Filling the missing timestamps.",
level=LoggingLevelEnum.DEBUG
)
last_timestamp_df = pd.DataFrame({
col: [np.nan] if col != time_col else [expected_last_timestamp] for col in past_df.columns
})
past_df = past_df.append(last_timestamp_df).reset_index(drop=True)
past_df = fill_missing_dates(
df=past_df,
time_col=time_col,
freq=freq)[0] # `fill_missing_dates` returns a tuple where the first one is the df.
# Only takes ``past_df`` that are before ``df``.
past_df = past_df[past_df[time_col] <= expected_last_timestamp]
autoreg_df = self.__build_autoreg_features(
df=df,
value_col=value_col,
autoreg_func=autoreg_func,
phase="fit",
past_df=past_df)
features_df = pd.concat([features_df, autoreg_df], axis=1, sort=False)
# Adds lagged regressor columns to feature matrix
lagged_regressor_func = None
lagged_regressor_col_names = []
lagged_regressor_cols = []
min_lagged_regressor_order = None
max_lagged_regressor_order = None
if lagged_regressor_dict is not None:
key_remove = []
for key, value in lagged_regressor_dict.items():
if isinstance(value, str):
if value.lower() != "auto":
raise ValueError(f"The method {value} is not implemented.")
lag_reg_dict_info = self.__get_default_lagged_regressor_dict(
freq_in_days=inferred_freq_in_days,
forecast_horizon=forecast_horizon)
# If "auto" determines that no lag is needed, remove the key
if lag_reg_dict_info["lag_reg_dict"] is None:
key_remove += [key]
else:
lagged_regressor_dict[key] = lag_reg_dict_info["lag_reg_dict"]
for key in key_remove:
lagged_regressor_dict.pop(key, None)
log_message(f"Column {key} has been dropped from `lagged_regressor_dict` and was not "
f"used for lagged regressor as determined by 'auto' option.", LoggingLevelEnum.INFO)
# Converts empty dictionary to None if all keys are removed
if lagged_regressor_dict == {}:
lagged_regressor_dict = None
has_lagged_regressor_structure = False
if lagged_regressor_dict is not None:
has_lagged_regressor_structure = True
lagged_regressor_components = build_autoreg_df_multi(value_lag_info_dict=lagged_regressor_dict)
lagged_regressor_func = lagged_regressor_components["autoreg_func"]
lagged_regressor_col_names = lagged_regressor_components["autoreg_col_names"]
lagged_regressor_cols = lagged_regressor_components["autoreg_orig_col_names"]
min_lagged_regressor_order = lagged_regressor_components["min_order"]
max_lagged_regressor_order = lagged_regressor_components["max_order"]
lagged_regressor_df = self.__build_lagged_regressor_features(
df=df,
lagged_regressor_cols=lagged_regressor_cols,
lagged_regressor_func=lagged_regressor_func,
phase="fit",
past_df=None)
features_df = pd.concat([features_df, lagged_regressor_df], axis=1, sort=False)
features_df[value_col] = df[value_col].values
# prediction cols
# (Includes growth, interactions, if specified in extra_pred_cols)
pred_cols = extra_pred_cols + fs_cols
if changepoint_cols is not None:
pred_cols = pred_cols + changepoint_cols
if seasonality_changepoint_cols:
pred_cols = pred_cols + seasonality_changepoint_cols
if lag_col_names is not None:
pred_cols = pred_cols + lag_col_names
if agg_lag_col_names is not None:
pred_cols = pred_cols + agg_lag_col_names
if lagged_regressor_col_names is not None:
pred_cols = pred_cols + lagged_regressor_col_names
pred_cols = unique_elements_in_list(pred_cols)
# Drops un-desired predictors
if drop_pred_cols is not None:
pred_cols = [col for col in pred_cols if col not in drop_pred_cols]
# Only uses predictors appearing in ``explicit_pred_cols``
if explicit_pred_cols is not None:
pred_cols = explicit_pred_cols
# Makes sure we don't have an empty regressor string, which will cause patsy formula error.
if not pred_cols:
pred_cols = ["1"]
explan_str = "+".join(pred_cols)
model_formula_str = value_col + "~" + explan_str
ind_train = None
ind_test = None
if train_test_thresh is not None:
ind_train = np.where(df[time_col] < train_test_thresh)[0].tolist()
ind_test = np.where(df[time_col] >= train_test_thresh)[0].tolist()
trained_model = fit_ml_model_with_evaluation(
df=features_df,
model_formula_str=model_formula_str,
fit_algorithm=fit_algorithm,
fit_algorithm_params=fit_algorithm_params,
ind_train=ind_train,
ind_test=ind_test,
training_fraction=training_fraction,
randomize_training=False,
min_admissible_value=min_admissible_value,
max_admissible_value=max_admissible_value,
uncertainty_dict=uncertainty_dict,
normalize_method=normalize_method,
regression_weight_col=regression_weight_col,
remove_intercept=remove_intercept)
# Normalizes the changepoint_values
normalized_changepoint_values = self.__normalize_changepoint_values(
changepoint_values=changepoint_values,
pred_cols=pred_cols,
continuous_time_col=continuous_time_col,
normalize_df_func=trained_model["normalize_df_func"]
)
# Excludes points with NA that are not used in fitting, similar to "y" and "x_mat".
trained_model["df_dropna"] = df.loc[trained_model["y"].index]
# Includes points with NA
trained_model["df"] = df
trained_model["num_training_points"] = num_training_points
trained_model["features_df"] = features_df
trained_model["min_timestamp"] = min_timestamp
trained_model["max_timestamp"] = max_timestamp
trained_model["freq"] = freq
trained_model["inferred_freq"] = inferred_freq
trained_model["inferred_freq_in_secs"] = inferred_freq_in_secs
trained_model["inferred_freq_in_days"] = inferred_freq_in_days
trained_model["time_col"] = time_col
trained_model["value_col"] = value_col
trained_model["origin_for_time_vars"] = origin_for_time_vars
trained_model["fs_components_df"] = fs_components_df
trained_model["autoreg_dict"] = autoreg_dict
trained_model["lagged_regressor_dict"] = lagged_regressor_dict
trained_model["lagged_regressor_cols"] = lagged_regressor_cols
trained_model["normalize_method"] = normalize_method
trained_model["daily_event_df_dict"] = daily_event_df_dict
trained_model["daily_event_neighbor_impact"] = daily_event_neighbor_impact
trained_model["daily_event_shifted_effect"] = daily_event_shifted_effect
trained_model["changepoints_dict"] = changepoints_dict
trained_model["changepoint_values"] = changepoint_values
trained_model["normalized_changepoint_values"] = normalized_changepoint_values
trained_model["continuous_time_col"] = continuous_time_col
trained_model["growth_func"] = growth_func
trained_model["fs_func"] = fs_func
trained_model["has_autoreg_structure"] = has_autoreg_structure
trained_model["autoreg_func"] = autoreg_func
# ``past_df`` has been manipulated to have all timestamps (could be with NA) and immediately
# precedes ``df``. If ``past_df`` is not None, the stored ``past_df`` will be the concatenation of
# ``past_df`` and ``df``. Otherwise it will be ``df``.
trained_model["train_df"] = pd.concat([past_df, df], axis=0).reset_index(drop=True)
trained_model["min_lag_order"] = min_lag_order
trained_model["max_lag_order"] = max_lag_order
trained_model["has_lagged_regressor_structure"] = has_lagged_regressor_structure
trained_model["lagged_regressor_func"] = lagged_regressor_func
trained_model["min_lagged_regressor_order"] = min_lagged_regressor_order
trained_model["max_lagged_regressor_order"] = max_lagged_regressor_order
trained_model["uncertainty_dict"] = uncertainty_dict
trained_model["pred_cols"] = pred_cols # predictor column names
trained_model["last_date_for_fit"] = max(df[time_col])
trained_model["trend_changepoint_dates"] = trend_changepoint_dates
trained_model["changepoint_detector"] = changepoint_detector_class # the ChangepointDetector class with detection results
trained_model["seasonality_changepoint_dates"] = seasonality_changepoints
trained_model["seasonality_changepoint_result"] = seasonality_changepoint_result
trained_model["fit_algorithm"] = fit_algorithm
trained_model["fit_algorithm_params"] = fit_algorithm_params
trained_model["adjust_anomalous_info"] = adjust_anomalous_info
trained_model["impute_info"] = impute_info
trained_model["forecast_horizon"] = forecast_horizon
trained_model["forecast_horizon_in_days"] = forecast_horizon_in_days
trained_model["forecast_horizon_in_timedelta"] = forecast_horizon_in_timedelta
trained_model["simulation_based"] = simulation_based
trained_model["simulation_num"] = simulation_num
trained_model["fast_simulation"] = fast_simulation
return trained_model
def predict_no_sim(
        self,
        fut_df,
        trained_model,
        past_df=None,
        new_external_regressor_df=None,
        time_features_ready=False,
        regressors_ready=False):
    """Predicts values for the timestamps in ``fut_df`` without simulation.

    If ``extra_pred_cols`` refers to a column in the training ``df``,
    either ``fut_df`` or ``new_external_regressor_df`` must contain the
    regressors and the columns needed for lagged regressors.

    Parameters
    ----------
    fut_df : `pandas.DataFrame`
        The data frame which includes the timestamps for prediction
        and any regressors.
    trained_model : `dict`
        A fitted silverkite model which is the output of ``self.forecast``.
    past_df : `pandas.DataFrame` or None, default None
        A data frame with past values. Required when the model was fit
        with autoregression (``autoreg_dict``) or lagged regressors
        (``lagged_regressor_dict``); the lag features are computed from it.
    new_external_regressor_df : `pandas.DataFrame` or None, default None
        Contains the regressors not already included in ``fut_df``.
    time_features_ready : `bool`, default False
        Whether time features are already given in ``fut_df``.
    regressors_ready : `bool`, default False
        Whether regressors are already added to the data (``fut_df``).

    Returns
    -------
    result : `dict`
        A dictionary with the following items:

        - "fut_df" : `pandas.DataFrame`
            The input dataframe with an added column for the response.
            If ``value_col`` already appears in ``fut_df``, it is
            over-written. If ``uncertainty_dict`` was provided at fit
            time, it also contains a ``QUANTILE_SUMMARY_COL`` column.
        - "x_mat" : `pandas.DataFrame`
            Design matrix of the predictive machine-learning model.
        - "features_df" : `pandas.DataFrame`
            The features dataframe used for prediction.

    Raises
    ------
    ValueError
        If the model uses autoregression or lagged regressors but
        ``past_df`` is None.
    """
    fut_df = fut_df.copy()
    # Pulls the column names and lag-order bounds recorded at fit time.
    time_col = trained_model["time_col"]
    value_col = trained_model["value_col"]
    max_lag_order = trained_model["max_lag_order"]
    max_lagged_regressor_order = trained_model["max_lagged_regressor_order"]
    min_lagged_regressor_order = trained_model["min_lagged_regressor_order"]
    lagged_regressor_cols = trained_model["lagged_regressor_cols"]
    # Warns when ``past_df`` is missing or too short to compute every
    # autoregression lag; the missing lag values get interpolated downstream.
    if max_lag_order is not None and (past_df is None or past_df.shape[0] < max_lag_order):
        warnings.warn(
            "The autoregression lags data had to be interpolated at predict time."
            "`past_df` was either not passed to `predict_silverkite` "
            "or it was not long enough to calculate all necessery lags "
            f"which is equal to `max_lag_order`={max_lag_order}")
    # Same warning for lagged regressors.
    if max_lagged_regressor_order is not None and (
            past_df is None or past_df.shape[0] < max_lagged_regressor_order):
        warnings.warn(
            "The lagged regressor data had to be interpolated at predict time."
            "`past_df` was either not passed to `predict_silverkite` "
            "or it was not long enough to calculate all necessery lags "
            f"which is equal to `max_lagged_regressor_order`={max_lagged_regressor_order}")
    # This is the overall maximum lag order across autoregression and
    # lagged regressors (None when neither is used).
    max_order = None
    if max_lag_order is not None and max_lagged_regressor_order is not None:
        max_order = np.max([
            max_lag_order,
            max_lagged_regressor_order])
    elif max_lag_order is not None:
        max_order = max_lag_order
    elif max_lagged_regressor_order is not None:
        max_order = max_lagged_regressor_order
    # We only keep the trailing rows of ``past_df`` that are needed
    # to compute the deepest lag.
    if past_df is not None and max_order is not None and len(past_df) > max_order:
        past_df = past_df.tail(max_order).reset_index(drop=True)
    # Adds extra regressors if provided (column-wise concat assumes the
    # two frames are row-aligned after the index reset).
    if new_external_regressor_df is None or regressors_ready:
        features_df_fut = fut_df
    else:
        new_external_regressor_df = new_external_regressor_df.reset_index(
            drop=True)
        features_df_fut = pd.concat(
            [fut_df, new_external_regressor_df],
            axis=1,
            sort=False)
    # Adds the time-based features (seasonality, events, changepoints, ...)
    # unless the caller has already built them.
    if time_features_ready is not True:
        features_df_fut = self.__build_silverkite_features(
            df=features_df_fut,
            time_col=trained_model["time_col"],
            origin_for_time_vars=trained_model["origin_for_time_vars"],
            daily_event_df_dict=trained_model["daily_event_df_dict"],
            daily_event_neighbor_impact=trained_model["daily_event_neighbor_impact"],
            daily_event_shifted_effect=trained_model["daily_event_shifted_effect"],
            changepoint_values=trained_model["changepoint_values"],
            continuous_time_col=trained_model["continuous_time_col"],
            growth_func=trained_model["growth_func"],
            fs_func=trained_model["fs_func"],
            seasonality_changepoint_result=trained_model["seasonality_changepoint_result"],
            changepoint_dates=trained_model["trend_changepoint_dates"])
    # Adds autoregression columns to the future feature matrix.
    if trained_model["autoreg_func"] is not None:
        if past_df is None:
            raise ValueError(
                "Autoregression was used but no past_df was passed to "
                "`predict_no_sim`")
        else:
            # If the timestamps in ``fut_df`` are all at or before
            # ``train_end_timestamp``, the phase is "fit" (calculating fitted
            # values). In that case any values present in ``fut_df`` can be
            # used, since the information is known by ``train_end_timestamp``.
            train_end_timestamp = trained_model["max_timestamp"]
            fut_df_max_timestamp = pd.to_datetime(fut_df[time_col]).max()
            phase = "predict" if train_end_timestamp < fut_df_max_timestamp else "fit"
            if phase == "predict":
                # If phase is predict, we do not allow using ``value_col``:
                # the response is replaced with NaN so only the lags are used.
                # The AR lags should be enough since otherwise one would use ``predict_via_sim``.
                df = pd.DataFrame({value_col: [np.nan] * fut_df.shape[0]})
                df.index = fut_df.index
            else:
                # If phase is fit, we keep the values in ``value_col``.
                df = fut_df[[value_col]].copy()
            autoreg_df = self.__build_autoreg_features(
                df=df,
                value_col=trained_model["value_col"],
                autoreg_func=trained_model["autoreg_func"],
                phase=phase,
                past_df=past_df[[value_col]])
            features_df_fut = pd.concat(
                [features_df_fut, autoreg_df],
                axis=1,
                sort=False)
    # Adds lagged regressor columns to the future feature matrix.
    if trained_model["lagged_regressor_func"] is not None:
        if past_df is None:
            raise ValueError(
                "Lagged regressor(s) were used but no past_df was passed to "
                "`predict_no_sim`")
        else:
            # `build_lagged_regressor_features` requires both ``df`` and ``past_df``
            # to contain the columns needed for lagged regressors.
            # Case 1: ``min_lagged_regressor_order`` >= ``fut_df.shape[0]``.
            # In this case we do not need ``fut_df`` to contain any values for
            # those columns, but we do need to make sure these columns are
            # included (as NaN) as required by `build_lagged_regressor_features`.
            if min_lagged_regressor_order >= fut_df.shape[0]:
                for col in lagged_regressor_cols:
                    if col not in fut_df.columns:
                        features_df_fut[col] = np.nan
            # Case 2: ``min_lagged_regressor_order`` < ``fut_df.shape[0]``.
            # In this case ``fut_df`` has to contain those columns, and an error
            # will be raised when `build_lagged_regressor_features` is called.
            lagged_regressor_df = self.__build_lagged_regressor_features(
                df=features_df_fut.copy(),
                lagged_regressor_cols=trained_model["lagged_regressor_cols"],
                lagged_regressor_func=trained_model["lagged_regressor_func"],
                phase="predict",
                past_df=past_df[trained_model["lagged_regressor_cols"]])
            features_df_fut = pd.concat(
                [features_df_fut, lagged_regressor_df],
                axis=1,
                sort=False)
    if value_col in features_df_fut.columns:
        # This is to remove a duplicate ``value_col`` generated by building
        # features. The duplicates happen during calculating extended fitted
        # values when we intentionally include ``value_col``.
        del features_df_fut[value_col]
    # Placeholder response column; its values are over-written by predictions.
    features_df_fut[value_col] = 0.0
    if trained_model["uncertainty_dict"] is None:
        # Predictions are stored to ``value_col``.
        pred_res = predict_ml(
            fut_df=features_df_fut,
            trained_model=trained_model)
        fut_df = pred_res["fut_df"]
        x_mat = pred_res["x_mat"]
    else:
        # Predictions are stored to ``value_col``;
        # quantiles are stored to ``QUANTILE_SUMMARY_COL``.
        pred_res = predict_ml_with_uncertainty(
            fut_df=features_df_fut,
            trained_model=trained_model)
        fut_df = pred_res["fut_df"]
        x_mat = pred_res["x_mat"]
    # Makes sure to return only the necessary columns (keeps whichever of
    # these actually exist in the prediction output).
    potential_forecast_cols = [time_col, value_col, QUANTILE_SUMMARY_COL, ERR_STD_COL]
    existing_forecast_cols = [col for col in potential_forecast_cols if col in fut_df.columns]
    fut_df = fut_df[existing_forecast_cols]
    return {
        "fut_df": fut_df,
        "x_mat": x_mat,
        "features_df": features_df_fut}
def predict_n_no_sim(
        self,
        fut_time_num,
        trained_model,
        freq,
        new_external_regressor_df=None,
        time_features_ready=False,
        regressors_ready=False):
    """Forecasts the next ``fut_time_num`` periods without simulation.

    Builds the future timestamp grid immediately following the training
    end date and delegates to ``self.predict_no_sim``, using the training
    data stored in ``trained_model`` as the past observations.
    Extra regressors (``extra_pred_cols``) originally in ``df`` can be
    supplied via ``new_external_regressor_df``.

    Parameters
    ----------
    fut_time_num : `int`
        Number of future values needed.
    trained_model : `dict`
        A fitted silverkite model which is the output of ``self.forecast``.
    freq : `str`
        Frequency of future predictions.
        Accepts any valid frequency for ``pd.date_range``.
    new_external_regressor_df : `pandas.DataFrame` or None
        Contains the extra regressors if specified.
    time_features_ready : `bool`
        Whether time features are already present in the data.
    regressors_ready : `bool`
        Whether regressors are already added to the data (``fut_df``).

    Returns
    -------
    result : `dict`
        A dictionary with following items

        - "fut_df": `pandas.DataFrame`
            The same as input dataframe with an added column for the response.
            If value_col already appears in ``fut_df``, it will be over-written.
            If ``uncertainty_dict`` is provided as input,
            it will also contain a ``QUANTILE_SUMMARY_COL`` column.
        - "x_mat": `pandas.DataFrame`
            Design matrix of the predictive machine-learning model.
    """
    last_fit_date = trained_model["last_date_for_fit"]
    # Generate one extra period starting at the training end date,
    # then keep only the strictly-future timestamps.
    future_dates = pd.date_range(
        start=last_fit_date,
        periods=fut_time_num + 1,
        freq=freq)
    future_dates = future_dates[future_dates > last_fit_date]
    fut_df = pd.DataFrame({trained_model["time_col"]: future_dates.tolist()})
    return self.predict_no_sim(
        fut_df=fut_df,
        trained_model=trained_model,
        past_df=trained_model["df"].copy(),  # observed data used for training the model
        new_external_regressor_df=new_external_regressor_df,
        time_features_ready=time_features_ready,
        regressors_ready=regressors_ready)
def simulate(
        self,
        fut_df,
        trained_model,
        past_df=None,
        new_external_regressor_df=None,
        include_err=True,
        time_features_ready=False,
        regressors_ready=False):
    """A function to simulate one future series, one timestamp at a time.

    Each future value is predicted sequentially: the prediction for row ``i``
    is appended to the running past data so that autoregressive terms for
    row ``i + 1`` can be computed from it.
    If the fitted model supports uncertainty e.g. via ``uncertainty_dict``,
    errors are incorporated into the simulations.

    Parameters
    ----------
    fut_df : `pandas.DataFrame`
        The data frame which includes the timestamps
        for prediction and any regressors.
    trained_model : `dict`
        A fitted silverkite model which is the output of ``self.forecast``.
    past_df : `pandas.DataFrame`, optional
        A data frame with past values if autoregressive methods are called
        via ``autoreg_dict`` parameter of ``greykite.algo.forecast.silverkite.SilverkiteForecast.py``
    new_external_regressor_df : `pandas.DataFrame`, optional
        Contains the regressors not already included in ``fut_df``.
    include_err : `bool`
        Boolean to determine if errors are to be incorporated in the simulations.
        Requires the model to produce ``ERR_STD_COL``; otherwise raises.
    time_features_ready : `bool`
        Boolean to denote if time features are already given in df or not.
    regressors_ready : `bool`
        Boolean to denote if regressors are already added to data (``fut_df``).

    Returns
    -------
    result : `dict`
        A dictionary with following items

        - "sim_df": `pandas.DataFrame`
            A dataframe with two columns: the time column
            (``trained_model["time_col"]``) and the simulated response in
            the ``value_col`` column, one row per row of ``fut_df``.
        - "x_mat": `pandas.DataFrame`
            Design matrix of the predictive machine-learning model,
            stacked across the per-row predictions.
        - "features_df": `pandas.DataFrame`
            The features dataframe used for prediction,
            stacked across the per-row predictions.

    Raises
    ------
    ValueError
        If ``include_err`` is True but the model prediction does not
        contain the error std column (``ERR_STD_COL``).
    """
    n = len(fut_df)
    # Work on a copy of ``past_df`` since it is appended to in the loop below.
    past_df_sim = None if past_df is None else past_df.copy()
    fut_df = fut_df.reset_index(drop=True)
    fut_df_sim = fut_df.copy()
    time_col = trained_model["time_col"]
    value_col = trained_model["value_col"]
    # Pre-allocates the response column as float so that ``.at`` assignments
    # below do not change the column dtype.
    fut_df_sim[value_col] = np.nan
    fut_df_sim = fut_df_sim.astype({value_col: "float64"})
    max_lag_order = trained_model["max_lag_order"]
    max_lagged_regressor_order = trained_model["max_lagged_regressor_order"]
    # overall maximum lag order, across autoregression and lagged regressors
    max_order = None
    if max_lag_order is not None and max_lagged_regressor_order is not None:
        max_order = np.max([
            max_lag_order,
            max_lagged_regressor_order])
    elif max_lag_order is not None:
        max_order = max_lag_order
    elif max_lagged_regressor_order is not None:
        max_order = max_lagged_regressor_order
    # Only need to keep the last relevant rows to calculate AR terms
    if past_df_sim is not None and max_order is not None and len(past_df_sim) > max_order:
        past_df_sim = past_df_sim.tail(max_order)
    # adds the other features (time-based features, events, changepoints, ...)
    if time_features_ready is not True:
        fut_df = self.__build_silverkite_features(
            df=fut_df,
            time_col=time_col,
            origin_for_time_vars=trained_model["origin_for_time_vars"],
            daily_event_df_dict=trained_model["daily_event_df_dict"],
            daily_event_neighbor_impact=trained_model["daily_event_neighbor_impact"],
            daily_event_shifted_effect=trained_model["daily_event_shifted_effect"],
            changepoint_values=trained_model["changepoint_values"],
            continuous_time_col=trained_model["continuous_time_col"],
            growth_func=trained_model["growth_func"],
            fs_func=trained_model["fs_func"],
            seasonality_changepoint_result=trained_model["seasonality_changepoint_result"],
            changepoint_dates=trained_model["trend_changepoint_dates"])
    if new_external_regressor_df is not None and not regressors_ready:
        # Column-wise join of regressors; indexes are reset so rows align
        # positionally with ``fut_df``.
        new_external_regressor_df = new_external_regressor_df.reset_index(
            drop=True)
        fut_df = pd.concat(
            [fut_df, new_external_regressor_df],
            axis=1,
            sort=False)
    x_mat_list = []
    features_df_list = []
    # Sequential, one-row-at-a-time prediction: each simulated value is fed
    # back into ``past_df_sim`` so the next step's AR terms can use it.
    for i in range(n):
        fut_df_sub = fut_df.iloc[[i]].reset_index(drop=True)
        assert len(fut_df_sub) == 1, "the subset dataframe must have only one row"
        pred_res = self.predict_no_sim(
            fut_df=fut_df_sub,
            trained_model=trained_model,
            past_df=past_df_sim,
            new_external_regressor_df=None,
            time_features_ready=True,
            regressors_ready=True)
        fut_df_sub = pred_res["fut_df"]
        # we expect the returned prediction will have only one row
        assert len(fut_df_sub) == 1
        x_mat = pred_res["x_mat"]
        features_df = pred_res["features_df"]
        x_mat_list.append(x_mat)
        features_df_list.append(features_df)
        fut_df_sim.at[i, value_col] = fut_df_sub[value_col].values[0]
        if include_err:
            if ERR_STD_COL in list(fut_df_sub.columns):
                # Adds a random Gaussian error scaled by the model's
                # estimated error std for this timestamp.
                scale = fut_df_sub[ERR_STD_COL].values[0]
                err = np.random.normal(
                    loc=0.0,
                    scale=scale)
                fut_df_sim.at[i, value_col] = (
                    fut_df_sub[value_col].values[0]
                    + err)
            else:
                raise ValueError(
                    "Error is requested via ``include_err = True``. "
                    f"However the std column ({ERR_STD_COL}) "
                    "does not appear in the prediction")
        # Here after assigning values for future forecast, we clip the values based on ``min_admissible_value`` and ``max_admissible_value``
        # saved in ``trained_model``. The clip should only function when error terms are added, as ``predict_no_sim`` ensures the predicted
        # values (before errors are added) are bounded.
        min_admissible_value = trained_model["min_admissible_value"]
        max_admissible_value = trained_model["max_admissible_value"]
        if min_admissible_value is not None or max_admissible_value is not None:
            fut_df_sim.at[i, value_col] = np.clip(
                a=fut_df_sim.at[i, value_col],
                a_min=min_admissible_value,
                a_max=max_admissible_value)
        # we get the last prediction value and concat that to the end of
        # ``past_df``
        past_df_increment = fut_df_sim.iloc[[i]].reset_index(drop=True)[[value_col]]
        assert len(past_df_increment) == 1
        if past_df_sim is None:
            past_df_sim = past_df_increment
        else:
            past_df_sim = pd.concat(
                [past_df_sim, past_df_increment],
                axis=0,
                sort=False)
        # Only need to keep the last relevant rows to calculate AR terms
        if past_df_sim is not None and max_order is not None and len(past_df_sim) > max_order:
            past_df_sim = past_df_sim.tail(max_order)
        past_df_sim = past_df_sim.reset_index(drop=True)
    x_mat = pd.concat(
        x_mat_list,
        axis=0,
        ignore_index=True,  # The index does not matter as we simply want to stack up the data
        sort=False)
    assert len(x_mat) == len(fut_df), "The design matrix size (number of rows) used in simulation must have same size as the input"
    features_df = pd.concat(
        features_df_list,
        axis=0,
        ignore_index=True,  # The index does not matter as we simply want to stack up the data
        sort=False)
    assert len(features_df) == len(fut_df), "The features data size (number of rows) used in simulation must have same size as the input"
    return {
        "sim_df": fut_df_sim[[time_col, value_col]],
        "x_mat": x_mat,
        "features_df": features_df}
def simulate_multi(
        self,
        fut_df,
        trained_model,
        simulation_num=10,
        past_df=None,
        new_external_regressor_df=None,
        include_err=None):
    """A function to simulate multiple future series.

    Runs ``self.simulate`` ``simulation_num`` times and stacks the results.
    If the fitted model supports uncertainty e.g. via ``uncertainty_dict``,
    errors are incorporated into the simulations.

    Parameters
    ----------
    fut_df : `pandas.DataFrame`
        The data frame which includes the timestamps
        for prediction and any regressors.
    trained_model : `dict`
        A fitted silverkite model which is the output of ``self.forecast``.
    simulation_num : `int`
        The number of simulated series,
        (each of which have the same number of rows as ``fut_df``)
        to be stacked up row-wise. This number must be larger than zero.
    past_df : `pandas.DataFrame`, optional
        A data frame with past values if autoregressive methods are called
        via ``autoreg_dict`` parameter of ``greykite.algo.forecast.silverkite.SilverkiteForecast.py``.
    new_external_regressor_df : `pandas.DataFrame`, optional
        Contains the regressors not already included in ``fut_df``.
    include_err : `bool`, optional, default None
        Boolean to determine if errors are to be incorporated in the simulations.
        If None, it will be set to True if uncertainty is passed to the model and
        otherwise will be set to False.

    Returns
    -------
    result : `dict`
        A dictionary with following items

        - "sim_df" : `pandas.DataFrame`
            Row-wise concatenation of dataframes each being the same as
            input dataframe (``fut_df``) with an added column for the response
            and a new column: "sim_label" to differentiate various simulations.
            The row number of the returned dataframe is:
            ``simulation_num`` times the row number of ``fut_df``.
            If ``value_col`` already appears in ``fut_df``, it will be over-written.
        - "x_mat": `pandas.DataFrame`
            ``simulation_num`` copies of design matrix of the predictive machine-learning model
            concatenated. An extra index column ("original_row_index") is also added
            for aggregation when needed.
            Note that the all copies will be the same except for the case where
            auto-regression is utilized.

    Raises
    ------
    ValueError
        If ``include_err`` is True while the model has no uncertainty.
    """
    assert simulation_num > 0, "simulation number has to be a natural number."
    if include_err is None:
        include_err = trained_model["uncertainty_dict"] is not None
    if trained_model["uncertainty_dict"] is None and include_err:
        raise ValueError(
            "`include_err=True` was passed. "
            "However model does not support uncertainty. "
            "To support uncertainty pass `uncertainty_dict` to the model.")
    value_col = trained_model["value_col"]
    fut_df = fut_df.reset_index(drop=True)  # reset_index returns a copy
    # Builds the (deterministic) features once, shared by all simulations.
    fut_df = self.__build_silverkite_features(
        df=fut_df,
        time_col=trained_model["time_col"],
        origin_for_time_vars=trained_model["origin_for_time_vars"],
        daily_event_df_dict=trained_model["daily_event_df_dict"],
        daily_event_neighbor_impact=trained_model["daily_event_neighbor_impact"],
        daily_event_shifted_effect=trained_model["daily_event_shifted_effect"],
        changepoint_values=trained_model["changepoint_values"],
        continuous_time_col=trained_model["continuous_time_col"],
        growth_func=trained_model["growth_func"],
        fs_func=trained_model["fs_func"],
        seasonality_changepoint_result=trained_model["seasonality_changepoint_result"],
        changepoint_dates=trained_model["trend_changepoint_dates"])
    if new_external_regressor_df is not None:
        new_external_regressor_df = new_external_regressor_df.reset_index(
            drop=True)
        fut_df = pd.concat(
            [fut_df, new_external_regressor_df],
            axis=1)

    def one_sim_func(label):
        """Creates one simulation and labels it with ``label`` in an added
        column : "sim_label"
        """
        sim_res = self.simulate(
            fut_df=fut_df,
            trained_model=trained_model,
            past_df=past_df,
            new_external_regressor_df=None,
            include_err=include_err,
            time_features_ready=True,
            regressors_ready=True)
        sim_df = sim_res["sim_df"]
        x_mat = sim_res["x_mat"]
        sim_df["sim_label"] = label
        # ``x_mat`` does not necessarily have an index column.
        # We keep track of the original index, to be able to aggregate
        # across simulations later.
        x_mat["original_row_index"] = range(len(fut_df))
        return {
            "sim_df": sim_df,
            "x_mat": x_mat}

    sim_res_list = [one_sim_func(i) for i in range(simulation_num)]
    sim_df_list = [sim_res_list[i]["sim_df"] for i in range(simulation_num)]
    x_mat_list = [sim_res_list[i]["x_mat"] for i in range(simulation_num)]
    sim_df = pd.concat(
        sim_df_list,
        axis=0,
        ignore_index=True,  # The index does not matter as we simply want to stack up the data
        sort=False)
    # Ensures a consistent float dtype for the simulated response.
    # (Was duplicated after the ``x_mat`` concatenation; once is enough.)
    sim_df[value_col] = sim_df[value_col].astype(float)
    x_mat = pd.concat(
        x_mat_list,
        axis=0,
        ignore_index=True,  # The index does not matter as we simply want to stack up the data
        sort=False)
    assert len(sim_df) == len(fut_df) * simulation_num
    assert len(x_mat) == len(fut_df) * simulation_num
    return {
        "sim_df": sim_df,
        "x_mat": x_mat}
def predict_via_sim(
        self,
        fut_df,
        trained_model,
        past_df=None,
        new_external_regressor_df=None,
        simulation_num=10,
        include_err=None):
    """Performs predictions and calculates uncertainty using
    multiple simulations.

    The predictions and intervals are the per-timestamp mean, quantiles and
    std of the simulated series produced by ``self.simulate_multi``.

    Parameters
    ----------
    fut_df : `pandas.DataFrame`
        The data frame which includes the timestamps for prediction
        and possibly regressors.
    trained_model : `dict`
        A fitted silverkite model which is the output of ``self.forecast``
    past_df : `pandas.DataFrame`, optional
        A data frame with past values if autoregressive methods are called
        via autoreg_dict parameter of ``greykite.algo.forecast.silverkite.SilverkiteForecast.py``
    new_external_regressor_df : `pandas.DataFrame`, optional
        Contains the regressors not already included in ``fut_df``.
    simulation_num : `int`, optional, default 10
        The number of simulated series to be used in prediction.
    include_err : `bool`, optional, default None
        Boolean to determine if errors are to be incorporated in the simulations.
        If None, it will be set to True if uncertainty is passed to the model and
        otherwise will be set to False

    Returns
    -------
    result : `dict`
        A dictionary with following items

        - "fut_df": `pandas.DataFrame`
            The same as input dataframe with an added column for the response.
            If value_col already appears in ``fut_df``, it will be over-written.
            If ``uncertainty_dict`` is provided as input,
            it will also contain a ``QUANTILE_SUMMARY_COL`` column.
            Here are the expected columns:

            (1) A time column with the column name being ``trained_model["time_col"]``
            (2) The predicted response in ``value_col`` column.
            (3) Quantile summary response in ``QUANTILE_SUMMARY_COL`` column.
                This column only appears if the model includes uncertainty.
            (4) Error std in `ERR_STD_COL` column.
                This column only appears if the model includes uncertainty.
        - "x_mat": `pandas.DataFrame`
            Design matrix of the predictive machine-learning model,
            averaged across simulations row by row.
        - "sim_res": `dict`
            The raw output of ``self.simulate_multi``.
    """
    fut_df = fut_df.copy()
    if include_err is None:
        include_err = trained_model["uncertainty_dict"] is not None
    if trained_model["uncertainty_dict"] is None and include_err:
        raise ValueError(
            "`include_err=True` was passed. "
            "However model does not support uncertainty. "
            "To support uncertainty pass `uncertainty_dict` to the model.")
    sim_res = self.simulate_multi(
        fut_df=fut_df,
        trained_model=trained_model,
        simulation_num=simulation_num,
        past_df=past_df,
        new_external_regressor_df=new_external_regressor_df,
        include_err=include_err)
    sim_df = sim_res["sim_df"]
    x_mat = sim_res["x_mat"]
    # ``uncertainty_dict`` may be None (no ``.get``) or lack "params";
    # both cases fall back to the default quantiles.
    try:
        quantiles = trained_model["uncertainty_dict"].get(
            "params").get("quantiles")
    except AttributeError:
        quantiles = None
    if quantiles is None:
        # Default to a central 95% interval when quantiles are unspecified.
        quantiles = [0.025, 0.975]

    def quantile_summary(x):
        """Returns the requested quantiles of ``x`` as a tuple."""
        return tuple(np.quantile(a=x, q=quantiles))

    value_col = trained_model["value_col"]
    time_col = trained_model["time_col"]
    agg_dict = {value_col: ["mean", quantile_summary, "std"]}
    agg_df = sim_df.groupby([time_col], as_index=False).agg(agg_dict)
    # The aggregation produces a two-level column index;
    # assign the final flat column names directly.
    agg_df.columns = [
        time_col,
        value_col,
        QUANTILE_SUMMARY_COL,
        ERR_STD_COL]
    # When there is no uncertainty dict, the uncertainty columns are NA.
    # In this case, we only keep the other two columns.
    if trained_model["uncertainty_dict"] is None:
        agg_df = agg_df[[time_col, value_col]]
    # Averages the design matrix across simulations, row by row.
    # ``.mean()`` is used instead of ``.agg(np.mean)``, which is deprecated
    # in recent pandas versions.
    x_mat = x_mat.groupby(
        ["original_row_index"], as_index=False).mean()
    del x_mat["original_row_index"]
    # Checks to see if ``x_mat`` has the right number of rows
    assert len(x_mat) == len(agg_df)
    # Checks to see if predict ``x_mat`` has the same columns as fitted ``x_mat``
    assert list(trained_model["x_mat"].columns) == list(x_mat.columns)
    return {
        "fut_df": agg_df,
        "x_mat": x_mat,
        "sim_res": sim_res}
def predict_via_sim_fast(
        self,
        fut_df,
        trained_model,
        past_df=None,
        new_external_regressor_df=None):
    """Performs predictions and calculates uncertainty from a single
    simulation, adding the error separately via the volatility model
    rather than relying on multiple simulations.

    Prediction intervals far into the future will be narrower than those
    from ``predict_via_sim`` and therefore less accurate, but this method
    offers a major speed gain which may matter in some use cases.

    Parameters
    ----------
    fut_df : `pandas.DataFrame`
        The data frame which includes the timestamps for prediction
        and possibly regressors.
    trained_model : `dict`
        A fitted silverkite model which is the output of ``self.forecast``.
    past_df : `pandas.DataFrame` or None, default None
        A data frame with past values if autoregressive methods are called
        via ``autoreg_dict`` parameter of ``greykite.algo.forecast.silverkite.SilverkiteForecast.py``
    new_external_regressor_df : `pandas.DataFrame` or None, default None
        Contains the regressors not already included in ``fut_df``.

    Returns
    -------
    result : `dict`
        A dictionary with following items

        - "fut_df": `pandas.DataFrame`
            The same as input dataframe with an added column for the response.
            If value_col already appears in ``fut_df``, it will be over-written.
            If ``uncertainty_dict`` is provided as input,
            it will also contain a ``QUANTILE_SUMMARY_COL`` column.
            Here are the expected columns:

            (1) A time column with the column name being ``trained_model["time_col"]``
            (2) The predicted response in ``value_col`` column.
            (3) Quantile summary response in ``QUANTILE_SUMMARY_COL`` column.
                This column only appears if the model includes uncertainty.
            (4) Error std in `ERR_STD_COL` column.
                This column only appears if the model includes uncertainty.
        - "x_mat": `pandas.DataFrame`
            Design matrix of the predictive machine-learning model
        - "features_df": `pandas.DataFrame`
            The features dataframe used for prediction.
    """
    fut_df = fut_df.copy()
    time_col = trained_model["time_col"]
    value_col = trained_model["value_col"]
    # A single error-free simulation supplies the features (including AR
    # terms computed from the simulated path).
    sim_res = self.simulate(
        fut_df=fut_df,
        trained_model=trained_model,
        past_df=past_df,
        new_external_regressor_df=new_external_regressor_df,
        include_err=False)
    features_df = sim_res["features_df"]
    # Predictions are written to ``value_col``; when the model carries
    # uncertainty, quantiles are written to ``QUANTILE_SUMMARY_COL``.
    if trained_model["uncertainty_dict"] is None:
        predict_func = predict_ml
    else:
        predict_func = predict_ml_with_uncertainty
    pred_res = predict_func(
        fut_df=features_df,
        trained_model=trained_model)
    fut_df = pred_res["fut_df"]
    x_mat = pred_res["x_mat"]
    # Keep only the forecast-relevant columns that actually exist.
    candidate_cols = [time_col, value_col, QUANTILE_SUMMARY_COL, ERR_STD_COL]
    kept_cols = [col for col in candidate_cols if col in fut_df.columns]
    fut_df = fut_df[kept_cols]
    return {
        "fut_df": fut_df,
        "x_mat": x_mat,
        "features_df": features_df}
def predict_n_via_sim(
        self,
        fut_time_num,
        trained_model,
        freq,
        new_external_regressor_df=None,
        simulation_num=10,
        fast_simulation=False,
        include_err=None):
    """Forecasts the next ``fut_time_num`` periods using simulations
    from the fitted series.

    Depending on ``fast_simulation``, this dispatches to either
    ``self.predict_via_sim`` or ``self.predict_via_sim_fast``.
    The ``past_df`` is the training data available in ``trained_model``.
    Extra regressors (``extra_pred_cols``) originally in ``df`` can be
    supplied via ``new_external_regressor_df``.

    Parameters
    ----------
    fut_time_num : `int`
        Number of future values needed.
    trained_model : `dict`
        A fitted silverkite model which is the output of ``self.forecast``.
    freq : `str`
        Frequency of future predictions.
        Accepts any valid frequency for ``pd.date_range``.
    new_external_regressor_df : `pandas.DataFrame` or None
        Contains the extra regressors if specified.
    simulation_num : `int`, optional, default 10
        The number of simulated series to be used in prediction.
    fast_simulation : `bool`, default False
        Determines if fast simulations are to be used. This only impacts models
        which include auto-regression. The fast method generates a single
        simulation without added error, then adds the error via the volatility
        model: a major speed boost at the cost of potentially less accurate
        prediction intervals.
    include_err : `bool`, optional, default None
        Boolean to determine if errors are to be incorporated in the simulations.
        If None, it will be set to True if uncertainty is passed to the model and
        otherwise will be set to False.

    Returns
    -------
    result : `dict`
        A dictionary with following items

        - "fut_df": `pandas.DataFrame`
            The same as input dataframe with an added column for the response.
            If value_col already appears in ``fut_df``, it will be over-written.
            If ``uncertainty_dict`` is provided as input,
            it will also contain a ``QUANTILE_SUMMARY_COL`` column.
            Here are the expected columns:

            (1) A time column with the column name being ``trained_model["time_col"]``
            (2) The predicted response in ``value_col`` column.
            (3) Quantile summary response in ``QUANTILE_SUMMARY_COL`` column.
                This column only appears if the model includes uncertainty.
            (4) Error std in `ERR_STD_COL` column.
                This column only appears if the model includes uncertainty.
        - "x_mat": `pandas.DataFrame`
            Design matrix of the predictive machine-learning model.

    Raises
    ------
    ValueError
        If ``include_err`` is True while the model has no uncertainty.
    """
    if include_err is None:
        include_err = trained_model["uncertainty_dict"] is not None
    if trained_model["uncertainty_dict"] is None and include_err:
        raise ValueError(
            "`include_err=True` was passed. "
            "However model does not support uncertainty. "
            "To support uncertainty pass `uncertainty_dict` to the model.")
    time_col = trained_model["time_col"]
    value_col = trained_model["value_col"]
    last_fit_date = trained_model["last_date_for_fit"]
    # Builds the future timestamp grid: one extra period starting at the
    # training end date, then strictly-future timestamps only.
    future_dates = pd.date_range(
        start=last_fit_date,
        periods=fut_time_num + 1,
        freq=freq)
    future_dates = future_dates[future_dates > last_fit_date]
    fut_df = pd.DataFrame({time_col: future_dates.tolist()})
    # Observed training values drive the autoregressive simulation.
    past_df = trained_model["df"][[value_col]].reset_index(drop=True)
    if fast_simulation:
        return self.predict_via_sim_fast(
            fut_df=fut_df,
            trained_model=trained_model,
            past_df=past_df,  # observed data used for training the model
            new_external_regressor_df=new_external_regressor_df)
    return self.predict_via_sim(
        fut_df=fut_df,
        trained_model=trained_model,
        past_df=past_df,  # observed data used for training the model
        new_external_regressor_df=new_external_regressor_df,
        simulation_num=simulation_num,
        include_err=include_err)
def predict(
self,
fut_df,
trained_model,
freq=None,
past_df=None,
new_external_regressor_df=None,
include_err=None,
force_no_sim=False,
simulation_num=None,
fast_simulation=None,
na_fill_func=lambda s: s.interpolate().bfill().ffill()):
"""Performs predictions using silverkite model.
It determines if the prediction should be simulation-based or not and then
predicts using that setting.
The function determines if it should use simulation-based predictions or
that is not necessary.
Here is the logic for determining if simulations are needed:
- If the model is not autoregressive, then clearly no simulations are needed
- If the model is autoregressive, however the minimum lag appearing in the model
is larger than the forecast horizon, then simulations are not needed.
This is because the lags can be calculated fully without predicting the future.
User can overwrite the above behavior and force no simulations using
``force_no_sim`` argument, in which case some lags will be imputed.
This option should not be used by most users.
Some scenarios where advanced user might want to use
this is (a) when ``min_lag_order >= forecast_horizon`` does not hold strictly
but close to hold. (b) user want to predict fast, the autoregression
lags are normalized. In that case the predictions returned could correspond
to an approximation of a model without autoregression.
Parameters
----------
fut_df : `pandas.DataFrame`
The data frame which includes the timestamps for prediction
and possibly regressors.
trained_model : `dict`
A fitted silverkite model which is the output of ``self.forecast``
freq : `str`, optional, default None
Timeseries frequency, DateOffset alias.
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
for the allowed strings.
If None, it is extracted from ``trained_model`` input.
past_df : `pandas.DataFrame` or None, default None
A data frame with past values if autoregressive methods are called
via autoreg_dict parameter of ``greykite.algo.forecast.silverkite.SilverkiteForecast.py``.
Note that this ``past_df`` can be anytime before the training end timestamp, but can not
exceed it.
new_external_regressor_df: `pandas.DataFrame` or None, default None
Contains the regressors not already included in ``fut_df``.
include_err : `bool`, optional, default None
Boolean to determine if errors are to be incorporated in the simulations.
If None, it will be set to True if uncertainty is passed to the model and
otherwise will be set to False
force_no_sim : `bool`, default False
If True, prediction with no simulations is forced.
This can be useful when speed is of concern or for validation purposes.
In this case, the potential non-available lags will be imputed.
Most users should not set this to True as the consequences could be
hard to quantify.
simulation_num : `int` or None, default None
The number of simulations for when simulations are used for generating
forecasts and prediction intervals. If None, it will be inferred from
the model (``trained_model``).
fast_simulation: `bool` or None, default None
Deterimes if fast simulations are to be used. This only impacts models
which include auto-regression. This method will only generate one simulation
without any error being added and then add the error using the volatility
model. The advantage is a major boost in speed during inference and the
disadvantage is potentially less accurate prediction intervals.
If None, it will be inferred from the model (``trained_model``).
na_fill_func : callable (`pd.DataFrame` -> `pd.DataFrame`)
default::
lambda df: df.interpolate().bfill().ffill()
A function which interpolates missing values in a dataframe.
The main usage is invoked when there is a gap between the timestamps in ``fut_df``.
The main use case is when the user wants to predict a period which is not an immediate period
after training.
In that case to fill in the gaps, the regressors need to be interpolated/filled.
The default works by first interpolating the continuous variables.
Then it uses back-filling and then forward-filling for categorical variables.
Returns
-------
result: `dict`
A dictionary with following items
- "fut_df": `pandas.DataFrame`
The same as input dataframe with an added column for the response.
If value_col already appears in ``fut_df``, it will be over-written.
If ``uncertainty_dict`` is provided as input,
it will also contain a ``QUANTILE_SUMMARY_COL`` column.
Here are the expected columns:
(1) A time column with the column name being ``trained_model["time_col"]``
(2) The predicted response in ``value_col`` column.
(3) Quantile summary response in ``QUANTILE_SUMMARY_COL`` column.
This column only appears if the model includes uncertainty.
(4) Error std in `ERR_STD_COL` column.
This column only appears if the model includes uncertainty.
- "x_mat": `pandas.DataFrame`
Design matrix of the predictive machine-learning model
"""
fut_df = fut_df.copy()
time_col = trained_model["time_col"]
value_col = trained_model["value_col"]
min_lag_order = trained_model["min_lag_order"]
max_lag_order = trained_model["max_lag_order"]
if simulation_num is None:
simulation_num = trained_model["simulation_num"]
if fast_simulation is None:
fast_simulation = trained_model["fast_simulation"]
if freq is None:
freq = trained_model["freq"]
if fut_df.shape[0] <= 0:
raise ValueError("``fut_df`` must be a dataframe of non-zero size.")
if time_col not in fut_df.columns:
raise ValueError(
f"``fut_df`` must include {time_col} as time column, "
"which is what ``trained_model`` considers to be the time column.")
fut_df[time_col] = pd.to_datetime(fut_df[time_col])
# Handles ``past_df``.
training_past_df = trained_model["train_df"].copy()
if past_df is None or len(past_df) == 0:
# In the case that we use ``train_df`` from the ``forecast`` method,
# we don't check the quality since it's constructed by the method.
log_message(
message="``past_df`` not provided during prediction, use the ``train_df`` from training results.",
level=LoggingLevelEnum.DEBUG
)
# The ``past_df`` has been manipulated in the training method to immediately precede the future periods.
past_df = training_past_df
else:
# In the case that ``past_df`` is passed, we combine it with the known dfs.
past_df[time_col] = pd.to_datetime(past_df[time_col])
if past_df[time_col].max() > training_past_df[time_col].max():
raise ValueError("``past_df`` can not have timestamps later than the training end timestamp.")
# Combines ``past_df`` with ``training_past_df`` to get all available values.
past_df = (past_df
.append(training_past_df)
.dropna(subset=[value_col])
# When there are duplicates, the value passed from ``past_df`` is kept.
.drop_duplicates(subset=time_col)
.reset_index(drop=True)
)
# Fills any missing timestamps in ``past_df``. These values will be imputed.
past_df = fill_missing_dates(
df=past_df,
time_col=time_col,
freq=freq)[0] # `fill_missing_dates` returns a tuple where the first one is the df.
# If ``value_col`` appears in user provided ``fut_df``,
# we remove it to avoid issues in merging
# also note that such column is unnecessary
if value_col in fut_df.columns:
del fut_df[value_col]
if include_err is None:
include_err = trained_model["uncertainty_dict"] is not None
if trained_model["uncertainty_dict"] is None and include_err:
raise ValueError(
"`include_err=True` was passed. "
"However model does not support uncertainty. "
"To support uncertainty pass `uncertainty_dict` to the model.")
# If the minimal lag order for lagged regressors is less than the size of fut_df,
# raise a warning of potential imputation of lagged regressor columns.
# Note that all lagged regressor columns must be included in ``fut_df`` or ``new_external_regressor_df``
min_lagged_regressor_order = trained_model["min_lagged_regressor_order"]
lagged_regressor_dict = trained_model['lagged_regressor_dict']
if min_lagged_regressor_order is not None and min_lagged_regressor_order < fut_df.shape[0]:
warnings.warn(
f"Trained model's `min_lagged_regressor_order` ({int(min_lagged_regressor_order)}) "
f"is less than the size of `fut_df` ({fut_df.shape[0]}), "
f"NaN values (if there are any) in lagged regressor columns have been imputed. "
f"More info: {lagged_regressor_dict}.",
UserWarning)
has_autoreg_structure = trained_model["has_autoreg_structure"]
# In absence of autoregression, we can return quickly.
# Also note ``fut_df`` can overlap with training times without any issues,
# ``past_df`` is not needed for autoregression, but may be needed for lagged regression
# and we do not need to track the overlap.
if not has_autoreg_structure:
pred_res = self.predict_no_sim(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=new_external_regressor_df,
time_features_ready=False,
regressors_ready=False)
fut_df = pred_res["fut_df"]
x_mat = pred_res["x_mat"]
return {
"fut_df": fut_df,
"x_mat": x_mat,
"simulations_not_used": None,
"fut_df_info": None,
"min_lag_order": None}
# From here we assume model has autoregression,
# because otherwise we would have returned above.
# Checks if imputation is needed.
# Writes to log message if imputation is needed for debugging purposes.
# This happens when
# (1) ``past_df`` is too short and does not cover the earliest lag needed.
# (2) ``past_df`` has missing values.
past_df_sufficient = True
# The check happens when ``freq`` is not None.
# We made sure ``freq`` is not None before but wanna add a safeguard.
if freq is not None:
pred_min_ts = fut_df[time_col].min() # the prediction period's minimum timestamp
past_df_min_ts = past_df[time_col].min() # the past df's minimum timestamp
lag_min_ts_needed = pred_min_ts - to_offset(freq) * max_lag_order # the minimum timestamp needed (max lag)
# Checks (1) if ``past_df`` covers the period after ``lag_min_ts_needed``.
past_df_sufficient = past_df_sufficient and (past_df_min_ts <= lag_min_ts_needed)
if past_df_sufficient:
# Checks (2) if ``past_df`` has any missing value after ``lag_min_ts_needed``
past_df_after_min_ts = past_df[past_df[time_col] >= lag_min_ts_needed]
past_df_sufficient = past_df_sufficient and (past_df_after_min_ts[value_col].isna().sum() == 0)
if not past_df_sufficient:
log_message(
message="``past_df`` is not sufficient, imputation is performed when creating autoregression terms.",
level=LoggingLevelEnum.DEBUG)
if new_external_regressor_df is not None:
fut_df = pd.concat(
[fut_df, new_external_regressor_df],
axis=1,
ignore_index=False,
sort=False)
fut_df_info = self.partition_fut_df(
fut_df=fut_df,
trained_model=trained_model,
na_fill_func=na_fill_func,
freq=freq)
fut_df_before_training = fut_df_info["fut_df_before_training"]
fut_df_within_training = fut_df_info["fut_df_within_training"]
fut_df_after_training_expanded = fut_df_info["fut_df_after_training_expanded"]
index_after_training_original = fut_df_info["index_after_training_original"]
inferred_forecast_horizon = fut_df_info["inferred_forecast_horizon"]
fut_df_list = []
x_mat_list = []
# We allow calculating extended fitted values on a longer backward
# history with imputation if ``past_df`` is not sufficient.
if fut_df_before_training.shape[0] > 0:
min_timestamp = fut_df_before_training[time_col].min()
past_df_before_min_timestamp = past_df[past_df[time_col] < min_timestamp]
# Since ``fut_df_before_training`` does not have ``value_col`` (dropped above),
# but we need the actual values for ``fut_df_before_training`` in case the lags
# are not enough and we don't have simulation, we try to find the values from
# ``past_df``. If some values are still missing, those values will be imputed.
fut_df_before_training = fut_df_before_training.merge(
past_df[[time_col, value_col]],
on=time_col,
how="left"
)
# Imputation will be done during ``self.predict_no_sim`` if ``past_df_before_min_timestamp``
# does not have sufficient AR terms.
pred_res = self.predict_no_sim(
fut_df=fut_df_before_training,
trained_model=trained_model,
past_df=past_df_before_min_timestamp,
new_external_regressor_df=None,
time_features_ready=False,
regressors_ready=True)
fut_df0 = pred_res["fut_df"]
x_mat0 = pred_res["x_mat"]
fut_df_list.append(fut_df0.reset_index(drop=True))
x_mat_list.append(x_mat0)
fitted_df = trained_model["fitted_df"]
fitted_x_mat = trained_model["x_mat"]
potential_forecast_cols = [time_col, value_col, QUANTILE_SUMMARY_COL, ERR_STD_COL]
existing_forecast_cols = [col for col in potential_forecast_cols if col in fitted_df.columns]
fitted_df = fitted_df[existing_forecast_cols]
# For within training times, we simply use the fitted data
if fut_df_within_training.shape[0] > 0:
# Creates a dummy index to get the consistent index on ``fitted_x_mat``
fut_df0 = pd.merge(
fut_df_within_training.reset_index(drop=True),
fitted_df.reset_index(drop=True),
on=[time_col])
# Finds out where ``fut_df_within_training`` intersects with ``fitted_df``
# This is for edge cases where ```fut_df_within_training``` does not have all the
# times appearing in ``fitted_df``
fut_df_within_training["dummy_bool_index"] = True
fut_df_index = pd.merge(
fut_df_within_training.reset_index(drop=True)[[time_col, "dummy_bool_index"]],
fitted_df.reset_index(drop=True)[[time_col]],
on=[time_col],
how="right")
ind = fut_df_index["dummy_bool_index"].fillna(False)
del fut_df_index
fitted_x_mat = fitted_x_mat.reset_index(drop=True).loc[ind]
del fut_df_within_training["dummy_bool_index"]
assert fut_df0.shape[0] == fut_df_within_training.shape[0]
fut_df_list.append(fut_df0.reset_index(drop=True))
x_mat_list.append(fitted_x_mat)
# The future timestamps need to be predicted
# There are two cases: either simulations are needed or not
# This is decided as follows:
simulations_not_used = (not has_autoreg_structure) or force_no_sim or (
inferred_forecast_horizon <= min_lag_order)
# ``new_external_regressor_df`` will be passed as None
# since it is already included in ``fut_df``.
# ``past_df`` doesn't need to change because either (1) it is passed from
# this ``predict`` method directly, in which case it should be immediately preceding the
# ``fut_df_after_training_expanded``;
# or (2) it is from the training model, where the last term is the last training timestamp,
# which should also immediately precedes the ``fut_df_after_training_expanded``.
if fut_df_after_training_expanded is not None and fut_df_after_training_expanded.shape[0] > 0:
if simulations_not_used:
pred_res = self.predict_no_sim(
fut_df=fut_df_after_training_expanded,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None,
time_features_ready=False,
regressors_ready=True)
fut_df0 = pred_res["fut_df"]
x_mat0 = pred_res["x_mat"]
elif fast_simulation:
pred_res = self.predict_via_sim_fast(
fut_df=fut_df_after_training_expanded,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None)
fut_df0 = pred_res["fut_df"]
x_mat0 = pred_res["x_mat"]
else:
pred_res = self.predict_via_sim(
fut_df=fut_df_after_training_expanded,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=None,
simulation_num=simulation_num,
include_err=include_err)
fut_df0 = pred_res["fut_df"]
x_mat0 = pred_res["x_mat"]
fut_df0 = fut_df0[index_after_training_original]
x_mat0 = x_mat0[index_after_training_original]
fut_df_list.append(fut_df0.reset_index(drop=True))
x_mat_list.append(x_mat0)
fut_df_final = pd.concat(
fut_df_list,
axis=0,
ignore_index=True,
sort=False)
x_mat_final = pd.concat(
x_mat_list,
axis=0,
ignore_index=True,
sort=False)
# Makes sure to return only necessary columns
potential_forecast_cols = [time_col, value_col, QUANTILE_SUMMARY_COL, ERR_STD_COL]
existing_forecast_cols = [col for col in potential_forecast_cols if col in fut_df_final.columns]
fut_df_final = fut_df_final[existing_forecast_cols]
# Expects the created data has same size as the passed ``fut_df``
assert len(fut_df_final) == len(fut_df), "The generated data at predict phase must have same length as input ``fut_df``"
assert len(x_mat_final) == len(fut_df), "The generated data at predict phase must have same length as input ``fut_df``"
return {
"fut_df": fut_df_final,
"x_mat": x_mat_final,
"simulations_not_used": simulations_not_used,
"fut_df_info": fut_df_info,
"min_lag_order": min_lag_order}
def predict_n(
self,
fut_time_num,
trained_model,
freq=None,
past_df=None,
new_external_regressor_df=None,
include_err=None,
force_no_sim=False,
simulation_num=None,
fast_simulation=None,
na_fill_func=lambda s: s.interpolate().bfill().ffill()):
"""This is the forecast function which can be used to forecast a number of
periods into the future.
It determines if the prediction should be simulation-based or not and then
predicts using that setting. Currently if the silverkite model uses
autoregression simulation-based prediction/CIs are used.
Parameters
----------
fut_time_num : `int`
number of needed future values
trained_model : `dict`
A fitted silverkite model which is the output of ``self.forecast``
freq : `str`, optional, default None
Timeseries frequency, DateOffset alias.
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
for the allowed frequencies.
If None, it is extracted from ``trained_model`` input.
new_external_regressor_df : `pandas.DataFrame` or None
Contains the extra regressors if specified.
simulation_num : `int`, optional, default 10
The number of simulated series to be used in prediction.
fast_simulation: `bool` or None, default None
Deterimes if fast simulations are to be used. This only impacts models
which include auto-regression. This method will only generate one simulation
without any error being added and then add the error using the volatility
model. The advantage is a major boost in speed during inference and the
disadvantage is potentially less accurate prediction intervals.
If None, it will be inferred from the model (``trained_model``).
include_err : `bool` or None, default None
Boolean to determine if errors are to be incorporated in the simulations.
If None, it will be set to True if uncertainty is passed to the model and
otherwise will be set to False
force_no_sim: `bool`, default False
If True, prediction with no simulations is forced.
This can be useful when speed is of concern or for validation purposes.
na_fill_func : callable (`pd.DataFrame` -> `pd.DataFrame`)
default::
lambda df: df.interpolate().bfill().ffill()
A function which interpolated missing values in a dataframe.
The main usage is invoked when there is a gap between the timestamps.
In that case to fill in the gaps, the regressors need to be interpolated/filled.
The default works by first interpolating the continuous variables.
Then it uses back-filling and then forward-filling for categorical variables.
Returns
-------
result: `dict`
A dictionary with following items
- "fut_df": `pandas.DataFrame`
The same as input dataframe with an added column for the response.
If value_col already appears in ``fut_df``, it will be over-written.
If ``uncertainty_dict`` is provided as input,
it will also contain a ``QUANTILE_SUMMARY_COL`` column.
Here are the expected columns:
(1) A time column with the column name being ``trained_model["time_col"]``
(2) The predicted response in ``value_col`` column.
(3) Quantile summary response in ``QUANTILE_SUMMARY_COL`` column.
This column only appears if the model includes uncertainty.
(4) Error std in `ERR_STD_COL` column.
This column only appears if the model includes uncertainty.
- "x_mat": `pandas.DataFrame`
Design matrix of the predictive machine-learning model
"""
if freq is None:
freq = trained_model["freq"]
# Creates the future time grid
dates = pd.date_range(
start=trained_model["last_date_for_fit"],
periods=fut_time_num + 1,
freq=freq)
dates = dates[dates > trained_model["last_date_for_fit"]] # drops values up to last_date_for_fit
fut_df = pd.DataFrame({trained_model["time_col"]: dates.tolist()})
return self.predict(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=new_external_regressor_df,
include_err=include_err,
force_no_sim=force_no_sim,
simulation_num=simulation_num,
fast_simulation=fast_simulation,
na_fill_func=na_fill_func)
    def partition_fut_df(
            self,
            fut_df,
            trained_model,
            freq,
            na_fill_func=lambda s: s.interpolate().bfill().ffill()):
        """This function takes a dataframe ``fut_df`` which includes the timestamps to forecast
        and a ``trained_model`` returned by
        `~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`
        and decomposes
        ``fut_df`` to various dataframes which reflect if the timestamps are before,
        during or after the training periods.
        It also determines if: 'the future timestamps after the training period' are immediately
        after 'the last training period' or if there is some extra gap.
        In that case, this function creates an expanded dataframe which includes the missing
        timestamps as well.
        If ``fut_df`` also includes extra columns (they could be regressor columns),
        this function will interpolate the extra regressor columns.

        Parameters
        ----------
        fut_df : `pandas.DataFrame`
            The data frame which includes the timestamps for prediction
            and possibly regressors. Note that the timestamp column in ``fut_df``
            must be the same as ``trained_model["time_col"]``.
            We assume ``fut_df[time_col]`` is pandas.datetime64 type.
        trained_model : `dict`
            A fitted silverkite model which is the output of
            `~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`
        freq : `str`
            Timeseries frequency, DateOffset alias.
            See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
            for the allowed frequencies.
        na_fill_func : callable (`pd.DataFrame` -> `pd.DataFrame`)
            default::

                lambda df: df.interpolate().bfill().ffill()

            A function which interpolates missing values in a dataframe.
            The main usage is invoked when there is a gap between the timestamps.
            In that case to fill in the gaps, the regressors need to be interpolated/filled.
            The default works by first interpolating the continuous variables.
            Then it uses back-filling and then forward-filling for categorical variables.

        Returns
        -------
        result: `dict`
            A dictionary with following items:

            - ``"fut_freq_in_secs"``: `float`
                The inferred frequency in ``fut_df``
            - ``"training_freq_in_secs"``: `float`
                The inferred frequency in training data
            - ``"index_before_training"``: `list` [`bool`]
                A boolean list to determine which rows of ``fut_df`` include a time
                which is before the training start.
            - ``"index_within_training"``: `list` [`bool`]
                A boolean list to determine which rows of ``fut_df`` include a time
                which is during the training period.
            - ``"index_after_training"``: `list` [`bool`]
                A boolean list to determine which rows of ``fut_df`` include a time
                which is after the training end date.
            - ``"fut_df_before_training"``: `pandas.DataFrame`
                A partition of ``fut_df`` with timestamps before the training start date
            - ``"fut_df_within_training"``: `pandas.DataFrame`
                A partition of ``fut_df`` with timestamps during the training period
            - ``"fut_df_after_training"``: `pandas.DataFrame`
                A partition of ``fut_df`` with timestamps after the training end date
            - ``"fut_df_gap"``: `pandas.DataFrame` or None
                If there is a gap between training end date and the first timestamp
                after the training end date in ``fut_df``, this dataframe can fill the
                gap between the two. In case ``fut_df`` includes extra columns as well,
                the values for those columns will be filled using ``na_fill_func``.
            - ``"fut_df_after_training_expanded"``: `pandas.DataFrame`
                If there is a gap between training end date and the first timestamp
                after the training end date in ``fut_df``, this dataframe will include
                the data for the gaps (``fut_df_gap``) as well as ``fut_df_after_training``.
            - ``"index_after_training_original"``: `list` [`bool`]
                A boolean list to determine which rows of ``fut_df_after_training_expanded``
                correspond to raw data passed by user which are after training end date,
                appearing in ``fut_df``.
                Note that this partition corresponds to ``fut_df_after_training``
                which is the subset of data in ``fut_df`` provided by user and
                also returned by this function.
            - ``"missing_periods_num"``: `int`
                Number of missing timestamps between the last date of training and
                first date in ``fut_df`` appearing after the training end date
            - ``"inferred_forecast_horizon"``: `int`
                This is the inferred forecast horizon from ``fut_df``.
                This is defined to be the distance between the last training end date
                and last date appearing in ``fut_df``.
                Note that this value can be smaller or larger than the number of
                rows of ``fut_df``.
                This is calculated by adding the number of potentially missing timestamps
                and the number of time periods appearing after the training end point.
                Also note if there are no timestamps after the training end point in
                ``fut_df``, this value will be zero.
            - ``"forecast_partition_summary"``: `dict`
                A dictionary which includes the size of various partitions of ``fut_df``
                as well as the missing timestamps if needed. The dictionary keys are as
                follows:

                - ``"len_before_training"``: the number of time periods before training start
                - ``"len_within_training"``: the number of time periods within training
                - ``"len_after_training"``: the number of time periods after training
                - ``"len_gap"``: the number of missing time periods between training data and
                  future time stamps in ``fut_df``
        """
        # Copies to avoid mutating the caller's dataframe.
        fut_df = fut_df.copy()
        training_start_timestamp = trained_model["min_timestamp"]
        training_end_timestamp = trained_model["max_timestamp"]
        training_freq_in_secs = trained_model["inferred_freq_in_secs"]
        time_col = trained_model["time_col"]
        if len(fut_df) > 1:
            fut_df_time_stats = describe_timeseries(
                df=fut_df,
                time_col=time_col)
            if not fut_df_time_stats["regular_increments"]:
                warnings.warn(
                    "``fut_df`` does not have regular time increments")
            if not fut_df_time_stats["increasing"]:
                raise ValueError(f"``fut_df``'s time column {time_col} must be increasing in time")
            fut_freq_in_secs = fut_df_time_stats["freq_in_secs"]
        else:
            # When test_horizon/cv_horizon/forecast_horizon is 1, not all stats above
            # are available, thus it produces an error.
            # The "else" handles this case.
            fut_freq_in_secs = None

        # Partitions the requested timestamps relative to the training period:
        # strictly before training start, within [start, end], or strictly after end.
        index_before_training = (fut_df[time_col] < training_start_timestamp)
        index_within_training = (
            (fut_df[time_col] >= training_start_timestamp) &
            (fut_df[time_col] <= training_end_timestamp))
        index_after_training = (fut_df[time_col] > training_end_timestamp)

        fut_df_before_training = fut_df[index_before_training]
        fut_df_within_training = fut_df[index_within_training]
        fut_df_after_training = fut_df[index_after_training]

        fut_df_gap = None  # a dataframe which fills in the missing time periods
        missing_periods_num = 0  # the number of missing time periods

        if fut_df_after_training.shape[0] > 0:
            min_timestamp_after_training = min(
                fut_df_after_training[time_col])
            # The single timestamp one ``freq`` step after the training end:
            # this is where the first future timestamp is expected to land.
            expected_timestamp_after_training = pd.date_range(
                start=training_end_timestamp,
                periods=2,
                freq=freq)[1]

            if min_timestamp_after_training < expected_timestamp_after_training:
                # The first future timestamp is off the regular grid (too close
                # to the training end) — cannot be reconciled with ``freq``.
                raise ValueError(
                    "The most immediate time in the future is off "
                    f"The last training date: {training_end_timestamp}. "
                    f"The first future period: {min_timestamp_after_training}. "
                    f"Expected first future period is {expected_timestamp_after_training}")
            elif min_timestamp_after_training > expected_timestamp_after_training:
                # There is a gap: enumerate the missing timestamps between the
                # expected first future period and the first provided one.
                missing_dates = pd.date_range(
                    start=expected_timestamp_after_training,
                    end=min_timestamp_after_training,
                    freq=freq)
                # The last timestamp is already there, therefore we drop it
                missing_dates = missing_dates[:-1]
                missing_periods_num = len(missing_dates)
                # The length of missing dates is non-zero since there are missing timestamps
                # since ``min_timestamp_after_training > next_period_after_training``
                assert missing_periods_num > 0
                fut_df_gap = pd.DataFrame({time_col: missing_dates.tolist()})

        # `fut_df` might include other columns than `time_col`
        # Those extra columns might be the regressors passed through `fut_df`
        # Therefore we need to ensure `fut_df_gap` includes those columns
        # Also note that those extra columns need to be imputed in that case
        if fut_df_gap is not None and len(fut_df.columns) > 1:
            fut_df_expanded = pd.concat(
                [fut_df_within_training, fut_df_gap, fut_df_after_training],
                axis=0,
                ignore_index=True,
                sort=False)
            # Imputes the missing values
            # Excludes time column which doesn't need imputation,
            # otherwise it causes error with pandas>=1.4.
            fut_df_expanded.loc[:, fut_df_expanded.columns != time_col] = na_fill_func(
                fut_df_expanded.loc[:, fut_df_expanded.columns != time_col])
            # Boolean mask selecting only the gap rows back out of the
            # concatenated (within + gap + after) frame, now with imputed columns.
            index = (
                [False] * fut_df_within_training.shape[0] +
                [True] * fut_df_gap.shape[0] +
                [False] * fut_df_after_training.shape[0])
            fut_df_gap = fut_df_expanded[index].copy()

        # Inferred horizon counts both the user-provided future rows and
        # any gap rows between training end and the first future timestamp.
        inferred_forecast_horizon = fut_df_after_training.shape[0]
        if fut_df_gap is not None:
            inferred_forecast_horizon += fut_df_gap.shape[0]

        # Creates an expanded dataframe which includes the missing times
        # between the end of training and the forecast period
        fut_df_after_training_expanded = fut_df_after_training
        index_after_training_original = [True] * fut_df_after_training.shape[0]
        if fut_df_gap is not None:
            fut_df_after_training_expanded = pd.concat(
                [fut_df_gap, fut_df_after_training],
                axis=0,
                ignore_index=True,
                sort=False)
            # Gap rows are marked False so callers can later drop them and
            # return only the rows the user originally asked for.
            index_after_training_original = (
                [False] * fut_df_gap.shape[0] +
                [True] * fut_df_after_training.shape[0])

        forecast_partition_summary = {
            "len_before_training": fut_df_before_training.shape[0],
            "len_within_training": fut_df_within_training.shape[0],
            "len_after_training": fut_df_after_training.shape[0],
            "len_gap": missing_periods_num
        }

        return {
            "fut_freq_in_secs": fut_freq_in_secs,
            "training_freq_in_secs": training_freq_in_secs,
            "index_before_training": index_before_training,
            "index_within_training": index_within_training,
            "index_after_training": index_after_training,
            "fut_df_before_training": fut_df_before_training,
            "fut_df_within_training": fut_df_within_training,
            "fut_df_after_training": fut_df_after_training,
            "fut_df_gap": fut_df_gap,
            "fut_df_after_training_expanded": fut_df_after_training_expanded,
            "index_after_training_original": index_after_training_original,
            "missing_periods_num": missing_periods_num,
            "inferred_forecast_horizon": inferred_forecast_horizon,
            "forecast_partition_summary": forecast_partition_summary}
def __build_silverkite_features(
self,
df,
time_col,
origin_for_time_vars,
daily_event_df_dict=None,
daily_event_neighbor_impact=None,
daily_event_shifted_effect=None,
changepoint_values=None,
continuous_time_col=None,
growth_func=None,
fs_func=None,
seasonality_changepoint_result=None,
changepoint_dates=None):
"""This function adds the prediction model features in training and
predict phase for ``self.forecast`` internal use but can be called
outside that context if desired.
The features are time related features such as seasonality, change points,
holidays, ...
Parameters
----------
df : `pandas.DataFrame`
input dataframe, which could be in training phase or predict phase
time_col : `str`
The column name in df representing time for the time series data
The time column values can be anything that can be parsed by pandas DatetimeIndex
origin_for_time_vars : `float`
The time origin used to create continuous variables for time
daily_event_df_dict : `dict` [`str`, `pandas.DataFrame`] or None, default None
A dictionary of data frames, each representing events data for the corresponding key.
The DataFrame has two columns:
- The first column contains event dates. Must be in a format
recognized by `pandas.to_datetime`. Must be at daily
frequency for proper join. It is joined against the time
in ``df``, converted to a day:
``pd.to_datetime(pd.DatetimeIndex(df[time_col]).date)``.
- the second column contains the event label for each date
The column order is important; column names are ignored.
The event dates must span their occurrences in both the training
and future prediction period.
During modeling, each key in the dictionary is mapped to a categorical variable
named ``f"{EVENT_PREFIX}_{key}"``, whose value at each timestamp is specified
by the corresponding DataFrame.
For example, to manually specify a yearly event on September 1
during a training/forecast period that spans 2020-2022::
daily_event_df_dict = {
"custom_event": pd.DataFrame({
"date": ["2020-09-01", "2021-09-01", "2022-09-01"],
"label": ["is_event", "is_event", "is_event"]
})
}
It's possible to specify multiple events in the same df. Two events,
``"sep"`` and ``"oct"`` are specified below for 2020-2021::
daily_event_df_dict = {
"custom_event": pd.DataFrame({
"date": ["2020-09-01", "2020-10-01", "2021-09-01", "2021-10-01"],
"event_name": ["sep", "oct", "sep", "oct"]
})
}
Use multiple keys if two events may fall on the same date. These events
must be in separate DataFrames::
daily_event_df_dict = {
"fixed_event": pd.DataFrame({
"date": ["2020-09-01", "2021-09-01", "2022-09-01"],
"event_name": "fixed_event"
}),
"moving_event": pd.DataFrame({
"date": ["2020-09-01", "2021-08-28", "2022-09-03"],
"event_name": "moving_event"
}),
}
The multiple event specification can be used even if events never overlap. An
equivalent specification to the second example::
daily_event_df_dict = {
"sep": pd.DataFrame({
"date": ["2020-09-01", "2021-09-01"],
"event_name": "is_event"
}),
"oct": pd.DataFrame({
"date": ["2020-10-01", "2021-10-01"],
"event_name": "is_event"
}),
}
Note: The events you want to use must be specified in ``extra_pred_cols``.
These take the form: ``f"{EVENT_PREFIX}_{key}"``, where
`~greykite.common.constants.EVENT_PREFIX` is the constant.
Note: Do not use `~greykite.common.constants.EVENT_DEFAULT`
in the second column. This is reserved to indicate dates that do not
correspond to an event.
daily_event_neighbor_impact : `int`, `list` [`int`], callable or None, default None
The impact of neighboring timestamps of the events in ``event_df_dict``.
This is for daily events so the units below are all in days.
For example, if the data is weekly ("W-SUN") and an event is daily,
it may not exactly fall on the weekly date.
But you can specify for New Year's day on 1/1, it affects all dates
in the week, e.g. 12/31, 1/1, ..., 1/6, then it will be mapped to the weekly date.
In this case you may want to map a daily event's date to a few dates,
and can specify
``neighbor_impact=lambda x: [x-timedelta(days=x.isocalendar()[2]-1) + timedelta(days=i) for i in range(7)]``.
Another example is that the data is rolling 7 day daily data,
thus a holiday may affect the t, t+1, ..., t+6 dates.
You can specify ``neighbor_impact=7``.
If input is `int`, the mapping is t, t+1, ..., t+neighbor_impact-1.
If input is `list`, the mapping is [t+x for x in neighbor_impact].
If input is a function, it maps each daily event's date to a list of dates.
daily_event_shifted_effect : `list` [`str`] or None, default None
Additional neighbor events based on given events.
For example, passing ["-1D", "7D"] will add extra daily events which are 1 day before
and 7 days after the given events.
Offset format is {d}{freq} with any integer plus a frequency string.
Must be parsable by pandas ``to_offset``.
The new events' names will be the current events' names with suffix "{offset}_before" or "{offset}_after".
For example, if we have an event named "US_Christmas Day",
a "7D" shift will have name "US_Christmas Day_7D_after".
This is useful when you expect an offset of the current holidays also has impact on the
time series, or you want to interact the lagged terms with autoregression.
If ``daily_event_neighbor_impact`` is also specified, this will be applied after adding neighboring days.
changepoint_values : `list` of Union[int, float, double]], optional
The values of the growth term at the changepoints
Can be generated by the ``get_evenly_spaced_changepoints``,
`get_custom_changepoints` functions
continuous_time_col : `str`, optional
This parameter is used only if ``changepoint_values`` is not None.
Column to apply growth_func to, to generate changepoint features
growth_func: callable, optional
Growth function (scalar -> scalar).
This parameter is used only if ``changepoint_values`` is not None.
Changepoint features are created by applying
``growth_func`` to ``continuous_time_col`` with offsets.
If None, uses identity function to use ``continuous_time_col`` directly
as growth term.
fs_func: callable, optional
A function which takes a df as input and returns an output df
with fourier terms. ``fs_func`` is expected to be constructed using
``greykite.common.features.timeseries_features.fourier_series_multi_fcn``, but
that is not a hard requirement.
seasonality_changepoint_result: `dict`
The detected seasonality change points result dictionary, returned by
`~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector.find_seasonality_changepoints`.
changepoint_dates : `list`
List of change point dates with `strftime` attribute.
Returns
-------
features_df : `pandas.DataFrame`
a data frame with the added features as new columns
"""
# adds time features
features_df = add_time_features_df(
df=df,
time_col=time_col,
conti_year_origin=origin_for_time_vars)
# adds daily events (e.g. holidays)
# if daily event data are given, we add them to temporal features data
# ``date_col`` below is used to join with ``daily_events`` data given
# in ``daily_event_df_dict``
# Note: events must be provided for both train and forecast time range
if daily_event_df_dict is not None:
if (df.shape[0] > 1
and min_gap_in_seconds(df, time_col) > TimeEnum.ONE_DAY_IN_SECONDS.value):
warnings.warn("The granularity of data is larger than daily. "
"Ensure the daily events data match the timestamps")
features_df = add_daily_events(
df=features_df,
event_df_dict=daily_event_df_dict,
date_col="date",
neighbor_impact=daily_event_neighbor_impact,
shifted_effect=daily_event_shifted_effect)
# adds changepoints
if changepoint_values is not None:
changepoint_features_df = get_changepoint_features(
features_df,
changepoint_values,
continuous_time_col=continuous_time_col,
growth_func=growth_func,
changepoint_dates=changepoint_dates)
assert features_df.shape[0] == changepoint_features_df.shape[0]
features_df = pd.concat(
[features_df, changepoint_features_df],
axis=1,
sort=False)
# adds seasonality
if fs_func is not None:
fs_features = fs_func(features_df)
fs_df = fs_features["df"]
features_df = pd.concat(
[features_df, fs_df],
axis=1,
sort=False)
# adds seasonality change points
if seasonality_changepoint_result is not None:
seasonality_available = list(set([x.split("_")[-1] for x in fs_df.columns])) if fs_func is not None else []
seasonality_df = build_seasonality_feature_df_from_detection_result(
df=df,
time_col=time_col,
seasonality_changepoints=seasonality_changepoint_result["seasonality_changepoints"],
seasonality_components_df=seasonality_changepoint_result["seasonality_components_df"],
include_original_block=False,
include_components=seasonality_available
)
features_df = pd.concat(
[features_df, seasonality_df.reset_index(drop=True)],
axis=1,
sort=False)
features_df.index = df.index # assigns a copy of original index
return features_df
def __build_autoreg_features(
self,
df,
value_col,
autoreg_func,
phase="fit",
past_df=None):
"""Builds autoregressive df to be used in forecast models.
Parameters
----------
df : `pandas.Dataframe`
Dataframe to predict on, passed to ``autoreg_func``.
value_col : `str`, optional
This is the column name for the values of the time series.
This parameter is only required if autoregressive methods are used.
``value_col`` is needed at the "predict" phase to add to the ``df``
with NULL values so it can be appended to ``past_df``.
autoreg_func : callable, optional
A function constructed by
`~greykite.common.features.timeseries_lags.build_autoreg_df`
with the following signature::
def autoreg_func(df: pd.DataFrame, past_df: pd.DataFrame) ->
dict(lag_df: pd.DataFrame, agg_lag_df: pd.DataFrame)
See more details for above parameters in
`~greykite.common.features.timeseries_lags.build_autoreg_df`.
phase : `str`, optional, default "fit"
It denotes the phase the features are being built. It can be either of
- "fit": indicates the features are being built for the fitting phase
- "predict": indicates the features are being built for predict phase
This argument is used minimally inside the function.
Currently only to throw an exception when ``phase = "predict"`` and
``autoreg_func`` is not None but ``past_df`` is None.
past_df : `pandas.DataFrame`, optional
If autoregressive methods are used by providing ``autoreg_func``,
this parameter is used to append to ``df`` (from left)
before calculating the lags
Returns
-------
autoreg_df : `pandas.DataFrame`
a data frame with autoregression columns
"""
df = df.copy()
# we raise an exception if we are in the 'predict' phase
# and `autoreg_func` is not None
# but either of ``past_df`` or ``value_col`` is not provided
# This is because in that case `autoreg_func` will not be able to provide useful
# lag-based predictors
if phase == "predict":
if value_col is None or past_df is None:
raise ValueError(
"At 'predict' phase, if autoreg_func is not None,"
" 'past_df' and 'value_col' must be provided to "
"`build_autoreg_features`")
else:
# in the predict phase, we add the `value_col` to the df
# to enable `past_df` to be appended
df[value_col] = np.nan
if past_df is not None and df is not None:
assert list(df.columns) == list(past_df.columns), (
"`autoreg_func(df, past_df)` expects "
"`df` and `past_df` to have the same columns. "
"This is not the case: "
f"`df` columns: {list(df.columns)}; "
f"`past_df` columns: {list(past_df.columns)}")
autoreg_data = autoreg_func(df=df, past_df=past_df)
autoreg_df = pd.concat(autoreg_data.values(), axis=1, sort=False)
# Preserves the original index of `df`
autoreg_df.index = df.index
return autoreg_df
def __build_lagged_regressor_features(
self,
df,
lagged_regressor_cols,
lagged_regressor_func,
phase="fit",
past_df=None):
"""Builds lagged regressor df to be used in forecast models.
Parameters
----------
df : `pandas.Dataframe`
Dataframe to predict on, passed to ``lagged_regressor_func``.
lagged_regressor_cols : `list` [`str`], optional
This is the original column names for the lagged regressors.
This parameter is only required if lagged regressor methods are used.
``lagged_regressor_cols`` is needed at the "predict" phase to add to the ``df``
with NULL values so it can be appended to ``past_df``.
lagged_regressor_func : callable, optional
A function constructed by
`~greykite.common.features.timeseries_lags.build_autoreg_df_multi`
with the following signature::
def build_autoreg_df_multi(df: pd.DataFrame, past_df: pd.DataFrame) -> autoreg_df: pd.DataFrame
See more details for above parameters in
`~greykite.common.features.timeseries_lags.build_autoreg_df_multi`.
phase : `str`, optional, default "fit"
It denotes the phase the features are being built. It can be either of
- "fit": indicates the features are being built for the fitting phase
- "predict": indicates the features are being built for predict phase
This argument is used minimally inside the function.
Currently only to throw an exception when ``phase = "predict"`` and
``lagged_regressor_func`` is not None but ``past_df`` is None.
past_df : `pandas.DataFrame`, optional
If lagged regressor methods are used by providing ``lagged_regressor_func``,
this parameter is used to append to ``df`` (from left)
before calculating the lags
Returns
-------
lagged_regressor_df : `pandas.DataFrame`
a data frame with lagged regressor columns
"""
df = df.copy()
# we raise an exception if we are in the 'predict' phase
# and `lagged_regressor_func` is not None
# but either of ``past_df`` or ``lagged_regressor_cols`` is not provided
# This is because in that case `lagged_regressor_func` will not be able to provide useful
# lag-based predictors
if phase == "predict":
if lagged_regressor_cols is None or past_df is None:
raise ValueError(
"At 'predict' phase, if lagged_regressor_func is not None,"
" 'past_df' and 'lagged_regressor_cols' must be provided to "
"`build_lagged_regressor_features`")
if df is not None:
df_col_missing = set(lagged_regressor_cols).difference(set(df.columns))
if len(df_col_missing) > 0:
raise ValueError(
"All columns in `lagged_regressor_cols` must appear in `df`, "
f"but {df_col_missing} is missing in `df`.")
if past_df is not None:
past_df_col_missing = set(lagged_regressor_cols).difference(set(past_df.columns))
if len(past_df_col_missing) > 0:
raise ValueError(
"All columns in `lagged_regressor_cols` must appear in `past_df`, "
f"but {past_df_col_missing} is missing in `past_df`.")
lagged_regressor_df = lagged_regressor_func(df=df, past_df=past_df)
# Preserves the original index of `df`
lagged_regressor_df.index = df.index
return lagged_regressor_df
    def __get_default_autoreg_dict(
            self,
            freq_in_days,
            forecast_horizon,
            simulation_based=False):
        """Generates the autoregressive components for forecasting
        given the forecast horizon and time frequency.

        Only if ``forecast_horizon`` is less than or equal to 30 days
        auto-regression is used.
        If ``forecast_horizon`` is larger than 30, the function returns None.

        First, we calculate an integer called ``proper_order`` defined below:
        This will be the smallest integer which is

            (i) larger than ``forecast_horizon``
            (ii) multiple of number of observations per week

        For example, for daily data if ``forecast_horizon`` is 2,
        we let the ``proper_order`` to be 7.
        As another example, if ``forecast_horizon`` is 9, we let the ``proper_order``
        to be 14.
        This order is useful because often the same day of week is best
        correlated with the observed value.
        As an example, for daily data, one aggregated lag predictors
        can be constructed by averaging these lags:
        `[proper_order, proper_order+7, proper_order+7*2]`
        which is equal to `[7, 14, 21]` when `forecast_horizon = 1`.

        Parameters
        ----------
        freq_in_days : `float`
            The frequency of the timeseries in days. e.g. 7.0 for weekly data,
            1.0 for daily data, 0.04166... for hourly data.
        forecast_horizon : `int`
            The number of time intervals into the future which are to be forecasted.
        simulation_based : `bool`, default False
            A boolean to decide if the forecast is performed via simulations or
            without simulations.

        Returns
        -------
        autoreg_dict : `dict` or `None`
            A dictionary which can be passed to
            `~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`
            to specify the autoregressive structure.
            See that function's definition for details.
        proper_order : `int` or None
            This will be the smallest integer which is

                (i) larger than ``forecast_horizon``
                (ii) multiple of 7
        """
        # The horizon expressed in days; used by the <= 30 days cutoff below.
        forecast_horizon_in_days = freq_in_days * forecast_horizon
        # Number of observations spanning one week, per the docstring above
        # (None when a weekly lag does not apply to this frequency).
        similar_lag = get_similar_lag(freq_in_days)
        proper_order = None
        if similar_lag is not None:
            # Smallest multiple of `similar_lag` that is >= `forecast_horizon`.
            proper_order = int(np.ceil(forecast_horizon / similar_lag) * similar_lag)
        autoreg_dict = None
        orders = None
        orders_list = []
        interval_list = []
        # Following considers two cases:
        # (i) simulation-based
        # (ii) non-simulation-based
        # In simulation-based we are able to use small orders
        # even for longer horizon ie we allow a orders for which
        # ``order < forecast_horizon``.
        # The above is not possible for non-simulation based approach.
        if simulation_based:
            orders = [1, 2, 3]  # 1st, 2nd, 3rd time lags
            if similar_lag is not None:
                interval_list = [(1, similar_lag), (
                    similar_lag + 1,
                    similar_lag * 2)]  # weekly average of last week, and weekly average of two weeks ago
                orders_list = [[
                    similar_lag,  # (i) same week day in a week which is 7 days prior
                    similar_lag * 2,  # (ii) same week day a week before (i)
                    similar_lag * 3]]  # (iii) same week day in a week before (ii)
        else:  # non-simulation-based case
            if forecast_horizon_in_days <= 30:
                # Without simulation, the smallest usable lag cannot be
                # smaller than the forecast horizon.
                orders = [forecast_horizon, forecast_horizon + 1, forecast_horizon + 2]
                if similar_lag is not None:
                    interval_list = [
                        (forecast_horizon, forecast_horizon + similar_lag - 1),
                        (forecast_horizon + similar_lag, forecast_horizon + similar_lag * 2 - 1)]
                    # The following will induce an average between three lags on the same time of week
                    orders_list = [[
                        proper_order,  # (i) same time in week, in a week which is ``proper_order`` times prior
                        proper_order + similar_lag,  # (ii) same time in a week before (i)
                        proper_order + similar_lag * 2]]  # (iii) same time in a week before (ii)
        if forecast_horizon_in_days <= 30:
            autoreg_dict = {}
            autoreg_dict["lag_dict"] = None
            autoreg_dict["agg_lag_dict"] = None
            if orders is not None:
                autoreg_dict["lag_dict"] = {"orders": orders}
            if len(orders_list) > 0 or len(interval_list) > 0:
                autoreg_dict["agg_lag_dict"] = {
                    "orders_list": orders_list,
                    "interval_list": interval_list}
            # Fills missing values in the lag series: backward fill first,
            # then forward fill any NaNs remaining at the end of the series.
            autoreg_dict["series_na_fill_func"] = (lambda s: s.bfill().ffill())
        return {
            "proper_order": proper_order,
            "autoreg_dict": autoreg_dict
        }
    def __get_default_lagged_regressor_dict(
            self,
            freq_in_days,
            forecast_horizon):
        """Generates the lagged regressor components for forecasting
        given the forecast horizon and time frequency.
        This applies to ONE lagged regressor column at a time.

        Only if ``forecast_horizon`` is less than or equal to 30 days
        lagged regressors are used.
        If ``forecast_horizon`` is larger than 30, the function returns None.

        First, we calculate an integer called ``proper_order`` defined below:
        This will be the smallest integer which is

            (i) larger than ``forecast_horizon``
            (ii) multiple of number of observations per week

        For example, for daily data if ``forecast_horizon`` is 2,
        we let the ``proper_order`` to be 7.
        As another example, if ``forecast_horizon`` is 9, we let the ``proper_order``
        to be 14.
        This order is useful because often the same day of week is best
        correlated with the observed response.
        As an example, for daily data, one aggregated lagged regressor
        can be constructed by averaging these lags:
        `[proper_order, proper_order+7, proper_order+7*2]`
        which is equal to `[7, 14, 21]` when `forecast_horizon = 1`.

        Parameters
        ----------
        freq_in_days : `float`
            The frequency of the timeseries in days. e.g. 7.0 for weekly data,
            1.0 for daily data, 0.04166... for hourly data.
        forecast_horizon : `int`
            The number of time intervals into the future which are to be forecasted.

        Returns
        -------
        lag_reg_dict : `dict` or `None`
            A dictionary which specifies the lagged regressor structure in ``lagged_regressor_dict``,
            which then can be passed to
            `~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`.
            See that function's definition for details.
        proper_order : `int` or None
            This will be the smallest integer which is

                (i) larger than ``forecast_horizon``
                (ii) multiple of 7
        """
        # The horizon expressed in days; used by the <= 30 days cutoff below.
        forecast_horizon_in_days = freq_in_days * forecast_horizon
        # Number of observations spanning one week, per the docstring above
        # (None when a weekly lag does not apply to this frequency).
        similar_lag = get_similar_lag(freq_in_days)
        proper_order = None
        if similar_lag is not None:
            # Smallest multiple of `similar_lag` that is >= `forecast_horizon`.
            proper_order = int(np.ceil(forecast_horizon / similar_lag) * similar_lag)
        lag_reg_dict = None
        orders = None
        orders_list = []
        interval_list = []
        # Since simulation is not allowed for lagged regressors,
        # the minimal lag order has to be greater than or equal to the forecast horizon.
        if forecast_horizon_in_days <= 30:
            if forecast_horizon == 1:
                orders = [1]
            elif proper_order is not None:
                orders = [proper_order]
            else:
                orders = [forecast_horizon]
            if similar_lag is not None:
                interval_list = [
                    (forecast_horizon, forecast_horizon + similar_lag - 1)]
                # The following will induce an average between three lags on the same time of week
                orders_list = [[
                    proper_order,  # (i) same time in week, in a week which is ``proper_order`` times prior
                    proper_order + similar_lag,  # (ii) same time in a week before (i)
                    proper_order + similar_lag * 2]]  # (iii) same time in a week before (ii)
        if forecast_horizon_in_days <= 30:
            lag_reg_dict = {}
            lag_reg_dict["lag_dict"] = None
            lag_reg_dict["agg_lag_dict"] = None
            if orders is not None:
                lag_reg_dict["lag_dict"] = {"orders": orders}
            if len(orders_list) > 0 or len(interval_list) > 0:
                lag_reg_dict["agg_lag_dict"] = {
                    "orders_list": orders_list,
                    "interval_list": interval_list}
            # Fills missing values in the lag series: backward fill first,
            # then forward fill any NaNs remaining at the end of the series.
            lag_reg_dict["series_na_fill_func"] = (lambda s: s.bfill().ffill())
        return {
            "proper_order": proper_order,
            "lag_reg_dict": lag_reg_dict
        }
def __normalize_changepoint_values(
self,
changepoint_values,
pred_cols,
continuous_time_col,
normalize_df_func):
"""Normalizes the ``changepoint_values`` in
`~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`
with the same normalize method specified in the model.
Parameters
----------
changepoint_values : `numpy.array` or `None`
The trend change point values as returned by
`~greykite.common.features.timeseries_features.get_changepoint_features_and_values_from_config`
pred_cols : `list`
List of names of predictors.
continuous_time_col : `str`
The name of continuous time column in ``pred_cols``.
normalize_df_func : `function` or `None`
The normalization function as returned by
`~greykite.common.features.normalize.normalize_df`
It should be compatible with ``pred_cols`` (generated on the same design matrix).
Returns
-------
normalized_changepoint_values : `numpy.array`
The normalized change points, on the same scale as the normalized continuous time column.
"""
if changepoint_values is None:
return None
if normalize_df_func is None:
return changepoint_values
if continuous_time_col is None:
continuous_time_col = TimeFeaturesEnum.ct1.value
new_df = pd.DataFrame(np.zeros([len(changepoint_values), len(pred_cols)]))
new_df.columns = pred_cols
new_df[continuous_time_col] = changepoint_values
normalized_df = normalize_df_func(new_df)
return normalized_df[continuous_time_col].values.ravel()
    def __remove_fourier_col_with_collinearity(self, fs_cols):
        """Removes fourier series terms with perfect or almost perfect collinearity.

        This function is intended to be used when fitting algorithm is OLS.
        These terms include, for example, yearly seasonality with order 4 and quarterly
        seasonality with order 1; yearly seasonality with order 12, quarterly seasonality
        with order 3 and monthly seasonality with order 1; etc.
        Including these terms together is possible to lead to NaN coefficients in OLS models.

        Note: the function assumes the user includes ``seas_names`` in ``fs_components_df``
        and labels them: weekly, monthly, quarterly and yearly.

        Parameters
        ----------
        fs_cols : `list` [`str`]
            A list of Fourier series column names generated by
            `~greykite.common.features.timeseries_features.fourier_series_multi_fcn`

        Returns
        -------
        fs_cols : `list` [`str`]
            The ``fs_cols`` with collinear cols removed.
            The removed columns always have shorter periods.
        """
        # Groups the columns by seasonality component, relying on the
        # ``seas_names`` labels appearing in the generated column names.
        yearly_cols = [col for col in fs_cols if "yearly" in col]
        quarterly_cols = [col for col in fs_cols if "quarterly" in col]
        monthly_cols = [col for col in fs_cols if "monthly" in col]
        weekly_cols = [col for col in fs_cols if "weekly" in col]
        # Assuming the provided seasonality orders are in reasonable ranges.
        # We need to deal with year/quarter, year/month, quarter/month for cos/sin
        # We need to deal with weekly for cos.
        # The Fourier series column names are generated by ``get_fourier_col_name``,
        # and the maximum order of a component can be parsed from the names.
        # For example, yearly seasonality has the form "sin12_ct1_yearly" or "cos12_ct1_yearly".
        # Parsing the number after sin/cos and before the first "_" gives the order.
        # ``default=0`` makes the removal loops below no-ops when a component is absent.
        max_yearly_order = max([int(col.split("_")[0][3:]) for col in yearly_cols], default=0)
        max_quarterly_order = max([int(col.split("_")[0][3:]) for col in quarterly_cols], default=0)
        # Adds columns to be removed for year/quarter, year/month, quarter/month
        # These always include components with shorter periods.
        # For example, if we have both yearly seasonality with order 4 and quarterly seasonality with order 1,
        # quarterly seasonality with order 1 will be removed.
        removed_cols = []
        # Removes redundant quarterly seasonality with yearly seasonality.
        # (Yearly order 4k covers quarterly order k, per the example above.)
        for i in range(4, max_yearly_order + 1, 4):
            removed_cols += [col for col in quarterly_cols if f"sin{i // 4}_" in col or f"cos{i // 4}_" in col]
        # Removes redundant monthly seasonality with yearly seasonality.
        # (Yearly order 12k covers monthly order k.)
        for i in range(12, max_yearly_order + 1, 12):
            removed_cols += [col for col in monthly_cols if f"sin{i // 12}_" in col or f"cos{i // 12}_" in col]
        # Removes redundant monthly seasonality with quarterly seasonality.
        # (Quarterly order 3k covers monthly order k.)
        for i in range(3, max_quarterly_order + 1, 3):
            removed_cols += [col for col in monthly_cols if f"sin{i // 3}_" in col or f"cos{i // 3}_" in col]
        # Adds columns for weekly seasonality.
        # Removes higher order cosine terms because order k and order period - k have the same cosine columns.
        for i in range(int(self._silverkite_seasonality_enum.WEEKLY_SEASONALITY.value.period) // 2 + 1,
                       int(self._silverkite_seasonality_enum.WEEKLY_SEASONALITY.value.period) + 1):
            removed_cols += [col for col in weekly_cols if f"cos{i}_" in col]
        # Removes both sine and cosine terms if the order is greater than the period.
        # The reason is that for weekly order 1 is the same as order 8.
        # This concern only applies to weekly seasonality, because the period 7 is small.
        removed_cols += [col for col in weekly_cols
                         if (int(col.split("_")[0][3:])
                             > self._silverkite_seasonality_enum.WEEKLY_SEASONALITY.value.period)]
        final_cols = [col for col in fs_cols if col not in removed_cols]
        if len(removed_cols) > 0:
            warnings.warn(f"The following Fourier series terms are removed due to collinearity:\n{removed_cols}")
        return final_cols
def __remove_fourier_col_with_collinearity_and_interaction(
self,
extra_pred_cols,
fs_cols):
"""Removes interaction terms that include fourier series terms removed in
`~greykite.algo.forecast.silverkite.SilverkiteForecast.__remove_fourier_col_with_collinearity`.
This function is intended to be used when fitting algorithm is OLS.
Parameters
----------
extra_pred_cols : `list` [`str`]
A list of features that include extra interaction terms in
`~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`.
fs_cols : `list` [`str`]
A list of Fourier series column names to keep from
`~greykite.algo.forecast.silverkite.SilverkiteForecast.__remove_fourier_col_with_collinearity`.
Returns
-------
extra_pred_cols : `list` [`str`]
The ``extra_pred_cols`` with interaction terms including fourier series not in ``fs_col`` removed.
"""
seas_cols = get_pattern_cols(extra_pred_cols, SEASONALITY_REGEX)
seas_cols = get_pattern_cols(seas_cols, ":")
removed_cols = []
for term in seas_cols:
if any([(x not in fs_cols) and (re.search(SEASONALITY_REGEX, x)) for x in term.split(":")]):
removed_cols.append(term)
extra_pred_cols = [x for x in extra_pred_cols if x not in removed_cols]
if len(removed_cols) > 0:
warnings.warn(f"The following interaction terms are removed:\n{removed_cols}\n"
f"due to the removal of the corresponding Fourier series terms.")
return extra_pred_cols
class TimeFeaturesEnum(Enum):
    """Time features generated by
    `~greykite.common.features.timeseries_features.build_time_features_df`.

    The item names are lower-case letters (kept the same as the values) for easier check of existence.
    To check if a string s is in this Enum,
    use ``s in TimeFeaturesEnum.__dict__["_member_names_"]``.
    Direct check of existence ``s in TimeFeaturesEnum`` is deprecated in python 3.8.
    """
    # Absolute time features (calendar fields of the timestamp itself).
    datetime = "datetime"
    date = "date"
    year = "year"
    year_length = "year_length"
    quarter = "quarter"
    quarter_start = "quarter_start"
    quarter_length = "quarter_length"
    month = "month"
    month_length = "month_length"
    hour = "hour"
    minute = "minute"
    second = "second"
    year_quarter = "year_quarter"
    year_month = "year_month"
    # NOTE(review): abbreviations (woy/doy/doq/dom/dow = week/day ordinals,
    # presumably week-of-year, day-of-year, etc.) — see `build_time_features_df`
    # for the exact definitions.
    woy = "woy"
    doy = "doy"
    doq = "doq"
    dom = "dom"
    dow = "dow"
    str_dow = "str_dow"
    str_doy = "str_doy"
    is_weekend = "is_weekend"
    # Relative time features
    year_woy = "year_woy"
    month_dom = "month_dom"
    year_woy_dow = "year_woy_dow"
    woy_dow = "woy_dow"
    dow_hr = "dow_hr"
    dow_hr_min = "dow_hr_min"
    # `tod`..`toy` are positions within day/week/month/quarter/year; they are
    # used as Fourier series inputs for seasonality (see the default
    # `fs_components_df` in `apply_default_model_components`).
    tod = "tod"
    tow = "tow"
    tom = "tom"
    toq = "toq"
    toy = "toy"
    conti_year = "conti_year"
    dow_grouped = "dow_grouped"
    # ISO time features (ISO week-date variants of the year/week features).
    year_iso = "year_iso"
    year_woy_iso = "year_woy_iso"
    year_woy_dow_iso = "year_woy_dow_iso"
    # Continuous time features; `ct1` is used as the linear growth term.
    ct1 = "ct1"
    ct2 = "ct2"
    ct3 = "ct3"
    ct_sqrt = "ct_sqrt"
    ct_root3 = "ct_root3"
    # NOTE(review): `us_dst`/`eu_dst` look like daylight-saving indicators
    # rather than continuous terms — grouped here in the original; confirm
    # against `build_time_features_df`.
    us_dst = "us_dst"
    eu_dst = "eu_dst"
def update_dictionary(default_dict, overwrite_dict=None, allow_unknown_keys=True):
    """Adds default key-value pairs to items in ``overwrite_dict``.

    Merges the items in ``default_dict`` and ``overwrite_dict``,
    preferring ``overwrite_dict`` if there are conflicts.

    Parameters
    ----------
    default_dict: `dict`
        Dictionary of default values.
    overwrite_dict: `dict` or None, optional, default None
        User-provided dictionary that overrides the defaults.
    allow_unknown_keys: `bool`, optional, default True
        If false, raises an error if ``overwrite_dict`` contains a key that is
        not in ``default_dict``.

    Raises
    ------
    ValueError
        if ``allow_unknown_keys`` is False and ``overwrite_dict``
        has keys that are not in ``default_dict``.

    Returns
    -------
    updated_dict : `dict`
        Updated dictionary.
        Returns ``overwrite_dict``, with default values added
        based on ``default_dict``. Neither input is mutated.
    """
    if overwrite_dict is None:
        overwrite_dict = {}
    if not allow_unknown_keys:
        extra_keys = overwrite_dict.keys() - default_dict.keys()
        if extra_keys:
            raise ValueError(f"Unexpected key(s) found: {extra_keys}. "
                             f"The valid keys are: {default_dict.keys()}")
    # `{**a, **b}` keeps values from `b` on conflicts. Unlike `dict(a, **b)`,
    # it also supports dictionaries with non-string keys.
    return {**default_dict, **overwrite_dict}
def update_dictionaries(default_dict, overwrite_dicts=None, allow_unknown_keys=True):
    """Adds default key-value pairs to one or many overwrite dictionaries.

    Merges ``default_dict`` into ``overwrite_dicts`` via `update_dictionary`,
    preferring the overwrite values if there are conflicts.
    When ``overwrite_dicts`` is a list or tuple of dictionaries,
    the merge is applied to each element and a list is returned.

    Parameters
    ----------
    default_dict: `dict`
        Dictionary of default values.
    overwrite_dicts: `dict` or None or `list` [`dict` or None], optional, default None
        User-provided dictionary that overrides the defaults,
        or a list of such dictionaries.
    allow_unknown_keys: `bool`, optional, default True
        If false, raises an error if ``overwrite_dicts`` contains a key that is
        not in ``default_dict``.

    Returns
    -------
    updated_dict : `dict` or `list` [`dict`]
        ``overwrite_dicts`` with default values added to each dictionary
        based on ``default_dict``.
    """
    if not isinstance(overwrite_dicts, (list, tuple)):
        return update_dictionary(
            default_dict,
            overwrite_dict=overwrite_dicts,
            allow_unknown_keys=allow_unknown_keys)
    return [
        update_dictionary(
            default_dict,
            overwrite_dict=single_dict,
            allow_unknown_keys=allow_unknown_keys)
        for single_dict in overwrite_dicts]
@dataclasses.dataclass
class ModelComponentsParam:
    """Parameters to tune the model.

    Declared as a dataclass so keyword construction
    (``ModelComponentsParam(seasonality=...)``, used by `from_dict`)
    and ``dataclasses.replace`` (used by `apply_default_model_components`)
    work; every field defaults to None.
    """
    autoregression: Optional[Dict[str, Any]] = None
    """For modeling autoregression, see template for details"""
    changepoints: Optional[Dict[str, Any]] = None
    """For modeling changepoints, see template for details"""
    custom: Optional[Dict[str, Any]] = None
    """Additional parameters used by template, see template for details"""
    events: Optional[Dict[str, Any]] = None
    """For modeling events, see template for details"""
    growth: Optional[Dict[str, Any]] = None
    """For modeling growth (trend), see template for details"""
    hyperparameter_override: Optional[Union[Dict, List[Optional[Dict]]]] = None
    """After the above model components are used to create a hyperparameter grid,
    the result is updated by this dictionary, to create new keys or override existing ones.
    Allows for complete customization of the grid search.
    """
    regressors: Optional[Dict[str, Any]] = None
    """For modeling regressors, see template for details"""
    lagged_regressors: Optional[Dict[str, Any]] = None
    """For modeling lagged regressors, see template for details"""
    seasonality: Optional[Dict[str, Any]] = None
    """For modeling seasonality, see template for details"""
    uncertainty: Optional[Dict[str, Any]] = None
    """For modeling uncertainty, see template for details"""

    @staticmethod
    def from_dict(obj: Any) -> 'ModelComponentsParam':
        """Builds a `ModelComponentsParam` from a plain `dict` (e.g. parsed JSON).

        Static method: inside the body, ``from_dict`` refers to the
        module-level deserialization helper, not this method.
        """
        assert isinstance(obj, dict)
        autoregression = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("autoregression"))
        changepoints = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("changepoints"))
        custom = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("custom"))
        events = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("events"))
        growth = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("growth"))
        hyperparameter_override = from_union([
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict_or_none(lambda x: x, x),
            from_none], obj.get("hyperparameter_override"))
        regressors = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("regressors"))
        lagged_regressors = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("lagged_regressors"))
        seasonality = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("seasonality"))
        uncertainty = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("uncertainty"))
        return ModelComponentsParam(
            autoregression=autoregression,
            changepoints=changepoints,
            custom=custom,
            events=events,
            growth=growth,
            hyperparameter_override=hyperparameter_override,
            regressors=regressors,
            lagged_regressors=lagged_regressors,
            seasonality=seasonality,
            uncertainty=uncertainty)

    def to_dict(self) -> dict:
        """Returns the configuration as a plain `dict` (e.g. for JSON output)."""
        result: dict = {}
        result["autoregression"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.autoregression)
        result["changepoints"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.changepoints)
        result["custom"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.custom)
        result["events"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.events)
        result["growth"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.growth)
        result["hyperparameter_override"] = from_union([
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict_or_none(lambda x: x, x),
            from_none], self.hyperparameter_override)
        result["regressors"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.regressors)
        result["lagged_regressors"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.lagged_regressors)
        result["seasonality"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.seasonality)
        result["uncertainty"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.uncertainty)
        return result
The provided code snippet includes necessary dependencies for implementing the `apply_default_model_components` function. Write a Python function `def apply_default_model_components( model_components=None, time_properties=None)` to solve the following problem:
Sets default values for ``model_components``. Parameters ---------- model_components : :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` or None, default None Configuration of model growth, seasonality, events, etc. See :func:`~greykite.framework.templates.silverkite_templates.silverkite_template` for details. time_properties : `dict` [`str`, `any`] or None, default None Time properties dictionary (likely produced by `~greykite.common.time_properties_forecast.get_forecast_time_properties`) with keys: ``"period"`` : `int` Period of each observation (i.e. minimum time between observations, in seconds). ``"simple_freq"`` : `SimpleTimeFrequencyEnum` ``SimpleTimeFrequencyEnum`` member corresponding to data frequency. ``"num_training_points"`` : `int` Number of observations for training. ``"num_training_days"`` : `int` Number of days for training. ``"start_year"`` : `int` Start year of the training period. ``"end_year"`` : `int` End year of the forecast period. ``"origin_for_time_vars"`` : `float` Continuous time representation of the first date in ``df``. Returns ------- model_components : :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` The provided ``model_components`` with default values set
Here is the function:
def apply_default_model_components(
        model_components=None,
        time_properties=None):
    """Sets default values for ``model_components``.

    Each section of the configuration (seasonality, growth, events,
    changepoints, autoregression, regressors, lagged regressors, uncertainty,
    custom) is merged with its defaults, preferring user-provided values
    on conflicts. Unknown keys in a section raise an error.

    Parameters
    ----------
    model_components : :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` or None, default None
        Configuration of model growth, seasonality, events, etc.
        See :func:`~greykite.framework.templates.silverkite_templates.silverkite_template` for details.
    time_properties : `dict` [`str`, `any`] or None, default None
        Time properties dictionary (likely produced by
        `~greykite.common.time_properties_forecast.get_forecast_time_properties`)
        with keys:

            ``"period"`` : `int`
                Period of each observation (i.e. minimum time between observations, in seconds).
            ``"simple_freq"`` : `SimpleTimeFrequencyEnum`
                ``SimpleTimeFrequencyEnum`` member corresponding to data frequency.
            ``"num_training_points"`` : `int`
                Number of observations for training.
            ``"num_training_days"`` : `int`
                Number of days for training.
            ``"start_year"`` : `int`
                Start year of the training period.
            ``"end_year"`` : `int`
                End year of the forecast period.
            ``"origin_for_time_vars"`` : `float`
                Continuous time representation of the first date in ``df``.

    Returns
    -------
    model_components : :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam`
        The provided ``model_components`` with default values set
    """
    if model_components is None:
        model_components = ModelComponentsParam()
    else:
        # Copies the input so the caller's object is never mutated.
        model_components = dataclasses.replace(model_components)

    # The same time origin for every split, based on start year of full dataset.
    # To use first date of each training split, set to `None` in model_components.
    origin_for_time_vars = (
        time_properties.get("origin_for_time_vars")
        if time_properties is not None else None)

    section_defaults = {
        "seasonality": {
            "fs_components_df": [pd.DataFrame({
                "name": [
                    TimeFeaturesEnum.tod.value,
                    TimeFeaturesEnum.tow.value,
                    TimeFeaturesEnum.tom.value,
                    TimeFeaturesEnum.toq.value,
                    TimeFeaturesEnum.toy.value],
                "period": [24.0, 7.0, 1.0, 1.0, 1.0],
                "order": [3, 3, 1, 1, 5],
                "seas_names": ["daily", "weekly", "monthly", "quarterly", "yearly"]})],
        },
        # `growth` must be empty.
        # Pass growth terms via `extra_pred_cols` instead.
        "growth": {},
        "events": {
            "daily_event_df_dict": [None],
            "daily_event_neighbor_impact": [None],
            "daily_event_shifted_effect": [None]
        },
        "changepoints": {
            "changepoints_dict": [None],
            "seasonality_changepoints_dict": [None],
            # `changepoint_detector` is not allowed, to prevent leaking future
            # information into the past. Pass `changepoints_dict` with
            # method="auto" for automatic detection.
        },
        "autoregression": {
            "autoreg_dict": [None],
            "simulation_num": [10],
            "fast_simulation": [False]
        },
        "regressors": {},
        "lagged_regressors": {
            "lagged_regressor_dict": [None],
        },
        "uncertainty": {
            "uncertainty_dict": [None],
        },
        "custom": {
            "silverkite": [SilverkiteForecast()],  # NB: sklearn creates a copy in grid search
            "origin_for_time_vars": [origin_for_time_vars],
            "extra_pred_cols": [TimeFeaturesEnum.ct1.value],  # linear growth
            "drop_pred_cols": [None],
            "explicit_pred_cols": [None],
            "fit_algorithm_dict": [{
                "fit_algorithm": "linear",
                "fit_algorithm_params": None,
            }],
            "min_admissible_value": [None],
            "max_admissible_value": [None],
            "regression_weight_col": [None],
            "normalize_method": [None],
            "remove_intercept": [False]
        },
    }
    # Merges each section with its defaults; unknown keys fail fast so typos
    # in user configuration are caught here.
    for section, defaults in section_defaults.items():
        setattr(model_components, section, update_dictionary(
            defaults,
            overwrite_dict=getattr(model_components, section),
            allow_unknown_keys=False))

    # Sets to {} if None, for each item if
    # `model_components.hyperparameter_override` is a list of dictionaries.
    model_components.hyperparameter_override = update_dictionaries(
        {},
        overwrite_dicts=model_components.hyperparameter_override)
    return model_components
167,483 | import inspect
import os
import shutil
from collections import OrderedDict
import dill
from patsy.design_info import DesignInfo
The provided code snippet includes necessary dependencies for implementing the `recursive_rm_dir` function. Write a Python function `def recursive_rm_dir(dir_name)` to solve the following problem:
Recursively removes dirs and files in ``dir_name``. This function removes everything in ``dir_name`` that it has permission to remove. This function is intended to remove the dumped directory. Do not use this function to remove other directories, unless you are sure to remove everything in the directory. Parameters ---------- dir_name : `str` The directory name to be removed. Returns ------- The function removes the directory from the local file system and does not return anything.
Here is the function:
def recursive_rm_dir(dir_name):
    """Recursively removes dirs and files in ``dir_name``.

    This function removes everything in ``dir_name`` that it has permission
    to remove. It is intended to remove the dumped directory.
    Do not use this function to remove other directories,
    unless you are sure to remove everything in the directory.

    Parameters
    ----------
    dir_name : `str`
        The directory name to be removed. May also be a plain file,
        in which case the file itself is removed.

    Returns
    -------
    The function removes the directory from the local file system
    and does not return anything.
    """
    if os.path.isdir(dir_name):
        # Depth-first: remove all children first, then the now-empty directory.
        # (A plain `for` loop is used instead of a side-effect-only comprehension.)
        for entry in os.listdir(dir_name):
            recursive_rm_dir(os.path.join(dir_name, entry))
        os.rmdir(dir_name)
    else:
        # `dir_name` is a regular file (or symlink), remove it directly.
        os.remove(dir_name)
167,484 | import inspect
import os
import shutil
from collections import OrderedDict
import dill
from patsy.design_info import DesignInfo
The provided code snippet includes necessary dependencies for implementing the `dump_obj` function. Write a Python function `def dump_obj( obj, dir_name, obj_name="obj", dump_design_info=True, overwrite_exist_dir=False, top_level=True)` to solve the following problem:
Uses DFS to recursively dump an object to pickle files. Originally intended for dumping the `~greykite.framework.pipeline.pipeline.ForecastResult` instance, but could potentially used for other objects. For each object, if it's picklable, a file with {object_name}.pkl will be generated, otherwise, depending on its type, a {object_name}.type file will be generated storing it's type, and a folder with {object_name} will be generated to store each of its elements/attributes. For example, if the folder to store results is forecast_result, the items in the folders could be: - timeseries.pkl: a picklable item. - model.type: model is not picklable, this file includes the class (Pipeline) - model: this folder includes the elements in model. - forecast.type: forecast is not picklable, this file includes the class (UnivariateForecast) - forecast: this folder includes the elements in forecast. - backtest.type: backtest is not picklable, this file includes the class (UnivariateForecast) - backtest: this folder includes the elements in backtest. - grid_search.type: grid_search is not picklable, this file includes the class (GridSearchCV) - grid_search: this folder includes the elements in grid_search. The items in each subfolder follows the same rule. The current supported recursion types are: - list/tuple: type name is "list" or "tuple", each element is attempted to be pickled independently if the entire list/tuple is not picklable. The order is preserved. - OrderedDict: type name is "ordered_dict", each key and value are attempted to be pickled independently if the entire dict is not picklable. The order is preserved. - dict: type name is "dict", each key and value are attempted to be pickled independently if the entire dict is not picklable. The order is not preserved. - class instance: type name is the class object, used to create new instance. Each attribute is attempted to be pickled independently if the entire instance is not picklable. 
Parameters ---------- obj : `object` The object to be pickled. dir_name : `str` The directory to store the pickled results. obj_name : `str`, default "obj" The name for the pickled items. Applies to the top level object only when recursion is used. dump_design_info : `bool`, default True Whether to dump the design info in `ForecastResult`. The design info is specifically for Silverkite and can be accessed from - ForecastResult.model[-1].model_dict["x_design_info"] - ForecastResult.forecast.estimator.model_dict["x_design_info"] - ForecastResult.backtest.estimator.model_dict["x_design_info"] The design info is a class from `patsy` and contains a significant amount of instances that can not be pickled directly. Recursively pickling them takes longer to run. If speed is important and you don't need these information, you can turn it off. overwrite_exist_dir : `bool`, default False If True and the directory in ``dir_name`` already exists, the existing directory will be removed. If False and the directory in ``dir_name`` already exists, an exception will be raised. top_level : `bool`, default True Whether the implementation is an initial call (applies to the root object you want to pickle, not a recursive call). When you use this function to dump an object, this parameter should always be True. Only top level checks if the dir exists, because subsequent recursive calls may write files to the same directory, and the check for dir exists will not be implemented. Setting this parameter to False may cause problems. Returns ------- The function writes files to local directory and does not return anything.
Here is the function:
def _dump_pickle(obj, path):
    """Pickles ``obj`` to ``path``, guaranteeing the file handle is closed.

    Exceptions raised by `dill.dump` (e.g. ``NotImplementedError`` for
    unpicklable objects) propagate to the caller, but the handle is
    closed either way.
    """
    with open(path, "wb") as f:
        dill.dump(obj, f)


def dump_obj(
        obj,
        dir_name,
        obj_name="obj",
        dump_design_info=True,
        overwrite_exist_dir=False,
        top_level=True):
    """Uses DFS to recursively dump an object to pickle files.

    Originally intended for dumping the
    `~greykite.framework.pipeline.pipeline.ForecastResult` instance,
    but could potentially be used for other objects.
    For each object, if it's picklable, a file with {object_name}.pkl will be
    generated, otherwise, depending on its type, a {object_name}.type file will
    be generated storing its type, and a folder named {object_name} will be
    generated to store each of its elements/attributes.

    For example, if the folder to store results is forecast_result, the items in the
    folder could be:

        - timeseries.pkl: a picklable item.
        - model.type: model is not picklable, this file includes the class (Pipeline)
        - model: this folder includes the elements in model.
        - forecast.type: forecast is not picklable, this file includes the class (UnivariateForecast)
        - forecast: this folder includes the elements in forecast.
        - backtest.type: backtest is not picklable, this file includes the class (UnivariateForecast)
        - backtest: this folder includes the elements in backtest.
        - grid_search.type: grid_search is not picklable, this file includes the class (GridSearchCV)
        - grid_search: this folder includes the elements in grid_search.

    The items in each subfolder follow the same rule.
    The currently supported recursion types are:

        - list/tuple: type name is "list" or "tuple", each element is attempted to
          be pickled independently if the entire list/tuple is not picklable.
          The order is preserved.
        - OrderedDict: type name is "ordered_dict", each key and value are attempted
          to be pickled independently if the entire dict is not picklable.
          The order is preserved.
        - dict: type name is "dict", each key and value are attempted to be pickled
          independently if the entire dict is not picklable.
          The order is not preserved.
        - class instance: type name is the class object, used to create a new instance.
          Each attribute is attempted to be pickled independently if the entire
          instance is not picklable.

    Parameters
    ----------
    obj : `object`
        The object to be pickled.
    dir_name : `str`
        The directory to store the pickled results.
    obj_name : `str`, default "obj"
        The name for the pickled items. Applies to the top level object only
        when recursion is used.
    dump_design_info : `bool`, default True
        Whether to dump the design info in `ForecastResult`.
        The design info is specifically for Silverkite and can be accessed from

            - ForecastResult.model[-1].model_dict["x_design_info"]
            - ForecastResult.forecast.estimator.model_dict["x_design_info"]
            - ForecastResult.backtest.estimator.model_dict["x_design_info"]

        The design info is a class from `patsy` and contains a significant amount of
        instances that can not be pickled directly. Recursively pickling them takes
        longer to run. If speed is important and you don't need this information,
        you can turn it off.
    overwrite_exist_dir : `bool`, default False
        If True and the directory in ``dir_name`` already exists, the existing
        directory will be removed.
        If False and the directory in ``dir_name`` already exists, an exception
        will be raised.
    top_level : `bool`, default True
        Whether the implementation is an initial call
        (applies to the root object you want to pickle, not a recursive call).
        When you use this function to dump an object, this parameter should always be True.
        Only top level checks if the dir exists,
        because subsequent recursive calls may write files to the same directory,
        and the check for dir exists will not be implemented.
        Setting this parameter to False may cause problems.

    Returns
    -------
    The function writes files to a local directory and does not return anything.
    """
    # Skips the design info when requested: both the DesignInfo instance itself
    # and the "x_design_info" dict key that points to it are skipped.
    if (not dump_design_info) and (isinstance(obj, DesignInfo) or (isinstance(obj, str) and obj == "x_design_info")):
        return
    # Checks if directory already exists (top-level call only).
    if top_level:
        dir_already_exist = os.path.exists(dir_name)
        if dir_already_exist:
            if not overwrite_exist_dir:
                raise FileExistsError("The directory already exists. "
                                      "Please either specify a new directory or "
                                      "set overwrite_exist_dir to True to overwrite it.")
            else:
                if os.path.isdir(dir_name):
                    # dir exists as a directory.
                    shutil.rmtree(dir_name)
                else:
                    # dir exists as a file.
                    os.remove(dir_name)
    # Creates the directory.
    # Non top-level calls may write to the same directory,
    # so we allow an existing directory in this case.
    try:
        os.mkdir(dir_name)
    except FileExistsError:
        pass
    # Starts dumping recursively.
    try:
        # Attempts to directly dump the object.
        _dump_pickle(obj, os.path.join(dir_name, f"{obj_name}.pkl"))
    except NotImplementedError:
        # Direct dumping fails.
        # Removes the partially written file, if any.
        try:
            os.remove(os.path.join(dir_name, f"{obj_name}.pkl"))
        except FileNotFoundError:
            pass
        # Attempts recursive dumping depending on the object type.
        if isinstance(obj, OrderedDict):
            # For OrderedDict (there are a lot in `patsy.design_info.DesignInfo`),
            # recursively dumps the keys and values, because keys can be class
            # instances and unpicklable, too.
            # The keys and values have an index number appended to the front,
            # so the order is kept.
            _dump_pickle(
                "ordered_dict",
                os.path.join(dir_name, f"{obj_name}.type"))  # type "ordered_dict"
            for i, (key, value) in enumerate(obj.items()):
                name = f"{i}_{str(key)}"
                dump_obj(
                    key,
                    os.path.join(dir_name, obj_name),
                    f"{name}__key__",
                    dump_design_info=dump_design_info,
                    top_level=False)
                dump_obj(
                    value,
                    os.path.join(dir_name, obj_name),
                    f"{name}__value__",
                    dump_design_info=dump_design_info,
                    top_level=False)
        elif isinstance(obj, dict):
            # For a regular dictionary,
            # recursively dumps the keys and values, because keys can be class
            # instances and unpicklable, too.
            # The order is not important.
            _dump_pickle(
                "dict",
                os.path.join(dir_name, f"{obj_name}.type"))  # type "dict"
            for key, value in obj.items():
                name = str(key)
                dump_obj(
                    key,
                    os.path.join(dir_name, obj_name),
                    f"{name}__key__",
                    dump_design_info=dump_design_info,
                    top_level=False)
                dump_obj(
                    value,
                    os.path.join(dir_name, obj_name),
                    f"{name}__value__",
                    dump_design_info=dump_design_info,
                    top_level=False)
        elif isinstance(obj, (list, tuple)):
            # For lists and tuples,
            # recursively dumps the elements.
            # The names have an index number appended to the front,
            # so the order is kept.
            _dump_pickle(
                type(obj).__name__,
                os.path.join(dir_name, f"{obj_name}.type"))  # type "list"/"tuple"
            for i, value in enumerate(obj):
                dump_obj(
                    value,
                    os.path.join(dir_name, obj_name),
                    f"{i}_key",
                    dump_design_info=dump_design_info,
                    top_level=False)
        elif hasattr(obj, "__class__") and not isinstance(obj, type):
            # For a class instance,
            # recursively dumps the attributes.
            _dump_pickle(
                obj.__class__,
                os.path.join(dir_name, f"{obj_name}.type"))  # type is class itself
            for key, value in obj.__dict__.items():
                dump_obj(
                    value,
                    os.path.join(dir_name, obj_name),
                    key,
                    dump_design_info=dump_design_info,
                    top_level=False)
        else:
            # Other unrecognized unpicklable types, not common.
            print(f"I Don't recognize type {type(obj)}")
Each attribute is attempted to be pickled independently if the entire instance is not picklable. Parameters ---------- obj : `object` The object to be pickled. dir_name : `str` The directory to store the pickled results. obj_name : `str`, default "obj" The name for the pickled items. Applies to the top level object only when recursion is used. dump_design_info : `bool`, default True Whether to dump the design info in `ForecastResult`. The design info is specifically for Silverkite and can be accessed from - ForecastResult.model[-1].model_dict["x_design_info"] - ForecastResult.forecast.estimator.model_dict["x_design_info"] - ForecastResult.backtest.estimator.model_dict["x_design_info"] The design info is a class from `patsy` and contains a significant amount of instances that can not be pickled directly. Recursively pickling them takes longer to run. If speed is important and you don't need these information, you can turn it off. overwrite_exist_dir : `bool`, default False If True and the directory in ``dir_name`` already exists, the existing directory will be removed. If False and the directory in ``dir_name`` already exists, an exception will be raised. top_level : `bool`, default True Whether the implementation is an initial call (applies to the root object you want to pickle, not a recursive call). When you use this function to dump an object, this parameter should always be True. Only top level checks if the dir exists, because subsequent recursive calls may write files to the same directory, and the check for dir exists will not be implemented. Setting this parameter to False may cause problems. Returns ------- The function writes files to local directory and does not return anything. |
167,485 | import inspect
import os
import shutil
from collections import OrderedDict
import dill
from patsy.design_info import DesignInfo
The provided code snippet includes necessary dependencies for implementing the `load_obj` function. Write a Python function `def load_obj( dir_name, obj=None, load_design_info=True)` to solve the following problem:
Loads the pickled files which are pickled by `~greykite.framework.templates.pickle_utils.dump_obj`. Originally intended for loading the `~greykite.framework.pipeline.pipeline.ForecastResult` instance, but could potentially used for other objects. Parameters ---------- dir_name : `str` The directory that stores the pickled files. Must be the top level dir when having nested pickling results. obj : `object`, default None The object type for the next-level files. Can be one of "list", "tuple", "dict", "ordered_dict" or a class. load_design_info : `bool`, default True Whether to load the design info in `ForecastResult`. The design info is specifically for Silverkite and can be accessed from - ForecastResult.model[-1].model_dict["x_design_info"] - ForecastResult.forecast.estimator.model_dict["x_design_info"] - ForecastResult.backtest.estimator.model_dict["x_design_info"] The design info is a class from `patsy` and contains a significant amount of instances that can not be pickled directly. Recursively loading them takes longer to run. If speed is important and you don't need these information, you can turn it off. Returns ------- result : `object` The loaded object from the pickled files.
Here is the function:
def _load_pickle(path):
    """Unpickles the file at ``path``, guaranteeing the file handle is closed."""
    with open(path, "rb") as f:
        return dill.load(f)


def _load_mapping(dir_name, elements, obj_types, load_design_info, ordered):
    """Loads a dumped (ordered) dictionary from ``dir_name``.

    Shared implementation for the "dict" and "ordered_dict" cases of
    `load_obj`. Iterates through the ``__key__`` entries in ``elements``,
    loads each key and its matching ``__value__`` entry (either a ``.pkl``
    file or a sub-directory), and collects them into a mapping.

    Parameters
    ----------
    dir_name : `str`
        The directory that stores the dumped key/value entries.
    elements : `list` [`str`]
        The ``.pkl`` file names and sub-directory names in ``dir_name``,
        already sorted by the caller if order matters.
    obj_types : `dict` [`str`, `object`]
        Loaded ``.type`` entries, keyed by element name.
    load_design_info : `bool`
        Passed through to recursive `load_obj` calls.
    ordered : `bool`
        If True, the result is an `OrderedDict`; otherwise a plain `dict`.

    Returns
    -------
    result : `dict` or `collections.OrderedDict`
        The reconstructed mapping.
    """
    result = OrderedDict() if ordered else {}
    keys = [element for element in elements if "__key__" in element]
    values = [element for element in elements if "__value__" in element]
    # Iterates through keys and finds the corresponding values.
    for element in keys:
        if ".pkl" in element:
            key = _load_pickle(os.path.join(dir_name, element))
        else:
            key = load_obj(
                os.path.join(dir_name, element),
                obj_types[element],
                load_design_info=load_design_info)
        # The value name could be either a .pkl file or a directory.
        value_name = element.replace("__key__", "__value__")
        if ".pkl" in value_name:
            value_name_alt = value_name.replace(".pkl", "")
        else:
            value_name_alt = value_name + ".pkl"
        # Checks if the value name is in the dir.
        if (value_name not in values) and (value_name_alt not in values):
            raise FileNotFoundError(f"Value not found for key {key}.")
        value_name = value_name if value_name in values else value_name_alt
        # Gets the value.
        if ".pkl" in value_name:
            value = _load_pickle(os.path.join(dir_name, value_name))
        else:
            value = load_obj(
                os.path.join(dir_name, value_name),
                obj_types[value_name],
                load_design_info=load_design_info)
        # Sets the key, value pair.
        result[key] = value
    return result


def load_obj(
        dir_name,
        obj=None,
        load_design_info=True):
    """Loads the pickled files which are pickled by
    `~greykite.framework.templates.pickle_utils.dump_obj`.

    Originally intended for loading the
    `~greykite.framework.pipeline.pipeline.ForecastResult` instance,
    but could potentially be used for other objects.

    Parameters
    ----------
    dir_name : `str`
        The directory that stores the pickled files.
        Must be the top level dir when having nested pickling results.
    obj : `object`, default None
        The object type for the next-level files.
        Can be one of "list", "tuple", "dict", "ordered_dict" or a class.
        None indicates the top-level call.
    load_design_info : `bool`, default True
        Whether to load the design info in `ForecastResult`.
        The design info is specifically for Silverkite and can be accessed from

            - ForecastResult.model[-1].model_dict["x_design_info"]
            - ForecastResult.forecast.estimator.model_dict["x_design_info"]
            - ForecastResult.backtest.estimator.model_dict["x_design_info"]

        The design info is a class from `patsy` and contains a significant amount of
        instances that can not be pickled directly. Recursively loading them takes
        longer to run. If speed is important and you don't need this information,
        you can turn it off.

    Returns
    -------
    result : `object`
        The loaded object from the pickled files.
    """
    # Skips the design info when requested.
    if (not load_design_info) and (isinstance(obj, type) and obj == DesignInfo):
        return None
    # Gets file names in the level.
    files = os.listdir(dir_name)
    if not files:
        raise ValueError("dir is empty!")
    # Gets the type files if any.
    # Stores them in a dictionary keyed by name, with the loaded value.
    obj_types = {file.split(".")[0]: _load_pickle(os.path.join(dir_name, file))
                 for file in files if ".type" in file}
    # Gets directories and pickled files.
    # Every type must have a directory with the same name.
    directories = [file for file in files if os.path.isdir(os.path.join(dir_name, file))]
    if not all([directory in obj_types for directory in directories]):
        raise ValueError("type and directories do not match.")
    pickles = [file for file in files if ".pkl" in file]
    # Starts loading objects.
    if obj is None:
        # obj is None indicates this is the top level directory.
        # This directory can either have 1 .pkl file, or 1 .type file
        # associated with the directory of the same name.
        if not obj_types:
            # The only 1 .pkl file case.
            if len(files) > 1:
                raise ValueError("Multiple elements found in top level.")
            return _load_pickle(os.path.join(dir_name, files[0]))
        else:
            # The .type + dir case.
            if len(obj_types) > 1:
                raise ValueError("Multiple elements found in top level")
            obj_name = list(obj_types.keys())[0]
            obj_type = obj_types[obj_name]
            return load_obj(
                os.path.join(dir_name, obj_name),
                obj_type,
                load_design_info=load_design_info)
    else:
        # If obj is not None, does recursive loading depending on the obj type.
        if obj in ("list", "tuple"):
            # Object is a list or tuple.
            # Fetches each element according to the number index to preserve order.
            result = []
            # The order index is a number appended to the front.
            elements = sorted(
                pickles + directories,
                key=lambda x: int(x.split("_")[0]))
            # Recursively loads elements.
            for element in elements:
                if ".pkl" in element:
                    result.append(_load_pickle(os.path.join(dir_name, element)))
                else:
                    result.append(
                        load_obj(
                            os.path.join(dir_name, element),
                            obj_types[element],
                            load_design_info=load_design_info))
            if obj == "tuple":
                result = tuple(result)
            return result
        elif obj == "dict":
            # Object is a dictionary; order is not important.
            return _load_mapping(
                dir_name,
                pickles + directories,
                obj_types,
                load_design_info,
                ordered=False)
        elif obj == "ordered_dict":
            # Object is an OrderedDict; the order index is a number
            # appended to the front of each element name.
            elements = sorted(pickles + directories, key=lambda x: int(x.split("_")[0]))
            return _load_mapping(
                dir_name,
                elements,
                obj_types,
                load_design_info,
                ordered=True)
        elif inspect.isclass(obj):
            # Object is a class instance.
            # Creates the class instance and sets the attributes.
            # Some classes have required args during initialization;
            # these args are pulled from attributes.
            init_params = list(inspect.signature(obj.__init__).parameters)  # init args
            elements = pickles + directories
            # Gets the attribute names and their values in a dictionary.
            values = {}
            for element in elements:
                if ".pkl" in element:
                    values[element.split(".")[0]] = _load_pickle(
                        os.path.join(dir_name, element))
                else:
                    values[element] = load_obj(
                        os.path.join(dir_name, element),
                        obj_types[element],
                        load_design_info=load_design_info)
            # Gets the init args from values.
            init_dict = {key: value for key, value in values.items()
                         if key in init_params}
            # Some attributes have a "_" at the beginning.
            init_dict.update({key[1:]: value for key, value in values.items()
                              if (key[1:] in init_params and key[0] == "_")})
            # ``design_info`` does not have a column_names attribute,
            # which is required during init.
            # The column_names param is pulled from the column_name_indexes attribute.
            # This can be omitted once we allow dumping @property attributes.
            if "column_names" in init_params:
                init_dict["column_names"] = values["column_name_indexes"].keys()
            # Creates the instance.
            result = obj(**init_dict)
            # Sets the attributes.
            for key, value in values.items():
                setattr(result, key, value)
            return result
        else:
            # Raises an error if the object is not recognized.
            # This typically does not happen when the source file is dumped
            # with the `dump_obj` function.
            raise ValueError(f"Object {obj} is not recognized.")
167,486 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_int(x: Any) -> int:
    """Assert that ``x`` is a plain int (bools are rejected) and return it unchanged."""
    # bool is a subclass of int, so it must be excluded explicitly.
    is_plain_int = isinstance(x, int) and not isinstance(x, bool)
    assert is_plain_int
    return x
167,487 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_none(x: Any) -> Any:
    """Pass ``x`` through after asserting it is None."""
    assert x is None
    return None
167,488 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_union(fs, x):
for f in fs:
try:
return f(x)
except:
pass
assert False | null |
167,489 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
T = TypeVar("T")
The provided code snippet includes necessary dependencies for implementing the `from_list_dict` function. Write a Python function `def from_list_dict(f: Callable[[Any], T], x: Any) -> List[Dict[str, T]]` to solve the following problem:
Parses list of dictionaries, applying `f` to the dictionary values. All items must be dictionaries.
Here is the function:
def from_list_dict(f: Callable[[Any], T], x: Any) -> List[Dict[str, T]]:
    """Parses list of dictionaries, applying `f` to the dictionary values.
    All items must be dictionaries.
    """
    assert isinstance(x, list)
    for d in x:
        assert isinstance(d, dict)
    return [{key: f(value) for key, value in d.items()} for d in x]
167,490 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
T = TypeVar("T")
The provided code snippet includes necessary dependencies for implementing the `from_list_dict_or_none` function. Write a Python function `def from_list_dict_or_none(f: Callable[[Any], T], x: Any) -> List[Optional[Dict[str, T]]]` to solve the following problem:
Parses list of dictionaries or None elements, applying `f` to the dictionary values. If an element in the list is None, it is returned directly.
Here is the function:
def from_list_dict_or_none(f: Callable[[Any], T], x: Any) -> List[Optional[Dict[str, T]]]:
    """Parses list of dictionaries or None elements, applying `f` to the dictionary values.
    If an element in the list is None, it is returned directly.
    """
    assert isinstance(x, list)
    for d in x:
        assert d is None or isinstance(d, dict)
    return [
        {key: f(value) for key, value in d.items()} if isinstance(d, dict) else d
        for d in x
    ]
167,491 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_str(x: Any) -> str:
    """Assert that ``x`` is a string and return it unchanged."""
    is_text = isinstance(x, str)
    assert is_text
    return x
167,492 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_list_int(x: Any) -> List[int]:
    """Assert that ``x`` is a list of plain ints and return it unchanged.

    Bools are rejected even though `bool` is a subclass of `int`,
    for consistency with `from_int` and `from_list_float`.
    """
    assert isinstance(x, list)
    assert all(isinstance(item, int) and not isinstance(item, bool) for item in x)
    return x
167,493 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_float(x: Any) -> float:
    """Parses a real number (int or float, but not bool) into a float."""
    assert isinstance(x, (float, int))
    assert not isinstance(x, bool)
    return float(x)
167,494 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def to_float(x: Any) -> float:
    """Serializes a float: asserts ``x`` is exactly a ``float`` and returns it."""
    assert isinstance(x, float)
    return x
167,495 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_bool(x: Any) -> bool:
    """Validates that ``x`` is a boolean and returns it unchanged."""
    assert isinstance(x, bool)
    return x
167,496 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
The provided code snippet includes necessary dependencies for implementing the `from_list_float` function. Write a Python function `def from_list_float(x: Any) -> List[float]` to solve the following problem:
Parses a list of floats
Here is the function:
def from_list_float(x: Any) -> List[float]:
    """Parses a list of real numbers (ints or floats, but not bools).

    The list itself is returned unchanged; elements are not coerced to float.
    """
    assert isinstance(x, list)
    for item in x:
        assert isinstance(item, (float, int)) and not isinstance(item, bool)
    return x
167,497 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def from_list_str(x: Any) -> List[str]:
    """Validates that ``x`` is a list of strings and returns it unchanged."""
    assert isinstance(x, list)
    for item in x:
        assert isinstance(item, str)
    return x
The provided code snippet includes necessary dependencies for implementing the `from_list_list_str` function. Write a Python function `def from_list_list_str(x: Any) -> List[List[str]]` to solve the following problem:
Parses a list that contains lists of strings
Here is the function:
def from_list_list_str(x: Any) -> List[List[str]]:
    """Parses a list that contains lists of strings.

    Fixes a truthiness bug in the original implementation:
    ``all(from_list_str(item) for item in x)`` rejected any *empty* inner
    list, because an empty list is falsy even though it is a perfectly valid
    list of strings. The check below validates structure explicitly instead
    of relying on the truthiness of the validated value.
    """
    assert isinstance(x, list)
    assert all(
        isinstance(item, list) and all(isinstance(s, str) for s in item)
        for item in x)
    return x
167,498 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
# NOTE(review): the lines below are signature-only stubs left over from dataset
# extraction -- the bodies of this `from_dict` helper and of the `ForecastConfig`
# methods are not present in this chunk, so this region is not runnable as-is.
# The full definitions appear later in the file.
def from_dict(f: Callable[[Any], T], x: Any) -> Dict[str, T]:
class ForecastConfig:
    def from_dict(obj: Any) -> 'ForecastConfig':
    def to_dict(self) -> dict:
    def from_json(obj: Any) -> 'ForecastConfig':
# Thin wrapper: parses a plain dict into a `ForecastConfig` via its `from_dict` parser.
def forecast_config_from_dict(s: Any) -> ForecastConfig:
    return ForecastConfig.from_dict(s)
167,499 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
def to_class(c: Type[T], x: Any) -> dict:
    """Serializes ``x`` to a dict via its ``to_dict`` method,
    asserting first that it is an instance of ``c``.
    """
    assert isinstance(x, c)
    instance = cast(Any, x)
    return instance.to_dict()
@dataclass
class ForecastConfig:
    """Config for providing parameters to the Forecast library.

    NOTE(review): the ``@dataclass`` decorator is required here --
    ``from_dict`` constructs the class with one keyword argument per field,
    which relies on the generated ``__init__``.
    """
    computation_param: Optional[ComputationParam] = None
    """How to compute the result. See
    :class:`~greykite.framework.templates.autogen.forecast_config.ComputationParam`.
    """
    coverage: Optional[float] = None
    """Intended coverage of the prediction bands (0.0 to 1.0).
    If None, the upper/lower predictions are not returned.
    """
    evaluation_metric_param: Optional[EvaluationMetricParam] = None
    """What metrics to evaluate. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`.
    """
    evaluation_period_param: Optional[EvaluationPeriodParam] = None
    """How to split data for evaluation. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationPeriodParam`.
    """
    forecast_horizon: Optional[int] = None
    """Number of periods to forecast into the future. Must be > 0.
    If None, default is determined from input data frequency.
    """
    forecast_one_by_one: Optional[Union[bool, int, List[int]]] = None
    """The options to activate the forecast one-by-one algorithm.
    See :class:`~greykite.sklearn.estimator.one_by_one_estimator.OneByOneEstimator`.
    Can be boolean, int, or list of int.
    If int, it has to be less than or equal to the forecast horizon.
    If list of int, the sum has to be the forecast horizon.
    """
    metadata_param: Optional[MetadataParam] = None
    """Information about the input data. See
    :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam`.
    """
    model_components_param: Optional[Union[ModelComponentsParam, List[Optional[ModelComponentsParam]]]] = None
    """Parameters to tune the model. Typically a single ModelComponentsParam, but the `SimpleSilverkiteTemplate`
    template also allows a list of ModelComponentsParam for grid search. A single ModelComponentsParam
    corresponds to one grid, and a list corresponds to a list of grids.
    See :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam`.
    """
    model_template: Optional[Union[str, dataclass, List[Union[str, dataclass]]]] = None
    """Name of the model template. Typically a single string, but the `SimpleSilverkiteTemplate`
    template also allows a list of string for grid search.
    See :class:`~greykite.framework.templates.model_templates.ModelTemplateEnum`
    for valid names.
    """
    def from_dict(obj: Any) -> 'ForecastConfig':
        """Builds a `ForecastConfig` from a plain dict.

        NOTE(review): intentionally not a ``@staticmethod`` in the original;
        call it on the class, e.g. ``ForecastConfig.from_dict(d)``.
        """
        assert isinstance(obj, dict)
        computation_param = from_union([ComputationParam.from_dict, from_none], obj.get("computation_param"))
        coverage = from_union([from_float, from_none], obj.get("coverage"))
        evaluation_metric_param = from_union([EvaluationMetricParam.from_dict, from_none], obj.get("evaluation_metric_param"))
        evaluation_period_param = from_union([EvaluationPeriodParam.from_dict, from_none], obj.get("evaluation_period_param"))
        forecast_horizon = from_union([from_int, from_none], obj.get("forecast_horizon"))
        forecast_one_by_one = from_union([from_int, from_bool, from_none, from_list_int], obj.get("forecast_one_by_one"))
        metadata_param = from_union([MetadataParam.from_dict, from_none], obj.get("metadata_param"))
        # These two fields may be a scalar or a list (the list form is used for
        # grid search); when given a list, parse each element separately.
        if not isinstance(obj.get("model_components_param"), list):
            model_components_param = from_union([ModelComponentsParam.from_dict, from_none], obj.get("model_components_param"))
        else:
            model_components_param = [from_union([ModelComponentsParam.from_dict, from_none], mcp) for mcp in obj.get("model_components_param")]
        if not isinstance(obj.get("model_template"), list):
            model_template = from_union([from_str, from_none], obj.get("model_template"))
        else:
            model_template = [from_union([from_str, from_none], mt) for mt in obj.get("model_template")]
        return ForecastConfig(
            computation_param=computation_param,
            coverage=coverage,
            evaluation_metric_param=evaluation_metric_param,
            evaluation_period_param=evaluation_period_param,
            forecast_horizon=forecast_horizon,
            forecast_one_by_one=forecast_one_by_one,
            metadata_param=metadata_param,
            model_components_param=model_components_param,
            model_template=model_template)
    def to_dict(self) -> dict:
        """Serializes this config to a plain dict.

        Fix over the original: the scalar-to-list normalization of
        ``model_components_param`` and ``model_template`` is done on local
        variables, so serialization no longer mutates ``self`` as a side
        effect. The returned dict is unchanged.
        """
        result: dict = {}
        result["computation_param"] = from_union([lambda x: to_class(ComputationParam, x), from_none], self.computation_param)
        result["coverage"] = from_union([to_float, from_none], self.coverage)
        result["evaluation_metric_param"] = from_union([lambda x: to_class(EvaluationMetricParam, x), from_none], self.evaluation_metric_param)
        result["evaluation_period_param"] = from_union([lambda x: to_class(EvaluationPeriodParam, x), from_none], self.evaluation_period_param)
        result["forecast_horizon"] = from_union([from_int, from_none], self.forecast_horizon)
        result["forecast_one_by_one"] = from_union([from_int, from_bool, from_none, from_list_int], self.forecast_one_by_one)
        result["metadata_param"] = from_union([lambda x: to_class(MetadataParam, x), from_none], self.metadata_param)
        model_components_param = self.model_components_param
        if not isinstance(model_components_param, list):
            model_components_param = [model_components_param]
        result["model_components_param"] = [from_union([lambda x: to_class(ModelComponentsParam, x), from_none], mcp) for mcp in model_components_param]
        model_template = self.model_template
        if not isinstance(model_template, list):
            model_template = [model_template]
        result["model_template"] = [from_union([from_str, from_none], mt) for mt in model_template]
        return result
    def from_json(obj: Any) -> 'ForecastConfig':
        """Converts a json string to the corresponding instance of the `ForecastConfig` class.
        Raises ValueError if the input is not a json string.
        """
        try:
            forecast_dict = json.loads(obj)
        except Exception:
            raise ValueError(f"The input ({obj}) is not a json string.")
        return ForecastConfig.from_dict(forecast_dict)
def forecast_config_to_dict(x: ForecastConfig) -> Any:
    """Serializes a `ForecastConfig` instance into a plain dict."""
    return to_class(ForecastConfig, x)
167,500 | import json
from dataclasses import dataclass
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from greykite.common.python_utils import assert_equal
@dataclass
class ForecastConfig:
    """Config for providing parameters to the Forecast library.

    NOTE(review): the ``@dataclass`` decorator is required here --
    ``from_dict`` constructs the class with one keyword argument per field,
    which relies on the generated ``__init__``.
    """
    computation_param: Optional[ComputationParam] = None
    """How to compute the result. See
    :class:`~greykite.framework.templates.autogen.forecast_config.ComputationParam`.
    """
    coverage: Optional[float] = None
    """Intended coverage of the prediction bands (0.0 to 1.0).
    If None, the upper/lower predictions are not returned.
    """
    evaluation_metric_param: Optional[EvaluationMetricParam] = None
    """What metrics to evaluate. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`.
    """
    evaluation_period_param: Optional[EvaluationPeriodParam] = None
    """How to split data for evaluation. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationPeriodParam`.
    """
    forecast_horizon: Optional[int] = None
    """Number of periods to forecast into the future. Must be > 0.
    If None, default is determined from input data frequency.
    """
    forecast_one_by_one: Optional[Union[bool, int, List[int]]] = None
    """The options to activate the forecast one-by-one algorithm.
    See :class:`~greykite.sklearn.estimator.one_by_one_estimator.OneByOneEstimator`.
    Can be boolean, int, or list of int.
    If int, it has to be less than or equal to the forecast horizon.
    If list of int, the sum has to be the forecast horizon.
    """
    metadata_param: Optional[MetadataParam] = None
    """Information about the input data. See
    :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam`.
    """
    model_components_param: Optional[Union[ModelComponentsParam, List[Optional[ModelComponentsParam]]]] = None
    """Parameters to tune the model. Typically a single ModelComponentsParam, but the `SimpleSilverkiteTemplate`
    template also allows a list of ModelComponentsParam for grid search. A single ModelComponentsParam
    corresponds to one grid, and a list corresponds to a list of grids.
    See :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam`.
    """
    model_template: Optional[Union[str, dataclass, List[Union[str, dataclass]]]] = None
    """Name of the model template. Typically a single string, but the `SimpleSilverkiteTemplate`
    template also allows a list of string for grid search.
    See :class:`~greykite.framework.templates.model_templates.ModelTemplateEnum`
    for valid names.
    """
    def from_dict(obj: Any) -> 'ForecastConfig':
        """Builds a `ForecastConfig` from a plain dict.

        NOTE(review): intentionally not a ``@staticmethod`` in the original;
        call it on the class, e.g. ``ForecastConfig.from_dict(d)``.
        """
        assert isinstance(obj, dict)
        computation_param = from_union([ComputationParam.from_dict, from_none], obj.get("computation_param"))
        coverage = from_union([from_float, from_none], obj.get("coverage"))
        evaluation_metric_param = from_union([EvaluationMetricParam.from_dict, from_none], obj.get("evaluation_metric_param"))
        evaluation_period_param = from_union([EvaluationPeriodParam.from_dict, from_none], obj.get("evaluation_period_param"))
        forecast_horizon = from_union([from_int, from_none], obj.get("forecast_horizon"))
        forecast_one_by_one = from_union([from_int, from_bool, from_none, from_list_int], obj.get("forecast_one_by_one"))
        metadata_param = from_union([MetadataParam.from_dict, from_none], obj.get("metadata_param"))
        # These two fields may be a scalar or a list (the list form is used for
        # grid search); when given a list, parse each element separately.
        if not isinstance(obj.get("model_components_param"), list):
            model_components_param = from_union([ModelComponentsParam.from_dict, from_none], obj.get("model_components_param"))
        else:
            model_components_param = [from_union([ModelComponentsParam.from_dict, from_none], mcp) for mcp in obj.get("model_components_param")]
        if not isinstance(obj.get("model_template"), list):
            model_template = from_union([from_str, from_none], obj.get("model_template"))
        else:
            model_template = [from_union([from_str, from_none], mt) for mt in obj.get("model_template")]
        return ForecastConfig(
            computation_param=computation_param,
            coverage=coverage,
            evaluation_metric_param=evaluation_metric_param,
            evaluation_period_param=evaluation_period_param,
            forecast_horizon=forecast_horizon,
            forecast_one_by_one=forecast_one_by_one,
            metadata_param=metadata_param,
            model_components_param=model_components_param,
            model_template=model_template)
    def to_dict(self) -> dict:
        """Serializes this config to a plain dict.

        Fix over the original: the scalar-to-list normalization of
        ``model_components_param`` and ``model_template`` is done on local
        variables, so serialization no longer mutates ``self`` as a side
        effect. The returned dict is unchanged.
        """
        result: dict = {}
        result["computation_param"] = from_union([lambda x: to_class(ComputationParam, x), from_none], self.computation_param)
        result["coverage"] = from_union([to_float, from_none], self.coverage)
        result["evaluation_metric_param"] = from_union([lambda x: to_class(EvaluationMetricParam, x), from_none], self.evaluation_metric_param)
        result["evaluation_period_param"] = from_union([lambda x: to_class(EvaluationPeriodParam, x), from_none], self.evaluation_period_param)
        result["forecast_horizon"] = from_union([from_int, from_none], self.forecast_horizon)
        result["forecast_one_by_one"] = from_union([from_int, from_bool, from_none, from_list_int], self.forecast_one_by_one)
        result["metadata_param"] = from_union([lambda x: to_class(MetadataParam, x), from_none], self.metadata_param)
        model_components_param = self.model_components_param
        if not isinstance(model_components_param, list):
            model_components_param = [model_components_param]
        result["model_components_param"] = [from_union([lambda x: to_class(ModelComponentsParam, x), from_none], mcp) for mcp in model_components_param]
        model_template = self.model_template
        if not isinstance(model_template, list):
            model_template = [model_template]
        result["model_template"] = [from_union([from_str, from_none], mt) for mt in model_template]
        return result
    def from_json(obj: Any) -> 'ForecastConfig':
        """Converts a json string to the corresponding instance of the `ForecastConfig` class.
        Raises ValueError if the input is not a json string.
        """
        try:
            forecast_dict = json.loads(obj)
        except Exception:
            raise ValueError(f"The input ({obj}) is not a json string.")
        return ForecastConfig.from_dict(forecast_dict)
def assert_equal(
        actual,
        expected,
        ignore_list_order=False,
        rel=1e-5,
        dict_path="",
        ignore_keys=None,
        **kwargs):
    """Generic equality function that raises an ``AssertionError`` if the objects are not equal.

    Notes
    -----
    Works with pandas.DataFrame, pandas.Series, numpy.ndarray, str, int, float,
    bool, None, or a dictionary or list of such items, with arbitrary nesting
    of dictionaries and lists.
    Does not check equivalence of functions, or work with nested numpy arrays.

    Fix over the original: the DataFrame branch re-raised with
    ``f"{e}{message}"`` (location appended *after* the pandas error), while the
    Series and Index branches used ``f"{message}{e}"`` -- the location prefix
    is now placed consistently in all three branches.

    Parameters
    ----------
    actual : `pandas.DataFrame`, `pandas.Series`, `numpy.array`, `str`, `int`,
        `float`, `bool`, `None`, or a dictionary or list of such items
        Actual value.
    expected : `pandas.DataFrame`, `pandas.Series`, `numpy.array`, `str`, `int`,
        `float`, `bool`, `None`, or a dictionary or list of such items
        Expected value to compare against.
    ignore_list_order : `bool`, optional, default False
        If True, lists are considered equal if they contain the same elements. This option is valid
        only if the list can be sorted (all elements can be compared to each other).
        If False, lists are considered equal if they contain the same elements in the same order.
    rel : `float`, optional, default 1e-5
        To check int and float, passed to ``rel`` argument of `pytest.approx`.
        To check numpy arrays, passed to ``rtol`` argument of
        `numpy.testing.assert_allclose`.
        To check pandas dataframe, series, and index, passed to ``rtol`` argument of
        `pandas.testing.assert_frame_equal`, `pandas.testing.assert_series_equal`,
        `pandas.testing.assert_index_equal`.
    dict_path : `str`, optional, default ""
        Location within nested dictionary of the original call to this function.
        User should not set this parameter.
    ignore_keys : `dict`, optional, default None
        Keys to ignore in equality comparison. This only applies if
        `expected` is a dictionary.
        Does not compare the values of these keys. However,
        still returns false if the key is not present.
        Can be a nested dictionary. Terminal keys are those whose values should
        not be compared.
        If the expected value is a nested dictionary, this dictionary can
        also be nested, with the same structure.
        For example, if expected::

            expected = {
                "k1": {
                    "k1": 1,
                    "k2": [1, 2, 3],
                }
                "k2": {
                    "k1": "abc"
                }
            }

        Then the following ``ignore_keys`` will ignore
        dict["k1"]["k1"] and dict["k2"]["k1"] in the comparison::

            ignore_keys = {
                "k1": {
                    "k1": False  # The value can be anything, the keys determine what's ignored
                },
                "k2": {
                    "k1": "skip"  # The value can be anything, the keys determine what's ignored
                }
            }

        This ``ignore_keys`` will ignore
        dict["k1"] and dict["k2"]["k1"] in the comparison. "k1" is
        ignored entirely because its value is not a dictionary::

            ignore_keys = {
                "k1": None  # The value can be anything, the keys determine what's ignored
                "k2": {
                    "k1": None
                }
            }

    kwargs : keyword args, optional
        Keyword args to pass to `pandas.testing.assert_frame_equal`,
        `pandas.testing.assert_series_equal`.

    Raises
    ------
    AssertionError
        If actual does not match expected.
    """
    # a message to add to all error messages
    location = f"dictionary location: {dict_path}"
    message = "" if dict_path == "" else f"Error at {location}.\n"
    if expected is None:
        if actual is not None:
            raise AssertionError(f"{message}Actual should be None, found {actual}.")
    elif isinstance(expected, pd.DataFrame):
        if not isinstance(actual, pd.DataFrame):
            raise AssertionError(f"{message}Actual should be a pandas DataFrame, found {actual}.")
        # leverages pandas assert function and adds `message` to the error
        try:
            assert_frame_equal(
                actual,
                expected,
                rtol=rel,
                **kwargs)
        except AssertionError as e:
            import sys
            # Consistency fix: prefix the location like the Series/Index branches do.
            raise type(e)(f"{message}{e}").with_traceback(sys.exc_info()[2])
    elif isinstance(expected, pd.Series):
        if not isinstance(actual, pd.Series):
            raise AssertionError(f"{message}Actual should be a pandas Series, found {actual}.")
        try:
            assert_series_equal(
                actual,
                expected,
                rtol=rel,
                **kwargs)
        except AssertionError as e:
            import sys
            raise type(e)(f"{message}{e}").with_traceback(sys.exc_info()[2])
    elif isinstance(expected, pd.Index):
        if not isinstance(actual, pd.Index):
            raise AssertionError(f"{message}Actual should be a pandas Index, found {actual}.")
        try:
            assert_index_equal(
                actual,
                expected,
                rtol=rel,
                **kwargs)
        except AssertionError as e:
            import sys
            raise type(e)(f"{message}{e}").with_traceback(sys.exc_info()[2])
    elif isinstance(expected, np.ndarray):
        if not isinstance(actual, np.ndarray):
            raise AssertionError(f"{message}Actual should be a numpy array, found {actual}.")
        np.testing.assert_allclose(
            actual,
            expected,
            rtol=rel,
            err_msg=message)
    elif isinstance(expected, (list, tuple)):
        if not isinstance(actual, (list, tuple)):
            raise AssertionError(f"{message}Actual should be a list or tuple, found {actual}.")
        if not len(actual) == len(expected):
            raise AssertionError(f"{message}Lists have different length. "
                                 f"Actual: {actual}. Expected: {expected}.")
        if ignore_list_order:
            # order doesn't matter
            actual = sorted(actual)
            expected = sorted(expected)
        # element-wise comparison; `ignore_keys` only applies to dicts
        if ignore_keys is not None:
            warnings.warn(f"At {location}. `ignore_keys` is {ignore_keys}, but found a list. "
                          f"No keys will be ignored")
        for (item_actual, item_expected) in zip(actual, expected):
            assert_equal(
                item_actual,
                item_expected,
                ignore_list_order=ignore_list_order,
                rel=rel,
                dict_path=dict_path,
                ignore_keys=None,
                **kwargs)
    elif isinstance(expected, dict):
        # dictionaries are equal if their keys and values are equal
        if not isinstance(actual, dict):
            raise AssertionError(f"{message}Actual should be a dict, found {actual}.")
        # checks the keys
        if not actual.keys() == expected.keys():
            raise AssertionError(f"{message}Dict keys do not match. "
                                 f"Actual: {actual.keys()}. Expected: {expected.keys()}.")
        # checks the next level of nesting, if not ignored by `ignore_keys`
        for k, expected_item in expected.items():
            if ignore_keys is not None and k in ignore_keys.keys():
                if isinstance(ignore_keys[k], dict):
                    # specific keys within the value are ignored
                    new_ignore_keys = ignore_keys[k]
                else:
                    # the entire value is ignored
                    continue
            else:
                # the key is not ignored, so its value should be fully compared
                new_ignore_keys = None
            # appends the key to the path for error reporting
            new_path = f"{dict_path}['{k}']" if dict_path != "" else f"dict['{k}']"
            assert_equal(
                actual[k],
                expected_item,
                ignore_list_order=ignore_list_order,
                rel=rel,
                dict_path=new_path,
                ignore_keys=new_ignore_keys,
                **kwargs)
    elif isinstance(expected, (int, float)):
        if not isinstance(actual, (int, float)):
            raise AssertionError(f"{message}Actual should be numeric, found {actual}.")
        if not math.isclose(actual, expected, rel_tol=rel, abs_tol=0.0):
            raise AssertionError(f"{message}Actual does not match expected. "
                                 f"Actual: {actual}. Expected: {expected}.")
    else:
        # fallback for str, bool, and any other type supporting `!=`
        if actual != expected:
            raise AssertionError(f"{message}Actual does not match expected. "
                                 f"Actual: {actual}. Expected: {expected}.")
The provided code snippet includes necessary dependencies for implementing the `assert_equal_forecast_config` function. Write a Python function `def assert_equal_forecast_config( forecast_config_1: ForecastConfig, forecast_config_2: ForecastConfig)` to solve the following problem:
Asserts equality between two instances of `ForecastConfig`. Raises an error in case of parameter mismatch. Parameters ---------- forecast_config_1: `ForecastConfig` First instance of the :class:`~greykite.framework.templates.model_templates.ForecastConfig` for comparing. forecast_config_2: `ForecastConfig` Second instance of the :class:`~greykite.framework.templates.model_templates.ForecastConfig` for comparing. Raises ------- AssertionError If `ForecastConfig`s do not match, else returns None.
Here is the function:
def assert_equal_forecast_config(
        forecast_config_1: ForecastConfig,
        forecast_config_2: ForecastConfig):
    """Checks that two `ForecastConfig` instances are equivalent.

    Both inputs are serialized with ``to_dict`` and compared via
    `~greykite.common.python_utils.assert_equal`.

    Parameters
    ----------
    forecast_config_1 : `ForecastConfig`
        First instance of the
        :class:`~greykite.framework.templates.model_templates.ForecastConfig` for comparing.
    forecast_config_2 : `ForecastConfig`
        Second instance of the
        :class:`~greykite.framework.templates.model_templates.ForecastConfig` for comparing.

    Raises
    ------
    ValueError
        If either input is not a `ForecastConfig`.
    AssertionError
        If the two `ForecastConfig`s do not match; otherwise returns None.
    """
    for config in (forecast_config_1, forecast_config_2):
        if not isinstance(config, ForecastConfig):
            raise ValueError(f"The input ({config}) is not a member of 'ForecastConfig' class.")
    assert_equal(forecast_config_1.to_dict(), forecast_config_2.to_dict())
167,501 | from typing import Optional
import pandas as pd
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.time_properties import infer_freq
from greykite.common.time_properties import min_gap_in_seconds
from greykite.framework.pipeline.utils import get_default_time_parameters
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.forecast_config_defaults import ForecastConfigDefaults
from greykite.framework.templates.model_templates import ModelTemplateEnum
from greykite.framework.templates.simple_silverkite_template_config import SILVERKITE_DAILY_90
from greykite.framework.templates.simple_silverkite_template_config import SILVERKITE_HOURLY_1
from greykite.framework.templates.simple_silverkite_template_config import SILVERKITE_HOURLY_24
from greykite.framework.templates.simple_silverkite_template_config import SILVERKITE_HOURLY_168
from greykite.framework.templates.simple_silverkite_template_config import SILVERKITE_HOURLY_336
from greykite.framework.templates.simple_silverkite_template_config import SILVERKITE_WEEKLY
from greykite.sklearn.cross_validation import RollingTimeSeriesSplit
class LoggingLevelEnum(Enum):
    """Valid types of logging levels available to use.

    The numeric values match the stdlib `logging` module levels, so a
    member's ``.value`` can be passed directly to `logging.Logger.log`
    (as done by `log_message`).
    """
    CRITICAL = 50
    ERROR = 40
    WARNING = 30
    INFO = 20
    DEBUG = 10
    NOTSET = 0
def log_message(message, level=LoggingLevelEnum.INFO):
    """Adds a message to logger.

    Parameters
    ----------
    message : `any`
        The message to be added to logger.
    level : `Enum`
        One of the levels in the `~greykite.common.enums.LoggingLevelEnum`.

    Raises
    ------
    ValueError
        If ``level`` is not a member of `LoggingLevelEnum`.
    """
    # Membership can be tested directly against the `__members__` mapping
    # (it maps member names to members); materializing a list is unnecessary.
    if level.name not in LoggingLevelEnum.__members__:
        raise ValueError(f"{level} not found, it must be a member of the LoggingLevelEnum class.")
    logger.log(level.value, message)
def min_gap_in_seconds(df, time_col):
    """Returns the smallest gap between observations in ``df[time_col]``.

    Assumes ``df[time_col]`` is sorted in ascending order without duplicates.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Input timeseries.
    time_col : `str`
        Time column name in ``df``.

    Returns
    -------
    min_gap : numeric
        Minimum gap between consecutive observations, in whole seconds
        (only the day and second components of the gap are counted).

    Raises
    ------
    ValueError
        If ``df`` has fewer than two rows.
    """
    if df.shape[0] < 2:
        raise ValueError(f"Must provide at least two data points. Found {df.shape[0]}.")
    timestamps = pd.to_datetime(df[time_col])
    # Differences between consecutive timestamps; the first element is NaT
    # and is skipped by `min()`.
    smallest_gap = (timestamps - timestamps.shift()).min()
    return smallest_gap.days * 24 * 3600 + smallest_gap.seconds
def infer_freq(
        df,
        time_col=TIME_COL,
        window_size=20):
    """Infers frequency of the timestamps provided in the ``time_col`` of ``df``.

    Notes
    -----
    If the timeseries does not have any missing values,
    ``pandas.infer_freq`` can correctly infer any valid frequency with 20 datapoints.
    Valid frequencies are listed here:
    https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
    When inference over the whole series fails (e.g. due to gaps), fixed-size
    non-overlapping windows are scanned until one of them yields a frequency.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Dataframe with column ``time_col``.
    time_col : `str` or None, default TIME_COL
        Time column name.
    window_size : `int` or None, default 20
        Window size to subset ``df`` at each iteration.

    Returns
    -------
    freq : `str` or None
        Inferred frequency of the timestamps in ``time_col``,
        or None if no window yields a frequency.
    """
    work = df.copy()
    # Drops missing timestamps before conversion, since they break inference.
    work = work[work[time_col].notna()]
    work[time_col] = pd.to_datetime(work[time_col])
    freq = pd.infer_freq(work[time_col])
    if freq is None:
        # Scans consecutive non-overlapping windows until one infers a frequency.
        num_rows = work.shape[0]
        start_index = 0
        while start_index + window_size <= num_rows:
            window = work.iloc[start_index:start_index + window_size]
            freq = pd.infer_freq(window[time_col])
            if freq is not None:
                break
            start_index += window_size
    return freq
def get_default_time_parameters(
        period,
        num_observations,
        forecast_horizon=None,
        test_horizon=None,
        periods_between_train_test=None,
        cv_horizon=None,
        cv_min_train_periods=None,
        cv_expanding_window=False,
        cv_periods_between_splits=None,
        cv_periods_between_train_test=None,
        cv_max_splits=3):
    """Resolves default forecast horizon, backtest, and cross-validation
    parameters from the input frequency, data size, and user-requested values.

    This function is called from the `~greykite.framework.pipeline.pipeline.forecast_pipeline`
    directly, so that suitable defaults are available to users of forecast_pipeline
    and do not depend on model configuration (the template).

    Parameters
    ----------
    period : `float`
        Period of each observation (i.e. average time between observations, in seconds).
    num_observations : `int`
        Number of observations in the input data.
    forecast_horizon : `int` or None, default None
        Number of periods to forecast into the future. Must be > 0.
        If None, default is determined from input data frequency.
    test_horizon : `int` or None, default None
        Numbers of periods held back from end of df for test.
        The rest is used for cross validation.
        If None, default is ``forecast_horizon``. Set to 0 to skip backtest.
    periods_between_train_test : `int` or None, default None
        Number of periods gap between train and test in a CV split.
        If None, default is 0.
    cv_horizon : `int` or None, default None
        Number of periods in each CV test set.
        If None, default is ``forecast_horizon``. Set to 0 to skip CV.
    cv_min_train_periods : `int` or None, default None
        Minimum number of periods for training each CV fold;
        passed through to the result unchanged.
    cv_expanding_window : `bool`, default False
        Whether CV training windows are fixed to the first available date.
        Accepted for interface compatibility; not part of the returned dict.
    cv_periods_between_splits : `int` or None, default None
        Number of periods to slide the test window between CV splits.
        Accepted for interface compatibility; not part of the returned dict.
    cv_periods_between_train_test : `int` or None, default None
        Number of periods gap between train and test in a CV split.
        If None, default is ``periods_between_train_test``.
    cv_max_splits : `int` or None, default 3
        Maximum number of CV splits.
        Accepted for interface compatibility; not part of the returned dict.

    Returns
    -------
    time_params : `dict` [`str`, `int`]
        Keys are parameter names, values are their default values.
    """
    if forecast_horizon is None:
        forecast_horizon = get_default_horizon_from_period(
            period=period,
            num_observations=num_observations)
    forecast_horizon = get_integer(val=forecast_horizon, name="forecast_horizon", min_value=1)
    test_horizon = get_integer(
        val=test_horizon,
        name="test_horizon",
        min_value=0,
        default_value=forecast_horizon)
    # Not enough data to hold out the full horizon: fall back to an 80/20 split.
    if test_horizon >= num_observations:
        test_horizon = math.floor(num_observations * 0.2)
    cv_horizon = get_integer(
        val=cv_horizon,
        name="cv_horizon",
        min_value=0,
        default_value=forecast_horizon)
    # RollingTimeSeriesSplit handles the case of no CV splits; this guard only
    # avoids cv_horizon == num_observations, which raises an error there.
    if cv_horizon >= num_observations:
        cv_horizon = math.floor(num_observations * 0.2)
    periods_between_train_test = get_integer(
        val=periods_between_train_test,
        name="periods_between_train_test",
        min_value=0,
        default_value=0)
    cv_periods_between_train_test = get_integer(
        val=cv_periods_between_train_test,
        name="cv_periods_between_train_test",
        min_value=0,
        default_value=periods_between_train_test)
    return {
        "forecast_horizon": forecast_horizon,
        "test_horizon": test_horizon,
        "periods_between_train_test": periods_between_train_test,
        "cv_horizon": cv_horizon,
        "cv_min_train_periods": cv_min_train_periods,
        "cv_periods_between_train_test": cv_periods_between_train_test
    }
class ForecastConfig:
    """Config for providing parameters to the Forecast library"""
    computation_param: Optional[ComputationParam] = None
    """How to compute the result. See
    :class:`~greykite.framework.templates.autogen.forecast_config.ComputationParam`.
    """
    coverage: Optional[float] = None
    """Intended coverage of the prediction bands (0.0 to 1.0).
    If None, the upper/lower predictions are not returned.
    """
    evaluation_metric_param: Optional[EvaluationMetricParam] = None
    """What metrics to evaluate. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`.
    """
    evaluation_period_param: Optional[EvaluationPeriodParam] = None
    """How to split data for evaluation. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationPeriodParam`.
    """
    forecast_horizon: Optional[int] = None
    """Number of periods to forecast into the future. Must be > 0.
    If None, default is determined from input data frequency.
    """
    forecast_one_by_one: Optional[Union[bool, int, List[int]]] = None
    """The options to activate the forecast one-by-one algorithm.
    See :class:`~greykite.sklearn.estimator.one_by_one_estimator.OneByOneEstimator`.
    Can be boolean, int, or list of int.
    If int, it has to be less than or equal to the forecast horizon.
    If list of int, the sum has to be the forecast horizon.
    """
    metadata_param: Optional[MetadataParam] = None
    """Information about the input data. See
    :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam`.
    """
    model_components_param: Optional[Union[ModelComponentsParam, List[Optional[ModelComponentsParam]]]] = None
    """Parameters to tune the model. Typically a single ModelComponentsParam, but the `SimpleSilverkiteTemplate`
    template also allows a list of ModelComponentsParam for grid search. A single ModelComponentsParam
    corresponds to one grid, and a list corresponds to a list of grids.
    See :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam`.
    """
    model_template: Optional[Union[str, dataclass, List[Union[str, dataclass]]]] = None
    """Name of the model template. Typically a single string, but the `SimpleSilverkiteTemplate`
    template also allows a list of string for grid search.
    See :class:`~greykite.framework.templates.model_templates.ModelTemplateEnum`
    for valid names.
    """
    def from_dict(obj: Any) -> 'ForecastConfig':
        """Converts a dictionary representation to a `ForecastConfig` instance.

        Parameters
        ----------
        obj : `dict`
            Dictionary whose keys match `ForecastConfig` attribute names.

        Returns
        -------
        config : `ForecastConfig`
            The corresponding `ForecastConfig` instance.
        """
        assert isinstance(obj, dict)
        computation_param = from_union([ComputationParam.from_dict, from_none], obj.get("computation_param"))
        coverage = from_union([from_float, from_none], obj.get("coverage"))
        evaluation_metric_param = from_union([EvaluationMetricParam.from_dict, from_none], obj.get("evaluation_metric_param"))
        evaluation_period_param = from_union([EvaluationPeriodParam.from_dict, from_none], obj.get("evaluation_period_param"))
        forecast_horizon = from_union([from_int, from_none], obj.get("forecast_horizon"))
        forecast_one_by_one = from_union([from_int, from_bool, from_none, from_list_int], obj.get("forecast_one_by_one"))
        metadata_param = from_union([MetadataParam.from_dict, from_none], obj.get("metadata_param"))
        # `model_components_param` and `model_template` may each be a single
        # value or a list (for grid search); both forms are accepted.
        if not isinstance(obj.get("model_components_param"), list):
            model_components_param = from_union([ModelComponentsParam.from_dict, from_none], obj.get("model_components_param"))
        else:
            model_components_param = [from_union([ModelComponentsParam.from_dict, from_none], mcp) for mcp in obj.get("model_components_param")]
        if not isinstance(obj.get("model_template"), list):
            model_template = from_union([from_str, from_none], obj.get("model_template"))
        else:
            model_template = [from_union([from_str, from_none], mt) for mt in obj.get("model_template")]
        return ForecastConfig(
            computation_param=computation_param,
            coverage=coverage,
            evaluation_metric_param=evaluation_metric_param,
            evaluation_period_param=evaluation_period_param,
            forecast_horizon=forecast_horizon,
            forecast_one_by_one=forecast_one_by_one,
            metadata_param=metadata_param,
            model_components_param=model_components_param,
            model_template=model_template)
    def to_dict(self) -> dict:
        """Converts this `ForecastConfig` to its dictionary representation.

        Returns
        -------
        result : `dict`
            Dictionary whose keys match `ForecastConfig` attribute names.
            ``model_components_param`` and ``model_template`` are always emitted
            as lists, even when a single (non-list) value was set.
        """
        result: dict = {}
        result["computation_param"] = from_union([lambda x: to_class(ComputationParam, x), from_none], self.computation_param)
        result["coverage"] = from_union([to_float, from_none], self.coverage)
        result["evaluation_metric_param"] = from_union([lambda x: to_class(EvaluationMetricParam, x), from_none], self.evaluation_metric_param)
        result["evaluation_period_param"] = from_union([lambda x: to_class(EvaluationPeriodParam, x), from_none], self.evaluation_period_param)
        result["forecast_horizon"] = from_union([from_int, from_none], self.forecast_horizon)
        result["forecast_one_by_one"] = from_union([from_int, from_bool, from_none, from_list_int], self.forecast_one_by_one)
        result["metadata_param"] = from_union([lambda x: to_class(MetadataParam, x), from_none], self.metadata_param)
        # Normalizes to list form in local variables so that serialization does
        # not mutate `self` (previously the attributes themselves were
        # reassigned, silently changing the config object as a side effect).
        model_components_param = self.model_components_param
        if not isinstance(model_components_param, list):
            model_components_param = [model_components_param]
        result["model_components_param"] = [from_union([lambda x: to_class(ModelComponentsParam, x), from_none], mcp) for mcp in model_components_param]
        model_template = self.model_template
        if not isinstance(model_template, list):
            model_template = [model_template]
        result["model_template"] = [from_union([from_str, from_none], mt) for mt in model_template]
        return result
    def from_json(obj: Any) -> 'ForecastConfig':
        """Converts a json string to the corresponding instance of the `ForecastConfig` class.
        Raises ValueError if the input is not a json string.
        """
        try:
            forecast_dict = json.loads(obj)
        except Exception:
            raise ValueError(f"The input ({obj}) is not a json string.")
        return ForecastConfig.from_dict(forecast_dict)
class ForecastConfigDefaults:
    """Class that applies default values to a
    `~greykite.framework.templates.autogen.forecast_config.ForecastConfig` object.
    Provides these methods:
    - apply_metadata_defaults
    - apply_evaluation_metric_defaults
    - apply_evaluation_period_defaults
    - apply_computation_defaults
    - apply_model_components_defaults
    - apply_forecast_config_defaults
    Subclasses may override these if different defaults are desired.
    """
    DEFAULT_MODEL_TEMPLATE = "AUTO"
    """The default model template. See `~greykite.framework.templates.model_templates.ModelTemplateEnum`.
    Uses a string to avoid circular imports.
    """
    def apply_computation_defaults(computation: Optional[ComputationParam] = None) -> ComputationParam:
        """Fills in defaults for unset ComputationParam attributes.

        Attributes that are already set keep their values; attributes that are
        None receive their default. Other attributes are untouched.
        A None input is replaced by a fresh ComputationParam.

        Parameters
        ----------
        computation : `~greykite.framework.templates.autogen.forecast_config.ComputationParam` or None
            The ComputationParam object.

        Returns
        -------
        computation : `~greykite.framework.templates.autogen.forecast_config.ComputationParam`
            Valid ComputationParam object with the provided attribute values and defaults for the rest.
        """
        computation = ComputationParam() if computation is None else computation
        for attr, default in (("n_jobs", COMPUTATION_N_JOBS),
                              ("verbose", COMPUTATION_VERBOSE)):
            if getattr(computation, attr) is None:
                setattr(computation, attr, default)
        return computation
    def apply_evaluation_metric_defaults(evaluation: Optional[EvaluationMetricParam] = None) -> EvaluationMetricParam:
        """Fills in defaults for unset EvaluationMetricParam attributes.

        Attributes that are already set keep their values; attributes that are
        None receive their default. Other attributes are untouched.
        A None input is replaced by a fresh EvaluationMetricParam.

        Parameters
        ----------
        evaluation : `~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam` or None
            The EvaluationMetricParam object.

        Returns
        -------
        evaluation : `~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`
            Valid EvaluationMetricParam object with the provided attribute values and defaults for the rest.
        """
        evaluation = EvaluationMetricParam() if evaluation is None else evaluation
        if evaluation.cv_selection_metric is None:
            # NB: subclass may want to override, if designed for a different objective (e.g. quantile loss)
            evaluation.cv_selection_metric = EvaluationMetricEnum.MeanAbsolutePercentError.name
        if evaluation.cv_report_metrics is None:
            evaluation.cv_report_metrics = CV_REPORT_METRICS_ALL
        return evaluation
    def apply_evaluation_period_defaults(evaluation: Optional[EvaluationPeriodParam] = None) -> EvaluationPeriodParam:
        """Fills in defaults for unset EvaluationPeriodParam attributes.

        Attributes that are already set keep their values; attributes that are
        None receive their default. Other attributes are untouched.
        A None input is replaced by a fresh EvaluationPeriodParam.

        Parameters
        ----------
        evaluation : `~greykite.framework.templates.autogen.forecast_config.EvaluationPeriodParam` or None
            The EvaluationPeriodParam object.

        Returns
        -------
        evaluation : `~greykite.framework.templates.autogen.forecast_config.EvaluationPeriodParam`
            Valid EvaluationPeriodParam object with the provided attribute values and defaults for the rest.
        """
        evaluation = EvaluationPeriodParam() if evaluation is None else evaluation
        if evaluation.cv_max_splits is None:
            evaluation.cv_max_splits = EVALUATION_PERIOD_CV_MAX_SPLITS
        if evaluation.cv_periods_between_train_test is None:
            # CV mirrors the train/test gap used for the holdout split.
            evaluation.cv_periods_between_train_test = evaluation.periods_between_train_test
        if evaluation.cv_expanding_window is None:
            # NB: subclass may want to override.
            # Expanding windows suit long-term forecasts and limited data.
            evaluation.cv_expanding_window = True
        return evaluation
    def apply_metadata_defaults(metadata: Optional[MetadataParam] = None) -> MetadataParam:
        """Fills in defaults for unset MetadataParam attributes.

        Attributes that are already set keep their values; attributes that are
        None receive their default. Other attributes are untouched.
        A None input is replaced by a fresh MetadataParam.

        Parameters
        ----------
        metadata : `~greykite.framework.templates.autogen.forecast_config.MetadataParam` or None
            The MetadataParam object.

        Returns
        -------
        metadata : `~greykite.framework.templates.autogen.forecast_config.MetadataParam`
            Valid MetadataParam object with the provided attribute values and defaults for the rest.
        """
        metadata = MetadataParam() if metadata is None else metadata
        for attr, default in (("time_col", TIME_COL),
                              ("value_col", VALUE_COL)):
            if getattr(metadata, attr) is None:
                setattr(metadata, attr, default)
        return metadata
    def apply_model_components_defaults(model_components: Optional[Union[ModelComponentsParam, List[Optional[ModelComponentsParam]]]] = None) \
            -> Union[ModelComponentsParam, List[ModelComponentsParam]]:
        """Fills in defaults for the model components specification.

        Converts None to a ModelComponentsParam object.
        A list of a single element is unpacked to that element.

        Parameters
        ----------
        model_components : `~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` or None or list of such items
            The ModelComponentsParam object.

        Returns
        -------
        model_components : `~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam` or list of such items
            Valid ModelComponentsParam object(s) with defaults substituted for None entries.
        """
        # Normalizes the input to list form.
        candidates = model_components if isinstance(model_components, list) else [model_components]
        # Every None entry becomes an empty ModelComponentsParam.
        candidates = [ModelComponentsParam() if item is None else item for item in candidates]
        # A singleton list is unpacked to its only element, since some template
        # classes (e.g. SilverkiteTemplate) do not accept a list.
        return candidates[0] if len(candidates) == 1 else candidates
    def apply_model_template_defaults(self, model_template: Optional[Union[str, List[Optional[str]]]] = None) -> Union[str, List[str]]:
        """Fills in defaults for the model template name.

        Converts None to the default model template name.
        A list of a single element is unpacked to that element.

        Parameters
        ----------
        model_template : `str` or None or `list` [None, `str`]
            The model template name.
            See valid names in `~greykite.framework.templates.model_templates.ModelTemplateEnum`.

        Returns
        -------
        model_template : `str` or `list` [`str`]
            The model template name(s), with the default substituted for None entries.
        """
        # Normalizes the input to list form, then substitutes the default for None.
        names = model_template if isinstance(model_template, list) else [model_template]
        names = [self.DEFAULT_MODEL_TEMPLATE if name is None else name for name in names]
        # A singleton list is unpacked to its only element, since some template
        # classes (e.g. SilverkiteTemplate) do not accept a list.
        return names[0] if len(names) == 1 else names
    def apply_forecast_config_defaults(self, config: Optional[ForecastConfig] = None) -> ForecastConfig:
        """Applies the default Forecast Config values to the given config.

        Attributes that are already set keep their values; attributes that are
        None receive their default. Other attributes are untouched.
        A None input is replaced by a fresh ForecastConfig.

        Parameters
        ----------
        config : :class:`~greykite.framework.templates.autogen.forecast_config.ForecastConfig` or None
            Forecast configuration if available. See
            :class:`~greykite.framework.templates.autogen.forecast_config.ForecastConfig`.

        Returns
        -------
        config : :class:`~greykite.framework.templates.model_templates.ForecastConfig`
            A valid Forecast Config with the provided attribute values and defaults for the rest.
        """
        # Works on a fresh or copied config so the caller's object is not mutated.
        config = ForecastConfig() if config is None else dataclasses.replace(config)
        config.computation_param = self.apply_computation_defaults(config.computation_param)
        config.evaluation_metric_param = self.apply_evaluation_metric_defaults(config.evaluation_metric_param)
        config.evaluation_period_param = self.apply_evaluation_period_defaults(config.evaluation_period_param)
        config.metadata_param = self.apply_metadata_defaults(config.metadata_param)
        config.model_components_param = self.apply_model_components_defaults(config.model_components_param)
        config.model_template = self.apply_model_template_defaults(config.model_template)
        return config
class ModelTemplateEnum(Enum):
    """Available model templates.
    Enumerates the possible values for the ``model_template`` attribute of
    :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
    The value has type `~greykite.framework.templates.model_templates.ModelTemplate` which contains:
    - the template class that recognizes the model_template. Template classes implement the
    `~greykite.framework.templates.template_interface.TemplateInterface` interface.
    - a plain-text description of what the model_template is for,
    The description should be unique across enum members. The template class
    can be shared, because a template class can recognize multiple model templates.
    For example, the same template class may use different default values for
    ``ForecastConfig.model_components_param`` depending on ``ForecastConfig.model_template``.
    Notes
    -----
    The template classes
    `~greykite.framework.templates.silverkite_template.SilverkiteTemplate`
    and `~greykite.framework.templates.prophet_template.ProphetTemplate`
    recognize only the model templates explicitly enumerated here.
    However, the `~greykite.framework.templates.simple_silverkite_template.SimpleSilverkiteTemplate`
    template class allows additional model templates to be specified generically.
    Any object of type `~greykite.framework.templates.simple_silverkite_template_config.SimpleSilverkiteTemplateOptions`
    can be used as the model_template.
    These generic model templates are valid but not enumerated here.
    """
    SILVERKITE = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model with automatic growth, seasonality, holidays, "
                    "automatic autoregression, normalization "
                    "and interactions. Best for hourly and daily frequencies. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model with automatic growth, seasonality, holidays,
    automatic autoregression, normalization
    and interactions. Best for hourly and daily frequencies.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_DAILY_1_CONFIG_1 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Config 1 in template ``SILVERKITE_DAILY_1``. "
                    "Compared to ``SILVERKITE``, it uses parameters "
                    "specifically tuned for daily data and 1-day forecast.")
    """Config 1 in template ``SILVERKITE_DAILY_1``.
    Compared to ``SILVERKITE``, it uses parameters
    specifically tuned for daily data and 1-day forecast.
    """
    SILVERKITE_DAILY_1_CONFIG_2 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Config 2 in template ``SILVERKITE_DAILY_1``. "
                    "Compared to ``SILVERKITE``, it uses parameters "
                    "specifically tuned for daily data and 1-day forecast.")
    """Config 2 in template ``SILVERKITE_DAILY_1``.
    Compared to ``SILVERKITE``, it uses parameters
    specifically tuned for daily data and 1-day forecast.
    """
    SILVERKITE_DAILY_1_CONFIG_3 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Config 3 in template ``SILVERKITE_DAILY_1``. "
                    "Compared to ``SILVERKITE``, it uses parameters "
                    "specifically tuned for daily data and 1-day forecast.")
    """Config 3 in template ``SILVERKITE_DAILY_1``.
    Compared to ``SILVERKITE``, it uses parameters
    specifically tuned for daily data and 1-day forecast.
    """
    SILVERKITE_DAILY_1 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for daily data and 1-day forecast. "
                    "Contains 3 candidate configs for grid search, "
                    "optimized the seasonality and changepoint parameters.")
    """Silverkite model specifically tuned for daily data and 1-day forecast.
    Contains 3 candidate configs for grid search,
    optimized the seasonality and changepoint parameters.
    """
    SILVERKITE_DAILY_90 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for daily data with 90 days forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for daily data with 90 days forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_WEEKLY = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for weekly data. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for weekly data.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_MONTHLY = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for monthly data. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for monthly data.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_HOURLY_1 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 1 hour forecast horizon. "
                    "Contains 3 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 1 hour forecast horizon.
    Contains 3 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`."""
    SILVERKITE_HOURLY_24 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 24 hours (1 day) forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 24 hours (1 day) forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`."""
    SILVERKITE_HOURLY_168 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 168 hours (1 week) forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 168 hours (1 week) forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`."""
    SILVERKITE_HOURLY_336 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 336 hours (2 weeks) forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 336 hours (2 weeks) forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_EMPTY = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model with no component included by default. Fits only a constant intercept. "
                    "Select and customize this template to add only the terms you want. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model with no component included by default. Fits only a constant intercept.
    Select and customize this template to add only the terms you want.
    Uses `SimpleSilverkiteEstimator`.
    """
    SK = ModelTemplate(
        template_class=SilverkiteTemplate,
        description="Silverkite model with low-level interface. For flexible model tuning "
                    "if SILVERKITE template is not flexible enough. Not for use out-of-the-box: "
                    "customization is needed for good performance. Uses `SilverkiteEstimator`.")
    """Silverkite model with low-level interface. For flexible model tuning
    if SILVERKITE template is not flexible enough. Not for use out-of-the-box:
    customization is needed for good performance. Uses `SilverkiteEstimator`.
    """
    PROPHET = ModelTemplate(
        template_class=ProphetTemplate,
        description="Prophet model with growth, seasonality, holidays, additional regressors "
                    "and prediction intervals. Uses `ProphetEstimator`.")
    """Prophet model with growth, seasonality, holidays, additional regressors
    and prediction intervals. Uses `ProphetEstimator`."""
    AUTO_ARIMA = ModelTemplate(
        template_class=AutoArimaTemplate,
        description="Auto ARIMA model with fit and prediction intervals. "
                    "Uses `AutoArimaEstimator`.")
    """ARIMA model with automatic order selection. Uses `AutoArimaEstimator`."""
    SILVERKITE_TWO_STAGE = ModelTemplate(
        template_class=MultistageForecastTemplate,
        description="MultistageForecastTemplate's default model template. A two-stage model. "
                    "The first step takes a longer history and learns the long-term effects, "
                    "while the second step takes a shorter history and learns the short-term residuals."
    )
    """Multistage forecast model's default model template. A two-stage model.
    The first step takes a longer history and learns the long-term effects,
    while the second step takes a shorter history and learns the short-term residuals.
    """
    MULTISTAGE_EMPTY = ModelTemplate(
        template_class=MultistageForecastTemplate,
        description="Empty configuration for Multistage Forecast. "
                    "All parameters will be exactly what user inputs. "
                    "Not to be used without overriding."
    )
    """Empty configuration for Multistage Forecast.
    All parameters will be exactly what user inputs.
    Not to be used without overriding.
    """
    AUTO = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Automatically selects the SimpleSilverkite model template that corresponds to the forecast "
                    "problem. Selection is based on data frequency, forecast horizon, and CV configuration."
    )
    """Automatically selects the SimpleSilverkite model template that corresponds to the forecast problem.
    Selection is based on data frequency, forecast horizon, and CV configuration.
    """
    LAG_BASED = ModelTemplate(
        template_class=LagBasedTemplate,
        description="Uses aggregated past observations as predictions. Examples are "
                    "past day, week-over-week, week-over-3-week median, etc."
    )
    """Uses aggregated past observations as predictions. Examples are
    past day, week-over-week, week-over-3-week median, etc.
    """
    SILVERKITE_WOW = ModelTemplate(
        template_class=MultistageForecastTemplate,
        description="The Silverkite+WOW model uses Silverkite to model yearly/quarterly/monthly seasonality, "
                    "growth and holiday effects first, then uses week over week to estimate the residuals. "
                    "The final prediction is the total of the two models. "
                    "This avoids the normal week over week (WOW) estimation's weakness in capturing "
                    "growth and holidays."
    )
    """The Silverkite+WOW model uses Silverkite to model yearly/quarterly/monthly seasonality,
    growth and holiday effects first, then uses week over week to estimate the residuals.
    The final prediction is the total of the two models.
    This avoids the normal week over week (WOW) estimation's weakness in capturing growth and holidays.
    """
# Candidate config lists for grid search. Each string names a SimpleSilverkite
# model template; its tokens encode the options (SEAS_* seasonality, GR_*
# growth, CP_* trend changepoints, HOL_SP<k> separate holidays +- k days,
# FEASET_* feature sets, ALGO_* fit algorithm, AR_* autoregression,
# DSI_*/WSI_* daily/weekly seasonality interaction).
SILVERKITE_DAILY_90 = [
    # For daily data, light seasonality up to weekly, light trend changepoints,
    # separate holidays +- 2 days, default feature sets and linear fit algorithm.
    "DAILY_SEAS_LTQM_GR_LINEAR_CP_LT_HOL_SP2_FEASET_AUTO_ALGO_LINEAR_AR_OFF_DSI_AUTO_WSI_AUTO",
    # For daily data, light seasonality up to weekly, no trend changepoints,
    # separate holidays +- 2 days, default feature sets and linear fit algorithm.
    "DAILY_SEAS_LTQM_GR_LINEAR_CP_NONE_HOL_SP2_FEASET_AUTO_ALGO_LINEAR_AR_OFF_DSI_AUTO_WSI_AUTO",
    # For daily data, light seasonality up to weekly, light trend changepoints,
    # separate holidays +- 2 days, default feature sets and ridge fit algorithm.
    "DAILY_SEAS_LTQM_GR_LINEAR_CP_LT_HOL_SP2_FEASET_AUTO_ALGO_RIDGE_AR_OFF_DSI_AUTO_WSI_AUTO",
    # For daily data, normal seasonality up to weekly, light trend changepoints,
    # separate holidays +- 4 days, default feature sets and ridge fit algorithm.
    "DAILY_SEAS_NM_GR_LINEAR_CP_LT_HOL_SP4_FEASET_AUTO_ALGO_RIDGE_AR_OFF_DSI_AUTO_WSI_AUTO"
]
SILVERKITE_WEEKLY = [
    # For weekly data, normal seasonality up to yearly, no trend changepoints,
    # no holiday, no feature sets and linear fit algorithm.
    "WEEKLY_SEAS_NM_GR_LINEAR_CP_NONE_HOL_NONE_FEASET_OFF_ALGO_LINEAR_AR_OFF_DSI_AUTO_WSI_AUTO",
    # For weekly data, normal seasonality up to yearly, light trend changepoints,
    # no holiday, no feature sets and linear fit algorithm.
    "WEEKLY_SEAS_NM_GR_LINEAR_CP_LT_HOL_NONE_FEASET_OFF_ALGO_LINEAR_AR_OFF_DSI_AUTO_WSI_AUTO",
    # For weekly data, heavy seasonality up to yearly, normal trend changepoints,
    # no holiday, no feature sets and ridge fit algorithm.
    "WEEKLY_SEAS_HV_GR_LINEAR_CP_NM_HOL_NONE_FEASET_OFF_ALGO_RIDGE_AR_OFF_DSI_AUTO_WSI_AUTO",
    # For weekly data, heavy seasonality up to yearly, light trend changepoints,
    # no holiday, no feature sets and ridge fit algorithm.
    "WEEKLY_SEAS_HV_GR_LINEAR_CP_LT_HOL_NONE_FEASET_OFF_ALGO_RIDGE_AR_OFF_DSI_AUTO_WSI_AUTO"
]
SILVERKITE_HOURLY_1 = [
    # For hourly data, the first template is the same as the "SILVERKITE" template defined above.
    "SILVERKITE",
    # For hourly data, light seasonality up to daily, normal trend changepoints,
    # separate holidays +- 4 days, no feature sets, automatic autoregression and ridge fit algorithm.
    "HOURLY_SEAS_LT_GR_LINEAR_CP_NM_HOL_SP4_FEASET_OFF_ALGO_RIDGE_AR_AUTO",
    # For hourly data, normal seasonality up to daily, normal trend changepoints,
    # separate holidays +- 1 day, default feature sets, automatic autoregression and ridge fit algorithm.
    "HOURLY_SEAS_NM_GR_LINEAR_CP_NM_HOL_SP1_FEASET_AUTO_ALGO_RIDGE_AR_AUTO"
]
SILVERKITE_HOURLY_24 = [
    # For hourly data, light seasonality up to daily, normal trend changepoints,
    # separate holidays +- 4 days, default feature sets, automatic autoregression and ridge fit algorithm.
    "HOURLY_SEAS_LT_GR_LINEAR_CP_NM_HOL_SP4_FEASET_AUTO_ALGO_RIDGE_AR_AUTO",
    # For hourly data, light seasonality up to daily, no trend changepoints,
    # separate holidays +- 4 days, default feature sets, automatic autoregression and ridge fit algorithm.
    "HOURLY_SEAS_LT_GR_LINEAR_CP_NONE_HOL_SP4_FEASET_AUTO_ALGO_RIDGE_AR_AUTO",
    # For hourly data, normal seasonality up to daily, light trend changepoints,
    # separate holidays +- 1 day, no feature sets, automatic autoregression and linear fit algorithm.
    "HOURLY_SEAS_NM_GR_LINEAR_CP_LT_HOL_SP1_FEASET_OFF_ALGO_LINEAR_AR_AUTO",
    # For hourly data, normal seasonality up to daily, normal trend changepoints,
    # separate holidays +- 4 days, default feature sets, automatic autoregression and ridge fit algorithm.
    "HOURLY_SEAS_NM_GR_LINEAR_CP_NM_HOL_SP4_FEASET_AUTO_ALGO_RIDGE_AR_AUTO"
]
SILVERKITE_HOURLY_168 = [
    # For hourly data, light seasonality up to daily, light trend changepoints,
    # separate holidays +- 4 days, default feature sets, no autoregression and ridge fit algorithm.
    "HOURLY_SEAS_LT_GR_LINEAR_CP_LT_HOL_SP4_FEASET_AUTO_ALGO_RIDGE_AR_OFF",
    # For hourly data, light seasonality up to daily, light trend changepoints,
    # separate holidays +- 2 days, default feature sets, no autoregression and ridge fit algorithm.
    "HOURLY_SEAS_LT_GR_LINEAR_CP_LT_HOL_SP2_FEASET_AUTO_ALGO_RIDGE_AR_OFF",
    # For hourly data, normal seasonality up to daily, no trend changepoints,
    # separate holidays +- 4 days, no feature sets, automatic autoregression and linear fit algorithm.
    "HOURLY_SEAS_NM_GR_LINEAR_CP_NONE_HOL_SP4_FEASET_OFF_ALGO_LINEAR_AR_AUTO",
    # For hourly data, normal seasonality up to daily, normal trend changepoints,
    # separate holidays +- 1 day, default feature sets, no autoregression and ridge fit algorithm.
    "HOURLY_SEAS_NM_GR_LINEAR_CP_NM_HOL_SP1_FEASET_AUTO_ALGO_RIDGE_AR_OFF"
]
SILVERKITE_HOURLY_336 = [
    # For hourly data, light seasonality up to daily, light trend changepoints,
    # separate holidays +- 2 days, default feature sets, no autoregression and ridge fit algorithm.
    "HOURLY_SEAS_LT_GR_LINEAR_CP_LT_HOL_SP2_FEASET_AUTO_ALGO_RIDGE_AR_OFF",
    # For hourly data, light seasonality up to daily, light trend changepoints,
    # separate holidays +- 4 days, default feature sets, no autoregression and ridge fit algorithm.
    "HOURLY_SEAS_LT_GR_LINEAR_CP_LT_HOL_SP4_FEASET_AUTO_ALGO_RIDGE_AR_OFF",
    # For hourly data, normal seasonality up to daily, light trend changepoints,
    # separate holidays +- 1 day, default feature sets, no autoregression and linear fit algorithm.
    "HOURLY_SEAS_NM_GR_LINEAR_CP_LT_HOL_SP1_FEASET_AUTO_ALGO_LINEAR_AR_OFF",
    # For hourly data, normal seasonality up to daily, normal trend changepoints,
    # separate holidays +- 1 day, default feature sets, automatic autoregression and linear fit algorithm.
    "HOURLY_SEAS_NM_GR_LINEAR_CP_NM_HOL_SP1_FEASET_AUTO_ALGO_LINEAR_AR_AUTO"
]
class RollingTimeSeriesSplit(BaseCrossValidator):
    """Flexible splitter for time-series cross validation and rolling window evaluation.
    Suitable for use in GridSearchCV.

    Attributes
    ----------
    min_splits : int
        Guaranteed min number of splits. This is always set to 1. If provided configuration results in 0 splits,
        the cross validator will yield a default split.
    __starting_test_index : int
        Test end index of the first CV split. Actual offset = __starting_test_index + _get_offset(X), for a particular
        dataset X.
        Cross validator ensures the last test split contains the last observation in X.

    Examples
    --------
    >>> from greykite.sklearn.cross_validation import RollingTimeSeriesSplit
    >>> X = np.random.rand(20, 4)
    >>> tscv = RollingTimeSeriesSplit(forecast_horizon=3, max_splits=4)
    >>> tscv.get_n_splits(X=X)
    4
    >>> for train, test in tscv.split(X=X):
    ...     print(train, test)
    [2 3 4 5 6 7] [ 8 9 10]
    [ 5 6 7 8 9 10] [11 12 13]
    [ 8 9 10 11 12 13] [14 15 16]
    [11 12 13 14 15 16] [17 18 19]
    >>> X = np.random.rand(20, 4)
    >>> tscv = RollingTimeSeriesSplit(forecast_horizon=2,
    ...                               min_train_periods=4,
    ...                               expanding_window=True,
    ...                               periods_between_splits=4,
    ...                               periods_between_train_test=2,
    ...                               max_splits=None)
    >>> tscv.get_n_splits(X=X)
    4
    >>> for train, test in tscv.split(X=X):
    ...     print(train, test)
    [0 1 2 3] [6 7]
    [0 1 2 3 4 5 6 7] [10 11]
    [ 0 1 2 3 4 5 6 7 8 9 10 11] [14 15]
    [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15] [18 19]
    >>> X = np.random.rand(5, 4) # default split if there is not enough data
    >>> for train, test in tscv.split(X=X):
    ...     print(train, test)
    [0 1 2 3] [4]
    """
    def __init__(
            self,
            forecast_horizon,
            min_train_periods=None,
            expanding_window=False,
            use_most_recent_splits=False,
            periods_between_splits=None,
            periods_between_train_test=0,
            max_splits=3):
        """Initializes attributes of RollingTimeSeriesSplit

        Parameters
        ----------
        forecast_horizon : `int`
            How many periods in each CV test set
        min_train_periods : `int` or None, optional
            Minimum number of periods for training.
            If ``expanding_window`` is False, every training period has this size.
        expanding_window : `bool`, default False
            If True, training window for each CV split is fixed to the first available date.
            Otherwise, train start date is sliding, determined by ``min_train_periods``.
        use_most_recent_splits: `bool`, default False
            If True, splits from the end of the dataset are used.
            Else a sampling strategy is applied. Check
            `~greykite.sklearn.cross_validation.RollingTimeSeriesSplit._sample_splits`
            for details.
        periods_between_splits : `int` or None
            Number of periods to slide the test window
        periods_between_train_test : `int`
            Number of periods gap between train and test within a CV split
        max_splits : `int` or None
            Maximum number of CV splits. Given the above configuration, samples up to max_splits train/test splits,
            preferring splits toward the end of available data. If None, uses all splits.
        """
        super().__init__()
        self.forecast_horizon = get_integer(forecast_horizon, name="forecast_horizon", min_value=1)
        # by default, use at least twice the forecast horizon for training
        self.min_train_periods = get_integer(min_train_periods, name="min_train_periods",
                                             min_value=1, default_value=2 * self.forecast_horizon)
        # by default, use fixed size training window
        self.expanding_window = expanding_window
        # by default, does not force most recent splits
        self.use_most_recent_splits = use_most_recent_splits
        # by default, use non-overlapping test sets
        self.periods_between_splits = get_integer(periods_between_splits, name="periods_between_splits",
                                                  min_value=1, default_value=self.forecast_horizon)
        # by default, use test set immediately following train set
        self.periods_between_train_test = get_integer(periods_between_train_test, name="periods_between_train_test",
                                                      min_value=0, default_value=0)
        if self.min_train_periods < 2 * self.forecast_horizon:
            warnings.warn(f"`min_train_periods` is too small for your `forecast_horizon`. Should be at least"
                          f" {forecast_horizon*2}=2*`forecast_horizon`.")
        self.max_splits = max_splits
        self.min_splits = 1  # CV ensures there is always at least one split
        # test end index for the first CV split, before applying offset to ensure last data point in X is used
        self.__starting_test_index = (self.forecast_horizon
                                      + self.min_train_periods
                                      + self.periods_between_train_test
                                      - 1)
    def split(self, X, y=None, groups=None):
        """Generates indices to split data into training and test CV folds according to rolling
        window time series cross validation

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
            Must have `shape` method.
        y : array-like, shape (n_samples,), optional
            The target variable for supervised learning problems. Always ignored, exists for compatibility.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set. Always ignored, exists for compatibility.

        Yields
        ------
        train : `numpy.array`
            The training set indices for that split.
        test : `numpy.array`
            The testing set indices for that split.
        """
        num_samples = X.shape[0]
        indices = np.arange(num_samples)
        n_splits_without_capping = self.get_n_splits_without_capping(X=X)
        n_splits = self.get_n_splits(X=X)
        # Warnings help the user tune the configuration; they do not change the splits.
        if n_splits_without_capping == 0:
            warnings.warn("There are no CV splits under the requested settings. Decrease `forecast_horizon` and/or"
                          " `min_train_periods`. Using default 90/10 CV split")
        elif n_splits == 1:
            warnings.warn("There is only one CV split")
        elif n_splits >= 10:
            warnings.warn(f"There is a high number of CV splits ({n_splits}). If training is slow, increase "
                          f"`periods_between_splits` or `min_train_periods`, or decrease `max_splits`")
        log_message(f"There are {n_splits} CV splits.", LoggingLevelEnum.INFO)
        if n_splits_without_capping == 0:  # uses default split
            default_split_ratio = 0.9
            train_samples = int(round(num_samples * default_split_ratio))
            yield indices[:train_samples], indices[train_samples:]
        else:  # determines which splits to keep so that up to max_splits are returned
            splits_to_keep = self._sample_splits(n_splits_without_capping)
            last_index = num_samples - 1
            # Offset shifts all splits right so the final test window ends exactly at the last observation.
            test_end_index = self.__starting_test_index + self._get_offset(X=X)
            current_split_index = 0
            while test_end_index <= last_index:
                # Work backwards from the test end: test window, optional gap, then train window.
                test_start_index = test_end_index - self.forecast_horizon + 1
                train_end_index = test_start_index - self.periods_between_train_test - 1
                train_start_index = 0 if self.expanding_window else train_end_index - self.min_train_periods + 1
                assert train_start_index >= 0  # guaranteed by n_splits > 0
                if current_split_index in splits_to_keep:
                    log_message(f"CV split: Train {train_start_index} to {train_end_index}. "
                                f"Test {test_start_index} to {test_end_index}.", LoggingLevelEnum.DEBUG)
                    yield indices[train_start_index:train_end_index + 1], indices[test_start_index:test_end_index + 1]
                test_end_index += self.periods_between_splits
                current_split_index += 1
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations yielded by the cross-validator

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data to split
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            The number of splitting iterations yielded by the cross-validator.
        """
        num_splits = self.get_n_splits_without_capping(X=X)
        if self.max_splits is not None and num_splits > self.max_splits:
            num_splits = self.max_splits  # num_splits is set to max limit
        if num_splits == 0:
            num_splits = self.min_splits  # not enough observations to create split, uses default
        return num_splits
    def get_n_splits_without_capping(self, X=None):
        """Returns the number of splitting iterations in the cross-validator as configured, ignoring
        self.max_splits and self.min_splits

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data to split

        Returns
        -------
        n_splits : int
            The number of splitting iterations in the cross-validator as configured, ignoring
            self.max_splits and self.min_splits
        """
        last_index = X.shape[0] - 1
        starting_index = self.__starting_test_index + self._get_offset(X=X)
        if starting_index > last_index:
            return 0  # not even one full train+gap+test window fits in X
        # One split per `periods_between_splits` stride over the remaining observations.
        return math.ceil((last_index - starting_index + 1) / self.periods_between_splits)
    def _get_offset(self, X=None):
        """Returns an offset to add to test set indices when creating CV splits

        CV splits are shifted so that the last test observation is the last point in X.
        This shift does not affect the total number of splits.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data to split

        Returns
        -------
        offset : int
            The number of observations to ignore at the beginning of X when creating CV splits
        """
        last_index = X.shape[0] - 1
        starting_index = self.__starting_test_index
        if starting_index > last_index:
            return 0
        # Remainder after fitting whole strides; shifting by it aligns the last split with the end of X.
        return (last_index - starting_index) % self.periods_between_splits
    def _sample_splits(self, num_splits, seed=48912):
        """Samples up to ``max_splits`` items from list(range(`num_splits`)).

        If ``use_most_recent_splits`` is True, highest split indices up to ``max_splits``
        are retained. Otherwise, the following sampling scheme is implemented:

            - takes the last 2 splits
            - samples from the rest uniformly at random

        Parameters
        ----------
        num_splits : `int`
            Number of splits before sampling.
        seed : `int`
            Seed for random sampling.

        Returns
        -------
        n_splits : `list`
            Indices of splits to keep (subset of `list(range(num_splits))`).
        """
        split_indices = list(range(num_splits))
        if self.max_splits is not None and num_splits > self.max_splits:
            if self.use_most_recent_splits:
                # keep indices from the end up to max_splits
                keep_split_indices = split_indices[-self.max_splits:]
            else:
                # applies sampling scheme to take up to max_splits
                keep_split_indices = []
                if self.max_splits > 0:  # first takes the last split
                    keep_split_indices.append(split_indices[-1])
                if self.max_splits > 1:  # then takes the second to last split
                    keep_split_indices.append(split_indices[-2])
                if self.max_splits > 2:  # then randomly samples the remaining splits
                    # NOTE(review): seeds the module-level RNG, which mutates global
                    # `random` state for the rest of the process; kept for reproducibility.
                    random.seed(seed)
                    keep_split_indices += random.sample(split_indices[:-2], self.max_splits - 2)
            split_indices = keep_split_indices
        return split_indices
    def _iter_test_indices(self, X=None, y=None, groups=None):
        """Class directly implements `split` instead of providing this function"""
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `get_auto_silverkite_model_template` function. Write a Python function `def get_auto_silverkite_model_template( df: pd.DataFrame, default_model_template_name: str, config: Optional[ForecastConfig] = None)` to solve the following problem:
Gets the most appropriate model template that fits the input df's frequency, forecast horizon and number of cv splits. We define the cv to be sufficient if both the number of splits is at least 5 and the number of evaluated points is at least 30. A multi-template will be used only when cv is sufficient. Parameters ---------- df : `pandas.DataFrame` The input time series. default_model_template_name : `str` The default model template name. The default must be something other than "AUTO", since this function resolves "AUTO" to a specific model template. config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None, default None Config object for template class to use. See :class:`~greykite.framework.templates.model_templates.ForecastConfig`. Returns ------- model_template : `str` The model template name that best fits the scenario.
Here is the function:
def get_auto_silverkite_model_template(
        df: pd.DataFrame,
        default_model_template_name: str,
        config: Optional[ForecastConfig] = None):
    """Resolves the "AUTO" model template to the concrete Silverkite template
    that best matches the input df's frequency, forecast horizon and number of
    CV splits.

    CV is considered sufficient when there are at least 5 splits and at least
    30 evaluated points in total. A multi-template candidate is chosen only
    when CV is sufficient.

    Parameters
    ----------
    df : `pandas.DataFrame`
        The input time series.
    default_model_template_name : `str`
        The fallback model template name, returned when no closer match exists.
        Must be something other than "AUTO", since this function is what
        resolves "AUTO" to a specific model template.
    config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None, default None
        Config object for template class to use.
        See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.

    Returns
    -------
    model_template : `str`
        The model template name that best fits the scenario.
    """
    if default_model_template_name == "AUTO":
        raise ValueError("The `default_model_template_name` in "
                         "`get_auto_silverkite_model_template` cannot be 'AUTO'.")
    model_template = default_model_template_name
    # Without a config there is nothing to resolve against.
    if config is None:
        return model_template
    config = ForecastConfigDefaults().apply_forecast_config_defaults(config)
    metadata = config.metadata_param
    # Determine the data frequency; fall back to inference from the data.
    freq = metadata.freq
    if freq is None:
        freq = infer_freq(
            df,
            metadata.time_col
        )
    if freq is None:
        # NB: frequency inference fails if there are missing points in the input data
        log_message(
            message=f"Model template was set to 'auto', however, the data frequency "
                    f"is not given and can not be inferred. "
                    f"Using default model template '{model_template}'.",
            level=LoggingLevelEnum.INFO
        )
        return model_template
    # Infer forecast_horizon, test_horizon, cv_horizon, etc. when not given,
    # then count the CV splits to decide whether multi-template is affordable.
    evaluation_period = config.evaluation_period_param
    period = min_gap_in_seconds(df=df, time_col=metadata.time_col)
    time_params = get_default_time_parameters(
        period=period,
        num_observations=df.shape[0],
        forecast_horizon=config.forecast_horizon,
        test_horizon=evaluation_period.test_horizon,
        periods_between_train_test=evaluation_period.periods_between_train_test,
        cv_horizon=evaluation_period.cv_horizon,
        cv_min_train_periods=evaluation_period.cv_min_train_periods,
        cv_periods_between_train_test=evaluation_period.cv_periods_between_train_test)
    forecast_horizon = time_params.get("forecast_horizon")
    cv_horizon = time_params.get("cv_horizon")
    cv = RollingTimeSeriesSplit(
        forecast_horizon=cv_horizon,
        min_train_periods=time_params.get("cv_min_train_periods"),
        expanding_window=evaluation_period.cv_expanding_window,
        use_most_recent_splits=evaluation_period.cv_use_most_recent_splits,
        periods_between_splits=evaluation_period.cv_periods_between_splits,
        periods_between_train_test=time_params.get("cv_periods_between_train_test"),
        max_splits=evaluation_period.cv_max_splits)
    # CV splits are counted on the data remaining after holding out the test set.
    holdout_length = time_params.get("test_horizon") + time_params.get("periods_between_train_test")
    df_sample = df.iloc[:-holdout_length].copy() if holdout_length > 0 else df.copy()
    n_splits = cv.get_n_splits(X=df_sample)
    # Sufficient CV: >= 5 splits, >= 30 evaluated points, and CV not disabled.
    splits_sufficient = (n_splits >= 5) and (n_splits * cv_horizon >= 30) and (evaluation_period.cv_max_splits != 0)
    # Map (frequency, horizon) to the closest template, preferring the
    # multi-template variant when CV is sufficient. When no close template
    # exists, the default is kept.
    if freq == "H":
        # The best single hourly template uses a linear fit algorithm with a
        # small risk of numerical issues, so the single-template fallbacks
        # below come from the curated per-horizon lists.
        hourly_candidates = None
        if forecast_horizon == 1:
            hourly_candidates = (SILVERKITE_HOURLY_1[0], ModelTemplateEnum.SILVERKITE_HOURLY_1.name)
        elif forecast_horizon <= 24 * 2:
            hourly_candidates = (SILVERKITE_HOURLY_24[0], ModelTemplateEnum.SILVERKITE_HOURLY_24.name)
        elif forecast_horizon <= 24 * 7 + 24:
            hourly_candidates = (SILVERKITE_HOURLY_168[0], ModelTemplateEnum.SILVERKITE_HOURLY_168.name)
        elif forecast_horizon <= 24 * 7 * 3:
            hourly_candidates = (SILVERKITE_HOURLY_336[0], ModelTemplateEnum.SILVERKITE_HOURLY_336.name)
        if hourly_candidates is not None:
            model_template = hourly_candidates[1] if splits_sufficient else hourly_candidates[0]
    elif freq == "D":
        if forecast_horizon <= 7:
            model_template = (ModelTemplateEnum.SILVERKITE_DAILY_1.name if splits_sufficient
                              else ModelTemplateEnum.SILVERKITE_DAILY_1_CONFIG_1.name)
        elif forecast_horizon >= 90:
            model_template = (ModelTemplateEnum.SILVERKITE_DAILY_90.name if splits_sufficient
                              else SILVERKITE_DAILY_90[0])
    elif freq in ("W", "W-SUN", "W-MON", "W-TUE", "W-WED", "W-THU", "W-FRI", "W-SAT"):
        model_template = (ModelTemplateEnum.SILVERKITE_WEEKLY.name if splits_sufficient
                          else SILVERKITE_WEEKLY[0])
    elif freq in ("M", "MS", "SM", "BM", "CBM", "SMS", "BMS", "CBMS"):
        # Monthly template includes monthly data and some variants.
        # See pandas documentation
        # https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
        model_template = ModelTemplateEnum.SILVERKITE_MONTHLY.name
    log_message(
        message=f"Model template was set to 'auto'. "
                f"Automatically found most appropriate model template '{model_template}'.",
        level=LoggingLevelEnum.INFO
    )
    return model_template
167,502 | import functools
import warnings
from dataclasses import dataclass
import pandas as pd
from sklearn import clone
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.common.python_utils import get_integer
from greykite.common.time_properties import min_gap_in_seconds
from greykite.framework.constants import COMPUTATION_N_JOBS
from greykite.framework.constants import CV_REPORT_METRICS_ALL
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
from greykite.framework.output.univariate_forecast import UnivariateForecast
from greykite.framework.pipeline.utils import get_basic_pipeline
from greykite.framework.pipeline.utils import get_default_time_parameters
from greykite.framework.pipeline.utils import get_forecast
from greykite.framework.pipeline.utils import get_hyperparameter_searcher
from greykite.sklearn.cross_validation import RollingTimeSeriesSplit
from greykite.sklearn.estimator.simple_silverkite_estimator import SimpleSilverkiteEstimator
# Default column names used throughout Greykite.
TIME_COL = "ts"  # default name of the timestamp column
VALUE_COL = "y"  # default name of the target value column
class EvaluationMetricEnum(Enum):
    """Valid evaluation metrics.

    The values tuple is ``(score_func: callable, greater_is_better: boolean, short_name: str)``.

    ``add_finite_filter_to_scorer`` is added to the metrics that are directly imported from
    ``sklearn.metrics`` (e.g. ``mean_squared_error``) to ensure that the metric gets calculated
    even when inputs have missing values.
    """
    Correlation = (correlation, True, "CORR")
    """Pearson correlation coefficient between forecast and actuals. Higher is better."""
    CoefficientOfDetermination = (add_finite_filter_to_scorer(r2_score), True, "R2")
    """Coefficient of determination. See `sklearn.metrics.r2_score`. Higher is better. Equals `1.0 - mean_squared_error / variance(actuals)`."""
    MeanSquaredError = (add_finite_filter_to_scorer(mean_squared_error), False, "MSE")
    """Mean squared error, the average of squared differences,
    see `sklearn.metrics.mean_squared_error`."""
    RootMeanSquaredError = (root_mean_squared_error, False, "RMSE")
    """Root mean squared error, the square root of `sklearn.metrics.mean_squared_error`"""
    MeanAbsoluteError = (add_finite_filter_to_scorer(mean_absolute_error), False, "MAE")
    """Mean absolute error, average of absolute differences,
    see `sklearn.metrics.mean_absolute_error`."""
    MedianAbsoluteError = (add_finite_filter_to_scorer(median_absolute_error), False, "MedAE")
    """Median absolute error, median of absolute differences,
    see `sklearn.metrics.median_absolute_error`."""
    MeanAbsolutePercentError = (mean_absolute_percent_error, False, "MAPE")
    """Mean absolute percent error, error relative to actuals expressed as a %,
    see `wikipedia MAPE <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`_."""
    MedianAbsolutePercentError = (median_absolute_percent_error, False, "MedAPE")
    """Median absolute percent error, median of error relative to actuals expressed as a %,
    a median version of the MeanAbsolutePercentError, less affected by extreme values."""
    SymmetricMeanAbsolutePercentError = (symmetric_mean_absolute_percent_error, False, "sMAPE")
    """Symmetric mean absolute percent error, error relative to (actuals+forecast) expressed as a %.
    Note that we do not include a factor of 2 in the denominator, so the range is 0% to 100%,
    see `wikipedia sMAPE <https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error>`_."""
    Quantile80 = (quantile_loss_q(0.80), False, "Q80")
    """Quantile loss with q=0.80::

        np.where(y_true < y_pred, (1 - q) * (y_pred - y_true), q * (y_true - y_pred)).mean()
    """
    Quantile95 = (quantile_loss_q(0.95), False, "Q95")
    """Quantile loss with q=0.95::

        np.where(y_true < y_pred, (1 - q) * (y_pred - y_true), q * (y_true - y_pred)).mean()
    """
    Quantile99 = (quantile_loss_q(0.99), False, "Q99")
    """Quantile loss with q=0.99::

        np.where(y_true < y_pred, (1 - q) * (y_pred - y_true), q * (y_true - y_pred)).mean()
    """
    FractionOutsideTolerance1 = (partial(fraction_outside_tolerance, rtol=0.01), False, "OutsideTolerance1p")
    """Fraction of forecasted values that deviate more than 1% from the actual"""
    FractionOutsideTolerance2 = (partial(fraction_outside_tolerance, rtol=0.02), False, "OutsideTolerance2p")
    """Fraction of forecasted values that deviate more than 2% from the actual"""
    FractionOutsideTolerance3 = (partial(fraction_outside_tolerance, rtol=0.03), False, "OutsideTolerance3p")
    """Fraction of forecasted values that deviate more than 3% from the actual"""
    FractionOutsideTolerance4 = (partial(fraction_outside_tolerance, rtol=0.04), False, "OutsideTolerance4p")
    """Fraction of forecasted values that deviate more than 4% from the actual"""
    FractionOutsideTolerance5 = (partial(fraction_outside_tolerance, rtol=0.05), False, "OutsideTolerance5p")
    """Fraction of forecasted values that deviate more than 5% from the actual"""
    # Accessors unpack the (score_func, greater_is_better, short_name) value tuple.
    def get_metric_func(self):
        """Returns the metric function."""
        return self.value[0]
    def get_metric_greater_is_better(self):
        """Returns the greater_is_better boolean."""
        return self.value[1]
    def get_metric_name(self):
        """Returns the short name."""
        return self.value[2]
class LoggingLevelEnum(Enum):
    """Valid types of logging levels available to use.

    Values mirror the numeric levels of the standard `logging` module,
    so ``member.value`` can be passed directly to ``logger.log``.
    """
    CRITICAL = 50
    ERROR = 40
    WARNING = 30
    INFO = 20
    DEBUG = 10
    NOTSET = 0
def log_message(message, level=LoggingLevelEnum.INFO):
    """Adds a message to logger.

    Parameters
    ----------
    message : `any`
        The message to be added to logger.
    level : `Enum`
        One of the levels in the `~greykite.common.enums.LoggingLevelEnum`.

    Raises
    ------
    ValueError
        If ``level`` is not a member of ``LoggingLevelEnum``.
    """
    # isinstance check (rather than `level.name not in __members__`) so that a
    # non-enum argument (e.g. a string or int) raises the intended ValueError
    # instead of an AttributeError when accessing `.name`.
    if not isinstance(level, LoggingLevelEnum):
        raise ValueError(f"{level} not found, it must be a member of the LoggingLevelEnum class.")
    logger.log(level.value, message)
def get_integer(val=None, name="value", min_value=0, default_value=0):
    """Returns integer value from input, with basic validation

    Parameters
    ----------
    val : `float` or None, default None
        Value to convert to integer.
    name : `str`, default "value"
        What the value represents.
    min_value : `float`, default 0
        Minimum allowed value.
    default_value : `float` , default 0
        Value to be used if ``val`` is None.

    Returns
    -------
    val : `int`
        Value parsed as an integer.

    Raises
    ------
    ValueError
        If ``val`` cannot be converted to an integer, or is below ``min_value``.
    """
    if val is None:
        val = default_value
    try:
        orig = val
        val = int(val)
    # int() raises TypeError (not ValueError) for inputs like lists or dicts;
    # catch both so all non-integer inputs produce the intended error message.
    except (TypeError, ValueError):
        raise ValueError(f"{name} must be an integer")
    else:
        if val != orig:
            # e.g. float 2.5 was truncated, or string "7" was parsed
            warnings.warn(f"{name} converted to integer {val} from {orig}")
    if not val >= min_value:
        raise ValueError(f"{name} must be >= {min_value}")
    return val
def min_gap_in_seconds(df, time_col):
    """Returns the smallest gap between observations in df[time_col].

    Assumes df[time_col] is sorted in ascending order without duplicates.

    :param df: pd.DataFrame
        input timeseries
    :param time_col: str
        time column name in `df`
    :return: float
        minimum gap between observations, in seconds
    :raises ValueError: if `df` has fewer than two rows
    """
    if df.shape[0] < 2:
        raise ValueError(f"Must provide at least two data points. Found {df.shape[0]}.")
    timestamps = pd.to_datetime(df[time_col])
    period = (timestamps - timestamps.shift()).min()
    # total_seconds() keeps sub-second components; the previous
    # `days*24*3600 + seconds` formula dropped microseconds, so sub-second
    # frequencies incorrectly returned 0 (and the documented float was an int).
    return period.total_seconds()
# Default number of parallel jobs for computation (1 = no parallelism).
COMPUTATION_N_JOBS = 1
def get_default_time_parameters(
        period,
        num_observations,
        forecast_horizon=None,
        test_horizon=None,
        periods_between_train_test=None,
        cv_horizon=None,
        cv_min_train_periods=None,
        cv_expanding_window=False,
        cv_periods_between_splits=None,
        cv_periods_between_train_test=None,
        cv_max_splits=3):
    """Returns default forecast horizon, backtest, and cross-validation parameters,
    given the input frequency, size, and user requested values.

    This function is called from the `~greykite.framework.pipeline.pipeline.forecast_pipeline`
    directly, to provide suitable default to users of forecast_pipeline, and because the default
    should not depend on model configuration (the template).

    NOTE(review): ``cv_expanding_window``, ``cv_periods_between_splits`` and
    ``cv_max_splits`` are accepted for interface compatibility but are neither
    validated nor included in the returned dict; ``cv_min_train_periods`` is
    passed through unmodified.

    Parameters
    ----------
    period: `float`
        Period of each observation (i.e. average time between observations, in seconds).
    num_observations: `int`
        Number of observations in the input data.
    forecast_horizon: `int` or None, default None
        Number of periods to forecast into the future. Must be > 0.
        If None, default is determined from input data frequency.
    test_horizon: `int` or None, default None
        Numbers of periods held back from end of df for test.
        The rest is used for cross validation.
        If None, default is ``forecast_horizon``. Set to 0 to skip backtest.
    periods_between_train_test : `int` or None, default None
        Number of periods gap between train and test in a CV split.
        If None, default is 0.
    cv_horizon: `int` or None, default None
        Number of periods in each CV test set.
        If None, default is ``forecast_horizon``. Set to 0 to skip CV.
    cv_min_train_periods: `int` or None, default None
        Minimum number of periods for training each CV fold.
        If ``cv_expanding_window`` is False, every training period is this size.
        If None, default is 2 * ``cv_horizon``.
    cv_expanding_window: `bool`, default False
        If True, training window for each CV split is fixed to the first available date.
        Otherwise, train start date is sliding, determined by ``cv_min_train_periods``.
    cv_periods_between_splits: `int` or None, default None
        Number of periods to slide the test window between CV splits
        If None, default is ``cv_horizon``.
    cv_periods_between_train_test: `int` or None, default None
        Number of periods gap between train and test in a CV split.
        If None, default is ``periods_between_train_test``.
    cv_max_splits: `int` or None, default 3
        Maximum number of CV splits. Given the above configuration, samples up to max_splits train/test splits,
        preferring splits toward the end of available data. If None, uses all splits.

    Returns
    -------
    time_params : `dict` [`str`, `int`]
        keys are parameter names, values are their default values.
    """
    # Defaults are resolved in dependency order: forecast_horizon first, since
    # test_horizon and cv_horizon default to it.
    if forecast_horizon is None:
        forecast_horizon = get_default_horizon_from_period(
            period=period,
            num_observations=num_observations)
    forecast_horizon = get_integer(val=forecast_horizon, name="forecast_horizon", min_value=1)
    test_horizon = get_integer(
        val=test_horizon,
        name="test_horizon",
        min_value=0,
        default_value=forecast_horizon)
    # reduces test_horizon to default 80/20 split if there is not enough data
    if test_horizon >= num_observations:
        test_horizon = math.floor(num_observations * 0.2)
    cv_horizon = get_integer(
        val=cv_horizon,
        name="cv_horizon",
        min_value=0,
        default_value=forecast_horizon)
    # RollingTimeSeriesSplit handles the case of no CV splits, not handled in detail here
    # temporary patch to avoid the case where cv_horizon==num_observations, which throws an error
    # in RollingTimeSeriesSplit
    if cv_horizon >= num_observations:
        cv_horizon = math.floor(num_observations * 0.2)
    periods_between_train_test = get_integer(
        val=periods_between_train_test,
        name="periods_between_train_test",
        min_value=0,
        default_value=0)
    cv_periods_between_train_test = get_integer(
        val=cv_periods_between_train_test,
        name="cv_periods_between_train_test",
        min_value=0,
        default_value=periods_between_train_test)
    return {
        "forecast_horizon": forecast_horizon,
        "test_horizon": test_horizon,
        "periods_between_train_test": periods_between_train_test,
        "cv_horizon": cv_horizon,
        "cv_min_train_periods": cv_min_train_periods,
        "cv_periods_between_train_test": cv_periods_between_train_test
    }
class SimpleSilverkiteEstimator(BaseSilverkiteEstimator):
"""Wrapper for `~greykite.algo.forecast.silverkite.forecast_simple_silverkite.forecast_simple_silverkite`.
Parameters
----------
score_func : callable, optional, default mean_squared_error
See `~greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
coverage : `float` between [0.0, 1.0] or None, optional
See `~greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
null_model_params : `dict` or None, optional
Dictionary with arguments to define ``DummyRegressor`` null model, default is `None`.
See `~greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
fit_algorithm_dict : `dict` or None, optional
How to fit the model. A dictionary with the following optional keys.
``"fit_algorithm"`` : `str`, optional, default "ridge"
The type of predictive model used in fitting.
See `~greykite.algo.common.ml_models.fit_model_via_design_matrix`
for available options and their parameters.
``"fit_algorithm_params"`` : `dict` or None, optional, default None
Parameters passed to the requested fit_algorithm.
If None, uses the defaults in `~greykite.algo.common.ml_models.fit_model_via_design_matrix`.
uncertainty_dict : `dict` or `str` or None, optional
How to fit the uncertainty model.
See `~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`.
Note that this is allowed to be "auto". If None or "auto", will be set to
a default value by ``coverage`` before calling ``forecast_silverkite``.
See ``BaseForecastEstimator`` for details.
kwargs : additional parameters
Other parameters are the same as in
`~greykite.algo.forecast.silverkite.forecast_simple_silverkite.forecast_simple_silverkite`.
See source code ``__init__`` for the parameter names, and refer to
`~greykite.algo.forecast.silverkite.forecast_simple_silverkite.forecast_simple_silverkite` for
their description.
If this Estimator is called from
:func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`,
``train_test_thresh`` and ``training_fraction`` should almost
always be `None`, because train/test is handled outside
this Estimator.
Notes
-----
Attributes match those of
`~greykite.sklearn.estimator.base_silverkite_estimator.BaseSilverkiteEstimator`.
See Also
--------
`~greykite.sklearn.estimator.base_silverkite_estimator.BaseSilverkiteEstimator`
For attributes and details on fit, predict, and component plots.
`~greykite.algo.forecast.silverkite.forecast_simple_silverkite.forecast_simple_silverkite`
Function to transform the parameters to call ``forecast_silverkite`` fit.
`~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`
Functions performing the fit and predict.
"""
def __init__(
        self,
        silverkite: SimpleSilverkiteForecast = SimpleSilverkiteForecast(),
        score_func: callable = mean_squared_error,
        coverage: float = None,
        null_model_params: Optional[Dict] = None,
        time_properties: Optional[Dict] = None,
        freq: Optional[str] = None,
        forecast_horizon: Optional[int] = None,
        origin_for_time_vars: Optional[float] = None,
        train_test_thresh: Optional[datetime] = None,
        training_fraction: Optional[float] = None,
        fit_algorithm_dict: Optional[Dict] = None,
        auto_holiday: bool = False,
        holidays_to_model_separately: Optional[Union[str, List[str]]] = "auto",
        holiday_lookup_countries: Optional[Union[str, List[str]]] = "auto",
        holiday_pre_num_days: int = 2,
        holiday_post_num_days: int = 2,
        holiday_pre_post_num_dict: Optional[Dict] = None,
        daily_event_df_dict: Optional[Dict] = None,
        auto_holiday_params: Optional[Dict] = None,
        daily_event_neighbor_impact: Optional[Union[int, List[int], callable]] = None,
        daily_event_shifted_effect: Optional[List[str]] = None,
        auto_growth: bool = False,
        changepoints_dict: Optional[Dict] = None,
        auto_seasonality: bool = False,
        yearly_seasonality: Union[bool, str, int] = "auto",
        quarterly_seasonality: Union[bool, str, int] = "auto",
        monthly_seasonality: Union[bool, str, int] = "auto",
        weekly_seasonality: Union[bool, str, int] = "auto",
        daily_seasonality: Union[bool, str, int] = "auto",
        max_daily_seas_interaction_order: Optional[int] = None,
        max_weekly_seas_interaction_order: Optional[int] = None,
        autoreg_dict: Optional[Dict] = None,
        past_df: Optional[pd.DataFrame] = None,
        lagged_regressor_dict: Optional[Dict] = None,
        seasonality_changepoints_dict: Optional[Dict] = None,
        min_admissible_value: Optional[float] = None,
        max_admissible_value: Optional[float] = None,
        uncertainty_dict: Optional[Dict] = None,
        normalize_method: Optional[str] = None,
        growth_term: Optional[str] = cst.GrowthColEnum.linear.name,
        regressor_cols: Optional[List[str]] = None,
        feature_sets_enabled: Optional[Union[bool, Dict[str, bool]]] = None,
        extra_pred_cols: Optional[List[str]] = None,
        drop_pred_cols: Optional[List[str]] = None,
        explicit_pred_cols: Optional[List[str]] = None,
        regression_weight_col: Optional[str] = None,
        simulation_based: Optional[bool] = False,
        simulation_num: int = 10,
        fast_simulation: bool = False,
        remove_intercept: bool = False):
    """Initializes the estimator.

    Stores every constructor argument as an attribute of the same name and
    performs no other computation, so that `sklearn` ``get_params()`` can
    recover the full parameter set (required for grid search).
    See the class docstring for the meaning of each parameter.
    """
    # NOTE(review): the default ``silverkite`` instance is shared across all
    # estimators constructed without this argument — presumably it is
    # stateless until `fit` is called; confirm before relying on it.
    # every subclass of BaseSilverkiteEstimator must call super().__init__
    super().__init__(
        silverkite=silverkite,
        score_func=score_func,
        coverage=coverage,
        null_model_params=null_model_params,
        uncertainty_dict=uncertainty_dict)
    # necessary to set parameters, to ensure get_params() works
    # (used in grid search)
    self.score_func = score_func
    self.coverage = coverage
    self.null_model_params = null_model_params
    self.time_properties = time_properties
    self.freq = freq
    self.forecast_horizon = forecast_horizon
    self.origin_for_time_vars = origin_for_time_vars
    self.train_test_thresh = train_test_thresh
    self.training_fraction = training_fraction
    # Defaults for this dict are filled in by `fit`, not here, to stay
    # compatible with sklearn grid search (see `fit`).
    self.fit_algorithm_dict = fit_algorithm_dict
    self.auto_holiday = auto_holiday
    self.holidays_to_model_separately = holidays_to_model_separately
    self.holiday_lookup_countries = holiday_lookup_countries
    self.holiday_pre_num_days = holiday_pre_num_days
    self.holiday_post_num_days = holiday_post_num_days
    self.holiday_pre_post_num_dict = holiday_pre_post_num_dict
    self.daily_event_df_dict = daily_event_df_dict
    self.auto_holiday_params = auto_holiday_params
    self.daily_event_neighbor_impact = daily_event_neighbor_impact
    self.daily_event_shifted_effect = daily_event_shifted_effect
    self.auto_growth = auto_growth
    self.changepoints_dict = changepoints_dict
    self.auto_seasonality = auto_seasonality
    self.yearly_seasonality = yearly_seasonality
    self.quarterly_seasonality = quarterly_seasonality
    self.monthly_seasonality = monthly_seasonality
    self.weekly_seasonality = weekly_seasonality
    self.daily_seasonality = daily_seasonality
    self.max_daily_seas_interaction_order = max_daily_seas_interaction_order
    self.max_weekly_seas_interaction_order = max_weekly_seas_interaction_order
    self.autoreg_dict = autoreg_dict
    self.past_df = past_df
    self.lagged_regressor_dict = lagged_regressor_dict
    self.seasonality_changepoints_dict = seasonality_changepoints_dict
    self.min_admissible_value = min_admissible_value
    self.max_admissible_value = max_admissible_value
    self.uncertainty_dict = uncertainty_dict
    self.normalize_method = normalize_method
    self.growth_term = growth_term
    self.regressor_cols = regressor_cols
    self.feature_sets_enabled = feature_sets_enabled
    self.extra_pred_cols = extra_pred_cols
    self.drop_pred_cols = drop_pred_cols
    self.explicit_pred_cols = explicit_pred_cols
    self.regression_weight_col = regression_weight_col
    self.simulation_based = simulation_based
    self.simulation_num = simulation_num
    self.fast_simulation = fast_simulation
    self.remove_intercept = remove_intercept
# ``forecast_simple_silverkite`` generates a ``fs_components_df`` to call
# ``forecast_silverkite`` that is compatible with ``BaseSilverkiteEstimator``.
# Unlike ``SilverkiteEstimator``, this does not need to call ``validate_inputs``.
def fit(
        self,
        X,
        y=None,
        time_col=cst.TIME_COL,
        value_col=cst.VALUE_COL,
        **fit_params):
    """Fits ``Silverkite`` forecast model.

    Parameters
    ----------
    X: `pandas.DataFrame`
        Input timeseries, with timestamp column,
        value column, and any additional regressors.
        The value column is the response, included in
        ``X`` to allow transformation by `sklearn.pipeline`.
    y: ignored
        The original timeseries values, ignored.
        (The ``y`` for fitting is included in ``X``).
    time_col: `str`
        Time column name in ``X``.
    value_col: `str`
        Value column name in ``X``.
    fit_params: `dict`
        additional parameters for null model.

    Returns
    -------
    self : self
        Fitted model is stored in ``self.model_dict``.
    """
    # Initializes `fit_algorithm_dict` with default values.
    # This cannot be done in __init__ to remain compatible
    # with sklearn grid search.
    default_fit_algorithm_dict = {
        "fit_algorithm": "ridge",
        "fit_algorithm_params": None}
    self.fit_algorithm_dict = update_dictionary(
        default_fit_algorithm_dict,
        overwrite_dict=self.fit_algorithm_dict)
    # Fits null model
    super().fit(
        X=X,
        y=y,
        time_col=time_col,
        value_col=value_col,
        **fit_params)
    # The uncertainty method has been filled as "simple_conditional_residuals" in ``super().fit`` above if
    # ``coverage`` is given but ``uncertainty_dict`` is not given.
    # In the case that the method is "simple_conditional_residuals",
    # we use SimpleSilverkiteForecast to fit it, because under the situation of AR simulation,
    # those information are needed in generating the prediction intervals.
    # In all other cases, we fit the uncertainty model separately.
    uncertainty_dict = None
    # Initialized defensively so the variable is always bound when read below.
    uncertainty_method = None
    if self.uncertainty_dict is not None:
        uncertainty_method = self.uncertainty_dict.get("uncertainty_method", None)
        if uncertainty_method == UncertaintyMethodEnum.simple_conditional_residuals.name:
            uncertainty_dict = self.uncertainty_dict
    self.model_dict = self.silverkite.forecast_simple(
        df=X,
        time_col=time_col,
        value_col=value_col,
        time_properties=self.time_properties,
        freq=self.freq,
        forecast_horizon=self.forecast_horizon,
        origin_for_time_vars=self.origin_for_time_vars,
        train_test_thresh=self.train_test_thresh,
        training_fraction=self.training_fraction,
        fit_algorithm=self.fit_algorithm_dict["fit_algorithm"],
        fit_algorithm_params=self.fit_algorithm_dict["fit_algorithm_params"],
        auto_holiday=self.auto_holiday,
        holidays_to_model_separately=self.holidays_to_model_separately,
        holiday_lookup_countries=self.holiday_lookup_countries,
        holiday_pre_num_days=self.holiday_pre_num_days,
        holiday_post_num_days=self.holiday_post_num_days,
        holiday_pre_post_num_dict=self.holiday_pre_post_num_dict,
        daily_event_df_dict=self.daily_event_df_dict,
        auto_holiday_params=self.auto_holiday_params,
        daily_event_neighbor_impact=self.daily_event_neighbor_impact,
        daily_event_shifted_effect=self.daily_event_shifted_effect,
        auto_growth=self.auto_growth,
        changepoints_dict=self.changepoints_dict,
        auto_seasonality=self.auto_seasonality,
        yearly_seasonality=self.yearly_seasonality,
        quarterly_seasonality=self.quarterly_seasonality,
        monthly_seasonality=self.monthly_seasonality,
        weekly_seasonality=self.weekly_seasonality,
        daily_seasonality=self.daily_seasonality,
        max_daily_seas_interaction_order=self.max_daily_seas_interaction_order,
        max_weekly_seas_interaction_order=self.max_weekly_seas_interaction_order,
        autoreg_dict=self.autoreg_dict,
        past_df=self.past_df,
        lagged_regressor_dict=self.lagged_regressor_dict,
        seasonality_changepoints_dict=self.seasonality_changepoints_dict,
        min_admissible_value=self.min_admissible_value,
        max_admissible_value=self.max_admissible_value,
        uncertainty_dict=uncertainty_dict,
        normalize_method=self.normalize_method,
        growth_term=self.growth_term,
        regressor_cols=self.regressor_cols,
        feature_sets_enabled=self.feature_sets_enabled,
        extra_pred_cols=self.extra_pred_cols,
        drop_pred_cols=self.drop_pred_cols,
        explicit_pred_cols=self.explicit_pred_cols,
        regression_weight_col=self.regression_weight_col,
        simulation_based=self.simulation_based,
        simulation_num=self.simulation_num,
        fast_simulation=self.fast_simulation,
        remove_intercept=self.remove_intercept)
    # Fits the uncertainty model if not already fit.
    if self.uncertainty_dict is not None and uncertainty_dict is None:
        # The quantile regression model.
        if uncertainty_method == UncertaintyMethodEnum.quantile_regression.name:
            # Builds a df of fitted values (renamed to the predicted column)
            # alongside the actuals, to train the uncertainty model on.
            fit_df = self.silverkite.predict(
                X,
                trained_model=self.model_dict
            )["fut_df"].rename(
                columns={self.value_col_: cst.PREDICTED_COL}
            )[[self.time_col_, cst.PREDICTED_COL]]
            fit_df[self.value_col_] = X[self.value_col_].values
            x_mat = self.model_dict["x_mat"]
            default_params = {"is_residual_based": False}
            params = self.uncertainty_dict.get("params", {})
            default_params.update(params)
            # Renamed from `fit_params` to avoid shadowing this method's
            # `**fit_params` argument (null-model parameters).
            uncertainty_fit_params = {"x_mat": x_mat}
            self.fit_uncertainty(
                df=fit_df,
                uncertainty_dict=self.uncertainty_dict,
                fit_params=uncertainty_fit_params,
                **default_params
            )
    # Sets attributes based on ``self.model_dict``
    super().finish_fit()
    return self
The preceding code provides the dependencies needed to implement the `validate_pipeline_input` function. Write a Python function `def validate_pipeline_input(pipeline_function)` that solves the following problem:
Decorator that validates the inputs to the `forecast_pipeline` function and sets their default values.
Here is the function:
def validate_pipeline_input(pipeline_function):
    """Decorator that validates inputs to the ``forecast_pipeline`` function and sets defaults.

    The wrapper:

        - checks ``coverage`` is within [0, 1] and ``relative_error_tolerance`` is non-negative;
        - fills default forecast/test/CV horizons from the data frequency and size;
        - validates the horizons against the data size, raising `ValueError` for
          impossible configurations and warning about risky ones;
        - forwards the validated parameters to ``pipeline_function``.

    Parameters
    ----------
    pipeline_function : callable
        The pipeline function to wrap. Must accept the same parameters
        as the wrapper defined below.

    Returns
    -------
    pipeline_wrapper : callable
        ``pipeline_function`` with input validation and defaults applied.
    """
    @functools.wraps(pipeline_function)
    def pipeline_wrapper(
            # The arguments to this wrapper must be identical to forecast_pipeline() function.
            # We don't use **kwargs
            # because it's easier to check parameters directly.
            # input
            df: pd.DataFrame,
            time_col=TIME_COL,
            value_col=VALUE_COL,
            date_format=None,
            tz=None,
            freq=None,
            train_end_date=None,
            anomaly_info=None,
            # model
            pipeline=None,
            regressor_cols=None,
            lagged_regressor_cols=None,
            estimator=SimpleSilverkiteEstimator(),
            hyperparameter_grid=None,
            hyperparameter_budget=None,
            n_jobs=COMPUTATION_N_JOBS,
            verbose=1,
            # forecast
            forecast_horizon=None,
            coverage=0.95,
            test_horizon=None,
            periods_between_train_test=None,
            agg_periods=None,
            agg_func=None,
            # evaluation
            score_func=EvaluationMetricEnum.MeanAbsolutePercentError.name,
            score_func_greater_is_better=False,
            cv_report_metrics=None,
            null_model_params=None,
            relative_error_tolerance=None,
            # CV
            cv_horizon=None,
            cv_min_train_periods=None,
            cv_expanding_window=False,
            cv_use_most_recent_splits=False,
            cv_periods_between_splits=None,
            cv_periods_between_train_test=0,
            cv_max_splits=3):
        if coverage is not None and (coverage < 0 or coverage > 1):
            raise ValueError(f"coverage must be between 0 and 1, found {coverage}")
        if relative_error_tolerance is not None and relative_error_tolerance < 0:
            # Fixed message grammar ("must non-negative" -> "must be non-negative").
            raise ValueError(f"relative_error_tolerance must be non-negative, found {relative_error_tolerance}")
        # default values for forecast horizon, test, and cross-validation parameters
        period = min_gap_in_seconds(df=df, time_col=time_col)
        num_observations = df.shape[0]
        default_time_params = get_default_time_parameters(
            period=period,
            num_observations=num_observations,
            forecast_horizon=forecast_horizon,
            test_horizon=test_horizon,
            periods_between_train_test=periods_between_train_test,
            cv_horizon=cv_horizon,
            cv_min_train_periods=cv_min_train_periods,
            cv_periods_between_train_test=cv_periods_between_train_test)
        forecast_horizon = default_time_params.get("forecast_horizon")
        test_horizon = default_time_params.get("test_horizon")
        periods_between_train_test = default_time_params.get("periods_between_train_test")
        cv_horizon = default_time_params.get("cv_horizon")
        cv_min_train_periods = default_time_params.get("cv_min_train_periods")
        cv_periods_between_train_test = default_time_params.get("cv_periods_between_train_test")
        # ensures the values are integers in the proper domain
        if hyperparameter_budget is not None:
            hyperparameter_budget = get_integer(
                hyperparameter_budget,
                "hyperparameter_budget",
                min_value=1)
        if (cv_horizon == 0 or cv_max_splits == 0) and test_horizon == 0:
            warnings.warn("Both CV and backtest are skipped! Make sure this is intended."
                          " It's important to check model performance on historical data."
                          " Set cv_horizon and cv_max_splits to nonzero values to enable CV."
                          " Set test_horizon to nonzero value to enable backtest.")
        if test_horizon == 0:
            warnings.warn("No data selected for test (test_horizon=0). "
                          "It is important to check out of sample performance")
        # checks horizon against data size
        if num_observations < forecast_horizon * 2:
            # Removed useless f-prefix from the placeholder-free segment.
            warnings.warn("Not enough training data to forecast the full forecast_horizon."
                          " Exercise extra caution with"
                          f" forecasted values after {num_observations // 2} periods.")
        if test_horizon > num_observations:
            # Removed the stray ")" that previously ended this message.
            raise ValueError(f"test_horizon ({test_horizon}) is too large."
                             " Must be less than the number "
                             f"of input data points: {num_observations}")
        if test_horizon > forecast_horizon:
            # Removed useless f-prefix (no placeholders in this message).
            warnings.warn("test_horizon should never be larger than forecast_horizon.")
        if test_horizon > num_observations // 3:
            warnings.warn(f"test_horizon should be <= than 1/3 of the data set size to allow enough data to train"
                          f" a backtest model. Consider reducing to {num_observations // 3}. If this is smaller"
                          f" than the forecast_horizon, you will need to make a trade-off between setting"
                          f" test_horizon=forecast_horizon and having enough data left over to properly"
                          f" train a realistic backtest model.")
        log_message(f"forecast_horizon: {forecast_horizon}", LoggingLevelEnum.INFO)
        log_message(f"test_horizon: {test_horizon}", LoggingLevelEnum.INFO)
        log_message(f"cv_horizon: {cv_horizon}", LoggingLevelEnum.INFO)
        return pipeline_function(
            df,
            time_col=time_col,
            value_col=value_col,
            date_format=date_format,
            tz=tz,
            freq=freq,
            train_end_date=train_end_date,
            anomaly_info=anomaly_info,
            pipeline=pipeline,
            regressor_cols=regressor_cols,
            lagged_regressor_cols=lagged_regressor_cols,
            estimator=estimator,
            hyperparameter_grid=hyperparameter_grid,
            hyperparameter_budget=hyperparameter_budget,
            n_jobs=n_jobs,
            verbose=verbose,
            forecast_horizon=forecast_horizon,
            coverage=coverage,
            test_horizon=test_horizon,
            periods_between_train_test=periods_between_train_test,
            agg_periods=agg_periods,
            agg_func=agg_func,
            score_func=score_func,
            score_func_greater_is_better=score_func_greater_is_better,
            cv_report_metrics=cv_report_metrics,
            null_model_params=null_model_params,
            relative_error_tolerance=relative_error_tolerance,
            cv_horizon=cv_horizon,
            cv_min_train_periods=cv_min_train_periods,
            cv_expanding_window=cv_expanding_window,
            cv_use_most_recent_splits=cv_use_most_recent_splits,
            cv_periods_between_splits=cv_periods_between_splits,
            cv_periods_between_train_test=cv_periods_between_train_test,
            cv_max_splits=cv_max_splits
        )
    return pipeline_wrapper
import itertools
import os
import timeit
from pathlib import Path
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
class EvaluationPeriodParam:
    """How to split data for evaluation."""
    cv_expanding_window: Optional[bool] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_horizon: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_max_splits: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_min_train_periods: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_periods_between_splits: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_periods_between_train_test: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_use_most_recent_splits: Optional[bool] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    periods_between_train_test: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`"""
    test_horizon: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""

    def from_dict(obj: Any) -> 'EvaluationPeriodParam':
        """Builds an `EvaluationPeriodParam` from a plain dictionary.

        Missing keys become None; each value is parsed with the first
        converter in its list that accepts it (via ``from_union``).
        """
        assert isinstance(obj, dict)
        # Field name -> converters tried in order by `from_union`.
        field_parsers = {
            "cv_expanding_window": [from_bool, from_none],
            "cv_horizon": [from_int, from_none],
            "cv_max_splits": [from_int, from_none],
            "cv_min_train_periods": [from_int, from_none],
            "cv_periods_between_splits": [from_int, from_none],
            "cv_periods_between_train_test": [from_int, from_none],
            "cv_use_most_recent_splits": [from_bool, from_none],
            "periods_between_train_test": [from_int, from_none],
            "test_horizon": [from_int, from_none],
        }
        kwargs = {
            name: from_union(parsers, obj.get(name))
            for name, parsers in field_parsers.items()
        }
        return EvaluationPeriodParam(**kwargs)

    def to_dict(self) -> dict:
        """Converts this instance into a plain dictionary."""
        boolean_fields = {"cv_expanding_window", "cv_use_most_recent_splits"}
        # Output key order matches the attribute declaration order above.
        field_order = (
            "cv_expanding_window",
            "cv_horizon",
            "cv_max_splits",
            "cv_min_train_periods",
            "cv_periods_between_splits",
            "cv_periods_between_train_test",
            "cv_use_most_recent_splits",
            "periods_between_train_test",
            "test_horizon")
        result: dict = {}
        for name in field_order:
            parsers = [from_bool, from_none] if name in boolean_fields else [from_int, from_none]
            result[name] = from_union(parsers, getattr(self, name))
        return result
class ModelComponentsParam:
    """Parameters to tune the model."""
    autoregression: Optional[Dict[str, Any]] = None
    """For modeling autoregression, see template for details"""
    changepoints: Optional[Dict[str, Any]] = None
    """For modeling changepoints, see template for details"""
    custom: Optional[Dict[str, Any]] = None
    """Additional parameters used by template, see template for details"""
    events: Optional[Dict[str, Any]] = None
    """For modeling events, see template for details"""
    growth: Optional[Dict[str, Any]] = None
    """For modeling growth (trend), see template for details"""
    hyperparameter_override: Optional[Union[Dict, List[Optional[Dict]]]] = None
    """After the above model components are used to create a hyperparameter grid,
    the result is updated by this dictionary, to create new keys or override existing ones.
    Allows for complete customization of the grid search.
    """
    regressors: Optional[Dict[str, Any]] = None
    """For modeling regressors, see template for details"""
    lagged_regressors: Optional[Dict[str, Any]] = None
    """For modeling lagged regressors, see template for details"""
    seasonality: Optional[Dict[str, Any]] = None
    """For modeling seasonality, see template for details"""
    uncertainty: Optional[Dict[str, Any]] = None
    """For modeling uncertainty, see template for details"""
    # NOTE(review): `from_dict` is defined without @staticmethod. Calling it
    # via the class (ModelComponentsParam.from_dict(obj)) works; calling it on
    # an instance would pass the instance as `obj` and fail the assert.
    def from_dict(obj: Any) -> 'ModelComponentsParam':
        """Builds a `ModelComponentsParam` from a plain dictionary.

        Missing keys become None. Inside the lambdas below, the name
        ``from_dict`` resolves to the module-level helper (class scope is not
        visible from a lambda), not this method.
        """
        assert isinstance(obj, dict)
        autoregression = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("autoregression"))
        changepoints = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("changepoints"))
        custom = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("custom"))
        events = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("events"))
        growth = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("growth"))
        # `hyperparameter_override` may be a dict OR a list of optional dicts.
        hyperparameter_override = from_union([
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict_or_none(lambda x: x, x),
            from_none], obj.get("hyperparameter_override"))
        regressors = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("regressors"))
        lagged_regressors = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("lagged_regressors"))
        seasonality = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("seasonality"))
        uncertainty = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("uncertainty"))
        return ModelComponentsParam(
            autoregression=autoregression,
            changepoints=changepoints,
            custom=custom,
            events=events,
            growth=growth,
            hyperparameter_override=hyperparameter_override,
            regressors=regressors,
            lagged_regressors=lagged_regressors,
            seasonality=seasonality,
            uncertainty=uncertainty)
    def to_dict(self) -> dict:
        """Converts this instance into a plain dictionary (inverse of ``from_dict``)."""
        result: dict = {}
        result["autoregression"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.autoregression)
        result["changepoints"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.changepoints)
        result["custom"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.custom)
        result["events"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.events)
        result["growth"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.growth)
        result["hyperparameter_override"] = from_union([
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict_or_none(lambda x: x, x),
            from_none], self.hyperparameter_override)
        result["regressors"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.regressors)
        result["lagged_regressors"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.lagged_regressors)
        result["seasonality"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.seasonality)
        result["uncertainty"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.uncertainty)
        return result
class ForecastConfig:
    """Config for providing parameters to the Forecast library"""
    computation_param: Optional[ComputationParam] = None
    """How to compute the result. See
    :class:`~greykite.framework.templates.autogen.forecast_config.ComputationParam`.
    """
    coverage: Optional[float] = None
    """Intended coverage of the prediction bands (0.0 to 1.0).
    If None, the upper/lower predictions are not returned.
    """
    evaluation_metric_param: Optional[EvaluationMetricParam] = None
    """What metrics to evaluate. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`.
    """
    evaluation_period_param: Optional[EvaluationPeriodParam] = None
    """How to split data for evaluation. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationPeriodParam`.
    """
    forecast_horizon: Optional[int] = None
    """Number of periods to forecast into the future. Must be > 0.
    If None, default is determined from input data frequency.
    """
    forecast_one_by_one: Optional[Union[bool, int, List[int]]] = None
    """The options to activate the forecast one-by-one algorithm.
    See :class:`~greykite.sklearn.estimator.one_by_one_estimator.OneByOneEstimator`.
    Can be boolean, int, of list of int.
    If int, it has to be less than or equal to the forecast horizon.
    If list of int, the sum has to be the forecast horizon.
    """
    metadata_param: Optional[MetadataParam] = None
    """Information about the input data. See
    :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam`.
    """
    model_components_param: Optional[Union[ModelComponentsParam, List[Optional[ModelComponentsParam]]]] = None
    """Parameters to tune the model. Typically a single ModelComponentsParam, but the `SimpleSilverkiteTemplate`
    template also allows a list of ModelComponentsParam for grid search. A single ModelComponentsParam
    corresponds to one grid, and a list corresponds to a list of grids.
    See :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam`.
    """
    model_template: Optional[Union[str, dataclass, List[Union[str, dataclass]]]] = None
    """Name of the model template. Typically a single string, but the `SimpleSilverkiteTemplate`
    template also allows a list of string for grid search.
    See :class:`~greykite.framework.templates.model_templates.ModelTemplateEnum`
    for valid names.
    """
    @staticmethod
    def from_dict(obj: Any) -> 'ForecastConfig':
        """Builds a `ForecastConfig` from a plain dictionary.

        Missing keys become None. ``model_components_param`` and
        ``model_template`` may each be a single value or a list.
        """
        assert isinstance(obj, dict)
        computation_param = from_union([ComputationParam.from_dict, from_none], obj.get("computation_param"))
        coverage = from_union([from_float, from_none], obj.get("coverage"))
        evaluation_metric_param = from_union([EvaluationMetricParam.from_dict, from_none], obj.get("evaluation_metric_param"))
        evaluation_period_param = from_union([EvaluationPeriodParam.from_dict, from_none], obj.get("evaluation_period_param"))
        forecast_horizon = from_union([from_int, from_none], obj.get("forecast_horizon"))
        forecast_one_by_one = from_union([from_int, from_bool, from_none, from_list_int], obj.get("forecast_one_by_one"))
        metadata_param = from_union([MetadataParam.from_dict, from_none], obj.get("metadata_param"))
        if not isinstance(obj.get("model_components_param"), list):
            model_components_param = from_union([ModelComponentsParam.from_dict, from_none], obj.get("model_components_param"))
        else:
            model_components_param = [from_union([ModelComponentsParam.from_dict, from_none], mcp) for mcp in obj.get("model_components_param")]
        if not isinstance(obj.get("model_template"), list):
            model_template = from_union([from_str, from_none], obj.get("model_template"))
        else:
            model_template = [from_union([from_str, from_none], mt) for mt in obj.get("model_template")]
        return ForecastConfig(
            computation_param=computation_param,
            coverage=coverage,
            evaluation_metric_param=evaluation_metric_param,
            evaluation_period_param=evaluation_period_param,
            forecast_horizon=forecast_horizon,
            forecast_one_by_one=forecast_one_by_one,
            metadata_param=metadata_param,
            model_components_param=model_components_param,
            model_template=model_template)
    def to_dict(self) -> dict:
        """Converts this instance into a plain dictionary.

        ``model_components_param`` and ``model_template`` are always emitted
        as lists. This method does not modify the instance.
        """
        result: dict = {}
        result["computation_param"] = from_union([lambda x: to_class(ComputationParam, x), from_none], self.computation_param)
        result["coverage"] = from_union([to_float, from_none], self.coverage)
        result["evaluation_metric_param"] = from_union([lambda x: to_class(EvaluationMetricParam, x), from_none], self.evaluation_metric_param)
        result["evaluation_period_param"] = from_union([lambda x: to_class(EvaluationPeriodParam, x), from_none], self.evaluation_period_param)
        result["forecast_horizon"] = from_union([from_int, from_none], self.forecast_horizon)
        result["forecast_one_by_one"] = from_union([from_int, from_bool, from_none, from_list_int], self.forecast_one_by_one)
        result["metadata_param"] = from_union([lambda x: to_class(MetadataParam, x), from_none], self.metadata_param)
        # Normalize to lists locally. Previously these two branches rewrote
        # `self.model_components_param` / `self.model_template` in place — a
        # surprising side effect for a serialization method; the emitted dict
        # is unchanged by this fix.
        model_components_param = self.model_components_param
        if not isinstance(model_components_param, list):
            model_components_param = [model_components_param]
        result["model_components_param"] = [from_union([lambda x: to_class(ModelComponentsParam, x), from_none], mcp) for mcp in model_components_param]
        model_template = self.model_template
        if not isinstance(model_template, list):
            model_template = [model_template]
        result["model_template"] = [from_union([from_str, from_none], mt) for mt in model_template]
        return result
    @staticmethod
    def from_json(obj: Any) -> 'ForecastConfig':
        """Converts a json string to the corresponding instance of the `ForecastConfig` class.
        Raises ValueError if the input is not a json string.
        """
        try:
            forecast_dict = json.loads(obj)
        except Exception:
            raise ValueError(f"The input ({obj}) is not a json string.")
        return ForecastConfig.from_dict(forecast_dict)
class Forecaster:
"""The main entry point to create a forecast.
Call the :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`
method to create a forecast. It takes a dataset and forecast configuration parameters.
Notes
-----
This class can create forecasts using any of the model templates in
`~greykite.framework.templates.model_templates.ModelTemplateEnum`.
Model templates provide suitable default values for the available
forecast estimators depending on the data characteristics.
The model template is selected via the ``config.model_template``
parameter to :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
To add your own custom algorithms or template classes in our framework,
pass ``model_template_enum`` and ``default_model_template_name``
to the constructor.
"""
def __init__(
        self,
        model_template_enum: Type[Enum] = ModelTemplateEnum,
        default_model_template_name: str = ModelTemplateEnum.AUTO.name):
    """Initializes the forecaster.

    Parameters
    ----------
    model_template_enum : `Type[Enum]`, default `ModelTemplateEnum`
        The available template names. Pass a custom Enum class to register
        your own template classes with the framework.
    default_model_template_name : `str`, default ``ModelTemplateEnum.AUTO.name``
        The template to use when ``config.model_template`` is not provided.
        Should be a name in ``model_template_enum`` or "auto".
    """
    # Optional user input
    self.model_template_enum: Type[Enum] = model_template_enum
    """The available template names. An Enum class where names are template names, and values are of type
    `~greykite.framework.templates.model_templates.ModelTemplate`.
    """
    self.default_model_template_name: str = default_model_template_name
    """The default template name if not provided by ``config.model_template``.
    Should be a name in ``model_template_enum`` or "auto".
    Used by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.__get_template_class`.
    """
    # The following are set by `self.run_forecast_config`.
    self.template_class: Optional[Type[TemplateInterface]] = None
    """Template class used. Must implement
    `~greykite.framework.templates.template_interface.TemplateInterface`
    and be one of the classes in ``self.model_template_enum``.
    Available for debugging purposes.
    Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
    """
    self.template: Optional[TemplateInterface] = None
    """Instance of ``template_class`` used to run the forecast.
    See the docstring of the specific template class used.
    - `~greykite.framework.templates.simple_silverkite_template.SimpleSilverkiteTemplate`
    - `~greykite.framework.templates.silverkite_template.SilverkiteTemplate`
    - `~greykite.framework.templates.prophet_template.ProphetTemplate`
    - etc.
    Available for debugging purposes.
    Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
    """
    self.config: Optional[ForecastConfig] = None
    """`~greykite.framework.templates.autogen.forecast_config.ForecastConfig`
    passed to the template class.
    Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
    """
    self.pipeline_params: Optional[Dict] = None
    """Parameters used to call :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
    Available for debugging purposes.
    Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
    """
    self.forecast_result: Optional[ForecastResult] = None
    """The forecast result, returned by :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
    Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
    """
def __get_config_with_default_model_template_and_components(self, config: Optional[ForecastConfig] = None) -> ForecastConfig:
    """Returns a copy of ``config`` with defaults filled in for
    ``model_template`` and ``model_components_param``.

    - ``model_template`` : defaults to ``self.default_model_template_name``.
    - ``model_components_param`` : defaults to an empty ``ModelComponentsParam()``.

    Parameters
    ----------
    config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None
        Config object for template class to use.
        See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
        If None, an empty ForecastConfig is used.

    Returns
    -------
    config : :class:`~greykite.framework.templates.model_templates.ForecastConfig`
        Copy of the input ``config`` with ``model_template`` and
        ``model_components_param`` populated if they were None.
    """
    # Works on a copy so the caller's config object is never mutated.
    if config is None:
        config = ForecastConfig()
    else:
        config = deepcopy(config)
    # Delegates to `ForecastConfigDefaults`, pointed at this Forecaster's
    # default template name. Unpacks a single-element list and fills in None.
    # NB: Intentionally does NOT call `apply_forecast_config_defaults`;
    # only `model_template` and `model_components_param` are resolved here,
    # because the template class may apply its own defaults for other fields.
    defaults = ForecastConfigDefaults()
    defaults.DEFAULT_MODEL_TEMPLATE = self.default_model_template_name
    config.model_template = defaults.apply_model_template_defaults(config.model_template)
    config.model_components_param = defaults.apply_model_components_defaults(config.model_components_param)
    return config
def __get_template_class(self, config: Optional[ForecastConfig] = None) -> Type[TemplateInterface]:
    """Extracts template class (e.g. `SimpleSilverkiteTemplate`) from the config.

    Resolution order:

        1. If ``config.model_template`` is a list, each element is resolved
           recursively; all elements must map to the same template class, and
           that class must allow model template lists.
        2. Otherwise the model template is looked up in ``self.model_template_enum``.
        3. If not found there, and the enum contains a template class that is a
           subclass of `SimpleSilverkiteTemplate`, the model template is checked
           against the generic `SimpleSilverkiteTemplate` naming rules.

    Parameters
    ----------
    config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None
        Config object for template class to use.
        See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.

    Returns
    -------
    template_class : Type[`~greykite.framework.templates.template_interface.TemplateInterface`]
        An implementation of `~greykite.framework.templates.template_interface.TemplateInterface`.

    Raises
    ------
    ValueError
        If the model template(s) cannot be resolved to a single valid template
        class, or if ``config.model_components_param`` is a list but the resolved
        template class does not support lists of ``ModelComponentsParam``.
    """
    config = self.__get_config_with_default_model_template_and_components(config)
    if isinstance(config.model_template, list):
        # Parses `config.model_template` to extract the template class, with validation.
        # Handles a list of model templates: resolves each element recursively.
        template_classes = [self.__get_template_class(config=ForecastConfig(model_template=mt))
                            for mt in config.model_template]
        # All elements must resolve to the same template class.
        for tc in template_classes:
            if tc != template_classes[0]:
                raise ValueError("All model templates must use the same template class. "
                                 f"Found {template_classes}")
        template_class = template_classes[0]
        # The shared class must explicitly opt in to list-valued model templates.
        if not template_class().allow_model_template_list:
            raise ValueError(f"The template class {template_class} does not allow `model_template` to be a list. "
                             f"Pass a string instead.")
    else:
        # Handles other situations (string, data class).
        try:
            # Tries to look up in `self.model_template_enum`.
            # TypeError occurs when `config.model_template` is unhashable (e.g. a data class).
            template_class = self.model_template_enum[config.model_template].value.template_class
        except (KeyError, TypeError):
            # Template is not found in the enum.
            # NB: The logic in this clause is written for the default `self.model_template_enum`,
            # which contains only one template class that is a subclass of SimpleSilverkiteTemplate.
            # If a custom `self.model_template_enum` is provided it may be useful to override this logic.
            valid_names = ", ".join(self.model_template_enum.__dict__["_member_names_"])
            # Checks if template enum has a template class that supports generic naming
            # i.e. a subclass of `SimpleSilverkiteTemplate`.
            subclass_simple_silverkite = [mte for mte in self.model_template_enum
                                          if issubclass(mte.value.template_class, SimpleSilverkiteTemplate)]
            if len(subclass_simple_silverkite) > 0:
                try:
                    log_message(f"Model template {config.model_template} is not found in the template enum. "
                                f"Checking if model template is suitable for `SimpleSilverkiteTemplate`.", LoggingLevelEnum.DEBUG)
                    # Raises ValueError if the template does not satisfy the generic naming rules.
                    SimpleSilverkiteTemplate().check_template_type(config.model_template)
                    possible_template_classes = unique_elements_in_list([mte.value.template_class
                                                                        for mte in subclass_simple_silverkite])
                    if len(possible_template_classes) > 1:
                        log_message(f"Multiple template classes could be used for the model "
                                    f"template {config.model_template}: {possible_template_classes}", LoggingLevelEnum.DEBUG)
                    # arbitrarily take a class that supports generic naming
                    template_class = subclass_simple_silverkite[0].value.template_class
                    log_message(f"Using template class {template_class} for the model "
                                f"template {config.model_template}", LoggingLevelEnum.DEBUG)
                except ValueError:
                    raise ValueError(f"Model Template '{config.model_template}' is not recognized! Must be one of: {valid_names}"
                                     " or satisfy the `SimpleSilverkiteTemplate` rules.")
            else:
                raise ValueError(f"Model Template '{config.model_template}' is not recognized! Must be one of: {valid_names}.")
    # Validates `model_components_param` compatibility with the template
    if not template_class().allow_model_components_param_list and isinstance(config.model_components_param, list):
        raise ValueError(f"Model template {config.model_template} does not support a list of `ModelComponentsParam`.")
    return template_class
def __apply_forecast_one_by_one_to_pipeline_parameters(self):
    """If forecast_one_by_one is activated,

    1. replaces the estimator with ``OneByOneEstimator`` in the pipeline.
    2. adds the one-by-one estimator's parameters to ``hyperparameter_grid``.

    No-op when ``self.config.forecast_one_by_one`` is None or False.
    """
    # Nothing to do unless one-by-one forecasting was requested.
    if self.config.forecast_one_by_one in (None, False):
        return
    eval_metric = self.template.config.evaluation_metric_param
    # Rebuilds the pipeline around `OneByOneEstimator`, which wraps the
    # template's original estimator class (referenced by name).
    one_by_one_pipeline = get_basic_pipeline(
        estimator=OneByOneEstimator(
            estimator=self.template.estimator.__class__.__name__,
            forecast_horizon=self.config.forecast_horizon),
        score_func=self.template.score_func,
        score_func_greater_is_better=self.template.score_func_greater_is_better,
        agg_periods=eval_metric.agg_periods,
        agg_func=eval_metric.agg_func,
        relative_error_tolerance=eval_metric.relative_error_tolerance,
        coverage=self.template.config.coverage,
        null_model_params=eval_metric.null_model_params,
        regressor_cols=self.template.regressor_cols)
    self.pipeline_params["pipeline"] = one_by_one_pipeline
    # `hyperparameter_grid` may be a single dict or a list of dicts;
    # normalize to a list and mutate each grid in place.
    grid = self.pipeline_params["hyperparameter_grid"]
    grids = grid if isinstance(grid, list) else [grid]
    for single_grid in grids:
        single_grid["estimator__forecast_horizon"] = [self.config.forecast_horizon]
        single_grid["estimator__estimator_map"] = [self.config.forecast_one_by_one]
def __get_model_template(
        self,
        df: pd.DataFrame,
        config: ForecastConfig) -> str:
    """Resolves the model template, substituting a concrete template when "auto" is given.

    Called after ``config`` has been filled with default values
    (all fields are not None, and ``model_template`` is a string).

    Parameters
    ----------
    df : `pandas.DataFrame`
        Timeseries data to forecast.
        Contains columns [`time_col`, `value_col`], and optional regressor columns.
        Regressor columns should include future values for prediction.
    config : :class:`~greykite.framework.templates.model_templates.ForecastConfig`
        Config object for template class to use.
        Must be an instance with all fields not None.
        See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.

    Returns
    -------
    model_template : `str`
        The corresponding model template.
    """
    requested = config.model_template
    # Anything other than the (case-insensitive) string "auto" is returned unchanged.
    if not isinstance(requested, str) or requested.lower() != "auto":
        return requested
    # "auto" case: `get_auto_silverkite_model_template` resolves "AUTO" to a
    # specific SILVERKITE template, so the fallback passed to it must not
    # itself be "AUTO". Falls back to SILVERKITE in that case.
    fallback = self.default_model_template_name
    if fallback.lower() == "auto":
        fallback = ModelTemplateEnum.SILVERKITE.name
    return get_auto_silverkite_model_template(
        df=df,
        default_model_template_name=fallback,
        config=config)
def apply_forecast_config(
        self,
        df: pd.DataFrame,
        config: Optional[ForecastConfig] = None) -> Dict:
    """Fetches pipeline parameters from the ``df`` and ``config``,
    but does not run the pipeline to generate a forecast.

    :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`
    calls this function and also runs the forecast pipeline.

    Available for debugging purposes to check pipeline parameters before
    running a forecast. Sets these attributes for debugging:

        - ``pipeline_params`` : the parameters passed to
          :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
        - ``template_class``, ``template`` : the template class used to generate the
          pipeline parameters.
        - ``config`` : the :class:`~greykite.framework.templates.model_templates.ForecastConfig`
          passed as input to template class, to translate into pipeline parameters.

    Provides basic validation on the compatibility of ``config.model_template``
    with ``config.model_components_param``.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Timeseries data to forecast.
        Contains columns [`time_col`, `value_col`], and optional regressor columns.
        Regressor columns should include future values for prediction.
    config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None
        Config object for template class to use.
        See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.

    Returns
    -------
    pipeline_params : `dict` [`str`, `any`]
        Input to :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
    """
    # Fill in defaults, then resolve "auto" to a concrete model template.
    resolved_config = self.__get_config_with_default_model_template_and_components(config)
    resolved_config.model_template = self.__get_model_template(df=df, config=resolved_config)
    self.config = resolved_config
    # Resolve and instantiate the template class, then translate the config
    # into forecast-pipeline parameters.
    self.template_class = self.__get_template_class(self.config)
    self.template = self.template_class()
    self.pipeline_params = self.template.apply_template_for_pipeline_params(df=df, config=self.config)
    # Swaps in `OneByOneEstimator` if one-by-one forecasting is requested.
    self.__apply_forecast_one_by_one_to_pipeline_parameters()
    return self.pipeline_params
def run_forecast_config(
        self,
        df: pd.DataFrame,
        config: Optional[ForecastConfig] = None) -> ForecastResult:
    """Creates a forecast from input data and config.

    The result is also stored as ``self.forecast_result``.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Timeseries data to forecast.
        Contains columns [`time_col`, `value_col`], and optional regressor columns.
        Regressor columns should include future values for prediction.
    config : :class:`~greykite.framework.templates.model_templates.ForecastConfig`
        Config object for template class to use.
        See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.

    Returns
    -------
    forecast_result : :class:`~greykite.framework.pipeline.pipeline.ForecastResult`
        The output of :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`,
        according to the ``df`` and ``config`` configuration parameters.
    """
    # Translate (df, config) into pipeline parameters, then run the pipeline.
    self.forecast_result = forecast_pipeline(
        **self.apply_forecast_config(df=df, config=config))
    return self.forecast_result
def run_forecast_json(
        self,
        df: pd.DataFrame,
        json_str: str = "{}") -> ForecastResult:
    """Calls :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`
    according to the ``json_str`` configuration parameters.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Timeseries data to forecast.
        Contains columns [`time_col`, `value_col`], and optional regressor columns.
        Regressor columns should include future values for prediction.
    json_str : `str`
        Json string of the config object for Forecast to use.
        See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.

    Returns
    -------
    forecast_result : :class:`~greykite.framework.pipeline.pipeline.ForecastResult`
        Forecast result, the output of
        :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`,
        called using the template class with specified configuration.
        See :class:`~greykite.framework.pipeline.pipeline.ForecastResult` for details.
    """
    # Decode the JSON into a ForecastConfig and delegate to `run_forecast_config`,
    # which also stores the result as `self.forecast_result`.
    parsed_config = forecast_config_from_dict(json.loads(json_str))
    return self.run_forecast_config(df=df, config=parsed_config)
def dump_forecast_result(
        self,
        destination_dir,
        object_name="object",
        dump_design_info=True,
        overwrite_exist_dir=False):
    """Dumps ``self.forecast_result`` to local pickle files.

    Parameters
    ----------
    destination_dir : `str`
        The pickle destination directory.
    object_name : `str`
        The stored file name.
    dump_design_info : `bool`, default True
        Whether to dump design info.
        Design info is a patsy class that includes the design matrix information.
        It takes longer to dump design info.
    overwrite_exist_dir : `bool`, default False
        What to do when ``destination_dir`` already exists.
        Removes the original directory when exists, if set to True.

    Raises
    ------
    ValueError
        If there is no forecast result to dump.

    Returns
    -------
    This function writes to local files and does not return anything.
    """
    result = self.forecast_result
    # Guard: a forecast must have been run before it can be persisted.
    if result is None:
        raise ValueError("self.forecast_result is None, nothing to dump.")
    dump_obj(
        obj=result,
        dir_name=destination_dir,
        obj_name=object_name,
        dump_design_info=dump_design_info,
        overwrite_exist_dir=overwrite_exist_dir)
def load_forecast_result(
        self,
        source_dir,
        load_design_info=True):
    """Loads ``self.forecast_result`` from local files created by ``self.dump_result``.

    Parameters
    ----------
    source_dir : `str`
        The source file directory.
    load_design_info : `bool`, default True
        Whether to load design info.
        Design info is a patsy class that includes the design matrix information.
        It takes longer to load design info.

    Raises
    ------
    ValueError
        If this instance already holds a forecast result.
    """
    # Guard: refuse to overwrite an existing result on this instance.
    if self.forecast_result is not None:
        raise ValueError("self.forecast_result is not None, please create a new instance.")
    self.forecast_result = load_obj(
        dir_name=source_dir,
        obj=None,
        load_design_info=load_design_info)
class ModelTemplateEnum(Enum):
    """Available model templates.

    Enumerates the possible values for the ``model_template`` attribute of
    :class:`~greykite.framework.templates.model_templates.ForecastConfig`.

    The value has type `~greykite.framework.templates.model_templates.ModelTemplate` which contains:

        - the template class that recognizes the model_template. Template classes implement the
          `~greykite.framework.templates.template_interface.TemplateInterface` interface.
        - a plain-text description of what the model_template is for,

    The description should be unique across enum members. The template class
    can be shared, because a template class can recognize multiple model templates.
    For example, the same template class may use different default values for
    ``ForecastConfig.model_components_param`` depending on ``ForecastConfig.model_template``.

    Notes
    -----
    The template classes
    `~greykite.framework.templates.silverkite_template.SilverkiteTemplate`
    and `~greykite.framework.templates.prophet_template.ProphetTemplate`
    recognize only the model templates explicitly enumerated here.

    However, the `~greykite.framework.templates.simple_silverkite_template.SimpleSilverkiteTemplate`
    template class allows additional model templates to be specified generically.
    Any object of type `~greykite.framework.templates.simple_silverkite_template_config.SimpleSilverkiteTemplateOptions`
    can be used as the model_template.
    These generic model templates are valid but not enumerated here.
    """
    SILVERKITE = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model with automatic growth, seasonality, holidays, "
                    "automatic autoregression, normalization "
                    "and interactions. Best for hourly and daily frequencies."
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model with automatic growth, seasonality, holidays,
    automatic autoregression, normalization
    and interactions. Best for hourly and daily frequencies.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_DAILY_1_CONFIG_1 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Config 1 in template ``SILVERKITE_DAILY_1``. "
                    "Compared to ``SILVERKITE``, it uses parameters "
                    "specifically tuned for daily data and 1-day forecast.")
    """Config 1 in template ``SILVERKITE_DAILY_1``.
    Compared to ``SILVERKITE``, it uses parameters
    specifically tuned for daily data and 1-day forecast.
    """
    SILVERKITE_DAILY_1_CONFIG_2 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Config 2 in template ``SILVERKITE_DAILY_1``. "
                    "Compared to ``SILVERKITE``, it uses parameters "
                    "specifically tuned for daily data and 1-day forecast.")
    """Config 2 in template ``SILVERKITE_DAILY_1``.
    Compared to ``SILVERKITE``, it uses parameters
    specifically tuned for daily data and 1-day forecast.
    """
    SILVERKITE_DAILY_1_CONFIG_3 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Config 3 in template ``SILVERKITE_DAILY_1``. "
                    "Compared to ``SILVERKITE``, it uses parameters "
                    "specifically tuned for daily data and 1-day forecast.")
    """Config 3 in template ``SILVERKITE_DAILY_1``.
    Compared to ``SILVERKITE``, it uses parameters
    specifically tuned for daily data and 1-day forecast.
    """
    SILVERKITE_DAILY_1 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for daily data and 1-day forecast. "
                    "Contains 3 candidate configs for grid search, "
                    "optimized the seasonality and changepoint parameters.")
    """Silverkite model specifically tuned for daily data and 1-day forecast.
    Contains 3 candidate configs for grid search,
    optimized the seasonality and changepoint parameters.
    """
    SILVERKITE_DAILY_90 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for daily data with 90 days forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for daily data with 90 days forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_WEEKLY = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for weekly data. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for weekly data.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_MONTHLY = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for monthly data. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for monthly data.
    Uses `SimpleSilverkiteEstimator`.
    """
    # NOTE: description previously said "Contains 3 hyperparameter combinations",
    # inconsistent with the attribute docstring and the sibling hourly templates;
    # corrected to 4.
    SILVERKITE_HOURLY_1 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 1 hour forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 1 hour forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`."""
    SILVERKITE_HOURLY_24 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 24 hours (1 day) forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 24 hours (1 day) forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`."""
    SILVERKITE_HOURLY_168 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 168 hours (1 week) forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 168 hours (1 week) forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`."""
    SILVERKITE_HOURLY_336 = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model specifically tuned for hourly data with 336 hours (2 weeks) forecast horizon. "
                    "Contains 4 hyperparameter combinations for grid search. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model specifically tuned for hourly data with 336 hours (2 weeks) forecast horizon.
    Contains 4 hyperparameter combinations for grid search.
    Uses `SimpleSilverkiteEstimator`.
    """
    SILVERKITE_EMPTY = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Silverkite model with no component included by default. Fits only a constant intercept. "
                    "Select and customize this template to add only the terms you want. "
                    "Uses `SimpleSilverkiteEstimator`.")
    """Silverkite model with no component included by default. Fits only a constant intercept.
    Select and customize this template to add only the terms you want.
    Uses `SimpleSilverkiteEstimator`.
    """
    SK = ModelTemplate(
        template_class=SilverkiteTemplate,
        description="Silverkite model with low-level interface. For flexible model tuning "
                    "if SILVERKITE template is not flexible enough. Not for use out-of-the-box: "
                    "customization is needed for good performance. Uses `SilverkiteEstimator`.")
    """Silverkite model with low-level interface. For flexible model tuning
    if SILVERKITE template is not flexible enough. Not for use out-of-the-box:
    customization is needed for good performance. Uses `SilverkiteEstimator`.
    """
    PROPHET = ModelTemplate(
        template_class=ProphetTemplate,
        description="Prophet model with growth, seasonality, holidays, additional regressors "
                    "and prediction intervals. Uses `ProphetEstimator`.")
    """Prophet model with growth, seasonality, holidays, additional regressors
    and prediction intervals. Uses `ProphetEstimator`."""
    AUTO_ARIMA = ModelTemplate(
        template_class=AutoArimaTemplate,
        description="Auto ARIMA model with fit and prediction intervals. "
                    "Uses `AutoArimaEstimator`.")
    """ARIMA model with automatic order selection. Uses `AutoArimaEstimator`."""
    SILVERKITE_TWO_STAGE = ModelTemplate(
        template_class=MultistageForecastTemplate,
        description="MultistageForecastTemplate's default model template. A two-stage model. "
                    "The first step takes a longer history and learns the long-term effects, "
                    "while the second step takes a shorter history and learns the short-term residuals."
    )
    """Multistage forecast model's default model template. A two-stage model.
    The first step takes a longer history and learns the long-term effects,
    while the second step takes a shorter history and learns the short-term residuals.
    """
    MULTISTAGE_EMPTY = ModelTemplate(
        template_class=MultistageForecastTemplate,
        description="Empty configuration for Multistage Forecast. "
                    "All parameters will be exactly what user inputs. "
                    "Not to be used without overriding."
    )
    """Empty configuration for Multistage Forecast.
    All parameters will be exactly what user inputs.
    Not to be used without overriding.
    """
    AUTO = ModelTemplate(
        template_class=SimpleSilverkiteTemplate,
        description="Automatically selects the SimpleSilverkite model template that corresponds to the forecast "
                    "problem. Selection is based on data frequency, forecast horizon, and CV configuration."
    )
    """Automatically selects the SimpleSilverkite model template that corresponds to the forecast problem.
    Selection is based on data frequency, forecast horizon, and CV configuration.
    """
    LAG_BASED = ModelTemplate(
        template_class=LagBasedTemplate,
        description="Uses aggregated past observations as predictions. Examples are "
                    "past day, week-over-week, week-over-3-week median, etc."
    )
    """Uses aggregated past observations as predictions. Examples are
    past day, week-over-week, week-over-3-week median, etc.
    """
    SILVERKITE_WOW = ModelTemplate(
        template_class=MultistageForecastTemplate,
        description="The Silverkite+WOW model uses Silverkite to model yearly/quarterly/monthly seasonality, "
                    "growth and holiday effects first, then uses week over week to estimate the residuals. "
                    "The final prediction is the total of the two models. "
                    "This avoids the normal week over week (WOW) estimation's weakness in capturing "
                    "growth and holidays."
    )
    """The Silverkite+WOW model uses Silverkite to model yearly/quarterly/monthly seasonality,
    growth and holiday effects first, then uses week over week to estimate the residuals.
    The final prediction is the total of the two models.
    This avoids the normal week over week (WOW) estimation's weakness in capturing growth and holidays.
    """
The provided code snippet includes necessary dependencies for implementing the `benchmark_silverkite_template` function. Write a Python function `def benchmark_silverkite_template( data_name, df, forecast_horizons, fit_algorithms, max_cvs, metadata=None, evaluation_metric=None)` to solve the following problem:
Benchmarks silverkite template and returns the output as a list :param data_name: str Name of the dataset we are performing benchmarking on For real datasets, the data_name matches the corresponding filename in the data/ folder For simulated datasets, we follow the convention "<freq>_simulated" e.g. "daily_simulated" :param df: pd.DataFrame Dataframe containing the time and value columns :param forecast_horizons: List[int] One forecast is created for every given forecast_horizon :param fit_algorithms: List[str] Names of predictive models to fit. Options are "linear", "lasso", "ridge", "rf" etc. :param max_cvs: List[int] or None Number of maximum CV folds to use. :param metadata: :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam` or None, default None Information about the input data. See :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam`. :param evaluation_metric: :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam` or None, default None What metrics to evaluate. See :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`. :return: .csv file Each row of the .csv file records the following outputs from one run of the silverkite template: - "data_name": Fixed string "<freq>_simulated", or name of the dataset in data/ folder - "forecast_model_name": "silverkite_<fit_algorithm>" e.g. "silverkite_linear" or "prophet" - "train_period": train_period - "forecast_horizon": forecast_horizon - "fit_algorithm": fit algorithm name - "cv_folds": max_cv - "runtime_sec": runtime in seconds - "train_mae": Mean Absolute Error of training data in backtest - "train_mape": Mean Absolute Percent Error of training data in backtest - "test_mae": Mean Absolute Error of testing data in backtest - "test_mape": Mean Absolute Percent Error of testing data in backtest
Here is the function:
def benchmark_silverkite_template(
        data_name,
        df,
        forecast_horizons,
        fit_algorithms,
        max_cvs,
        metadata=None,
        evaluation_metric=None):
    """Benchmarks silverkite template and returns the output as a list.

    One forecast is run for every combination in the cross product of
    ``forecast_horizons`` x ``fit_algorithms`` x ``max_cvs``.

    :param data_name: str
        Name of the dataset we are performing benchmarking on.
        For real datasets, the data_name matches the corresponding filename in the data/ folder.
        For simulated datasets, we follow the convention "<freq>_simulated" e.g. "daily_simulated".
    :param df: pd.DataFrame
        Dataframe containing the time and value columns.
    :param forecast_horizons: List[int]
        One forecast is created for every given forecast_horizon.
    :param fit_algorithms: List[str]
        Names of predictive models to fit.
        Options are "linear", "lasso", "ridge", "rf" etc.
    :param max_cvs: List[int] or None
        Number of maximum CV folds to use.
    :param metadata: :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam` or None, default None
        Information about the input data. See
        :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam`.
    :param evaluation_metric: :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam` or None, default None
        What metrics to evaluate. See
        :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`.
    :return: List[dict]
        One dict per run of the silverkite template, with the following keys:

        - "data_name": Fixed string "<freq>_simulated", or name of the dataset in data/ folder
        - "forecast_model_name": "silverkite_<fit_algorithm>" e.g. "silverkite_linear"
        - "train_period": number of rows in ``df``
        - "forecast_horizon": forecast_horizon
        - "fit_algorithm": fit algorithm name
        - "cv_folds": number of CV splits actually run
        - "runtime_sec": runtime in seconds
        - "train_mae": Mean Absolute Error of training data in backtest
        - "train_mape": Mean Absolute Percent Error of training data in backtest
        - "test_mae": Mean Absolute Error of testing data in backtest
        - "test_mape": Mean Absolute Percent Error of testing data in backtest
    """
    benchmark_results = []
    for forecast_horizon, fit_algorithm, max_cv in itertools.product(forecast_horizons, fit_algorithms, max_cvs):
        model_components = ModelComponentsParam(
            custom={
                "fit_algorithm_dict": {
                    "fit_algorithm": fit_algorithm,
                },
                "feature_sets_enabled": True
            }
        )
        evaluation_period = EvaluationPeriodParam(
            cv_max_splits=max_cv
        )
        # Times the full template run (fit + backtest + forecast).
        start_time = timeit.default_timer()
        forecaster = Forecaster()
        result = forecaster.run_forecast_config(
            df=df,
            config=ForecastConfig(
                model_template=ModelTemplateEnum.SILVERKITE.name,
                forecast_horizon=forecast_horizon,
                metadata_param=metadata,
                evaluation_metric_param=evaluation_metric,
                model_components_param=model_components,
                evaluation_period_param=evaluation_period,
            )
        )
        runtime = timeit.default_timer() - start_time
        output_dict = dict(
            data_name=data_name,
            forecast_model_name=f"silverkite_{fit_algorithm}",
            train_period=df.shape[0],
            forecast_horizon=forecast_horizon,
            # Fix: "fit_algorithm" was documented as an output column but was
            # never included in the output dict.
            fit_algorithm=fit_algorithm,
            cv_folds=result.grid_search.n_splits_,
            runtime_sec=round(runtime, 3),
            train_mae=result.backtest.train_evaluation["MAE"].round(3),
            train_mape=result.backtest.train_evaluation["MAPE"].round(3),
            test_mae=result.backtest.test_evaluation["MAE"].round(3),
            test_mape=result.backtest.test_evaluation["MAPE"].round(3)
        )
        benchmark_results.append(output_dict)
    return benchmark_results
import itertools
import os
import timeit
from pathlib import Path
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
The provided code snippet includes necessary dependencies for implementing the `get_default_benchmark_real_datasets` function. Write a Python function `def get_default_benchmark_real_datasets()` to solve the following problem:
Default parameter sets for benchmarking real datasets. The datasets are located in the data/ folder. Every tuple has the following structure: (data_name, frequency, time_col, value_col, forecast_horizon)
Here is the function:
def get_default_benchmark_real_datasets():
    """Default parameter sets for benchmarking real datasets located in the data/ folder.

    Returns
    -------
    real_datasets : `list` [`tuple`]
        Each tuple has the structure
        ``(data_name, frequency, time_col, value_col, forecast_horizons)``.
    """
    return [
        # daily_peyton_manning, 8 years of daily data
        ("daily_peyton_manning", "D", "ts", "y", [30, 365]),
        # daily_female_births, 1 year of daily data
        ("daily_female_births", "D", "Date", "Births", [30, 90]),
    ]
167,505 | import itertools
import os
import timeit
from pathlib import Path
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
The provided code snippet includes necessary dependencies for implementing the `get_default_benchmark_silverkite_parameters` function. Write a Python function `def get_default_benchmark_silverkite_parameters()` to solve the following problem:
Default parameter sets for benchmarking silverkite template
Here is the function:
def get_default_benchmark_silverkite_parameters():
    """Default parameter sets for benchmarking the silverkite template.

    Returns
    -------
    params : `dict`
        Keys are ``"fit_algorithms"`` (tuple of fit algorithm names)
        and ``"max_cvs"`` (list of maximum CV fold counts).
    """
    fit_algorithms = ("linear", "lasso", "ridge", "rf", "sgd")
    return {
        "fit_algorithms": fit_algorithms,
        "max_cvs": [3],
    }
167,506 | import itertools
import os
import timeit
from pathlib import Path
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
class EvaluationMetricEnum(Enum):
    """Valid evaluation metrics.

    The values tuple is ``(score_func: callable, greater_is_better: boolean, short_name: str)``.

    ``add_finite_filter_to_scorer`` is added to the metrics that are directly imported from
    ``sklearn.metrics`` (e.g. ``mean_squared_error``) to ensure that the metric gets calculated
    even when inputs have missing values.
    """
    Correlation = (correlation, True, "CORR")
    """Pearson correlation coefficient between forecast and actuals. Higher is better."""
    CoefficientOfDetermination = (add_finite_filter_to_scorer(r2_score), True, "R2")
    """Coefficient of determination. See `sklearn.metrics.r2_score`. Higher is better.
    Equals `1.0 - mean_squared_error / variance(actuals)`."""
    MeanSquaredError = (add_finite_filter_to_scorer(mean_squared_error), False, "MSE")
    """Mean squared error, the average of squared differences,
    see `sklearn.metrics.mean_squared_error`."""
    RootMeanSquaredError = (root_mean_squared_error, False, "RMSE")
    """Root mean squared error, the square root of `sklearn.metrics.mean_squared_error`."""
    MeanAbsoluteError = (add_finite_filter_to_scorer(mean_absolute_error), False, "MAE")
    """Mean absolute error, average of absolute differences,
    see `sklearn.metrics.mean_absolute_error`."""
    MedianAbsoluteError = (add_finite_filter_to_scorer(median_absolute_error), False, "MedAE")
    """Median absolute error, median of absolute differences,
    see `sklearn.metrics.median_absolute_error`."""
    MeanAbsolutePercentError = (mean_absolute_percent_error, False, "MAPE")
    """Mean absolute percent error, error relative to actuals expressed as a %,
    see `wikipedia MAPE <https://en.wikipedia.org/wiki/Mean_absolute_percentage_error>`_."""
    MedianAbsolutePercentError = (median_absolute_percent_error, False, "MedAPE")
    """Median absolute percent error, median of error relative to actuals expressed as a %,
    a median version of the MeanAbsolutePercentError, less affected by extreme values."""
    SymmetricMeanAbsolutePercentError = (symmetric_mean_absolute_percent_error, False, "sMAPE")
    """Symmetric mean absolute percent error, error relative to (actuals+forecast) expressed as a %.
    Note that we do not include a factor of 2 in the denominator, so the range is 0% to 100%,
    see `wikipedia sMAPE <https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error>`_."""
    Quantile80 = (quantile_loss_q(0.80), False, "Q80")
    """Quantile loss with q=0.80::

        np.where(y_true < y_pred, (1 - q) * (y_pred - y_true), q * (y_true - y_pred)).mean()
    """
    Quantile95 = (quantile_loss_q(0.95), False, "Q95")
    """Quantile loss with q=0.95::

        np.where(y_true < y_pred, (1 - q) * (y_pred - y_true), q * (y_true - y_pred)).mean()
    """
    Quantile99 = (quantile_loss_q(0.99), False, "Q99")
    """Quantile loss with q=0.99::

        np.where(y_true < y_pred, (1 - q) * (y_pred - y_true), q * (y_true - y_pred)).mean()
    """
    FractionOutsideTolerance1 = (partial(fraction_outside_tolerance, rtol=0.01), False, "OutsideTolerance1p")
    """Fraction of forecasted values that deviate more than 1% from the actual."""
    FractionOutsideTolerance2 = (partial(fraction_outside_tolerance, rtol=0.02), False, "OutsideTolerance2p")
    """Fraction of forecasted values that deviate more than 2% from the actual."""
    FractionOutsideTolerance3 = (partial(fraction_outside_tolerance, rtol=0.03), False, "OutsideTolerance3p")
    """Fraction of forecasted values that deviate more than 3% from the actual."""
    FractionOutsideTolerance4 = (partial(fraction_outside_tolerance, rtol=0.04), False, "OutsideTolerance4p")
    """Fraction of forecasted values that deviate more than 4% from the actual."""
    FractionOutsideTolerance5 = (partial(fraction_outside_tolerance, rtol=0.05), False, "OutsideTolerance5p")
    """Fraction of forecasted values that deviate more than 5% from the actual."""

    def get_metric_func(self):
        """Returns the metric function (first element of the value tuple)."""
        return self.value[0]

    def get_metric_greater_is_better(self):
        """Returns the greater_is_better boolean (second element of the value tuple)."""
        return self.value[1]

    def get_metric_name(self):
        """Returns the short name (third element of the value tuple)."""
        return self.value[2]
The provided code snippet includes necessary dependencies for implementing the `get_default_benchmark_parameters` function. Write a Python function `def get_default_benchmark_parameters()` to solve the following problem:
Default parameter sets for benchmarking
Here is the function:
def get_default_benchmark_parameters():
    """Default parameter sets for benchmarking.

    Returns
    -------
    params : `dict`
        Keys are ``"metric"`` (the CV selection metric) and
        ``"data_directory"`` (absolute path of the benchmark output folder).
    """
    # benchmark_output/ lives under the src/ root, four levels above this file.
    src_root = os.path.abspath(Path(__file__).parents[4])
    return {
        "metric": EvaluationMetricEnum.MeanSquaredError,
        "data_directory": os.path.join(src_root, "benchmark_output"),
    }
167,507 | import itertools
import os
import timeit
from pathlib import Path
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
The provided code snippet includes necessary dependencies for implementing the `get_default_benchmark_simulated_datasets` function. Write a Python function `def get_default_benchmark_simulated_datasets()` to solve the following problem:
Default parameter sets to generate simulated data for benchmarking. The training periods and forecast horizon are chosen to complement default real datasets. Every tuple has the following structure: (data_name, frequency, training_periods, forecast_horizon)
Here is the function:
def get_default_benchmark_simulated_datasets():
    """Default parameter sets to generate simulated data for benchmarking.

    The training periods and forecast horizons are chosen to complement the
    default real datasets.

    Returns
    -------
    simulation_parameters : `list` [`tuple`]
        Each tuple has the structure
        ``(data_name, frequency, training_periods, forecast_horizons)``.
    """
    daily = [
        ("daily_simulated", "D", 90, [30]),       # 3 months train, 1 month horizon
        ("daily_simulated", "D", 730, [365]),     # 2 years train, 1 year horizon
    ]
    hourly = [
        ("hourly_simulated", "H", 168, [24]),     # 1 week train, 1 day horizon
        ("hourly_simulated", "H", 720, [168]),    # 1 month train, 1 week horizon
        ("hourly_simulated", "H", 8760, [4320]),  # 1 year train, 6 month horizon
        ("hourly_simulated", "H", 35040, [8760]),  # 4 years train, 1 year horizon
    ]
    return daily + hourly
167,508 | import timeit
from typing import Dict
import pandas as pd
from tqdm.autonotebook import tqdm
from greykite.common.constants import TIME_COL
from greykite.common.logging import LoggingLevelEnum
from greykite.common.logging import log_message
from greykite.framework.pipeline.pipeline import forecast_pipeline
from greykite.sklearn.cross_validation import RollingTimeSeriesSplit
TIME_COL = "ts"
class LoggingLevelEnum(Enum):
    """Valid types of logging levels available to use.

    The numeric values mirror the standard `logging` module's level numbers,
    so ``member.value`` can be passed directly to `logging.Logger.log`.
    """
    CRITICAL = 50
    ERROR = 40
    WARNING = 30
    INFO = 20
    DEBUG = 10
    NOTSET = 0
def log_message(message, level=LoggingLevelEnum.INFO):
    """Adds a message to the logger.

    Parameters
    ----------
    message : `any`
        The message to be added to the logger.
    level : `Enum`, default ``LoggingLevelEnum.INFO``
        One of the levels in the `~greykite.common.enums.LoggingLevelEnum`.

    Raises
    ------
    ValueError
        If ``level`` is not a member of `LoggingLevelEnum`.
    """
    # `__members__` is a mapping; test membership directly instead of
    # materializing a throwaway list on every call.
    if level.name not in LoggingLevelEnum.__members__:
        raise ValueError(f"{level} not found, it must be a member of the LoggingLevelEnum class.")
    logger.log(level.value, message)
def forecast_pipeline(
        # input
        df: pd.DataFrame,
        time_col=TIME_COL,
        value_col=VALUE_COL,
        date_format=None,
        tz=None,
        freq=None,
        train_end_date=None,
        anomaly_info=None,
        # model
        pipeline=None,
        regressor_cols=None,
        lagged_regressor_cols=None,
        estimator=SimpleSilverkiteEstimator(),
        hyperparameter_grid=None,
        hyperparameter_budget=None,
        n_jobs=COMPUTATION_N_JOBS,
        verbose=1,
        # forecast
        forecast_horizon=None,
        coverage=0.95,
        test_horizon=None,
        periods_between_train_test=None,
        agg_periods=None,
        agg_func=None,
        # evaluation
        score_func=EvaluationMetricEnum.MeanAbsolutePercentError.name,
        score_func_greater_is_better=False,
        cv_report_metrics=CV_REPORT_METRICS_ALL,
        null_model_params=None,
        relative_error_tolerance=None,
        # CV
        cv_horizon=None,
        cv_min_train_periods=None,
        cv_expanding_window=False,
        cv_use_most_recent_splits=False,
        cv_periods_between_splits=None,
        cv_periods_between_train_test=None,
        cv_max_splits=3):
    """Computation pipeline for end-to-end forecasting.

    Trains a forecast model end-to-end:

        1. checks input data
        2. runs cross-validation to select optimal hyperparameters e.g. best model
        3. evaluates best model on test set
        4. provides forecast of best model (re-trained on all data) into the future

    Returns forecasts with methods to plot and see diagnostics.
    Also returns the fitted pipeline and CV results.

    Provides a high degree of customization over training and evaluation parameters:

        1. model
        2. cross validation
        3. evaluation
        4. forecast horizon

    See test cases for examples.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Timeseries data to forecast.
        Contains columns [`time_col`, `value_col`], and optional regressor columns.
        Regressor columns should include future values for prediction.
    time_col : `str`, default TIME_COL in constants.py
        Name of timestamp column in df.
    value_col : `str`, default VALUE_COL in constants.py
        Name of value column in df (the values to forecast).
    date_format : `str` or None, default None
        strftime format to parse time column, eg ``%m/%d/%Y``.
        Note that ``%f`` will parse all the way up to nanoseconds.
        If None (recommended), inferred by `pandas.to_datetime`.
    tz : `str` or None, default None
        Passed to `pandas.tz_localize` to localize the timestamp.
    freq : `str` or None, default None
        Frequency of input data. Used to generate future dates for prediction.
        Frequency strings can have multiples, e.g. '5H'.
        See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
        for a list of frequency aliases.
        If None, inferred by `pandas.infer_freq`.
        Provide this parameter if ``df`` has missing timepoints.
    train_end_date : `datetime.datetime`, optional, default None
        Last date to use for fitting the model. Forecasts are generated after this date.
        If None, it is set to the last date with a non-null value in
        ``value_col`` of ``df``.
    anomaly_info : `dict` or `list` [`dict`] or None, default None
        Anomaly adjustment info. Anomalies in ``df``
        are corrected before any forecasting is done.

        If None, no adjustments are made.

        A dictionary containing the parameters to
        `~greykite.common.features.adjust_anomalous_data.adjust_anomalous_data`.
        See that function for details.
        The possible keys are:

            ``"value_col"`` : `str`
                The name of the column in ``df`` to adjust. You may adjust the value
                to forecast as well as any numeric regressors.
            ``"anomaly_df"`` : `pandas.DataFrame`
                Adjustments to correct the anomalies.
            ``"start_time_col"``: `str`, default START_TIME_COL
                Start date column in ``anomaly_df``.
            ``"end_time_col"``: `str`, default END_TIME_COL
                End date column in ``anomaly_df``.
            ``"adjustment_delta_col"``: `str` or None, default None
                Impact column in ``anomaly_df``.
            ``"filter_by_dict"``: `dict` or None, default None
                Used to filter ``anomaly_df`` to the relevant anomalies for
                the ``value_col`` in this dictionary.
                Key specifies the column name, value specifies the filter value.
            ``"filter_by_value_col""``: `str` or None, default None
                Adds ``{filter_by_value_col: value_col}`` to ``filter_by_dict``
                if not None, for the ``value_col`` in this dictionary.
            ``"adjustment_method"`` : `str` ("add" or "subtract"), default "add"
                How to make the adjustment, if ``adjustment_delta_col`` is provided.

        Accepts a list of such dictionaries to adjust multiple columns in ``df``.
    pipeline : `sklearn.pipeline.Pipeline` or None, default None
        Pipeline to fit. The final named step must be called "estimator".
        If None, will use the default Pipeline from
        `~greykite.framework.pipeline.utils.get_basic_pipeline`.
    regressor_cols : `list` [`str`] or None, default None
        A list of regressor columns used in the training and prediction DataFrames.
        It should contain only the regressors that are being used in the grid search.
        If None, no regressor columns are used.
        Regressor columns that are unavailable in ``df`` are dropped.
    lagged_regressor_cols : `list` [`str`] or None, default None
        A list of additional columns needed for lagged regressors in the training and prediction DataFrames.
        This list can have overlap with ``regressor_cols``.
        If None, no additional columns are added to the DataFrame.
        Lagged regressor columns that are unavailable in ``df`` are dropped.
    estimator : instance of an estimator that implements `greykite.algo.models.base_forecast_estimator.BaseForecastEstimator`
        Estimator to use as the final step in the pipeline.
        Ignored if ``pipeline`` is provided.
    forecast_horizon : `int` or None, default None
        Number of periods to forecast into the future. Must be > 0.
        If None, default is determined from input data frequency.
    coverage : `float` or None, default=0.95
        Intended coverage of the prediction bands (0.0 to 1.0).
        If None, the upper/lower predictions are not returned.
        Ignored if `pipeline` is provided. Uses coverage of the ``pipeline`` estimator instead.
    test_horizon : `int` or None, default None
        Numbers of periods held back from end of df for test.
        The rest is used for cross validation.
        If None, default is forecast_horizon. Set to 0 to skip backtest.
    periods_between_train_test : `int` or None, default None
        Number of periods for the gap between train and test data.
        If None, default is 0.
    agg_periods : `int` or None, default None
        Number of periods to aggregate before evaluation.

        Model is fit and forecasted on the dataset's original frequency.

        Before evaluation, the actual and forecasted values are aggregated,
        using rolling windows of size ``agg_periods`` and the function
        ``agg_func``. (e.g. if the dataset is hourly, use ``agg_periods=24, agg_func=np.sum``,
        to evaluate performance on the daily totals).

        If None, does not aggregate before evaluation.

        Currently, this is only used when calculating CV metrics and
        the R2_null_model_score metric in backtest/forecast. No pre-aggregation
        is applied for the other backtest/forecast evaluation metrics.
    agg_func : callable or None, default None
        Takes an array and returns a number, e.g. np.max, np.sum.

        Defines how to aggregate rolling windows of actual and predicted values
        before evaluation.

        Ignored if ``agg_periods`` is None.

        Currently, this is only used when calculating CV metrics and
        the R2_null_model_score metric in backtest/forecast. No pre-aggregation
        is applied for the other backtest/forecast evaluation metrics.
    score_func : `str` or callable, default ``EvaluationMetricEnum.MeanAbsolutePercentError.name``
        Score function used to select optimal model in CV.
        If a callable, takes arrays ``y_true``, ``y_pred`` and returns a float.
        If a string, must be either a
        `~greykite.common.evaluation.EvaluationMetricEnum` member name
        or `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`.
    score_func_greater_is_better : `bool`, default False
        True if ``score_func`` is a score function, meaning higher is better,
        and False if it is a loss function, meaning lower is better.
        Must be provided if ``score_func`` is a callable (custom function).
        Ignored if ``score_func`` is a string, because the direction is known.
    cv_report_metrics : `str`, or `list` [`str`], or None, default `~greykite.common.constants.CV_REPORT_METRICS_ALL`
        Additional metrics to compute during CV, besides the one specified by ``score_func``.

            - If the string constant `greykite.framework.constants.CV_REPORT_METRICS_ALL`,
              computes all metrics in ``EvaluationMetricEnum``. Also computes
              ``FRACTION_OUTSIDE_TOLERANCE`` if ``relative_error_tolerance`` is not None.
              The results are reported by the short name (``.get_metric_name()``) for ``EvaluationMetricEnum``
              members and ``FRACTION_OUTSIDE_TOLERANCE_NAME`` for ``FRACTION_OUTSIDE_TOLERANCE``.
              These names appear in the keys of ``forecast_result.grid_search.cv_results_``
              returned by this function.
            - If a list of strings, each of the listed metrics is computed. Valid strings are
              `~greykite.common.evaluation.EvaluationMetricEnum` member names
              and `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`.

              For example::

                ["MeanSquaredError", "MeanAbsoluteError", "MeanAbsolutePercentError", "MedianAbsolutePercentError", "FractionOutsideTolerance2"]

            - If None, no additional metrics are computed.
    null_model_params : `dict` or None, default None
        Defines baseline model to compute ``R2_null_model_score`` evaluation metric.
        ``R2_null_model_score`` is the improvement in the loss function relative
        to a null model. It can be used to evaluate model quality with respect to
        a simple baseline. For details, see
        `~greykite.common.evaluation.r2_null_model_score`.

        The null model is a `~sklearn.dummy.DummyRegressor`,
        which returns constant predictions.

        Valid keys are "strategy", "constant", "quantile".
        See `~sklearn.dummy.DummyRegressor`. For example::

            null_model_params = {
                "strategy": "mean",
            }
            null_model_params = {
                "strategy": "median",
            }
            null_model_params = {
                "strategy": "quantile",
                "quantile": 0.8,
            }
            null_model_params = {
                "strategy": "constant",
                "constant": 2.0,
            }

        If None, ``R2_null_model_score`` is not calculated.

        Note: CV model selection always optimizes ``score_func``, not
        the ``R2_null_model_score``.
    relative_error_tolerance : `float` or None, default None
        Threshold to compute the ``Outside Tolerance`` metric,
        defined as the fraction of forecasted values whose relative
        error is strictly greater than ``relative_error_tolerance``.
        For example, 0.05 allows for 5% relative error.
        If `None`, the metric is not computed.
    hyperparameter_grid : `dict`, `list` [`dict`] or None, default None
        Sets properties of the steps in the pipeline,
        and specifies combinations to search over.
        Should be valid input to `sklearn.model_selection.GridSearchCV` (param_grid)
        or `sklearn.model_selection.RandomizedSearchCV` (param_distributions).

        Prefix transform/estimator attributes by the name of the step in the pipeline.
        See details at: https://scikit-learn.org/stable/modules/compose.html#nested-parameters

        If None, uses the default pipeline parameters.
    hyperparameter_budget : `int` or None, default None
        Max number of hyperparameter sets to try within the ``hyperparameter_grid`` search space.

        Runs a full grid search if ``hyperparameter_budget`` is sufficient to exhaust full
        ``hyperparameter_grid``, otherwise samples uniformly at random from the space.

        If None, uses defaults:

            * full grid search if all values are constant
            * 10 if any value is a distribution to sample from
    n_jobs : `int` or None, default `~greykite.framework.constants.COMPUTATION_N_JOBS`
        Number of jobs to run in parallel
        (the maximum number of concurrently running workers).
        ``-1`` uses all CPUs. ``-2`` uses all CPUs but one.
        ``None`` is treated as 1 unless in a `joblib.Parallel` backend context
        that specifies otherwise.
    verbose : `int`, default 1
        Verbosity level during CV.
        if > 0, prints number of fits.
        if > 1, prints fit parameters, total score + fit time.
        if > 2, prints train/test scores.
    cv_horizon : `int` or None, default None
        Number of periods in each CV test set.
        If None, default is ``forecast_horizon``.
        Set either ``cv_horizon`` or ``cv_max_splits`` to 0 to skip CV.
    cv_min_train_periods : `int` or None, default None
        Minimum number of periods for training each CV fold.
        If cv_expanding_window is False, every training period is this size.
        If None, default is 2 * ``cv_horizon``.
    cv_expanding_window : `bool`, default False
        If True, training window for each CV split is fixed to the first available date.
        Otherwise, train start date is sliding, determined by ``cv_min_train_periods``.
    cv_use_most_recent_splits: `bool`, default False
        If True, splits from the end of the dataset are used.
        Else a sampling strategy is applied. Check
        `~greykite.sklearn.cross_validation.RollingTimeSeriesSplit._sample_splits`
        for details.
    cv_periods_between_splits : `int` or None, default None
        Number of periods to slide the test window between CV splits.
        If None, default is ``cv_horizon``.
    cv_periods_between_train_test : `int` or None, default None
        Number of periods for the gap between train and test in a CV split.
        If None, default is ``periods_between_train_test``.
    cv_max_splits : `int` or None, default 3
        Maximum number of CV splits.
        Given the above configuration, samples up to max_splits train/test splits,
        preferring splits toward the end of available data. If None, uses all splits.
        Set either ``cv_horizon`` or ``cv_max_splits`` to 0 to skip CV.

    Returns
    -------
    forecast_result : :class:`~greykite.framework.pipeline.pipeline.ForecastResult`
        Forecast result. See :class:`~greykite.framework.pipeline.pipeline.ForecastResult`
        for details.

            * If ``cv_horizon=0``, ``forecast_result.grid_search.best_estimator_``
              and ``forecast_result.grid_search.best_params_`` attributes are defined
              according to the provided single set of parameters. There must be a single
              set of parameters to skip cross-validation.
            * If ``test_horizon=0``, ``forecast_result.backtest`` is None.
    """
    # Normalizes the hyperparameter grid: empty/None becomes an empty dict.
    if hyperparameter_grid is None or hyperparameter_grid == []:
        hyperparameter_grid = {}
    # When hyperparameter_grid is a singleton list, unlist it
    if isinstance(hyperparameter_grid, list) and len(hyperparameter_grid) == 1:
        hyperparameter_grid = hyperparameter_grid[0]
    # Loads full dataset
    ts = UnivariateTimeSeries()
    ts.load_data(
        df=df,
        time_col=time_col,
        value_col=value_col,
        freq=freq,
        date_format=date_format,
        tz=tz,
        train_end_date=train_end_date,
        regressor_cols=regressor_cols,
        lagged_regressor_cols=lagged_regressor_cols,
        anomaly_info=anomaly_info)
    # Splits data into training and test sets. ts.df uses standardized column names
    if test_horizon == 0:
        # No backtest: the full fit set is used for training.
        train_df = ts.fit_df
        train_y = ts.fit_y
        test_df = pd.DataFrame(columns=list(df.columns))
    else:
        # Make sure to refit best_pipeline appropriately
        train_df, test_df, train_y, test_y = train_test_split(
            ts.fit_df,
            ts.fit_y,
            train_size=ts.fit_df.shape[0] - test_horizon - periods_between_train_test,
            test_size=test_horizon + periods_between_train_test,
            shuffle=False)  # this is important since this is timeseries forecasting!
    log_message(f"Train size: {train_df.shape[0]}. Test size: {test_df.shape[0]}", LoggingLevelEnum.INFO)
    # Defines default training pipeline
    if pipeline is None:
        pipeline = get_basic_pipeline(
            estimator=estimator,
            score_func=score_func,
            score_func_greater_is_better=score_func_greater_is_better,
            agg_periods=agg_periods,
            agg_func=agg_func,
            relative_error_tolerance=relative_error_tolerance,
            coverage=coverage,
            null_model_params=null_model_params,
            regressor_cols=ts.regressor_cols,
            lagged_regressor_cols=ts.lagged_regressor_cols)
    # Searches for the best parameters, and refits model with selected parameters on the entire training set
    if cv_horizon == 0 or cv_max_splits == 0:
        # No cross-validation. Only one set of hyperparameters is allowed.
        try:
            if len(ParameterGrid(hyperparameter_grid)) > 1:
                raise ValueError(
                    "CV is required to identify the best model because there are multiple options "
                    "in `hyperparameter_grid`. Either provide a single option or set `cv_horizon` and `cv_max_splits` "
                    "to nonzero values.")
        except TypeError:  # Parameter value is not iterable
            raise ValueError(
                "CV is required to identify the best model because `hyperparameter_grid` contains "
                "a distribution. Either remove the distribution or set `cv_horizon` and `cv_max_splits` "
                "to nonzero values.")
        # Fits model to entire train set. Params must be set manually since it's not done by grid search
        params = {k: v[0] for k, v in hyperparameter_grid.items()}  # unpack lists, `v` is a singleton list with the parameter value
        best_estimator = pipeline.set_params(**params).fit(train_df, train_y)
        # Wraps this model in a dummy RandomizedSearchCV object to return the backtest model
        grid_search = get_hyperparameter_searcher(
            hyperparameter_grid=hyperparameter_grid,
            model=pipeline,
            cv=None,  # no cross-validation
            hyperparameter_budget=hyperparameter_budget,
            n_jobs=n_jobs,
            verbose=verbose,
            score_func=score_func,
            score_func_greater_is_better=score_func_greater_is_better,
            cv_report_metrics=cv_report_metrics,
            agg_periods=agg_periods,
            agg_func=agg_func,
            relative_error_tolerance=relative_error_tolerance)
        # Sets relevant attributes. Others are undefined (cv_results_, best_score_, best_index_, scorer_, refit_time_)
        grid_search.best_estimator_ = best_estimator
        grid_search.best_params_ = params
        grid_search.n_splits_ = 0
    else:
        # Defines cross-validation splitter
        cv = RollingTimeSeriesSplit(
            forecast_horizon=cv_horizon,
            min_train_periods=cv_min_train_periods,
            expanding_window=cv_expanding_window,
            use_most_recent_splits=cv_use_most_recent_splits,
            periods_between_splits=cv_periods_between_splits,
            periods_between_train_test=cv_periods_between_train_test,
            max_splits=cv_max_splits)
        # Defines grid search approach for CV
        grid_search = get_hyperparameter_searcher(
            hyperparameter_grid=hyperparameter_grid,
            model=pipeline,
            cv=cv,
            hyperparameter_budget=hyperparameter_budget,
            n_jobs=n_jobs,
            verbose=verbose,
            score_func=score_func,
            score_func_greater_is_better=score_func_greater_is_better,
            cv_report_metrics=cv_report_metrics,
            agg_periods=agg_periods,
            agg_func=agg_func,
            relative_error_tolerance=relative_error_tolerance)
        grid_search.fit(train_df, train_y)
        best_estimator = grid_search.best_estimator_
    # Evaluates historical performance, fits model to all data (train+test)
    if test_horizon > 0:
        backtest_train_end_date = train_df[TIME_COL].max()
        # Uses pd.date_range because pd.Timedelta does not work for complicated frequencies e.g. "W-MON"
        backtest_test_start_date = pd.date_range(
            start=backtest_train_end_date,
            periods=periods_between_train_test + 2,  # Adds 2 as start parameter is inclusive
            freq=ts.freq)[-1]
        backtest = get_forecast(
            df=ts.fit_df,  # Backtest needs to happen on fit_df, not on the entire df
            trained_model=best_estimator,
            train_end_date=backtest_train_end_date,
            test_start_date=backtest_test_start_date,
            forecast_horizon=test_horizon,
            xlabel=time_col,
            ylabel=value_col,
            relative_error_tolerance=relative_error_tolerance)
        best_pipeline = clone(best_estimator)  # Copies optimal parameters
        best_pipeline.fit(ts.fit_df, ts.y)  # Refits this model on entire training dataset
    else:
        backtest = None  # Backtest training metrics are the same as forecast training metrics
        best_pipeline = best_estimator  # best_model is already fit to all data
    # Makes future predictions
    periods = forecast_horizon + periods_between_train_test
    future_df = ts.make_future_dataframe(
        periods=periods,
        include_history=True)
    forecast_train_end_date = ts.train_end_date
    # Uses pd.date_range because pd.Timedelta does not work for complicated frequencies e.g. "W-MON"
    forecast_test_start_date = pd.date_range(
        start=forecast_train_end_date,
        periods=periods_between_train_test + 2,  # Adds 2 as start parameter is inclusive
        freq=ts.freq)[-1]
    forecast = get_forecast(
        df=future_df,
        trained_model=best_pipeline,
        train_end_date=forecast_train_end_date,
        test_start_date=forecast_test_start_date,
        forecast_horizon=forecast_horizon,
        xlabel=time_col,
        ylabel=value_col,
        relative_error_tolerance=relative_error_tolerance)
    # Bundles the timeseries, search results, refit model, backtest, and forecast.
    result = ForecastResult(
        timeseries=ts,
        grid_search=grid_search,
        model=best_pipeline,
        backtest=backtest,
        forecast=forecast
    )
    return result
class RollingTimeSeriesSplit(BaseCrossValidator):
    """Flexible splitter for time-series cross validation and rolling window evaluation.
    Suitable for use in GridSearchCV.
    Attributes
    ----------
    min_splits : int
        Guaranteed min number of splits. This is always set to 1. If provided configuration results in 0 splits,
        the cross validator will yield a default split.
    __starting_test_index : int
        Test end index of the first CV split. Actual offset = __starting_test_index + _get_offset(X), for a particular
        dataset X.
        Cross validator ensures the last test split contains the last observation in X.
    Examples
    --------
    >>> from greykite.sklearn.cross_validation import RollingTimeSeriesSplit
    >>> X = np.random.rand(20, 4)
    >>> tscv = RollingTimeSeriesSplit(forecast_horizon=3, max_splits=4)
    >>> tscv.get_n_splits(X=X)
    4
    >>> for train, test in tscv.split(X=X):
    ...     print(train, test)
    [2 3 4 5 6 7] [ 8 9 10]
    [ 5 6 7 8 9 10] [11 12 13]
    [ 8 9 10 11 12 13] [14 15 16]
    [11 12 13 14 15 16] [17 18 19]
    >>> X = np.random.rand(20, 4)
    >>> tscv = RollingTimeSeriesSplit(forecast_horizon=2,
    ...                               min_train_periods=4,
    ...                               expanding_window=True,
    ...                               periods_between_splits=4,
    ...                               periods_between_train_test=2,
    ...                               max_splits=None)
    >>> tscv.get_n_splits(X=X)
    4
    >>> for train, test in tscv.split(X=X):
    ...     print(train, test)
    [0 1 2 3] [6 7]
    [0 1 2 3 4 5 6 7] [10 11]
    [ 0 1 2 3 4 5 6 7 8 9 10 11] [14 15]
    [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15] [18 19]
    >>> X = np.random.rand(5, 4)  # default split if there is not enough data
    >>> for train, test in tscv.split(X=X):
    ...     print(train, test)
    [0 1 2 3] [4]
    """
    def __init__(
            self,
            forecast_horizon,
            min_train_periods=None,
            expanding_window=False,
            use_most_recent_splits=False,
            periods_between_splits=None,
            periods_between_train_test=0,
            max_splits=3):
        """Initializes attributes of RollingTimeSeriesSplit.
        Parameters
        ----------
        forecast_horizon : `int`
            How many periods in each CV test set.
        min_train_periods : `int` or None, optional
            Minimum number of periods for training.
            If ``expanding_window`` is False, every training period has this size.
            If None, defaults to ``2 * forecast_horizon``.
        expanding_window : `bool`, default False
            If True, training window for each CV split is fixed to the first available date.
            Otherwise, train start date is sliding, determined by ``min_train_periods``.
        use_most_recent_splits: `bool`, default False
            If True, splits from the end of the dataset are used.
            Else a sampling strategy is applied. Check
            `~greykite.sklearn.cross_validation.RollingTimeSeriesSplit._sample_splits`
            for details.
        periods_between_splits : `int` or None
            Number of periods to slide the test window.
            If None, defaults to ``forecast_horizon`` (non-overlapping test sets).
        periods_between_train_test : `int`
            Number of periods gap between train and test within a CV split.
        max_splits : `int` or None
            Maximum number of CV splits. Given the above configuration, samples up to max_splits train/test splits,
            preferring splits toward the end of available data. If None, uses all splits.
        """
        super().__init__()
        self.forecast_horizon = get_integer(forecast_horizon, name="forecast_horizon", min_value=1)
        # by default, use at least twice the forecast horizon for training
        self.min_train_periods = get_integer(min_train_periods, name="min_train_periods",
                                             min_value=1, default_value=2 * self.forecast_horizon)
        # by default, use fixed size training window
        self.expanding_window = expanding_window
        # by default, does not force most recent splits
        self.use_most_recent_splits = use_most_recent_splits
        # by default, use non-overlapping test sets
        self.periods_between_splits = get_integer(periods_between_splits, name="periods_between_splits",
                                                  min_value=1, default_value=self.forecast_horizon)
        # by default, use test set immediately following train set
        self.periods_between_train_test = get_integer(periods_between_train_test, name="periods_between_train_test",
                                                      min_value=0, default_value=0)
        # warns (but does not fail) if the training window is shorter than recommended
        if self.min_train_periods < 2 * self.forecast_horizon:
            warnings.warn(f"`min_train_periods` is too small for your `forecast_horizon`. Should be at least"
                          f" {forecast_horizon*2}=2*`forecast_horizon`.")
        self.max_splits = max_splits
        self.min_splits = 1  # CV ensures there is always at least one split
        # test end index for the first CV split, before applying offset to ensure last data point in X is used
        self.__starting_test_index = (self.forecast_horizon
                                      + self.min_train_periods
                                      + self.periods_between_train_test
                                      - 1)
    def split(self, X, y=None, groups=None):
        """Generates indices to split data into training and test CV folds according to rolling
        window time series cross validation.
        If the configuration yields no valid split, falls back to a single 90/10 split.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
            Must have `shape` method.
        y : array-like, shape (n_samples,), optional
            The target variable for supervised learning problems. Always ignored, exists for compatibility.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set. Always ignored, exists for compatibility.
        Yields
        ------
        train : `numpy.array`
            The training set indices for that split.
        test : `numpy.array`
            The testing set indices for that split.
        """
        num_samples = X.shape[0]
        indices = np.arange(num_samples)
        n_splits_without_capping = self.get_n_splits_without_capping(X=X)
        n_splits = self.get_n_splits(X=X)
        # Informational warnings only; none of these branches abort the split generation.
        if n_splits_without_capping == 0:
            warnings.warn("There are no CV splits under the requested settings. Decrease `forecast_horizon` and/or"
                          " `min_train_periods`. Using default 90/10 CV split")
        elif n_splits == 1:
            warnings.warn("There is only one CV split")
        elif n_splits >= 10:
            warnings.warn(f"There is a high number of CV splits ({n_splits}). If training is slow, increase "
                          f"`periods_between_splits` or `min_train_periods`, or decrease `max_splits`")
        log_message(f"There are {n_splits} CV splits.", LoggingLevelEnum.INFO)
        if n_splits_without_capping == 0:  # uses default split
            default_split_ratio = 0.9
            train_samples = int(round(num_samples * default_split_ratio))
            yield indices[:train_samples], indices[train_samples:]
        else:  # determines which splits to keep so that up to max_splits are returned
            splits_to_keep = self._sample_splits(n_splits_without_capping)
            last_index = num_samples - 1
            # Offset shifts all splits forward so the final test window ends exactly at the last observation.
            test_end_index = self.__starting_test_index + self._get_offset(X=X)
            current_split_index = 0
            # Walks forward over every candidate split; only those sampled into `splits_to_keep` are yielded.
            while test_end_index <= last_index:
                test_start_index = test_end_index - self.forecast_horizon + 1
                train_end_index = test_start_index - self.periods_between_train_test - 1
                train_start_index = 0 if self.expanding_window else train_end_index - self.min_train_periods + 1
                assert train_start_index >= 0  # guaranteed by n_splits > 0
                if current_split_index in splits_to_keep:
                    log_message(f"CV split: Train {train_start_index} to {train_end_index}. "
                                f"Test {test_start_index} to {test_end_index}.", LoggingLevelEnum.DEBUG)
                    yield indices[train_start_index:train_end_index + 1], indices[test_start_index:test_end_index + 1]
                test_end_index += self.periods_between_splits
                current_split_index += 1
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations yielded by the cross-validator.
        Applies both the ``max_splits`` cap and the ``min_splits`` floor.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data to split
        y : object
            Always ignored, exists for compatibility.
        groups : object
            Always ignored, exists for compatibility.
        Returns
        -------
        n_splits : int
            The number of splitting iterations yielded by the cross-validator.
        """
        num_splits = self.get_n_splits_without_capping(X=X)
        if self.max_splits is not None and num_splits > self.max_splits:
            num_splits = self.max_splits  # num_splits is set to max limit
        if num_splits == 0:
            num_splits = self.min_splits  # not enough observations to create split, uses default
        return num_splits
    def get_n_splits_without_capping(self, X=None):
        """Returns the number of splitting iterations in the cross-validator as configured, ignoring
        self.max_splits and self.min_splits.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data to split
        Returns
        -------
        n_splits : int
            The number of splitting iterations in the cross-validator as configured, ignoring
            self.max_splits and self.min_splits
        """
        last_index = X.shape[0] - 1
        starting_index = self.__starting_test_index + self._get_offset(X=X)
        if starting_index > last_index:
            return 0
        # One split per `periods_between_splits` periods between the first test end index and the last observation.
        return math.ceil((last_index - starting_index + 1) / self.periods_between_splits)
    def _get_offset(self, X=None):
        """Returns an offset to add to test set indices when creating CV splits.
        CV splits are shifted so that the last test observation is the last point in X.
        This shift does not affect the total number of splits.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data to split
        Returns
        -------
        offset : int
            The number of observations to ignore at the beginning of X when creating CV splits
        """
        last_index = X.shape[0] - 1
        starting_index = self.__starting_test_index
        if starting_index > last_index:
            return 0
        # Remainder after evenly spacing the splits; shifting by it aligns the final test end with `last_index`.
        return (last_index - starting_index) % self.periods_between_splits
    def _sample_splits(self, num_splits, seed=48912):
        """Samples up to ``max_splits`` items from list(range(`num_splits`)).
        If ``use_most_recent_splits`` is True, highest split indices up to ``max_splits``
        are retained. Otherwise, the following sampling scheme is implemented:
        - takes the last 2 splits
        - samples from the rest uniformly at random
        Parameters
        ----------
        num_splits : `int`
            Number of splits before sampling.
        seed : `int`
            Seed for random sampling.
            NOTE(review): this seeds the *global* `random` module state, which affects other
            users of `random` in the same process.
        Returns
        -------
        split_indices : `list`
            Indices of splits to keep (subset of `list(range(num_splits))`).
        """
        split_indices = list(range(num_splits))
        if self.max_splits is not None and num_splits > self.max_splits:
            if self.use_most_recent_splits:
                # keep indices from the end up to max_splits
                keep_split_indices = split_indices[-self.max_splits:]
            else:
                # applies sampling scheme to take up to max_splits
                keep_split_indices = []
                if self.max_splits > 0:  # first takes the last split
                    keep_split_indices.append(split_indices[-1])
                if self.max_splits > 1:  # then takes the second to last split
                    keep_split_indices.append(split_indices[-2])
                if self.max_splits > 2:  # then randomly samples the remaining splits
                    random.seed(seed)
                    keep_split_indices += random.sample(split_indices[:-2], self.max_splits - 2)
            split_indices = keep_split_indices
        return split_indices
    def _iter_test_indices(self, X=None, y=None, groups=None):
        """Class directly implements `split` instead of providing this function"""
        raise NotImplementedError
The provided code snippet includes necessary dependencies for implementing the `forecast_pipeline_rolling_evaluation` function. Write a Python function `def forecast_pipeline_rolling_evaluation( pipeline_params: Dict, tscv: RollingTimeSeriesSplit)` to solve the following problem:
Runs ``forecast_pipeline`` on a rolling window basis. Parameters ---------- pipeline_params : `Dict` A dictionary containing the input to the :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`. tscv : `~greykite.sklearn.cross_validation.RollingTimeSeriesSplit` Cross-validation object that determines the rolling window evaluation. See :class:`~greykite.sklearn.cross_validation.RollingTimeSeriesSplit` for details. Returns ------- rolling_evaluation : `dict` Stores benchmarking results for each split, e.g. split_0 contains result for first split, split_1 contains result for second split and so on. Number of splits is determined by the input parameters. Every split is a dictionary with keys "runtime_sec" and "pipeline_result".
Here is the function:
def forecast_pipeline_rolling_evaluation(
        pipeline_params: Dict,
        tscv: RollingTimeSeriesSplit):
    """Runs ``forecast_pipeline`` on a rolling window basis.

    The input ``pipeline_params`` dict is not modified; overrides
    (``test_horizon``, ``train_end_date``) are applied to an internal copy.

    Parameters
    ----------
    pipeline_params : `Dict`
        A dictionary containing the input to the
        :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
        Must contain keys ``"df"``, ``"forecast_horizon"`` and
        ``"periods_between_train_test"``.
    tscv : `~greykite.sklearn.cross_validation.RollingTimeSeriesSplit`
        Cross-validation object that determines the rolling window evaluation.
        See :class:`~greykite.sklearn.cross_validation.RollingTimeSeriesSplit` for details.

    Returns
    -------
    rolling_evaluation : `dict`
        Stores benchmarking results for each split, e.g.
        split_0 contains result for first split, split_1 contains result for second split and so on.
        Number of splits is determined by the input parameters.
        Every split is a dictionary with keys "runtime_sec" and "pipeline_result".

    Raises
    ------
    ValueError
        If ``forecast_horizon`` or ``periods_between_train_test`` in
        ``pipeline_params`` disagree with the corresponding ``tscv`` attributes.
    """
    if pipeline_params["forecast_horizon"] != tscv.forecast_horizon:
        raise ValueError("Forecast horizon in 'pipeline_params' does not match that of the 'tscv'.")
    if pipeline_params["periods_between_train_test"] != tscv.periods_between_train_test:
        raise ValueError("'periods_between_train_test' in 'pipeline_params' does not match that of the 'tscv'.")
    # Shallow copy so the overrides below do not mutate the caller's dict.
    pipeline_params = pipeline_params.copy()
    df = pipeline_params["df"]
    time_col = pipeline_params.get("time_col", TIME_COL)
    date_format = pipeline_params.get("date_format")
    # Disables backtest. For rolling evaluation we know the actual values in forecast period.
    # So out of sample performance can be calculated using pipeline_result.forecast
    pipeline_params["test_horizon"] = 0
    rolling_evaluation = {}
    with tqdm(list(tscv.split(X=df)), ncols=800, leave=True) as progress_bar:
        for split_num, (train, test) in enumerate(progress_bar):
            # Description will be displayed on the left of progress bar
            progress_bar.set_description(f"Split '{split_num}' ")
            # Last training index of this split marks the training cutoff.
            # When `date_format` is None, pandas infers the format automatically
            # (the deprecated `infer_datetime_format` flag is no longer needed).
            train_end_date = pd.to_datetime(
                df.iloc[train[-1]][time_col],
                format=date_format)
            pipeline_params["train_end_date"] = train_end_date
            start_time = timeit.default_timer()
            pipeline_result = forecast_pipeline(**pipeline_params)
            runtime = timeit.default_timer() - start_time
            pipeline_output = dict(
                runtime_sec=round(runtime, 3),
                pipeline_result=pipeline_result)
            rolling_evaluation[f"split_{split_num}"] = pipeline_output
            log_message(f"Completed evaluation for split {split_num}.", LoggingLevelEnum.DEBUG)
    return rolling_evaluation
import base64
from io import BytesIO
from typing import Union

import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf

from greykite.algo.changepoint.adalasso.changepoint_detector import ChangepointDetector
from greykite.algo.common.holiday_inferrer import HolidayInferrer
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.enums import SeasonalityEnum
from greykite.common.time_properties import min_gap_in_seconds
from greykite.common.time_properties_forecast import get_simple_time_frequency_from_period
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
class ChangepointDetector:
"""A class to implement change point detection.
Currently supports long-term change point detection only. Input is a dataframe with time_col
indicating the column of time info (the format should be able to be parsed by pd.to_datetime),
and value_col indicating the column of observed time series values.
Attributes
----------
original_df : `pandas.DataFrame`
The original data df, used to retrieve original observations, if aggregation is used in
fitting change points.
time_col : `str`
The column name for time column.
value_col : `str`
The column name for value column.
trend_potential_changepoint_n: `int`
The number of change points that are evenly distributed over the time period.
yearly_seasonality_order : `int`
The yearly seasonality order used when fitting trend.
y : `pandas.Series`
The observations after aggregation.
trend_df : `pandas.DataFrame`
The augmented df of the original_df, including regressors of trend change points and
Fourier series for yearly seasonality.
trend_model : `sklearn.base.RegressionMixin`
The fitted trend model.
trend_coef : `numpy.array`
The estimated trend coefficients.
trend_intercept : `float`
The estimated trend intercept.
adaptive_lasso_coef : `list`
The list of length two, first element is estimated trend coefficients, and second element
is intercept, both estimated by adaptive lasso.
trend_changepoints : `list`
The list of detected trend change points, parsable by pd.to_datetime
trend_estimation : `pd.Series`
The estimated trend with detected trend change points.
seasonality_df : `pandas.DataFrame`
The augmented df of ``original_df``, including regressors of seasonality change points with
different Fourier series frequencies.
seasonality_changepoints : `dict`
The dictionary of detected seasonality change points for each component.
Keys are component names, and values are list of change points.
seasonality_estimation : `pandas.Series`
The estimated seasonality with detected seasonality change points.
The series has the same length as ``original_df``. Index is timestamp, and values
are the estimated seasonality at each timestamp.
The seasonality estimation is the estimated of seasonality effect with trend estimated
by `~greykite.algo.changepoint.adalasso.changepoints_utils.estimate_trend_with_detected_changepoints`
removed.
Methods
-------
find_trend_changepoints : callable
Finds the potential trend change points for a given time series df.
plot : callable
Plot the results after implementing find_trend_changepoints.
"""
    def __init__(self):
        """Initializes all attributes to ``None``.
        They are populated when ``find_trend_changepoints`` (and the related
        seasonality / level-shift detection steps) are run.
        """
        self.original_df: Optional[pd.DataFrame] = None  # input df, kept to retrieve original observations when aggregation is used
        self.time_col: Optional[str] = None  # time column name in ``original_df``
        self.value_col: Optional[str] = None  # value column name in ``original_df``
        self.trend_potential_changepoint_n: Optional[int] = None  # number of evenly distributed potential trend changepoints
        self.yearly_seasonality_order: Optional[int] = None  # yearly seasonality Fourier order used when fitting trend
        self.y: Optional[Union[pd.Series, pd.DataFrame]] = None  # observations after aggregation
        self.trend_df: Optional[Union[pd.Series, pd.DataFrame]] = None  # design matrix with changepoint and yearly-seasonality regressors
        self.trend_model: Optional[RegressorMixin] = None  # fitted trend model
        self.trend_coef: Optional[np.ndarray] = None  # estimated trend coefficients
        self.trend_intercept: Optional[float] = None  # estimated trend intercept
        self.adaptive_lasso_coef: Optional[List] = None  # [coefficients, intercept] estimated by adaptive lasso
        self.trend_changepoints: Optional[List] = None  # detected trend changepoints, parsable by pd.to_datetime
        self.trend_estimation: Optional[pd.Series] = None  # estimated trend with detected trend changepoints
        self.seasonality_df: Optional[pd.DataFrame] = None  # design matrix with seasonality-changepoint regressors
        self.seasonality_changepoints: Optional[dict] = None  # detected seasonality changepoints per component
        self.seasonality_estimation: Optional[pd.Series] = None  # estimated seasonality effect with trend removed
        self.shift_detector: Optional[ShiftDetection] = None  # optional level-shift detector passed to ``find_trend_changepoints``
        self.level_shift_df: Optional[pd.DataFrame] = None  # level-shift regressors computed by ``shift_detector``
def find_trend_changepoints(
self,
df,
time_col,
value_col,
shift_detector=None,
yearly_seasonality_order=8,
yearly_seasonality_change_freq=None,
resample_freq="D",
trend_estimator="ridge",
adaptive_lasso_initial_estimator="ridge",
regularization_strength=None,
actual_changepoint_min_distance="30D",
potential_changepoint_distance=None,
potential_changepoint_n=100,
potential_changepoint_n_max=None,
no_changepoint_distance_from_begin=None,
no_changepoint_proportion_from_begin=0.0,
no_changepoint_distance_from_end=None,
no_changepoint_proportion_from_end=0.0,
fast_trend_estimation=True):
"""Finds trend change points automatically by adaptive lasso.
The algorithm does an aggregation with a user-defined frequency, defaults daily.
If ``potential_changepoint_distance`` is not given, ``potential_changepoint_n``
potential change points are evenly distributed over the time period, else
``potential_changepoint_n`` is overridden by::
total_time_length / ``potential_changepoint_distance``
Users can specify either ``no_changepoint_proportion_from_end`` to specify what proportion
from the end of data they do not want changepoints, or ``no_changepoint_distance_from_end``
(overrides ``no_changepoint_proportion_from_end``) to specify how long from the end they
do not want change points.
Then all potential change points will be selected by adaptive lasso, with the initial
estimator specified by ``adaptive_lasso_initial_estimator``. If user specifies
``regularization_strength``, then the adaptive lasso will be run with a single tuning
parameter calculated based on user provided prior, else a cross-validation will be run to
automatically select the tuning parameter.
A yearly seasonality is also fitted at the same time, preventing trend from catching
yearly periodical changes.
A rule-based guard function is applied at the end to ensure change points are not
too close, as specified by ``actual_changepoint_min_distance``.
Parameters
----------
df: `pandas.DataFrame`
The data df
time_col : `str`
Time column name in ``df``
value_col : `str`
Value column name in ``df``
shift_detector: `greykite.algo.changepoint.shift_detection.shift_detector.ShiftDetection`
An instance of ShiftDetection for identifying level shifts and computing regressors. Level
shift points will be considered as regressors when selecting change points by adaptive lasso.
yearly_seasonality_order : `int`, default 8
Fourier series order to capture yearly seasonality.
yearly_seasonality_change_freq : `DateOffset`, `Timedelta` or `str` or `None`, default `None`
How often to change the yearly seasonality model. Set to `None` to disable this feature.
This is useful if you have more than 2.5 years of data and the detected trend without this
feature is inaccurate because yearly seasonality changes over the training period.
Modeling yearly seasonality separately over the each period can prevent trend changepoints
from fitting changes in yearly seasonality. For example, if you have 2.5 years of data and
yearly seasonality increases in magnitude after the first year, setting this parameter to
"365D" will model each year's yearly seasonality differently and capture both shapes.
However, without this feature, both years will have the same yearly seasonality, roughly
the average effect across the training set.
Note that if you use `str` as input, the maximal supported unit is day, i.e.,
you might use "200D" but not "12M" or "1Y".
resample_freq : `DateOffset`, `Timedelta`, `str` or None, default "D".
The frequency to aggregate data.
Coarser aggregation leads to fitting longer term trends.
If None, no aggregation will be done.
trend_estimator : `str` in ["ridge", "lasso" or "ols"], default "ridge".
The estimator to estimate trend. The estimated trend is only for plotting purposes.
'ols' is not recommended when ``yearly_seasonality_order`` is specified other than 0,
because significant over-fitting will happen.
In this case, the given value is overridden by "ridge".
adaptive_lasso_initial_estimator : `str` in ["ridge", "lasso" or "ols"], default "ridge".
The initial estimator to compute adaptive lasso weights
regularization_strength : `float` in [0, 1] or `None`
The regularization for change points. Greater value implies fewer change points.
0 indicates all change points, and 1 indicates no change point.
If `None`, the turning parameter will be selected by cross-validation.
If a value is given, it will be used as the tuning parameter.
actual_changepoint_min_distance : `DateOffset`, `Timedelta` or `str`, default "30D"
The minimal distance allowed between detected change points. If consecutive change points
are within this minimal distance, the one with smaller absolute change coefficient will
be dropped.
Note: maximal unit is 'D', i.e., you may use units no more than 'D' such as
'10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
potential_changepoint_distance : `DateOffset`, `Timedelta`, `str` or None, default None
The distance between potential change points.
If provided, will override the parameter ``potential_changepoint_n``.
Note: maximal unit is 'D', i.e., you may only use units no more than 'D' such as
'10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
potential_changepoint_n : `int`, default 100
Number of change points to be evenly distributed, recommended 1-2 per month, based
on the training data length.
potential_changepoint_n_max : `int` or None, default None
The maximum number of potential changepoints.
This parameter is effective when user specifies ``potential_changepoint_distance``,
and the number of potential changepoints in the training data is more than ``potential_changepoint_n_max``,
then it is equivalent to specifying ``potential_changepoint_n = potential_changepoint_n_max``,
and ignoring ``potential_changepoint_distance``.
no_changepoint_distance_from_begin : `DateOffset`, `Timedelta`, `str` or None, default None
The length of time from the beginning of training data, within which no change point will be placed.
If provided, will override the parameter ``no_changepoint_proportion_from_begin``.
Note: maximal unit is 'D', i.e., you may only use units no more than 'D' such as
'10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
no_changepoint_proportion_from_begin : `float` in [0, 1], default 0.0.
``potential_changepoint_n`` change points will be placed evenly over the whole training period,
however, change points that are located within the first ``no_changepoint_proportion_from_begin``
proportion of training period will not be used for change point detection.
no_changepoint_distance_from_end : `DateOffset`, `Timedelta`, `str` or None, default None
The length of time from the end of training data, within which no change point will be placed.
If provided, will override the parameter ``no_changepoint_proportion_from_end``.
Note: maximal unit is 'D', i.e., you may only use units no more than 'D' such as
'10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
no_changepoint_proportion_from_end : `float` in [0, 1], default 0.0.
``potential_changepoint_n`` change points will be placed evenly over the whole training period,
however, change points that are located within the last ``no_changepoint_proportion_from_end``
proportion of training period will not be used for change point detection.
fast_trend_estimation : `bool`, default True
If True, the trend estimation is not refitted on the original data,
but is a linear interpolation of the fitted trend from the resampled time series.
If False, the trend estimation is refitted on the original data.
Return
------
result : `dict`
result dictionary with keys:
``"trend_feature_df"`` : `pandas.DataFrame`
The augmented df for change detection, in other words, the design matrix for
the regression model. Columns:
- 'changepoint0': regressor for change point 0, equals the continuous time
of the observation minus the continuous time for time of origin.
- ...
- 'changepoint{potential_changepoint_n}': regressor for change point
{potential_changepoint_n}, equals the continuous time of the observation
minus the continuous time of the {potential_changepoint_n}th change point.
- 'cos1_conti_year_yearly': cosine yearly seasonality regressor of first order.
- 'sin1_conti_year_yearly': sine yearly seasonality regressor of first order.
- ...
- 'cos{yearly_seasonality_order}_conti_year_yearly' : cosine yearly seasonality
regressor of {yearly_seasonality_order}th order.
- 'sin{yearly_seasonality_order}_conti_year_yearly' : sine yearly seasonality
regressor of {yearly_seasonality_order}th order.
``"trend_changepoints"`` : `list`
The list of detected change points.
``"changepoints_dict"`` : `dict`
The change point dictionary that is compatible as an input with
`~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`
``"trend_estimation"`` : `pandas.Series`
The estimated trend with detected trend change points.
"""
# Checks parameter rationality
if potential_changepoint_n < 0:
raise ValueError("potential_changepoint_n can not be negative. "
"A large number such as 100 is recommended")
if yearly_seasonality_order < 0:
raise ValueError("year_seasonality_order can not be negative. "
"A number less than or equal to 10 is recommended")
if df.dropna().shape[0] < 5:
raise ValueError("Change point detector does not work for less than "
"5 observations. Please increase sample size.")
if no_changepoint_proportion_from_begin < 0 or no_changepoint_proportion_from_begin > 1:
raise ValueError("no_changepoint_proportion_from_begin needs to be between 0 and 1.")
if no_changepoint_proportion_from_end < 0 or no_changepoint_proportion_from_end > 1:
raise ValueError("no_changepoint_proportion_from_end needs to be between 0 and 1.")
if no_changepoint_distance_from_begin is not None:
check_freq_unit_at_most_day(no_changepoint_distance_from_begin, "no_changepoint_distance_from_begin")
data_length = pd.to_datetime(df[time_col].iloc[-1]) - pd.to_datetime(df[time_col].iloc[0])
no_changepoint_proportion_from_begin = to_offset(no_changepoint_distance_from_begin).delta / data_length
no_changepoint_proportion_from_begin = min(no_changepoint_proportion_from_begin, 1)
if no_changepoint_distance_from_end is not None:
check_freq_unit_at_most_day(no_changepoint_distance_from_end, "no_changepoint_distance_from_end")
data_length = pd.to_datetime(df[time_col].iloc[-1]) - pd.to_datetime(df[time_col].iloc[0])
no_changepoint_proportion_from_end = to_offset(no_changepoint_distance_from_end).delta / data_length
no_changepoint_proportion_from_end = min(no_changepoint_proportion_from_end, 1)
if potential_changepoint_distance is not None:
check_freq_unit_at_most_day(potential_changepoint_distance, "potential_changepoint_distance")
data_length = pd.to_datetime(df[time_col].iloc[-1]) - pd.to_datetime(df[time_col].iloc[0])
potential_changepoint_n = data_length // to_offset(potential_changepoint_distance).delta
if potential_changepoint_n_max is not None:
if potential_changepoint_n_max <= 0:
raise ValueError("potential_changepoint_n_max must be a positive integer.")
if potential_changepoint_n > potential_changepoint_n_max:
log_message(
message=f"Number of potential changepoints is capped by 'potential_changepoint_n_max' "
f"as {potential_changepoint_n_max}. The 'potential_changepoint_distance' "
f"{potential_changepoint_distance} is ignored. "
f"The original number of changepoints was {potential_changepoint_n}.",
level=LoggingLevelEnum.INFO
)
potential_changepoint_n = potential_changepoint_n_max
if regularization_strength is not None and (regularization_strength < 0 or regularization_strength > 1):
raise ValueError("regularization_strength must be between 0.0 and 1.0.")
df = df.copy()
self.trend_potential_changepoint_n = potential_changepoint_n
self.time_col = time_col
self.value_col = value_col
self.original_df = df
self.shift_detector = shift_detector
# If a shift detector object is passed to the constructor, we're making Changepoint cognizant of level shifts
# and it will handle the level shifts as independent regressors.
if self.shift_detector is not None:
self.level_shift_cols, self.level_shift_df = self.shift_detector.detect(
df.copy(),
time_col=time_col,
value_col=value_col,
forecast_horizon=0
)
# Resamples df to get a coarser granularity to get rid of shorter seasonality.
# The try except below speeds up unnecessary datetime transformation.
if resample_freq is not None:
try:
df_resample = df.resample(resample_freq, on=time_col).mean().reset_index()
except TypeError:
df[time_col] = pd.to_datetime(df[time_col])
df_resample = df.resample(resample_freq, on=time_col).mean().reset_index()
else:
df[time_col] = pd.to_datetime(df[time_col])
df_resample = df.copy()
# The ``df.resample`` function creates NA when the original df has a missing observation
# or its value is NA.
# The estimation algorithm does not allow NA, so we drop those rows.
df_resample = df_resample.dropna()
self.original_df[time_col] = df[time_col]
# Prepares response df.
y = df_resample[value_col]
y.index = df_resample[time_col]
self.y = y
# Prepares trend feature df.
# Potential changepoints are placed uniformly among rows without a missing value, after resampling.
trend_df = build_trend_feature_df_with_changes(
df=df_resample,
time_col=time_col,
changepoints_dict={
"method": "uniform",
"n_changepoints": potential_changepoint_n
}
)
# Gets changepoint features only in range filtered by ``no_changepoint_proportion_from_begin`` and
# ``no_changepoint_proportion_from_end`` of time period.
n_changepoints_within_range_begin = int(potential_changepoint_n * no_changepoint_proportion_from_begin)
n_changepoints_within_range_end = int(potential_changepoint_n * (1 - no_changepoint_proportion_from_end))
if n_changepoints_within_range_begin < n_changepoints_within_range_end:
trend_df = trend_df.iloc[:, [0] + list(range(n_changepoints_within_range_begin + 1, n_changepoints_within_range_end + 1))]
else:
# Linear growth term only.
trend_df = trend_df.iloc[:, [0]]
# Builds yearly seasonality feature df
if yearly_seasonality_order is not None and yearly_seasonality_order > 0:
self.yearly_seasonality_order = yearly_seasonality_order
# Gets yearly seasonality changepoints, allowing varying yearly seasonality coefficients
# to capture yearly seasonality shape change.
yearly_seasonality_changepoint_dates = get_yearly_seasonality_changepoint_dates_from_freq(
df=df,
time_col=time_col,
yearly_seasonality_change_freq=yearly_seasonality_change_freq)
long_seasonality_df = build_seasonality_feature_df_with_changes(
df=df_resample,
time_col=time_col,
changepoints_dict=dict(
method="custom",
dates=yearly_seasonality_changepoint_dates),
fs_components_df=pd.DataFrame({
"name": [TimeFeaturesEnum.conti_year.value],
"period": [1.0],
"order": [yearly_seasonality_order],
"seas_names": ["yearly"]})
)
trend_df = pd.concat([trend_df, long_seasonality_df], axis=1)
# Augment the trend_df with the additional level shift regressors.
if self.shift_detector is not None and len(self.level_shift_cols) > 0:
# Rename each column to have level shift prefixed in name.
pad_char, pad_size = 0, 4 # Left pad the levelshift regressors with 0s for sorting.
new_col_names = {col_name: f"levelshift_{ndx:{pad_char}{pad_size}}_{col_name}" for ndx, col_name in enumerate(self.level_shift_cols)}
self.level_shift_df.rename(columns=new_col_names, inplace=True)
# Save regressors for concatenation to trend_df.
time_col_and_regressor_cols = [time_col] + sorted(new_col_names.values())
levelshift_regressors_df = self.level_shift_df[time_col_and_regressor_cols]
# Resample level shift df according to resample frequency
levelshift_regressors_df_copy = levelshift_regressors_df.copy()
if resample_freq is not None:
levelshift_regressors_df_resample = levelshift_regressors_df_copy.resample(resample_freq, on=time_col).mean().reset_index()
else:
levelshift_regressors_df_resample = levelshift_regressors_df_copy
# Set time column to be the index of the dataframe for level shift regressors.
levelshift_regressors_df_resample.set_index(time_col, inplace=True)
# Concatenate regressors column-wise.
trend_df = pd.concat([trend_df, levelshift_regressors_df_resample], axis=1)
trend_df.index = df_resample[time_col]
self.trend_df = trend_df
# Estimates trend.
if trend_estimator not in ["ridge", "lasso", "ols"]:
warnings.warn("trend_estimator not in ['ridge', 'lasso', 'ols'], "
"estimating using ridge")
trend_estimator = 'ridge'
if trend_estimator == 'ols' and yearly_seasonality_order > 0:
warnings.warn("trend_estimator = 'ols' with year_seasonality_order > 0 may create "
"over-fitting, trend_estimator has been set to 'ridge'.")
trend_estimator = 'ridge'
fit_algorithm_dict = {
"ridge": RidgeCV,
"lasso": LassoCV,
"ols": LinearRegression
}
trend_model = fit_algorithm_dict[trend_estimator]().fit(trend_df.values, y.values)
self.trend_model = trend_model
self.trend_coef, self.trend_intercept = trend_model.coef_.ravel(), trend_model.intercept_.ravel()
# Fetches change point dates for reference as datetime format.
changepoint_dates = get_evenly_spaced_changepoints_dates(
df=df_resample,
time_col=time_col,
n_changepoints=potential_changepoint_n
)
# Gets the changepoint dates filtered by ``no_changepoint_proportion_from_begin`` and ``no_changepoint_proportion_from_end``.
if n_changepoints_within_range_begin < n_changepoints_within_range_end:
changepoint_dates = changepoint_dates.iloc[[0] + list(range(n_changepoints_within_range_begin + 1, n_changepoints_within_range_end + 1))]
else:
# Linear growth term only.
changepoint_dates = changepoint_dates.iloc[[0]]
# Calculates the minimal allowed change point index distance.
min_changepoint_index_distance = compute_min_changepoint_index_distance(
df=df_resample,
time_col=time_col,
n_changepoints=potential_changepoint_n,
min_distance_between_changepoints=actual_changepoint_min_distance
)
# Uses adaptive lasso to select change points.
if adaptive_lasso_initial_estimator not in ['ridge', 'lasso', 'ols']:
warnings.warn("adaptive_lasso_initial_estimator not in ['ridge', 'lasso', 'ols'], "
"estimating with ridge")
adaptive_lasso_initial_estimator = "ridge"
if adaptive_lasso_initial_estimator == trend_estimator:
# When ``adaptive_lasso_initial_estimator`` is the same as ``trend_estimator``, the
# estimated trend coefficients will be used to calculate the weights. The
# ``get_trend_changes_from_adaptive_lasso`` function recognizes ``initial_coef`` as
# `numpy.array` and calculates the weights directly.
trend_changepoints, self.adaptive_lasso_coef = get_trend_changes_from_adaptive_lasso(
x=trend_df.values,
y=y.values,
changepoint_dates=changepoint_dates,
initial_coef=self.trend_coef,
min_index_distance=min_changepoint_index_distance,
regularization_strength=regularization_strength
)
else:
# When ``adaptive_lasso_initial_estimator`` is different from ``trend_estimator``, the
# ``adaptive_lasso_initial_estimator`` as a `str` will be passed. The
# ``get_trend_changes_from_adaptive_lasso`` function recognizes ``initial_coef`` as
# `str` and calculates the initial estimator with the corresponding estimator first
# then calculates the weights.
trend_changepoints, self.adaptive_lasso_coef = get_trend_changes_from_adaptive_lasso(
x=trend_df.values,
y=y.values,
changepoint_dates=changepoint_dates,
initial_coef=adaptive_lasso_initial_estimator,
min_index_distance=min_changepoint_index_distance,
regularization_strength=regularization_strength
)
# Checks if the beginning date is picked as a change point. If yes, drop it, because we
# always include the growth term in our model.
trend_changepoints = [cp for cp in trend_changepoints if cp > max(df_resample[time_col][0], df[time_col][0])]
self.trend_changepoints = trend_changepoints
# logging
log_message(f"The detected trend change points are\n{trend_changepoints}", LoggingLevelEnum.INFO)
# Creates changepoints_dict for silverkite to use.
changepoints_dict = {
"method": "custom",
"dates": trend_changepoints
}
# Computes trend estimates for seasonality use.
if fast_trend_estimation:
# Fast calculation of trend estimation.
# Do not fit trend again on the original df.
# This is much faster when the original df has small frequencies.
# Uses linear interpolation on the trend fitted with the resampled df.
trend_estimation = np.matmul(
trend_df.values[:, :(len(trend_changepoints) + 1)],
trend_model.coef_[:(len(trend_changepoints) + 1)]
) + trend_model.intercept_
trend_estimation = pd.DataFrame({
time_col: df_resample[time_col],
"trend": trend_estimation
})
trend_estimation = trend_estimation.merge(
df[[time_col]],
on=time_col,
how="right"
)
trend_estimation["trend"].interpolate(inplace=True)
trend_estimation.index = df[time_col]
trend_estimation = trend_estimation["trend"]
else:
trend_estimation = estimate_trend_with_detected_changepoints(
df=df,
time_col=time_col,
value_col=value_col,
changepoints=trend_changepoints
)
self.trend_estimation = trend_estimation
result = {
"trend_feature_df": trend_df,
"trend_changepoints": trend_changepoints,
"changepoints_dict": changepoints_dict,
"trend_estimation": trend_estimation
}
return result
    def find_seasonality_changepoints(
            self,
            df,
            time_col,
            value_col,
            seasonality_components_df=pd.DataFrame({
                "name": [
                    TimeFeaturesEnum.tod.value,
                    TimeFeaturesEnum.tow.value,
                    TimeFeaturesEnum.conti_year.value],
                "period": [24.0, 7.0, 1.0],
                "order": [3, 3, 5],
                "seas_names": ["daily", "weekly", "yearly"]}),
            resample_freq="H",
            regularization_strength=0.6,
            actual_changepoint_min_distance="30D",
            potential_changepoint_distance=None,
            potential_changepoint_n=50,
            no_changepoint_distance_from_end=None,
            no_changepoint_proportion_from_end=0.0,
            trend_changepoints=None):
        """Finds the seasonality change points (defined as the time points where seasonality
        magnitude changes, i.e., the time series becomes "fatter" or "thinner".)

        Subtracts the estimated trend from the original time series first,
        then uses regression-based regularization methods to select important seasonality
        change points. Regressors are built from truncated Fourier series.

        If you have run ``find_trend_changepoints`` before running ``find_seasonality_changepoints``
        with the same df, the estimated trend will be automatically used for removing trend in
        ``find_seasonality_changepoints``.
        Otherwise, ``find_trend_changepoints`` will be run automatically with the same parameters
        as you passed to ``find_seasonality_changepoints``. If you do not want to use the same
        parameters, run ``find_trend_changepoints`` with your desired parameter before calling
        ``find_seasonality_changepoints``.

        The algorithm does an aggregation with a user-defined frequency, default hourly.

        The regression features consists of ``potential_changepoint_n`` + 1 blocks of
        predictors. The first block consists of Fourier series according to
        ``seasonality_components_df``, and other blocks are a copy of the first block
        truncated at the corresponding potential change point.

        If ``potential_changepoint_distance`` is not given, ``potential_changepoint_n``
        potential change points are evenly distributed over the time period, else
        ``potential_changepoint_n`` is overridden by::

            total_time_length / ``potential_changepoint_distance``

        Users can specify either ``no_changepoint_proportion_from_end`` to specify what proportion
        from the end of data they do not want changepoints, or ``no_changepoint_distance_from_end``
        (overrides ``no_changepoint_proportion_from_end``) to specify how long from the end they
        do not want change points.

        Then all potential change points will be selected by adaptive lasso, with the initial
        estimator specified by ``adaptive_lasso_initial_estimator``. The regularization strength
        is specified by ``regularization_strength``, which lies between 0 and 1.

        A rule-based guard function is applied at the end to ensure change points are not
        too close, as specified by ``actual_changepoint_min_distance``.

        Parameters
        ----------
        df: `pandas.DataFrame`
            The data df
        time_col : `str`
            Time column name in ``df``
        value_col : `str`
            Value column name in ``df``
        seasonality_components_df : `pandas.DataFrame`
            The df to generate seasonality design matrix, which is compatible with
            ``seasonality_components_df`` in
            `~greykite.algo.changepoint.adalasso.changepoint_detector.ChangepointDetector.find_seasonality_changepoints`
        resample_freq : `DateOffset, Timedelta or str`, default "H".
            The frequency to aggregate data.
            Coarser aggregation leads to fitting longer term trends.
        regularization_strength : `float` in [0, 1] or `None`, default 0.6.
            The regularization for change points. Greater value implies fewer change points.
            0 indicates all change points, and 1 indicates no change point.
            If `None`, the tuning parameter will be selected by cross-validation.
            If a value is given, it will be used as the tuning parameter.
            Here "None" is not recommended, because seasonality change has different levels,
            and automatic selection by cross-validation may produce more change points than
            desired. Practically, 0.6 is a good choice for most cases. Tuning around
            0.6 is recommended.
        actual_changepoint_min_distance : `DateOffset`, `Timedelta` or `str`, default "30D"
            The minimal distance allowed between detected change points. If consecutive change points
            are within this minimal distance, the one with smaller absolute change coefficient will
            be dropped.
            Note: maximal unit is 'D', i.e., you may use units no more than 'D' such as
            '10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
            cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
        potential_changepoint_distance : `DateOffset`, `Timedelta`, `str` or None, default None
            The distance between potential change points.
            If provided, will override the parameter ``potential_changepoint_n``.
            Note: maximal unit is 'D', i.e., you may only use units no more than 'D' such as
            '10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
            cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
        potential_changepoint_n : `int`, default 50
            Number of change points to be evenly distributed, recommended 1 per month, based
            on the training data length.
        no_changepoint_distance_from_end : `DateOffset`, `Timedelta`, `str` or None, default None
            The length of time from the end of training data, within which no change point will be placed.
            If provided, will override the parameter ``no_changepoint_proportion_from_end``.
            Note: maximal unit is 'D', i.e., you may only use units no more than 'D' such as
            '10D', '5H', '100T', '200S'. The reason is that 'W', 'M' or higher has either
            cycles or indefinite number of days, thus is not parsable by pandas as timedelta.
        no_changepoint_proportion_from_end : `float` in [0, 1], default 0.0.
            ``potential_changepoint_n`` change points will be placed evenly over the whole training period,
            however, only change points that are not located within the last ``no_changepoint_proportion_from_end``
            proportion of training period will be used for change point detection.
        trend_changepoints : `list` or None
            A list of user specified trend change points, used to estimated the trend to be removed
            from the time series before detecting seasonality change points. If provided, the algorithm
            will not check existence of detected trend change points or run ``find_trend_changepoints``,
            but will use these change points directly for trend estimation.

        Returns
        -------
        result : `dict`
            result dictionary with keys:

                ``"seasonality_feature_df"`` : `pandas.DataFrame`
                    The augmented df for seasonality changepoint detection, in other words, the design matrix for
                    the regression model. Columns:

                        - "cos1_tod_daily": cosine daily seasonality regressor of first order at change point 0.
                        - "sin1_tod_daily": sine daily seasonality regressor of first order at change point 0.
                        - ...
                        - "cos1_conti_year_yearly": cosine yearly seasonality regressor of first order at
                          change point 0.
                        - "sin1_conti_year_yearly": sine yearly seasonality regressor of first order at
                          change point 0.
                        - ...
                        - "cos{daily_seasonality_order}_tod_daily_cp{potential_changepoint_n}" : cosine
                          daily seasonality regressor of {daily_seasonality_order}th order at change point
                          {potential_changepoint_n}.
                        - "sin{daily_seasonality_order}_tod_daily_cp{potential_changepoint_n}" : sine
                          daily seasonality regressor of {daily_seasonality_order}th order at change point
                          {potential_changepoint_n}.
                        - ...
                        - "cos{yearly_seasonality_order}_conti_year_yearly_cp{potential_changepoint_n}" : cosine
                          yearly seasonality regressor of {yearly_seasonality_order}th order at change point
                          {potential_changepoint_n}.
                        - "sin{yearly_seasonality_order}_conti_year_yearly_cp{potential_changepoint_n}" : sine
                          yearly seasonality regressor of {yearly_seasonality_order}th order at change point
                          {potential_changepoint_n}.

                ``"seasonality_changepoints"`` : `dict`[`list`[`datetime`]]
                    The dictionary of detected seasonality change points for each component.
                    Keys are component names, and values are list of change points.
                ``"seasonality_estimation"`` : `pandas.Series`
                    The estimated seasonality with detected seasonality change points.
                    The series has the same length as ``original_df``. Index is timestamp, and values
                    are the estimated seasonality at each timestamp.
                    The seasonality estimation is the estimate of the seasonality effect with trend estimated
                    by `~greykite.algo.changepoint.adalasso.changepoints_utils.estimate_trend_with_detected_changepoints`
                    removed.
                ``"seasonality_components_df"`` : `pandas.DataFrame`
                    The processed ``seasonality_components_df``. Daily component row is removed if
                    inferred frequency or aggregation frequency is at least one day.
        """
        # Checks parameter rationality.
        if potential_changepoint_n < 0:
            raise ValueError("potential_changepoint_n can not be negative. "
                             "A large number such as 50 is recommended")
        if df.dropna().shape[0] < 5:
            raise ValueError("Change point detector does not work for less than "
                             "5 observations. Please increase sample size.")
        if no_changepoint_proportion_from_end < 0 or no_changepoint_proportion_from_end > 1:
            raise ValueError("``no_changepoint_proportion_from_end`` needs to be between 0 and 1.")
        if no_changepoint_distance_from_end is not None:
            check_freq_unit_at_most_day(no_changepoint_distance_from_end, "no_changepoint_distance_from_end")
            # Converts the distance-from-end into a proportion of the total data span,
            # overriding ``no_changepoint_proportion_from_end``.
            data_length = pd.to_datetime(df[time_col].iloc[-1]) - pd.to_datetime(df[time_col].iloc[0])
            no_changepoint_proportion_from_end = to_offset(no_changepoint_distance_from_end).delta / data_length
        if potential_changepoint_distance is not None:
            check_freq_unit_at_most_day(potential_changepoint_distance, "potential_changepoint_distance")
            # Overrides ``potential_changepoint_n`` with the count implied by the requested spacing.
            data_length = pd.to_datetime(df[time_col].iloc[-1]) - pd.to_datetime(df[time_col].iloc[0])
            potential_changepoint_n = data_length // to_offset(potential_changepoint_distance).delta
        if regularization_strength is None:
            warnings.warn("regularization_strength is set to None. This will trigger cross-validation to "
                          "select the tuning parameter which might result in too many change points. "
                          "Keep the default value or tuning around it is recommended.")
        if regularization_strength is not None and (regularization_strength < 0 or regularization_strength > 1):
            raise ValueError("regularization_strength must be between 0.0 and 1.0.")
        # NOTE(review): converts the time column in place on the caller's DataFrame (no copy),
        # mirroring ``find_trend_changepoints`` — confirm callers tolerate this side effect.
        df[time_col] = pd.to_datetime(df[time_col])
        # If user provides a list of trend change points, these points will be used to estimate trend.
        if trend_changepoints is not None:
            trend_estimation = estimate_trend_with_detected_changepoints(
                df=df,
                time_col=time_col,
                value_col=value_col,
                changepoints=trend_changepoints
            )
            self.trend_changepoints = trend_changepoints
            self.trend_estimation = trend_estimation
            self.original_df = df
            self.time_col = time_col
            self.value_col = value_col
            self.y = df[value_col]
            self.y.index = df[time_col]
        # If user doesn't provide trend change points, the trend change points will be found automatically.
        else:
            # Checks if trend change point is available.
            # Runs trend change point detection with default value if not.
            compare_df = df.copy()
            # Renames columns so the content comparison below is done on matching column names.
            if time_col != self.time_col or value_col != self.value_col:
                compare_df.rename({time_col: self.time_col, value_col: self.value_col}, axis=1, inplace=True)
            if (self.original_df is not None
                    and self.original_df[[self.time_col, self.value_col]].equals(
                        compare_df[[self.time_col, self.value_col]])
                    and self.trend_estimation is not None):
                # If the passed df is the same as ``self.original_df``, then the previous
                # ``self.trend_estimation`` is to be subtracted from the time series.
                trend_estimation = self.trend_estimation
                warnings.warn("Trend changepoints are already identified, using past trend estimation. "
                              "If you would like to run trend change point detection again, "
                              "please call ``find_trend_changepoints`` with desired parameters "
                              "before calling ``find_seasonality_changepoints``.")
            else:
                # If the passed df is different from ``self.original_df``, then trend change point
                # detection algorithm is run first, and trend estimation is calculated afterward.
                # In this case, the parameters passed to ``find_seasonality_changepoints`` are also
                # passed to ``find_trend_changepoint``.
                # If you do not want the parameters passed, run ``find_trend_changepoints`` with
                # desired parameters before calling ``find_seasonality_changepoints``.
                trend_result = self.find_trend_changepoints(
                    df=df,
                    time_col=time_col,
                    value_col=value_col,
                    actual_changepoint_min_distance=actual_changepoint_min_distance,
                    no_changepoint_distance_from_end=no_changepoint_distance_from_end,
                    no_changepoint_proportion_from_end=no_changepoint_proportion_from_end
                )
                warnings.warn(f"Trend changepoints are not identified for the input dataframe, "
                              f"triggering trend change point detection with parameters"
                              f"actual_changepoint_min_distance={actual_changepoint_min_distance}\n"
                              f"no_changepoint_proportion_from_end={no_changepoint_proportion_from_end}\n"
                              f"no_changepoint_distance_from_end={no_changepoint_distance_from_end}\n"
                              f" Found trend change points\n{self.trend_changepoints}\n"
                              "If you would like to run trend change point detection with customized "
                              "parameters, please call ``find_trend_changepoints`` with desired parameters "
                              "before calling ``find_seasonality_changepoints``.")
                trend_estimation = trend_result["trend_estimation"]
        # Splits trend effects from time series.
        # Assumes ``trend_estimation`` is row-aligned with ``df`` (same length and order),
        # which holds for both branches above.
        df_without_trend = df.copy()
        df_without_trend[value_col] -= trend_estimation.values
        # Aggregates df.
        # ``resample`` introduces NA rows for gaps in the original data; they are dropped
        # because the estimation algorithm does not allow NA.
        df_resample = df_without_trend.resample(resample_freq, on=time_col).mean().reset_index()
        df_resample = df_resample.dropna()
        # Removes daily component from seasonality_components_df if data has minimum freq daily.
        # The minimum gap between consecutive resampled timestamps is used as the inferred frequency.
        freq_at_least_day = (min(np.diff(df_resample[time_col]).astype("timedelta64[s]")) >= timedelta(days=1))
        if (freq_at_least_day
                and "daily" in seasonality_components_df["seas_names"].tolist()):
            warnings.warn("Inferred minimum data frequency is at least 1 day, daily component is "
                          "removed from seasonality_components_df.")
            seasonality_components_df = seasonality_components_df.loc[
                seasonality_components_df["seas_names"] != "daily"]
        # Builds seasonality feature df.
        seasonality_df = build_seasonality_feature_df_with_changes(
            df=df_resample,
            time_col=time_col,
            fs_components_df=seasonality_components_df,
            changepoints_dict={
                "method": "uniform",
                "n_changepoints": potential_changepoint_n
            }
        )
        # Eliminates change points from the end
        # the generated seasonality_df has {``potential_changepoint_n`` + 1} blocks, where there
        # are {sum_i(order of component i) * 2} columns consisting of the cosine and sine functions
        # for each order for each component.
        # The selection below selects the first {``n_changepoints_within_range`` + 1} columns,
        # which corresponds to the regular block (first block) and the blocks that correspond
        # to the change points that are within range.
        n_changepoints_within_range = int(potential_changepoint_n * (1 - no_changepoint_proportion_from_end))
        orders = seasonality_components_df["order"].tolist()
        seasonality_df = seasonality_df.iloc[:, :(n_changepoints_within_range + 1) * sum(orders) * 2]
        self.seasonality_df = seasonality_df
        # Fetches change point dates for reference as datetime format.
        changepoint_dates = get_evenly_spaced_changepoints_dates(
            df=df,
            time_col=time_col,
            n_changepoints=potential_changepoint_n
        )
        # Gets the changepoint dates that are not within ``no_changepoint_proportion_from_end``.
        changepoint_dates = changepoint_dates.iloc[0: n_changepoints_within_range + 1]
        # Calculates the minimal allowed change point index distance.
        min_changepoint_index_distance = compute_min_changepoint_index_distance(
            df=df_resample,
            time_col=time_col,
            n_changepoints=potential_changepoint_n,
            min_distance_between_changepoints=actual_changepoint_min_distance
        )
        # Selects seasonality change points per component via adaptive lasso,
        # with a plain lasso fit providing the initial coefficients for the weights.
        seasonality_changepoints = get_seasonality_changes_from_adaptive_lasso(
            x=seasonality_df.values,
            y=df_resample[value_col].values,
            changepoint_dates=changepoint_dates,
            initial_coef="lasso",
            seasonality_components_df=seasonality_components_df,
            min_index_distance=min_changepoint_index_distance,
            regularization_strength=regularization_strength
        )
        # Checks if the beginning date is picked as a change point. If yes, drop it, because we
        # always include the overall seasonality term in our model.
        for key in seasonality_changepoints.keys():
            if df_resample[time_col][0] in seasonality_changepoints[key]:
                seasonality_changepoints[key] = seasonality_changepoints[key][1:]
        self.seasonality_changepoints = seasonality_changepoints
        # logging
        log_message(f"The detected seasonality changepoints are\n"
                    f"{pprint(seasonality_changepoints)}", LoggingLevelEnum.INFO)
        # Performs a seasonality estimation for plotting purposes.
        seasonality_estimation = estimate_seasonality_with_detected_changepoints(
            df=df_without_trend,
            time_col=time_col,
            value_col=value_col,
            seasonality_changepoints=seasonality_changepoints,
            seasonality_components_df=seasonality_components_df
        )
        self.seasonality_estimation = seasonality_estimation
        result = {
            "seasonality_feature_df": seasonality_df,
            "seasonality_changepoints": seasonality_changepoints,
            "seasonality_estimation": seasonality_estimation,
            "seasonality_components_df": seasonality_components_df
        }
        return result
def plot(
self,
observation=True,
observation_original=True,
trend_estimate=True,
trend_change=True,
yearly_seasonality_estimate=False,
adaptive_lasso_estimate=False,
seasonality_change=False,
seasonality_change_by_component=True,
seasonality_estimate=False,
plot=True):
"""Makes a plot to show the observations/estimations/change points.
In this function, component parameters specify if each component in the plot is
included or not. These are `bool` variables.
For those components that are set to True, their values will be replaced by the
corresponding data. Other components values will be set to None. Then these variables
will be fed into
`~greykite.algo.changepoint.adalasso.changepoints_utils.plot_change`
Parameters
----------
observation : `bool`
Whether to include observation
observation_original : `bool`
Set True to plot original observations, and False to plot aggregated observations.
No effect is ``observation`` is False
trend_estimate : `bool`
Set True to add trend estimation.
trend_change : `bool`
Set True to add change points.
yearly_seasonality_estimate : `bool`
Set True to add estimated yearly seasonality.
adaptive_lasso_estimate : `bool`
Set True to add adaptive lasso estimated trend.
seasonality_change : `bool`
Set True to add seasonality change points.
seasonality_change_by_component : `bool`
If true, seasonality changes will be plotted separately for different components,
else all will be in the same symbol.
No effect if ``seasonality_change`` is False
seasonality_estimate : `bool`
Set True to add estimated seasonality.
The seasonality if plotted around trend, so the actual seasonality shown is
trend estimation + seasonality estimation.
plot : `bool`, default True
Set to True to display the plot, and set to False to return the plotly figure object.
Returns
-------
None (if ``plot`` == True)
The function shows a plot.
fig : `plotly.graph_objects.Figure`
The plot object.
"""
# Adds observation
if observation:
if observation_original:
observation = self.original_df[self.value_col]
observation.index = self.original_df[self.time_col]
else:
observation = self.y
else:
observation = None
# Adds trend estimation
if trend_estimate:
trend_estimate = compute_fitted_components(
x=self.trend_df,
coef=self.trend_coef,
regex='^changepoint',
include_intercept=True,
intercept=self.trend_intercept
)
else:
trend_estimate = None
# Adds trend change points
if trend_change:
if self.trend_changepoints is None:
warnings.warn("You haven't run trend change point detection algorithm yet. "
"Please call find_trend_changepoints first.")
trend_change = self.trend_changepoints
else:
trend_change = None
# Adds yearly seasonality estimates
if yearly_seasonality_estimate:
yearly_seasonality_estimate = compute_fitted_components(
x=self.trend_df,
coef=self.trend_coef,
regex='^.*yearly.*$',
include_intercept=False
)
else:
yearly_seasonality_estimate = None
# Adds adaptive lasso trend estimates
if adaptive_lasso_estimate and self.adaptive_lasso_coef is not None:
adaptive_lasso_estimate = compute_fitted_components(
x=self.trend_df,
coef=self.adaptive_lasso_coef[1],
regex='^changepoint',
include_intercept=True,
intercept=self.adaptive_lasso_coef[0])
else:
adaptive_lasso_estimate = None
# Adds seasonality change points
if seasonality_change:
if self.seasonality_changepoints is None:
warnings.warn("You haven't run seasonality change point detection algorithm yet. "
"Please call find_seasonality_changepoints first.")
if seasonality_change_by_component:
seasonality_change = self.seasonality_changepoints
else:
seasonality_change = []
for key in self.seasonality_changepoints.keys():
seasonality_change += self.seasonality_changepoints[key]
else:
seasonality_change = None
# Adds seasonality estimates
if seasonality_estimate:
if self.seasonality_estimation is None:
warnings.warn("You haven't run seasonality change point detection algorithm yet. "
"Please call find_seasonality_changepoints first.")
seasonality_estimate = None
else:
seasonality_estimate = self.seasonality_estimation + self.trend_estimation
else:
seasonality_estimate = None
fig = plot_change(
observation=observation,
trend_estimate=trend_estimate,
trend_change=trend_change,
year_seasonality_estimate=yearly_seasonality_estimate,
adaptive_lasso_estimate=adaptive_lasso_estimate,
seasonality_change=seasonality_change,
seasonality_estimate=seasonality_estimate,
yaxis=self.value_col
)
if fig is not None and len(fig.data) > 0:
if plot:
fig.show()
else:
return fig
else:
warnings.warn("Figure is empty, at least one component has to be true.")
return None
class HolidayInferrer:
"""Implements methods to automatically infer holiday effects.
The class works for daily and sub-daily data.
Sub-daily data is aggregated into daily data.
It pulls holiday candidates from `pypi:holidays-ext`,
and adds a pre-specified number of days before/after the holiday candidates
as the whole holiday candidates pool.
Every day in the candidate pool is compared with a pre-defined baseline imputed from surrounding days
(e.g. the average of -7 and +7 days)
and a score is generated to indicate deviation.
The score is averaged if a holiday has multiple occurrences through the timeseries period.
The holidays are ranked according to the magnitudes of the scores.
Holidays are classified into:
- model independently
- model together
- do not model
according to their score magnitudes.
For example, if the sum of the absolute scores is 1000,
and the threshold for independent holidays is 0.8,
the method keeps adding holidays to the independent modeling list
from the largest magnitude until the sum reaches 1000 x 0.8 = 800.
Then it continues to count the together modeling list.
Attributes
----------
baseline_offsets : `list` [`int`] or None
The offsets in days to calculate baselines.
post_search_days : `int` or None
The number of days after each holiday to be counted as candidates.
pre_search_days : `int` or None
The number of days before each holiday to be counted as candidates.
independent_holiday_thres : `float` or None
A certain proportion of the total holiday effects that are allocated for holidays
that are modeled independently. For example, 0.8 means the holidays that contribute
to the first 80% of the holiday effects are modeled independently.
together_holiday_thres : `float` or None
A certain proportion of the total holiday effects that are allocated for holidays
that are modeled together. For example, if ``independent_holiday_thres`` is 0.8 and
``together_holiday_thres`` is 0.9, then after the first 80% of the holiday effects
are counted, the rest starts to be allocated for the holidays that are modeled together
until the cum sum exceeds 0.9.
extra_years : `int`, default 2
Extra years after ``self.year_end`` to pull holidays in ``self.country_holiday_df``.
This can be used to cover the forecast periods.
df : `pandas.DataFrame` or None
The timeseries after daily aggregation.
time_col : `str` or None
The column name for timestamps in ``df``.
value_col : `str` or None
The column name for values in ``df``.
year_start : `int` or None
The year of the first timeseries observation in ``df``.
year_end : `int` or None
The year of the last timeseries observation in ``df``.
ts : `set` [`datetime`] or None
The existing timestamps in ``df`` for fast look up.
country_holiday_df : `pandas.DataFrame` or None
The holidays between ``year_start`` and ``year_end``.
This is the output from `pypi:holidays-ext`.
Duplicates are dropped.
Observed holidays are merged.
all_holiday_dates : `list` [`datetime`] or None
All holiday dates contained in ``country_holiday_df``.
holidays : `list` [`str`] or None
A list of holidays in ``country_holiday_df``.
score_result : `dict` [`str`, `list` [`float`]] or None
The scores from comparing holidays and their baselines.
The keys are holidays.
The values are a list of the scores for each occurrence.
score_result_avg : `dict` [`str`, `float`] or None
The scores from ``score_result`` where the values are averaged.
result : `dict` [`str`, any]
The output of the model. Includes:
- "scores": `dict` [`str`, `list` [`float`]]
The ``score_result`` from ``self._get_scores_for_holidays``.
- "country_holiday_df": `pandas.DataFrame`
The ``country_holiday_df`` from ``pypi:holidays_ext``.
- "independent_holidays": `list` [`tuple` [`str`, `str`]]
The holidays to be modeled independently. Each item is in (country, holiday) format.
- "together_holidays_positive": `list` [`tuple` [`str`, `str`]]
The holidays with positive effects to be modeled together. Each item is in (country, holiday) format.
- "together_holidays_negative": `list` [`tuple` [`str`, `str`]]
The holidays with negative effects to be modeled together. Each item is in (country, holiday) format.
- "fig": `plotly.graph_objs.Figure`
The visualization if activated.
"""
    def __init__(self):
        # Model parameters; populated by ``infer_holidays``.
        self.baseline_offsets: Optional[List[int]] = None
        self.post_search_days: Optional[int] = None
        self.pre_search_days: Optional[int] = None
        self.independent_holiday_thres: Optional[float] = None
        self.together_holiday_thres: Optional[float] = None
        self.extra_years: Optional[int] = None
        self.use_relative_score: Optional[bool] = None
        # Data set info; populated by ``infer_holidays`` after daily aggregation.
        self.df: Optional[pd.DataFrame] = None
        self.time_col: Optional[str] = None
        self.value_col: Optional[str] = None
        self.year_start: Optional[int] = None
        self.year_end: Optional[int] = None
        self.ts: Optional[Set[datetime.date]] = None  # existing timestamps, for fast membership tests
        # Derived results; populated while inference runs.
        self.country_holiday_df: Optional[pd.DataFrame] = None
        self.all_holiday_dates: Optional[List[datetime.date]] = None
        self.holidays: Optional[List[str]] = None
        self.score_result: Optional[Dict[str, List[float]]] = None  # per-occurrence scores, keyed by holiday
        self.score_result_avg: Optional[Dict[str, float]] = None  # occurrence-averaged scores, keyed by holiday
        self.result: Optional[dict] = None  # final output of ``infer_holidays``
def infer_holidays(
self,
df: pd.DataFrame,
time_col: str = TIME_COL,
value_col: str = VALUE_COL,
countries: List[str] = ("US",),
pre_search_days: int = 2,
post_search_days: int = 2,
baseline_offsets: List[int] = (-7, 7),
plot: bool = False,
independent_holiday_thres: float = 0.8,
together_holiday_thres: float = 0.99,
extra_years: int = 2,
use_relative_score: bool = False) -> Optional[Dict[str, any]]:
"""Infers significant holidays and holiday configurations.
The class works for daily and sub-daily data.
Sub-daily data is aggregated into daily data.
It pulls holiday candidates from `pypi:holidays-ext`,
and adds a pre-specified number of days before/after the holiday candidates
as the whole holiday candidates pool.
Every day in the candidate pool is compared with a pre-defined baseline imputed from surrounding days
(e.g. the average of -7 and +7 days)
and a score is generated to indicate deviation.
The score is averaged if a holiday has multiple occurrences through the timeseries period.
The holidays are ranked according to the magnitudes of the scores.
Holidays are classified into:
- model independently
- model together
- do not model
according to their score magnitudes.
For example, if the sum of the absolute scores is 1000,
and the threshold for independent holidays is 0.8,
the method keeps adding holidays to the independent modeling list
from the largest magnitude until the sum reaches 1000 x 0.8 = 800.
Then it continues to count the together modeling list.
Parameters
----------
df : `pd.DataFrame`
The input timeseries.
time_col : `str`, default `TIME_COL`
The column name for timestamps in ``df``.
value_col : `str`, default `VALUE_COL`
The column name for values in ``df``.
countries : `list` [`str`], default ("UnitedStates",)
A list of countries to look up holiday candidates.
Available countries can be listed with
``holidays_ext.get_holidays.get_available_holiday_lookup_countries()``.
Two-character country names are preferred.
pre_search_days : `int`, default 2
The number of days to include as holidays candidates before each holiday.
post_search_days : `int`, default 2
The number of days to include as holidays candidates after each holiday.
baseline_offsets : `list` [`int`], default (-7, 7)
The offsets in days as a baseline to compare with each holiday.
plot : `bool`, default False
Whether to generate visualization.
independent_holiday_thres : `float`, default 0.8
A certain proportion of the total holiday effects that are allocated for holidays
that are modeled independently. For example, 0.8 means the holidays that contribute
to the first 80% of the holiday effects are modeled independently.
together_holiday_thres : `float`, default 0.99
A certain proportion of the total holiday effects that are allocated for holidays
that are modeled together. For example, if ``independent_holiday_thres`` is 0.8 and
``together_holiday_thres`` is 0.9, then after the first 80% of the holiday effects
are counted, the rest starts to be allocated for the holidays that are modeled together
until the cum sum exceeds 0.9.
extra_years : `int`, default 2
Extra years after ``self.year_end`` to pull holidays in ``self.country_holiday_df``.
This can be used to cover the forecast periods.
use_relative_score : `bool`, default False
Whether the holiday effect is calculated as a relative ratio.
If `False`, `~greykite.algo.common.holiday_inferrer.HolidayInferrer._get_score_for_dates`
will use absolute difference compared to the baseline as the score.
If `True`, it uses relative ratio compared to the baseline as the score.
Returns
-------
result : `dict` [`str`, any] or None
A dictionary with the following keys:
- "scores": `dict` [`str`, `list` [`float`]]
The ``score_result`` from ``self._get_scores_for_holidays``.
- "country_holiday_df": `pandas.DataFrame`
The ``country_holiday_df`` from ``pypi:holidays_ext``.
- "independent_holidays": `list` [`tuple` [`str`, `str`]]
The holidays to be modeled independently. Each item is in (country, holiday) format.
- "together_holidays_positive": `list` [`tuple` [`str`, `str`]]
The holidays with positive effects to be modeled together.
Each item is in (country, holiday) format.
- "together_holidays_negative": `list` [`tuple` [`str`, `str`]]
The holidays with negative effects to be modeled together.
Each item is in (country, holiday) format.
- "fig": `plotly.graph_objs.Figure`
The visualization if activated.
"""
# Sets model parameters.
self.baseline_offsets = baseline_offsets
if post_search_days < 0 or pre_search_days < 0:
raise ValueError("Both 'post_search_days' and 'pre_search_days' must be non-negative integers.")
self.post_search_days = post_search_days
self.pre_search_days = pre_search_days
if not 0 <= independent_holiday_thres <= together_holiday_thres <= 1:
raise ValueError("Both 'independent_holiday_thres' and 'together_holiday_thres' must be between "
"0 and 1 (inclusive).")
self.independent_holiday_thres = independent_holiday_thres
self.together_holiday_thres = together_holiday_thres
if extra_years < 1:
# At least 1 year for completeness.
raise ValueError("The parameter 'extra_years' must be a positive integer.")
self.extra_years = extra_years
self.use_relative_score = use_relative_score
# Pre-processes data.
df = df.copy()
df[time_col] = pd.to_datetime(df[time_col])
min_increment = min((df[time_col] - df[time_col].shift(1)).dropna())
# Holidays is not activated for frequencies greater than daily.
if min_increment > timedelta(days=1):
log_message(
message="Data frequency is greater than daily, "
"holiday inferring is skipped.",
level=LoggingLevelEnum.INFO
)
return None
# Holidays are daily events.
# If data frequency is sub-daily,
# we aggregate into daily.
if min_increment < timedelta(days=1):
df = df.resample("D", on=time_col).sum().reset_index(drop=False)
df[time_col] = df[time_col].dt.date
# From now on, data is in daily frequency.
# Sets data attributes.
self.year_start = df[time_col].min().year
self.year_end = df[time_col].max().year
self.ts = set(df[time_col])
self.df = df
self.time_col = time_col
self.value_col = value_col
# Gets holiday candidates.
self.country_holiday_df, self.holidays = self._get_candidate_holidays(countries=countries)
self.all_holiday_dates = self.country_holiday_df["ts"].tolist()
# Gets scores for holidays.
self.score_result = self._get_scores_for_holidays()
# Gets the average scores over multiple occurrences for each holiday.
self.score_result_avg = self._get_averaged_scores()
# Gets significant holidays.
self.result = self._infer_holidays()
# Makes plots if needed.
if plot:
self.result["fig"] = self._plot()
else:
self.result["fig"] = None
return self.result
def _infer_holidays(self) -> Dict[str, any]:
"""When the scores are computed,
calculates the contributions and classifies holidays into:
- model independently
- model together
- do not model
Returns
-------
result : `dict` [`str`, any]
A dictionary with the following keys:
- "scores": `dict` [`str`, `list` [`float`]]
The ``score_result`` from ``self._get_scores_for_holidays``.
- "country_holiday_df": `pandas.DataFrame`
The ``country_holiday_df`` from ``pypi:holidays_ext``.
- "independent_holidays": `list` [`tuple` [`str`, `str`]]
The holidays to be modeled independently. Each item is in (country, holiday) format.
- "together_holidays_positive": `list` [`tuple` [`str`, `str`]]
The holidays with positive effects to be modeled together.
Each item is in (country, holiday) format.
- "together_holidays_negative": `list` [`tuple` [`str`, `str`]]
The holidays with negative effects to be modeled together.
Each item is in (country, holiday) format.
"""
independent_holidays, together_holidays_positive, together_holidays_negative = self._get_significant_holidays()
return {
"scores": self.score_result,
"country_holiday_df": self.country_holiday_df,
INFERRED_INDEPENDENT_HOLIDAYS_KEY: independent_holidays,
INFERRED_GROUPED_POSITIVE_HOLIDAYS_KEY: together_holidays_positive,
INFERRED_GROUPED_NEGATIVE_HOLIDAYS_KEY: together_holidays_negative
}
    def _get_candidate_holidays(
            self,
            countries: List[str]) -> (pd.DataFrame, List[str]):
        """Gets the candidate holidays from a list of countries.

        Uses `pypi:holidays-ext`.
        Duplicates are dropped.
        Observed holidays are renamed to original holidays
        and corresponding original holidays in the same years are removed.

        Parameters
        ----------
        countries : `list` [`str`]
            A list of countries to look up candidate holidays.

        Returns
        -------
        result : `tuple`
            Includes:

                country_holiday_df : `pandas.DataFrame`
                    The holidays between ``year_start`` and ``year_end``.
                    This is the output from `pypi:holidays-ext`.
                    Duplicates are dropped.
                    Observed holidays are merged.
                holidays : `list` [`str`]
                    A list of holidays in ``country_holiday_df``.
                    The holidays are in the format of "{country_name}_{holiday_name}".
        """
        # NOTE: ``range`` excludes its stop value, so years covered are
        # ``year_start`` through ``year_end + extra_years - 1``; ``extra_years >= 1``
        # (enforced in ``infer_holidays``) guarantees ``year_end`` itself is included.
        country_holiday_df = get_holiday_df(
            country_list=countries,
            years=list(range(self.year_start, self.year_end + self.extra_years))
        )
        # Drops duplications (same date listed more than once; first listing wins).
        country_holiday_df.drop_duplicates(keep="first", subset=["ts"], inplace=True)
        # Handles observed holidays.
        # If observed holiday and original holiday are both listed in the same year,
        # the observed holiday will be renamed to the original holiday
        # and the original holiday in the same year will be removed.
        # Sub-df that contains observed holidays only.
        # The filter matches the 10-character suffix "(Observed)"; the slices below
        # strip 11 characters to also remove the preceding space in " (Observed)".
        observed_df = country_holiday_df[country_holiday_df["holiday"].str[-10:] == "(Observed)"]
        # Row indices to rename.
        rows_to_rename = observed_df.index.tolist()
        # Date-holiday tuple to remove.
        # ":-11" truncates the " (Observed)" suffix.
        # This is used to identify rows to remove.
        date_holiday_to_remove = [(row[1]["ts"], row[1]["holiday"][:-11]) for row in observed_df.iterrows()]
        # Row indices to remove.
        # For each (date, holiday) tuple, look up the match in ``country_holiday_df`` and record the row indices.
        # The match happens when the holiday name matches and the time diff is at most 3 days.
        rows_to_remove = [idx for date, holiday in date_holiday_to_remove
                          for idx in country_holiday_df[
                              (abs((pd.DatetimeIndex(country_holiday_df["ts"]) - date).days) <= 3) &
                              (country_holiday_df["holiday"] == holiday)].index.tolist()]
        # Renames (strips the " (Observed)" suffix in-place) and removes the
        # now-redundant original-holiday rows.
        country_holiday_df.loc[rows_to_rename, "holiday"] = country_holiday_df.loc[
            rows_to_rename, "holiday"].str[:-11]
        country_holiday_df.loc[rows_to_rename, "country_holiday"] = country_holiday_df.loc[
            rows_to_rename, "country_holiday"].str[:-11]
        country_holiday_df.drop(
            rows_to_remove,
            axis=0,
            inplace=True
        )
        country_holiday_df.reset_index(drop=True, inplace=True)
        holidays = country_holiday_df["country_holiday"].unique().tolist()
        return country_holiday_df, holidays
def _transform_country_holidays(
country_holidays: List[Union[str, Tuple[str, str]]]) -> List[Union[Tuple[str, str], str]]:
"""Decouples a list of {country}_{holiday} names into a list of (country, holiday) tuple
or the other way around, depending on the input type.
Parameters
----------
country_holidays : `list` [`str` or `tuple` [`str`, `str`]]
One of:
- A list of country-holiday strings of the format {country}_{holiday}.
The country part is not expected to have "_".
- A list of (country, holiday) tuples.
Returns
-------
country_holiday_list : `list` [`tuple` [`str`, `str`] or `str`]
A list of (country, holiday) tuples or a list of {country}_{holiday} strings,
depending on the input type.
"""
country_holiday_list = []
for country_holiday in country_holidays:
if isinstance(country_holiday, str):
split = country_holiday.split("_")
country = split[0]
holiday = "_".join(split[1:])
country_holiday_list.append((country, holiday))
elif isinstance(country_holiday, tuple) and len(country_holiday) == 2:
country_holiday_item = f"{country_holiday[0]}_{country_holiday[1]}"
country_holiday_list.append(country_holiday_item)
else:
raise ValueError("Every item in ``country_holidays`` must be a string or a length-2 tuple.")
return country_holiday_list
    def _get_score_for_dates(
            self,
            event_dates: List[pd.Timestamp]) -> List[float]:
        """Gets the score for each day in ``event_dates``.

        The score is defined as the observation on the day minus the baseline,
        which is the average of the ``self.baseline_offsets`` offset observations.
        When ``self.use_relative_score`` is True, the difference is further divided
        by the baseline.

        Parameters
        ----------
        event_dates : `list` [`pandas.Timestamp`]
            The timestamps for a single event.
            Callers are expected to pre-filter these to dates present in ``self.df``
            (``_get_scores_for_holidays`` filters against ``self.ts``); otherwise the
            per-date lookup below would raise an ``IndexError``.

        Returns
        -------
        scores : `list` [`float`]
            The scores for a list of occurrences of an event, in the same order
            as ``event_dates``.
        """
        scores = []
        for date in event_dates:
            log_message(message=f"Current holiday date: {date}.\n", level=LoggingLevelEnum.DEBUG)
            # Calculates the dates for baseline.
            baseline_dates = []
            for offset in self.baseline_offsets:
                new_date = date + timedelta(days=offset)
                counter = 1
                # If a baseline date falls on another holiday, it keeps moving by the
                # same offset (e.g. -7 -> -14 -> -21); at most 3 shifts are attempted,
                # after which the date is used even if it is still a holiday.
                while new_date in self.all_holiday_dates and counter <= 3:
                    log_message(
                        message=f"Skipping {new_date}, new date is {new_date + timedelta(days=offset)}.\n",
                        level=LoggingLevelEnum.DEBUG
                    )
                    counter += 1
                    new_date += timedelta(days=offset)
                baseline_dates.append(new_date)
            log_message(message=f"Baseline dates are: {baseline_dates}.\n", level=LoggingLevelEnum.DEBUG)
            # Calculates the average of the baseline observations.
            # Baseline dates missing from ``self.df`` are simply excluded by ``isin``;
            # if none are present the mean is NaN and the score becomes NaN.
            baseline = self.df[self.df[self.time_col].isin(baseline_dates)][self.value_col].mean()
            # Calculates the score for the current occurrence.
            score = self.df[self.df[self.time_col] == date][self.value_col].values[0] - baseline
            if self.use_relative_score:
                # NOTE(review): a zero baseline yields inf/NaN here — confirm upstream
                # data cannot produce an all-zero baseline window.
                score /= baseline
            scores.append(score)
        return scores
def _get_scores_for_holidays(self) -> Dict[str, List[float]]:
"""Calculates the scores for a list of events, each with multiple occurrences.
Returns
-------
result : `dict` [`str`, `list` [`float`]]
A dictionary with keys being the holiday names and values
being the scores for all occurrences of the holiday.
"""
result = {}
for holiday in self.holidays:
# Gets all occurrences of the holiday
holiday_dates = self.country_holiday_df[
self.country_holiday_df["country_holiday"] == holiday]["ts"].tolist()
# Iterates over pre/post days to get the scores
for i in range(-self.pre_search_days, self.post_search_days + 1):
event_dates = [(date + timedelta(days=1) * i).date() for date in holiday_dates]
event_dates = [date for date in event_dates if date in self.ts]
score = self._get_score_for_dates(
event_dates=event_dates,
)
result[f"{holiday}_{'{0:+}'.format(i)}"] = score # format is with +/- signs
return result
def _get_averaged_scores(self) -> Dict[str, float]:
"""Calculates the average score for each event date.
Returns
-------
result : `dict` [`str`, `float`]
A dictionary with keys being the holiday names and values
being the average scores.
"""
result = {}
for holiday, score in self.score_result.items():
result[holiday] = np.nanmean(score)
return result
    def _get_significant_holidays(self) -> (List[str], List[str], List[str]):
        """Classifies holidays into model independently, model together
        and do not model according to their scores.

        The thresholds are proportions of the total absolute effect: holidays are
        consumed in decreasing magnitude order, first filling the independent list,
        then the together lists, and anything remaining is not modeled.

        Returns
        -------
        result : `tuple`
            A result tuple including:

                - "independent_holidays": `list` [`tuple` [`str`, `str`]]
                    The holidays to be modeled independently.
                    Each item is in (country, holiday) format.
                - "together_holidays_positive": `list` [`tuple` [`str`, `str`]]
                    The holidays with positive effects to be modeled together.
                    Each item is in (country, holiday) format.
                - "together_holidays_negative": `list` [`tuple` [`str`, `str`]]
                    The holidays with negative effects to be modeled together.
                    Each item is in (country, holiday) format.
        """
        # Calculates the total holiday deviations.
        total_changes = np.nansum(np.abs(list(self.score_result_avg.values())))
        # Converts the proportional thresholds into absolute cumulative-score targets.
        independent_holiday_thres = self.independent_holiday_thres * total_changes
        together_holiday_thres = self.together_holiday_thres * total_changes
        # Sorts the holidays by their magnitudes.
        ranked_effects = sorted(self.score_result_avg.items(), key=lambda x: abs(x[1]), reverse=True)
        # Iterates over the sorted holidays until it reaches the thresholds.
        cum_effect = 0  # cumulative holiday deviations so far
        idx = 0  # index for the current holiday
        independent_holidays = []  # stores holidays to be modeled independently
        together_holidays_positive = []  # stores holidays with positive effects to be modeled together
        together_holidays_negative = []  # stores holidays with negative effects to be modeled together
        # Starts adding independent holidays until threshold.
        # The condition is checked before adding, so the holiday whose effect crosses
        # the threshold is still included. Non-finite scores are skipped but still
        # advance the index.
        while cum_effect < independent_holiday_thres and idx < len(ranked_effects):
            if np.isfinite(ranked_effects[idx][1]):
                independent_holidays.append(ranked_effects[idx][0])
                cum_effect += abs(ranked_effects[idx][1])
            idx += 1
        # Starts adding together holidays until threshold.
        # Zero-effect holidays fall into neither the positive nor the negative group.
        while cum_effect < together_holiday_thres and idx < len(ranked_effects):
            if np.isfinite(ranked_effects[idx][1]):
                if ranked_effects[idx][1] > 0:
                    together_holidays_positive.append(ranked_effects[idx][0])
                elif ranked_effects[idx][1] < 0:
                    together_holidays_negative.append(ranked_effects[idx][0])
                cum_effect += abs(ranked_effects[idx][1])
            idx += 1
        # Converts "{country}_{holiday}" strings into (country, holiday) tuples.
        return (self._transform_country_holidays(independent_holidays),
                self._transform_country_holidays(together_holidays_positive),
                self._transform_country_holidays(together_holidays_negative))
    def _plot(self) -> go.Figure:
        """Makes a plot that includes the following two subplots:

            - Bar chart for holiday effects grouped by holidays ordered by their holiday effects.
            - Bar chart for holiday effects and their classifications
              ranked by their effects.

        Returns
        -------
        fig : `plotly.graph_objs`
            The figure object.
        """
        # Makes the plot.
        fig = make_subplots(
            rows=2,
            cols=1,
            subplot_titles=[
                "Inferred holiday effects grouped by holiday",
                "Inferred holiday effects grouped by effects"
            ],
            vertical_spacing=0.4
        )
        # Adds the subplot: holiday effects grouped by holidays.
        # Gets all holidays and their scores.
        holidays = []
        scores = []
        for holiday, score in self.score_result_avg.items():
            holidays.append(holiday)
            scores.append(score)
        # Removes the pre/post numbers of days from the end of the holiday names.
        # This is used to make the plot grouped by holidays.
        holidays_without_plus_minus = list(set(["_".join(holiday.split("_")[:-1]) for holiday in holidays]))
        # Sorts holidays according to their effects
        # (the effect of the exact holiday day, suffix "_+0").
        holidays_without_plus_minus = sorted(
            holidays_without_plus_minus,
            key=lambda x: abs(self.score_result_avg[f"{x}_+0"]),
            reverse=True)
        # Iterates over each holiday + i day to plot the bars,
        # one trace (legend entry) per day offset.
        for i in range(-self.pre_search_days, self.post_search_days + 1):
            if i == 0:
                name = "holiday"
            elif abs(i) == 1:
                name = f"holiday {'{0:+}'.format(i)} day"
            else:
                name = f"holiday {'{0:+}'.format(i)} days"
            # Gets the list of holiday names with the current +/- day.
            holidays_with_plus_minus = [key + f"_{'{0:+}'.format(i)}" for key in holidays_without_plus_minus]
            # Gets the corresponding scores for the current +/- day.
            current_values = [scores[idx] for idx in [
                holidays.index(holiday) for holiday in holidays_with_plus_minus]]
            # Adds to the plot.
            fig.add_trace(
                go.Bar(
                    # Truncates the text for better view.
                    x=[holiday[:30] for holiday in holidays_without_plus_minus],
                    y=current_values,
                    name=name,
                    legendgroup=1
                ),
                row=1,
                col=1
            )
        # Adds the subplot: holiday effects grouped by effects.
        # Sorts holidays by their effect magnitude.
        ranked_holidays, ranked_scores = list(zip(
            *sorted(self.score_result_avg.items(), key=lambda x: abs(x[1]), reverse=True)))
        # Adds to the plot.
        fig.add_trace(
            go.Bar(
                # Truncates the text for better view (keeps the +/- day suffix).
                x=["_".join(holiday.split("_")[:-1])[:30] + holiday.split("_")[-1] for holiday in ranked_holidays],
                y=ranked_scores,
                legendgroup=2,
                name="holidays"
            ),
            row=2,
            col=1
        )
        # Adds vertical regions to indicate the classification of the holidays.
        # Category-axis bars are centered on integer positions, so -0.5 is the
        # left edge of the first bar.
        start = -0.5  # start of bar chart x axis
        independent_holidays_end = start + len(self.result[INFERRED_INDEPENDENT_HOLIDAYS_KEY])
        together_holiday_end = (independent_holidays_end + len(self.result[INFERRED_GROUPED_POSITIVE_HOLIDAYS_KEY])
                                + len(self.result[INFERRED_GROUPED_NEGATIVE_HOLIDAYS_KEY]))
        end = start + len(holidays)
        # Green region: holidays modeled independently.
        fig.add_vrect(
            x0=start,
            x1=independent_holidays_end,
            annotation_text="model independently",
            annotation_position="top left",
            opacity=0.15,
            fillcolor="green",
            line_width=0,
            row=2,
            col=1
        )
        # Purple region: holidays modeled together.
        fig.add_vrect(
            x0=independent_holidays_end,
            x1=together_holiday_end,
            annotation_text="model together",
            annotation_position="top left",
            opacity=0.15,
            fillcolor="purple",
            line_width=0,
            row=2,
            col=1
        )
        # Yellow region: holidays not modeled.
        fig.add_vrect(
            x0=together_holiday_end,
            x1=end,
            annotation_text="do not model",
            annotation_position="top left",
            opacity=0.15,
            fillcolor="yellow",
            line_width=0,
            row=2,
            col=1
        )
        # Thin black separators at the region boundaries.
        fig.add_vline(
            x=independent_holidays_end,
            line=dict(color="black"),
            line_width=1,
            row=2,
            col=1
        )
        fig.add_vline(
            x=together_holiday_end,
            line=dict(color="black"),
            line_width=1,
            row=2,
            col=1
        )
        # Adjusts layouts.
        # NOTE(review): ``legend_tracegroupgap`` appears tuned for the fixed
        # 1000px height — confirm if the height changes.
        fig.update_layout(
            height=1000,
            title="Inferred holiday effects",
            legend_tracegroupgap=360,
        )
        fig.update_xaxes(
            tickangle=90,
            title="Holidays",
            row=1,
            col=1
        )
        fig.update_yaxes(
            title="Effect",
            row=1,
            col=1
        )
        fig.update_xaxes(
            tickangle=90,
            title="Holidays",
            row=2,
            col=1
        )
        fig.update_yaxes(
            title="Effect",
            row=2,
            col=1
        )
        return fig
def _get_event_df_for_single_event(
self,
holiday: Tuple[str, str],
country_holiday_df: pd.DataFrame) -> pd.DataFrame:
"""Gets the event df for a single holiday.
An event df has the format:
pd.DataFrame({
"date": ["2020-09-01", "2021-09-01"],
"event_name": "is_event"
})
Parameters
----------
holiday : `tuple` [`str`, `str`]
A tuple of length 2.
The first element is the country name.
The second element has the format of f"{holiday}_{x}",
where "x" is a signed integer acting as a neighboring operator.
For example, ("US", "Christmas Day_+1") means the day after
every US's Christmas Day.
This is consistent with the output from ``self.infer_holidays``.
country_holiday_df : `pandas.DataFrame`
The dataframe that contains the country/holiday/dates information
for holidays. Must cover the periods need in training/forecasting
for all holidays.
This has the same format as ``self.country_holiday_df``.
Returns
-------
event_df : `pandas.DataFrame`
The event df for a single holiday in the format of
pd.DataFrame({
"date": ["2020-12-24", "2021-12-24"],
"event_name": "US_Christmas Day_minus_1"
})
"""
# Splits holiday into country name, holiday name and neighboring offset days.
country = holiday[0]
holiday_split = holiday[1].split("_")
holiday_name = "_".join(holiday_split[:-1])
neighboring_offset = int(holiday_split[-1])
# Gets holiday dates from ``country_holiday_df``.
holiday_dates = country_holiday_df[
(country_holiday_df["country"] == country) &
(country_holiday_df["holiday"] == holiday_name)]["ts"].tolist()
holiday_dates = [date + timedelta(days=neighboring_offset) for date in holiday_dates]
# Constructs the event df.
# The holiday name matches the column names
# constructed from `SimpleSilverkiteForecast`'s holiday generating functions.
if neighboring_offset < 0:
holiday_name_adj = f"{country}_{holiday_name}_minus_{abs(neighboring_offset)}"
elif neighboring_offset == 0:
holiday_name_adj = f"{country}_{holiday_name}"
else:
holiday_name_adj = f"{country}_{holiday_name}_plus_{neighboring_offset}"
holiday_name_adj = holiday_name_adj.replace("'", "") # Single quote conflicts patsy formula.
event_df = pd.DataFrame({
EVENT_DF_DATE_COL: holiday_dates,
EVENT_DF_LABEL_COL: holiday_name_adj
})
return event_df
def generate_daily_event_dict(
self,
country_holiday_df: Optional[pd.DataFrame] = None,
holiday_result: Optional[Dict[str, List[Tuple[str, str]]]] = None) -> Dict[str, pd.DataFrame]:
"""Generates daily event dict for all holidays inferred.
The daily event dict will contain:
- Single events for every holiday or holiday neighboring day
that is to be modeled independently.
- A single event for all holiday or holiday neighboring days
with positive effects that are modeled together.
- A single event for all holiday or holiday neighboring days
with negative effects that are modeled together.
Parameters
----------
country_holiday_df : `pandas.DataFrame` or None, default None
The dataframe that contains the country/holiday/dates information
for holidays. Must cover the periods need in training/forecasting
for all holidays.
This has the same format as ``self.country_holiday_df``.
If None, it pulls from ``self.country_holiday_df``.
holiday_result : `dict` [`str`, `list` [`tuple` [`str`, `str`]]] or None, default None
A dictionary with the following keys:
- INFERRED_INDEPENDENT_HOLIDAYS_KEY
- INFERRED_GROUPED_POSITIVE_HOLIDAYS_KEY
- INFERRED_GROUPED_NEGATIVE_HOLIDAYS_KEY
Each key's value is a list of length-2 tuples of the format (country, holiday).
This format is the output of ``self.infer_holidays``.
If None, it pulls from ``self.result``.
Returns
-------
daily_event_dict : `dict`
The daily event dict that is consumable by
`~greykite.algo.forecast.silverkite.forecast_simple_silverkite.SimpleSilverkiteForecast` or
`~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast`.
The keys are the event names.
The values are dataframes with the event dates.
"""
daily_event_dict = {}
# Gets default parameters.
if country_holiday_df is None:
country_holiday_df = self.country_holiday_df
if holiday_result is None:
holiday_result = self.result
if country_holiday_df is None or holiday_result is None:
raise ValueError("Both 'country_holiday_df' and 'holidays' must be given. "
"Alternatively, you can run 'infer_holidays' first and "
"they will be pulled automatically.")
# Gets independent holidays.
independent_holidays = holiday_result.get(INFERRED_INDEPENDENT_HOLIDAYS_KEY, [])
for holiday in independent_holidays:
event_df = self._get_event_df_for_single_event(
holiday=holiday,
country_holiday_df=country_holiday_df
)
if event_df.shape[0] > 0:
event_name = event_df[EVENT_DF_LABEL_COL].iloc[0]
daily_event_dict[event_name] = event_df
# Gets positive together holidays.
together_holidays_positive = holiday_result.get(INFERRED_GROUPED_POSITIVE_HOLIDAYS_KEY, [])
event_df = pd.DataFrame()
for holiday in together_holidays_positive:
event_df_temp = self._get_event_df_for_single_event(
holiday=holiday,
country_holiday_df=country_holiday_df
)
event_df = pd.concat([event_df, event_df_temp], axis=0)
if event_df.shape[0] > 0:
event_df[EVENT_DF_LABEL_COL] = EVENT_INDICATOR
daily_event_dict[HOLIDAY_POSITIVE_GROUP_NAME] = event_df.drop_duplicates(subset=[EVENT_DF_DATE_COL]).reset_index(drop=True)
# Gets negative together holidays.
together_holidays_negative = holiday_result.get(INFERRED_GROUPED_NEGATIVE_HOLIDAYS_KEY, [])
event_df = pd.DataFrame()
for holiday in together_holidays_negative:
event_df_temp = self._get_event_df_for_single_event(
holiday=holiday,
country_holiday_df=country_holiday_df
)
event_df = pd.concat([event_df, event_df_temp], axis=0)
if event_df.shape[0] > 0:
event_df[EVENT_DF_LABEL_COL] = EVENT_INDICATOR
daily_event_dict[HOLIDAY_NEGATIVE_GROUP_NAME] = event_df.drop_duplicates(subset=[EVENT_DF_DATE_COL]).reset_index(drop=True)
return daily_event_dict
# Default column names for the timestamp and value columns of an input timeseries.
TIME_COL = "ts"
VALUE_COL = "y"
class SeasonalityEnum(Enum):
    """Valid types of seasonality available to use."""
    # Each member's value mirrors its name.
    DAILY_SEASONALITY = "DAILY_SEASONALITY"
    WEEKLY_SEASONALITY = "WEEKLY_SEASONALITY"
    MONTHLY_SEASONALITY = "MONTHLY_SEASONALITY"
    QUARTERLY_SEASONALITY = "QUARTERLY_SEASONALITY"
    YEARLY_SEASONALITY = "YEARLY_SEASONALITY"
def min_gap_in_seconds(df, time_col):
    """Returns the smallest gap between observations in ``df[time_col]``, in seconds.

    Assumes df[time_col] is sorted in ascending order without duplicates.

    :param df: pd.DataFrame
        input timeseries
    :param time_col: str
        time column name in `df`
    :return: float
        minimum gap between observations, in seconds
    :raises ValueError: if ``df`` has fewer than two rows
    """
    if df.shape[0] < 2:
        raise ValueError(f"Must provide at least two data points. Found {df.shape[0]}.")
    timestamps = pd.to_datetime(df[time_col])
    period = (timestamps - timestamps.shift()).min()
    # ``total_seconds`` keeps sub-second resolution and returns the float the
    # docstring promises; the previous ``days*24*3600 + seconds`` arithmetic
    # silently truncated microseconds.
    return period.total_seconds()
def get_simple_time_frequency_from_period(period):
    """Returns the SimpleTimeFrequencyEnum matching the input data period.

    :param period: float
        Period of each observation (i.e. average time between observations, in seconds)
    :return: SimpleTimeFrequencyEnum
        SimpleTimeFrequencyEnum is used to define default values for horizon,
        seasonality, etc. (but original data frequency is not modified)
    """
    # Each (frequency, buffer) pair accepts periods up to
    # ``seconds_per_observation * buffer``. The buffers absorb abnormalities
    # such as daylight savings, 31-day months and leap years.
    buffered_frequencies = (
        (SimpleTimeFrequencyEnum.MINUTE, 10.05),  # <= 10 minutes counts as minute-level
        (SimpleTimeFrequencyEnum.HOUR, 6.05),     # <= 6 hours counts as hourly
        (SimpleTimeFrequencyEnum.DAY, 2.05),      # <= 2 days counts as daily
        (SimpleTimeFrequencyEnum.WEEK, 2.05),     # <= 2 weeks counts as weekly
        (SimpleTimeFrequencyEnum.MONTH, 2.05),    # <= 2 months counts as monthly
        (SimpleTimeFrequencyEnum.YEAR, 1.01),     # <= 1 year counts as yearly
    )
    # First matching bucket wins; anything slower is multi-year.
    return next(
        (freq for freq, buffer in buffered_frequencies
         if period <= freq.value.seconds_per_observation * buffer),
        SimpleTimeFrequencyEnum.MULTIYEAR)
class UnivariateTimeSeries:
"""Defines univariate time series input. The dataset can include regressors,
but only one metric is designated as the target metric to forecast.
Loads time series into a standard format. Provides statistics, plotting
functions, and ability to generate future dataframe for prediction.
Attributes
----------
df: `pandas.DataFrame`
Data frame containing timestamp and value, with standardized column names for internal use
(TIME_COL, VALUE_COL). Rows are sorted by time index, and missing gaps between dates are filled
in so that dates are spaced at regular intervals. Values are adjusted for anomalies
according to ``anomaly_info``.
The index can be timezone aware (but TIME_COL is not).
y: `pandas.Series`, dtype float64
Value of time series to forecast.
time_stats: `dict`
Summary statistics about the timestamp column.
value_stats: `dict`
Summary statistics about the value column.
original_time_col: `str`
Name of time column in original input data.
original_value_col: `str`
Name of value column in original input data.
regressor_cols: `list` [`str`]
A list of regressor columns in the training and prediction DataFrames.
lagged_regressor_cols: `list` [`str`]
A list of additional columns needed for lagged regressors in the training and prediction DataFrames.
last_date_for_val: `datetime.datetime` or None, default None
Date or timestamp corresponding to last non-null value in ``df[original_value_col]``.
last_date_for_reg: `datetime.datetime` or None, default None
Date or timestamp corresponding to last non-null value in ``df[regressor_cols]``.
If ``regressor_cols`` is None, ``last_date_for_reg`` is None.
last_date_for_lag_reg: `datetime.datetime` or None, default None
Date or timestamp corresponding to last non-null value in ``df[lagged_regressor_cols]``.
If ``lagged_regressor_cols`` is None, ``last_date_for_lag_reg`` is None.
train_end_date: `datetime.datetime`
Last date or timestamp in ``fit_df``. It is always less than or equal to
minimum non-null values of ``last_date_for_val`` and ``last_date_for_reg``.
fit_cols: `list` [`str`]
A list of columns used in the training and prediction DataFrames.
fit_df: `pandas.DataFrame`
Data frame containing timestamp and value, with standardized column names for internal use.
Will be used for fitting (train, cv, backtest).
fit_y: `pandas.Series`, dtype float64
Value of time series for fit_df.
freq: `str`
timeseries frequency, DateOffset alias, e.g. {'T' (minute), 'H', D', 'W', 'M' (month end), 'MS' (month start),
        'Y' (year end), 'YS' (year start)}
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
anomaly_info : `dict` or `list` [`dict`] or None, default None
Anomaly adjustment info. Anomalies in ``df``
are corrected before any forecasting is done.
See ``self.load_data()``
df_before_adjustment : `pandas.DataFrame` or None, default None
``self.df`` before adjustment by ``anomaly_info``.
Used by ``self.plot()`` to show the adjustment.
"""
def __init__(self) -> None:
self.df: Optional[pd.DataFrame] = None
self.y: Optional[pd.Series] = None
self.time_stats: Optional[Dict] = None
self.value_stats: Optional[Dict] = None
self.original_time_col: Optional[str] = None
self.original_value_col: Optional[str] = None
self.regressor_cols: List[str] = []
self.lagged_regressor_cols: List[str] = []
self.last_date_for_val: Optional[datetime] = None
self.last_date_for_reg: Optional[datetime] = None
self.last_date_for_lag_reg: Optional[datetime] = None
self.train_end_date: Optional[str, datetime] = None
self.fit_cols: List[str] = []
self.fit_df: Optional[pd.DataFrame] = None
self.fit_y: Optional[pd.DataFrame] = None
self.freq: Optional[str] = None
self.anomaly_info: Optional[Union[Dict, List[Dict]]] = None
self.df_before_adjustment: Optional[pd.DataFrame] = None
def load_data(
self,
df: pd.DataFrame,
time_col: str = TIME_COL,
value_col: str = VALUE_COL,
freq: Optional[str] = None,
date_format: Optional[str] = None,
tz: Optional[str] = None,
train_end_date: Optional[Union[str, datetime]] = None,
regressor_cols: Optional[List[str]] = None,
lagged_regressor_cols: Optional[List[str]] = None,
anomaly_info: Optional[Union[Dict, List[Dict]]] = None):
"""Loads data to internal representation. Parses date column,
sets timezone aware index.
Checks for irregularities and raises an error if input is invalid.
Adjusts for anomalies according to ``anomaly_info``.
Parameters
----------
df : `pandas.DataFrame`
Input timeseries. A data frame which includes the timestamp column
as well as the value column.
time_col : `str`
The column name in ``df`` representing time for the time series data.
The time column can be anything that can be parsed by pandas DatetimeIndex.
value_col: `str`
The column name which has the value of interest to be forecasted.
freq : `str` or None, default None
Timeseries frequency, DateOffset alias, If None automatically inferred.
date_format : `str` or None, default None
strftime format to parse time column, eg ``%m/%d/%Y``.
Note that ``%f`` will parse all the way up to nanoseconds.
If None (recommended), inferred by `pandas.to_datetime`.
tz : `str` or pytz.timezone object or None, default None
Passed to `pandas.tz_localize` to localize the timestamp.
train_end_date : `str` or `datetime.datetime` or None, default None
Last date to use for fitting the model. Forecasts are generated after this date.
If None, it is set to the minimum of ``self.last_date_for_val`` and
``self.last_date_for_reg``.
regressor_cols: `list` [`str`] or None, default None
A list of regressor columns used in the training and prediction DataFrames.
If None, no regressor columns are used.
Regressor columns that are unavailable in ``df`` are dropped.
lagged_regressor_cols: `list` [`str`] or None, default None
A list of additional columns needed for lagged regressors in the training and prediction DataFrames.
This list can have overlap with ``regressor_cols``.
If None, no additional columns are added to the DataFrame.
Lagged regressor columns that are unavailable in ``df`` are dropped.
anomaly_info : `dict` or `list` [`dict`] or None, default None
Anomaly adjustment info. Anomalies in ``df``
are corrected before any forecasting is done.
If None, no adjustments are made.
A dictionary containing the parameters to
`~greykite.common.features.adjust_anomalous_data.adjust_anomalous_data`.
See that function for details.
The possible keys are:
``"value_col"`` : `str`
The name of the column in ``df`` to adjust. You may adjust the value
to forecast as well as any numeric regressors.
``"anomaly_df"`` : `pandas.DataFrame`
Adjustments to correct the anomalies.
``"start_time_col"``: `str`, default START_TIME_COL
Start date column in ``anomaly_df``.
``"end_time_col"``: `str`, default END_TIME_COL
End date column in ``anomaly_df``.
``"adjustment_delta_col"``: `str` or None, default None
Impact column in ``anomaly_df``.
``"filter_by_dict"``: `dict` or None, default None
Used to filter ``anomaly_df`` to the relevant anomalies for
the ``value_col`` in this dictionary.
Key specifies the column name, value specifies the filter value.
``"filter_by_value_col""``: `str` or None, default None
Adds ``{filter_by_value_col: value_col}`` to ``filter_by_dict``
if not None, for the ``value_col`` in this dictionary.
``"adjustment_method"`` : `str` ("add" or "subtract"), default "add"
How to make the adjustment, if ``adjustment_delta_col`` is provided.
Accepts a list of such dictionaries to adjust multiple columns in ``df``.
Returns
-------
self : Returns self.
Sets ``self.df`` with standard column names,
value adjusted for anomalies, and time gaps filled in,
sorted by time index.
"""
self.original_time_col = time_col
self.original_value_col = value_col
self.anomaly_info = anomaly_info
canonical_data_dict = get_canonical_data(
df=df,
time_col=time_col,
value_col=value_col,
freq=freq,
date_format=date_format,
tz=tz,
train_end_date=pd.to_datetime(train_end_date, format=date_format),
regressor_cols=regressor_cols,
lagged_regressor_cols=lagged_regressor_cols,
anomaly_info=anomaly_info)
self.df = canonical_data_dict["df"]
self.df_before_adjustment = canonical_data_dict["df_before_adjustment"]
self.fit_df = canonical_data_dict["fit_df"]
self.freq = canonical_data_dict["freq"]
self.time_stats = canonical_data_dict["time_stats"]
self.regressor_cols = canonical_data_dict["regressor_cols"]
self.lagged_regressor_cols = canonical_data_dict["lagged_regressor_cols"]
self.fit_cols = canonical_data_dict["fit_cols"]
self.train_end_date = canonical_data_dict["train_end_date"]
self.last_date_for_val = canonical_data_dict["last_date_for_val"]
self.last_date_for_reg = canonical_data_dict["last_date_for_reg"]
self.last_date_for_lag_reg = canonical_data_dict["last_date_for_lag_reg"]
# y (possibly with null values) after gaps have been filled in and anomalies corrected
self.y = self.df[VALUE_COL]
self.fit_y = self.fit_df[VALUE_COL]
# computes statistics of processed dataset
self.describe_time_col()
self.describe_value_col() # compute value statistics
log_message(f"last date for fit: {self.train_end_date}", LoggingLevelEnum.INFO)
log_message(f"last date for {self.original_value_col}: {self.last_date_for_val}", LoggingLevelEnum.INFO)
log_message(f"last date with any regressor: {self.last_date_for_reg}", LoggingLevelEnum.INFO)
log_message(f"columns available to use as regressors: {', '.join(self.regressor_cols)}", LoggingLevelEnum.INFO)
log_message(f"columns available to use as lagged regressors: {', '.join(self.lagged_regressor_cols)}", LoggingLevelEnum.INFO)
return self
def describe_time_col(self):
"""Basic descriptive stats on the timeseries time column.
Returns
-------
time_stats: `dict`
Dictionary with descriptive stats on the timeseries time column.
* data_points: int
number of time points
* mean_increment_secs: float
mean frequency
* min_timestamp: datetime64
start date
* max_timestamp: datetime64
end date
"""
if self.df is None:
raise RuntimeError("Must load data before describing dataset")
timeseries_info = describe_timeseries(df=self.df, time_col=TIME_COL)
data_points = self.df.shape[0]
mean_increment_secs = timeseries_info["mean_increment_secs"]
min_timestamp = timeseries_info["min_timestamp"]
max_timestamp = timeseries_info["max_timestamp"]
log_message("Input time stats:", LoggingLevelEnum.INFO)
log_message(f" data points: {data_points}", LoggingLevelEnum.INFO)
log_message(f" avg increment (sec): {mean_increment_secs:.2f}", LoggingLevelEnum.INFO)
log_message(f" start date: {min_timestamp}", LoggingLevelEnum.INFO)
log_message(f" end date: {max_timestamp}", LoggingLevelEnum.INFO)
time_stats = {
"data_points": data_points, # total number of time points, including missing ones
"mean_increment_secs": mean_increment_secs, # after filling in gaps
"min_timestamp": min_timestamp,
"max_timestamp": max_timestamp,
}
self.time_stats.update(time_stats) # compute time statistics
return time_stats
def describe_value_col(self):
"""Basic descriptive stats on the timeseries value column.
Returns
-------
value_stats : `dict` [`str`, `float`]
Dict with keys: count, mean, std, min, 25%, 50%, 75%, max
"""
if self.df is None:
raise RuntimeError("Must load data before describing values")
self.value_stats = self.df[VALUE_COL].describe() # count is the total number of provided timepoints
log_message("Input value stats:", LoggingLevelEnum.INFO)
log_message(repr(self.value_stats), LoggingLevelEnum.INFO)
return self.value_stats
def make_future_dataframe(self, periods: int = None, include_history=True):
"""Extends the input data for prediction into the future.
Includes the historical values (VALUE_COL) so this can be fed
into a Pipeline that transforms input data for fitting, and for
use in evaluation.
Parameters
----------
periods : int or None
Number of periods to forecast.
If there are no regressors, default is 30.
If there are regressors, default is to predict all available dates.
include_history : bool
Whether to return historical dates and values with future dates.
Returns
-------
future_df : `pandas.DataFrame`
Dataframe with future timestamps for prediction.
Contains columns for:
* prediction dates (``TIME_COL``),
* values (``VALUE_COL``),
* optional regressors
"""
if self.df is None:
raise RuntimeError("Must load data before generating future dates.")
# determines the number of future periods to predict
if self.regressor_cols:
max_regressor_periods = len(self.df[
(self.df[TIME_COL] > self.train_end_date)
& (self.df[TIME_COL] <= self.last_date_for_reg)
])
if periods is None:
periods = max_regressor_periods
elif periods > max_regressor_periods:
warnings.warn(
f"Provided periods '{periods}' is more than allowed ('{max_regressor_periods}') due to "
f"the length of regressor columns. Using '{max_regressor_periods}'.",
UserWarning)
periods = max_regressor_periods
elif periods is None:
periods = 30
# the future dates for prediction
dates = pd.date_range(
start=self.train_end_date,
periods=periods + 1, # an extra in case we include start
freq=self.freq)
dates = dates[dates > self.train_end_date] # drops values up to train_end_date
dates = dates[:periods] # returns the correct number of periods
if self.regressor_cols:
# return TIME_COL, VALUE_COL, and regressors
last_date_for_predict = dates.max()
if include_history:
valid_indices = (self.df[TIME_COL] <= last_date_for_predict)
else:
valid_indices = ((self.df[TIME_COL] > self.train_end_date)
& (self.df[TIME_COL] <= last_date_for_predict))
future_df = self.df[valid_indices]
else:
# return TIME_COL, VALUE_COL
future_df = self.df.reindex(index=dates)
future_df[TIME_COL] = future_df.index
if include_history:
future_df = pd.concat([self.fit_df, future_df], axis=0, sort=False)
return future_df[self.fit_cols]
def plot(
self,
color="rgb(32, 149, 212)",
show_anomaly_adjustment=False,
**kwargs):
"""Returns interactive plotly graph of the value against time.
If anomaly info is provided, there is an option to show the anomaly adjustment.
Parameters
----------
color : `str`, default "rgb(32, 149, 212)" (light blue)
Color of the value line (after adjustment, if applicable).
show_anomaly_adjustment : `bool`, default False
Whether to show the anomaly adjustment.
kwargs : additional parameters
Additional parameters to pass to
`~greykite.common.viz.timeseries_plotting.plot_univariate`
such as title and color.
Returns
-------
fig : `plotly.graph_objects.Figure`
Interactive plotly graph of the value against time.
See `~greykite.common.viz.timeseries_plotting.plot_forecast_vs_actual`
return value for how to plot the figure and add customization.
"""
df = self.df.copy()
# Plots value after anomaly adjustment
y_col_style_dict = {
VALUE_COL: dict(
name=self.original_value_col,
mode="lines",
line=dict(
color=color,
),
opacity=0.8
)
}
if show_anomaly_adjustment:
if self.anomaly_info is not None:
# Adds value before adjustment to ``df``
postfix = "_unadjusted"
df[f"{VALUE_COL}{postfix}"] = self.df_before_adjustment[VALUE_COL]
y_col_style_dict[f"{VALUE_COL}{postfix}"] = dict(
name=f"{self.original_value_col}{postfix}",
mode="lines",
line=dict(
color="#B3B3B3", # light gray
),
opacity=0.8
)
else:
raise ValueError("There is no `anomaly_info` to show. `show_anomaly_adjustment` must be False.")
return plot_multivariate(
df,
TIME_COL,
y_col_style_dict,
xlabel=self.original_time_col,
ylabel=self.original_value_col,
**kwargs)
def get_grouping_evaluation(
self,
aggregation_func=np.nanmean,
aggregation_func_name="mean",
groupby_time_feature=None,
groupby_sliding_window_size=None,
groupby_custom_column=None):
"""Group-wise computation of aggregated timeSeries value.
Can be used to evaluate error/ aggregated value by a time feature,
over time, or by a user-provided column.
Exactly one of: ``groupby_time_feature``, ``groupby_sliding_window_size``,
``groupby_custom_column`` must be provided.
Parameters
----------
aggregation_func : callable, optional, default ``numpy.nanmean``
Function that aggregates an array to a number.
Signature (y: array) -> aggregated value: float.
aggregation_func_name : `str` or None, optional, default "mean"
Name of grouping function, used to report results.
If None, defaults to "aggregation".
groupby_time_feature : `str` or None, optional
If provided, groups by a column generated by
`~greykite.common.features.timeseries_features.build_time_features_df`.
See that function for valid values.
groupby_sliding_window_size : `int` or None, optional
If provided, sequentially partitions data into groups of size
``groupby_sliding_window_size``.
groupby_custom_column : `pandas.Series` or None, optional
If provided, groups by this column value. Should be same length as the DataFrame.
Returns
-------
grouped_df : `pandas.DataFrame` with two columns:
(1) grouping_func_name:
evaluation metric for aggregation of timeseries.
(2) group name:
group name depends on the grouping method:
``groupby_time_feature`` for ``groupby_time_feature``
``cst.TIME_COL`` for ``groupby_sliding_window_size``
``groupby_custom_column.name`` for ``groupby_custom_column``.
"""
df = self.df.copy()
if aggregation_func_name:
grouping_func_name = f"{aggregation_func_name} of {VALUE_COL}"
else:
grouping_func_name = f"aggregation of {VALUE_COL}"
def grouping_func(grp):
return aggregation_func(grp[VALUE_COL])
result = add_groupby_column(
df=df,
time_col=TIME_COL,
groupby_time_feature=groupby_time_feature,
groupby_sliding_window_size=groupby_sliding_window_size,
groupby_custom_column=groupby_custom_column)
grouped_df = grouping_evaluation(
df=result["df"],
groupby_col=result["groupby_col"],
grouping_func=grouping_func,
grouping_func_name=grouping_func_name)
return grouped_df
def plot_grouping_evaluation(
self,
aggregation_func=np.nanmean,
aggregation_func_name="mean",
groupby_time_feature=None,
groupby_sliding_window_size=None,
groupby_custom_column=None,
xlabel=None,
ylabel=None,
title=None):
"""Computes aggregated timeseries by group and plots the result.
Can be used to plot aggregated timeseries by a time feature, over time,
or by a user-provided column.
Exactly one of: ``groupby_time_feature``, ``groupby_sliding_window_size``,
``groupby_custom_column`` must be provided.
Parameters
----------
aggregation_func : callable, optional, default ``numpy.nanmean``
Function that aggregates an array to a number.
Signature (y: array) -> aggregated value: float.
aggregation_func_name : `str` or None, optional, default "mean"
Name of grouping function, used to report results.
If None, defaults to "aggregation".
groupby_time_feature : `str` or None, optional
If provided, groups by a column generated by
`~greykite.common.features.timeseries_features.build_time_features_df`.
See that function for valid values.
groupby_sliding_window_size : `int` or None, optional
If provided, sequentially partitions data into groups of size
``groupby_sliding_window_size``.
groupby_custom_column : `pandas.Series` or None, optional
If provided, groups by this column value. Should be same length as the DataFrame.
xlabel : `str`, optional, default None
X-axis label of the plot.
ylabel : `str`, optional, default None
Y-axis label of the plot.
title : `str` or None, optional
Plot title. If None, default is based on axis labels.
Returns
-------
fig : `plotly.graph_objects.Figure`
plotly graph object showing aggregated timeseries by group.
x-axis label depends on the grouping method:
``groupby_time_feature`` for ``groupby_time_feature``
``TIME_COL`` for ``groupby_sliding_window_size``
``groupby_custom_column.name`` for ``groupby_custom_column``.
"""
grouped_df = self.get_grouping_evaluation(
aggregation_func=aggregation_func,
aggregation_func_name=aggregation_func_name,
groupby_time_feature=groupby_time_feature,
groupby_sliding_window_size=groupby_sliding_window_size,
groupby_custom_column=groupby_custom_column)
xcol, ycol = grouped_df.columns
fig = plot_univariate(
df=grouped_df,
x_col=xcol,
y_col=ycol,
xlabel=xlabel,
ylabel=ylabel,
title=title)
return fig
def get_quantiles_and_overlays(
self,
groupby_time_feature=None,
groupby_sliding_window_size=None,
groupby_custom_column=None,
show_mean=False,
show_quantiles=False,
show_overlays=False,
overlay_label_time_feature=None,
overlay_label_sliding_window_size=None,
overlay_label_custom_column=None,
center_values=False,
value_col=VALUE_COL,
mean_col_name="mean",
quantile_col_prefix="Q",
**overlay_pivot_table_kwargs):
"""Computes mean, quantiles, and overlays by the requested grouping dimension.
Overlays are best explained in the plotting context. The grouping dimension goes on
the x-axis, and one line is shown for each level of the overlay dimension. This
function returns a column for each line to plot (e.g. mean, each quantile,
each overlay value).
Exactly one of: ``groupby_time_feature``, ``groupby_sliding_window_size``,
``groupby_custom_column`` must be provided as the grouping dimension.
If ``show_overlays`` is True, exactly one of: ``overlay_label_time_feature``,
``overlay_label_sliding_window_size``, ``overlay_label_custom_column`` can be
provided to specify the ``label_col`` (overlay dimension). Internally, the
function calls `pandas.DataFrame.pivot_table` with ``index=groupby_col``,
``columns=label_col``, ``values=value_col`` to get the overlay values for plotting.
You can pass additional parameters to `pandas.DataFrame.pivot_table` via
``overlay_pivot_table_kwargs``, e.g. to change the aggregation method. If an explicit
label is not provided, the records are labeled by their position within the group.
For example, to show yearly seasonality mean, quantiles, and overlay plots for
each individual year, use::
self.get_quantiles_and_overlays(
groupby_time_feature="doy", # Rows: a row for each day of year (1, 2, ..., 366)
show_mean=True, # mean value on that day
show_quantiles=[0.1, 0.9], # quantiles of the observed distribution on that day
show_overlays=True, # Include overlays defined by ``overlay_label_time_feature``
overlay_label_time_feature="year") # One column for each observed "year" (2016, 2017, 2018, ...)
To show weekly seasonality over time, use::
self.get_quantiles_and_overlays(
groupby_time_feature="dow", # Rows: a row for each day of week (1, 2, ..., 7)
show_mean=True, # mean value on that day
show_quantiles=[0.1, 0.5, 0.9], # quantiles of the observed distribution on that day
show_overlays=True, # Include overlays defined by ``overlay_label_time_feature``
overlay_label_sliding_window_size=90, # One column for each 90 period sliding window in the dataset,
aggfunc="median") # overlay value is the median value for the dow over the period (default="mean").
It may be difficult to assess the weekly seasonality from the previous result,
because overlays shift up/down over time due to trend/yearly seasonality.
Use ``center_values=True`` to adjust each overlay so its average value is centered at 0.
Mean and quantiles are shifted by a single constant to center the mean at 0, while
preserving their relative values::
self.get_quantiles_and_overlays(
groupby_time_feature="dow",
show_mean=True,
show_quantiles=[0.1, 0.5, 0.9],
show_overlays=True,
overlay_label_sliding_window_size=90,
aggfunc="median",
center_values=True) # Centers the output
Centering reduces the variability in the overlays to make it easier to isolate
the effect by the groupby column. As a result, centered overlays have smaller
variability than that reported by the quantiles, which operate on the original,
uncentered data points. Similarly, if overlays are aggregates of individual values
(i.e. ``aggfunc`` is needed in the call to `pandas.DataFrame.pivot_table`),
the quantiles of overlays will be less extreme than those of the original data.
- To assess variability conditioned on the groupby value, check the quantiles.
- To assess variability conditioned on both the groupby and overlay value,
after any necessary aggregation, check the variability of the overlay values.
Compute quantiles of overlays from the return value if desired.
Parameters
----------
groupby_time_feature : `str` or None, default None
If provided, groups by a column generated by
`~greykite.common.features.timeseries_features.build_time_features_df`.
See that function for valid values.
groupby_sliding_window_size : `int` or None, default None
If provided, sequentially partitions data into groups of size
``groupby_sliding_window_size``.
groupby_custom_column : `pandas.Series` or None, default None
If provided, groups by this column value. Should be same length as the DataFrame.
show_mean : `bool`, default False
Whether to return the mean value by the groupby column.
show_quantiles : `bool` or `list` [`float`] or `numpy.array`, default False
Whether to return the quantiles of the value by the groupby column.
If False, does not return quantiles. If True, returns default
quantiles (0.1 and 0.9). If array-like, a list of quantiles
to compute (e.g. (0.1, 0.25, 0.75, 0.9)).
show_overlays : `bool` or `int` or array-like [`int` or `str`] or None, default False
Whether to return overlays of the value by the groupby column.
If False, no overlays are shown.
If True and ``label_col`` is defined, calls `pandas.DataFrame.pivot_table` with
``index=groupby_col``, ``columns=label_col``, ``values=value_col``.
``label_col`` is defined by one of ``overlay_label_time_feature``,
``overlay_label_sliding_window_size``, or ``overlay_label_custom_column``.
Returns one column for each value of the ``label_col``.
If True and the ``label_col`` is not defined, returns the raw values within
each group. Values across groups are put into columns by their position in
the group (1st element in group, 2nd, 3rd, etc.). Positional order in a group
is not guaranteed to correspond to anything meaningful, so the items within a
column may not have anything in common. It is better to specify one of ``overlay_*``
to explicitly define the overlay labels.
If an integer, the number of overlays to randomly sample. The same as True,
then randomly samples up to `int` columns. This is useful if there are too many values.
If a list [int], a list of column indices (int type). The same as True,
then selects the specified columns by index.
If a list [str], a list of column names. Column names are matched by their
string representation to the names in this list. The same as True,
then selects the specified columns by name.
overlay_label_time_feature : `str` or None, default None
If ``show_overlays`` is True, can be used to define ``label_col``,
i.e. which dimension to show separately as overlays.
If provided, uses a column generated by
`~greykite.common.features.timeseries_features.build_time_features_df`.
See that function for valid values.
overlay_label_sliding_window_size : `int` or None, default None
If ``show_overlays`` is True, can be used to define ``label_col``,
i.e. which dimension to show separately as overlays.
If provided, uses a column that sequentially partitions data into groups
of size ``groupby_sliding_window_size``.
overlay_label_custom_column : `pandas.Series` or None, default None
If ``show_overlays`` is True, can be used to define ``label_col``,
i.e. which dimension to show separately as overlays.
If provided, uses this column value. Should be same length as the DataFrame.
value_col : `str`, default VALUE_COL
The column name for the value column. By default,
shows the univariate time series value, but it can be any
other column in ``self.df``.
mean_col_name : `str`, default "mean"
The name to use for the mean column in the output.
Applies if ``show_mean=True``.
quantile_col_prefix : `str`, default "Q"
The prefix to use for quantile column names in the output.
Columns are named with this prefix followed by the quantile,
rounded to 2 decimal places.
center_values : `bool`, default False
Whether to center the return values.
If True, shifts each overlay so its average value is centered at 0.
Shifts mean and quantiles by a constant to center the mean at 0, while
preserving their relative values.
If False, values are not centered.
overlay_pivot_table_kwargs : additional parameters
Additional keyword parameters to pass to `pandas.DataFrame.pivot_table`,
used in generating the overlays. See above description for details.
Returns
-------
grouped_df : `pandas.DataFrame`
Dataframe with mean, quantiles, and overlays by the grouping column. Overlays
are defined by the grouping column and overlay dimension.
ColumnIndex is a multiindex with first level as the "category", a subset of
[MEAN_COL_GROUP, QUANTILE_COL_GROUP, OVERLAY_COL_GROUP] depending on what is requests.
- grouped_df[MEAN_COL_GROUP] = df with single column, named ``mean_col_name``.
- grouped_df[QUANTILE_COL_GROUP] = df with a column for each quantile, named
f"{quantile_col_prefix}{round(str(q))}", where ``q`` is the quantile.
- grouped_df[OVERLAY_COL_GROUP] = df with one column per overlay value, named
by the overlay value.
For example, it might look like::
category mean quantile overlay
name mean Q0.1 Q0.9 2007 2008 2009
doy
1 8.42 7.72 9.08 8.29 7.75 8.33
2 8.82 8.20 9.56 8.43 8.80 8.53
3 8.95 8.25 9.88 8.26 9.12 8.70
4 9.07 8.60 9.49 8.10 9.99 8.73
5 8.73 8.29 9.24 7.95 9.26 8.37
... ... ... ... ... ... ...
"""
# Default quantiles to show if `show_quantiles` is boolean
if isinstance(show_quantiles, bool):
if show_quantiles:
show_quantiles = [0.1, 0.9]
else:
show_quantiles = None
# Adds grouping dimension
result = add_groupby_column(
df=self.df,
time_col=TIME_COL, # Already standardized
groupby_time_feature=groupby_time_feature,
groupby_sliding_window_size=groupby_sliding_window_size,
groupby_custom_column=groupby_custom_column)
df = result["df"]
groupby_col = result["groupby_col"]
grouped_df = None
# Whether an overlay label is provided
add_overlay_label = (overlay_label_time_feature is not None) or \
(overlay_label_sliding_window_size is not None) or \
(overlay_label_custom_column is not None)
overlay_df = None
# Defines an aggregation function to compute mean, quantiles, and overlays
agg_kwargs = {}
if show_mean:
agg_kwargs.update({mean_col_name: pd.NamedAgg(column=value_col, aggfunc=np.nanmean)})
if show_quantiles is not None:
# Returns the quantiles of the group's `value_col` as a list
agg_kwargs.update({quantile_col_prefix: pd.NamedAgg(
column=value_col,
aggfunc=lambda grp_values: partial(np.nanquantile, q=show_quantiles)(grp_values).tolist())})
if show_overlays is not False:
if add_overlay_label:
# Uses DataFrame pivot_table to get overlay labels as columns, `groupby_col` as index
label_result = add_groupby_column(
df=df,
time_col=TIME_COL,
groupby_time_feature=overlay_label_time_feature,
groupby_sliding_window_size=overlay_label_sliding_window_size,
groupby_custom_column=overlay_label_custom_column)
label_col = label_result["groupby_col"]
overlay_df = label_result["df"].pivot_table(
index=groupby_col,
columns=label_col,
values=value_col,
**overlay_pivot_table_kwargs)
else:
# Uses aggregation to get overlays.
# Takes original values within each group.
# Values across groups are put into columns by their position
# within the group (1st element in group, 2nd, 3rd, etc.)
agg_kwargs.update({"overlay": pd.NamedAgg(column=value_col, aggfunc=tuple)})
# Names the quantile columns
# Keeps to 2 decimal places to handle numerical imprecision.
list_names_dict = {quantile_col_prefix: [
f"{quantile_col_prefix}{str(round(x, 2))}" for x in show_quantiles]}\
if show_quantiles is not None else {}
if agg_kwargs:
grouped_df = flexible_grouping_evaluation(
result["df"],
map_func_dict=None,
groupby_col=result["groupby_col"],
agg_kwargs=agg_kwargs,
extend_col_names=False,
unpack_list=True,
list_names_dict=list_names_dict)
# Adds overlays if requested and not already computed during aggregation
if overlay_df is not None:
overlay_df.columns = map(str, overlay_df.columns)
# Either overlay_df or grouped_df is populated
if grouped_df is None and overlay_df is None:
raise ValueError("Must enable at least one of: show_mean, show_quantiles, show_overlays.")
grouped_df = pd.concat([grouped_df, overlay_df], axis=1)
# Creates MultiIndex for column names to categorize the column names by their type
mean_cols = [mean_col_name] if show_mean else []
quantile_cols = list_names_dict.get(quantile_col_prefix, [])
overlay_cols = [col for col in list(grouped_df.columns) if col not in mean_cols + quantile_cols]
if isinstance(show_overlays, int) and not isinstance(show_overlays, bool):
# Samples from `overlay_cols`
which_overlays = sorted(np.random.choice(
range(len(overlay_cols)),
size=min(show_overlays, len(overlay_cols)),
replace=False))
overlay_cols = list(np.array(overlay_cols)[which_overlays])
elif isinstance(show_overlays, (list, tuple, np.ndarray)):
# Selects from `overlay_cols`
all_integers = np.issubdtype(np.array(show_overlays).dtype, np.integer)
if all_integers:
overlay_cols = [col for i, col in enumerate(overlay_cols) if i in show_overlays]
else:
overlay_cols = [col for col in overlay_cols if str(col) in show_overlays]
cols = mean_cols + quantile_cols + overlay_cols # Reorders columns by group
grouped_df = grouped_df[cols]
categories = list(np.repeat(
[MEAN_COL_GROUP, QUANTILE_COL_GROUP, OVERLAY_COL_GROUP], # Labels columns by category
[len(mean_cols), len(quantile_cols), len(overlay_cols)]))
cateory_col_index = pd.MultiIndex.from_arrays([categories, cols], names=["category", "name"])
grouped_df.columns = cateory_col_index
if center_values:
# Each overlay is independently shifted to have mean 0.
if OVERLAY_COL_GROUP in grouped_df:
grouped_df[OVERLAY_COL_GROUP] -= grouped_df[OVERLAY_COL_GROUP].mean()
# Mean and quantiles are shifted by the same constant, so the mean column is centered at 0.
if MEAN_COL_GROUP in grouped_df:
mean_shift = grouped_df[MEAN_COL_GROUP].mean()[0]
grouped_df[MEAN_COL_GROUP] -= mean_shift
else:
mean_shift = self.df[value_col].mean()
if QUANTILE_COL_GROUP in grouped_df:
grouped_df[QUANTILE_COL_GROUP] -= mean_shift
return grouped_df
def plot_quantiles_and_overlays(
        self,
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        show_mean=False,
        show_quantiles=False,
        show_overlays=False,
        overlay_label_time_feature=None,
        overlay_label_sliding_window_size=None,
        overlay_label_custom_column=None,
        center_values=False,
        value_col=VALUE_COL,
        mean_col_name="mean",
        quantile_col_prefix="Q",
        mean_style=None,
        quantile_style=None,
        overlay_style=None,
        xlabel=None,
        ylabel=None,
        title=None,
        showlegend=True,
        **overlay_pivot_table_kwargs):
    """Plots mean, quantiles, and overlays by the requested grouping dimension.

    The grouping dimension goes on the x-axis, and one line is shown for the mean,
    each quantile, and each level of the overlay dimension, as requested. By default,
    shading is applied between the quantiles.

    Exactly one of: ``groupby_time_feature``, ``groupby_sliding_window_size``,
    ``groupby_custom_column`` must be provided as the grouping dimension.

    If ``show_overlays`` is True, exactly one of: ``overlay_label_time_feature``,
    ``overlay_label_sliding_window_size``, ``overlay_label_custom_column`` can be
    provided to specify the ``label_col`` (overlay dimension). Internally, the
    function calls `pandas.DataFrame.pivot_table` with ``index=groupby_col``,
    ``columns=label_col``, ``values=value_col`` to get the overlay values for plotting.
    You can pass additional parameters to `pandas.DataFrame.pivot_table` via
    ``overlay_pivot_table_kwargs``, e.g. to change the aggregation method. If an explicit
    label is not provided, the records are labeled by their position within the group.

    For example, to show yearly seasonality mean, quantiles, and overlay plots for
    each individual year, use::

        self.plot_quantiles_and_overlays(
            groupby_time_feature="doy",         # Rows: a row for each day of year (1, 2, ..., 366)
            show_mean=True,                     # mean value on that day
            show_quantiles=[0.1, 0.9],          # quantiles of the observed distribution on that day
            show_overlays=True,                 # Include overlays defined by ``overlay_label_time_feature``
            overlay_label_time_feature="year")  # One column for each observed "year" (2016, 2017, 2018, ...)

    To show weekly seasonality over time, use::

        self.plot_quantiles_and_overlays(
            groupby_time_feature="dow",             # Rows: a row for each day of week (1, 2, ..., 7)
            show_mean=True,                         # mean value on that day
            show_quantiles=[0.1, 0.5, 0.9],         # quantiles of the observed distribution on that day
            show_overlays=True,                     # Include overlays defined by ``overlay_label_time_feature``
            overlay_label_sliding_window_size=90,   # One column for each 90 period sliding window in the dataset,
            aggfunc="median")                       # overlay value is the median value for the dow over the period (default="mean").

    It may be difficult to assess the weekly seasonality from the previous result,
    because overlays shift up/down over time due to trend/yearly seasonality.
    Use ``center_values=True`` to adjust each overlay so its average value is centered at 0.
    Mean and quantiles are shifted by a single constant to center the mean at 0, while
    preserving their relative values::

        self.plot_quantiles_and_overlays(
            groupby_time_feature="dow",
            show_mean=True,
            show_quantiles=[0.1, 0.5, 0.9],
            show_overlays=True,
            overlay_label_sliding_window_size=90,
            aggfunc="median",
            center_values=True)  # Centers the output

    Centering reduces the variability in the overlays to make it easier to isolate
    the effect by the groupby column. As a result, centered overlays have smaller
    variability than that reported by the quantiles, which operate on the original,
    uncentered data points. Similarly, if overlays are aggregates of individual values
    (i.e. ``aggfunc`` is needed in the call to `pandas.DataFrame.pivot_table`),
    the quantiles of overlays will be less extreme than those of the original data.

    - To assess variability conditioned on the groupby value, check the quantiles.
    - To assess variability conditioned on both the groupby and overlay value,
      after any necessary aggregation, check the variability of the overlay values.
      Compute quantiles of overlays from the return value if desired.

    Parameters
    ----------
    groupby_time_feature : `str` or None, default None
        If provided, groups by a column generated by
        `~greykite.common.features.timeseries_features.build_time_features_df`.
        See that function for valid values.
    groupby_sliding_window_size : `int` or None, default None
        If provided, sequentially partitions data into groups of size
        ``groupby_sliding_window_size``.
    groupby_custom_column : `pandas.Series` or None, default None
        If provided, groups by this column value. Should be same length as the DataFrame.
    show_mean : `bool`, default False
        Whether to return the mean value by the groupby column.
    show_quantiles : `bool` or `list` [`float`] or `numpy.array`, default False
        Whether to return the quantiles of the value by the groupby column.
        If False, does not return quantiles. If True, returns default
        quantiles (0.1 and 0.9). If array-like, a list of quantiles
        to compute (e.g. (0.1, 0.25, 0.75, 0.9)).
    show_overlays : `bool` or `int` or array-like [`int` or `str`], default False
        Whether to return overlays of the value by the groupby column.

        If False, no overlays are shown.

        If True and ``label_col`` is defined, calls `pandas.DataFrame.pivot_table` with
        ``index=groupby_col``, ``columns=label_col``, ``values=value_col``.
        ``label_col`` is defined by one of ``overlay_label_time_feature``,
        ``overlay_label_sliding_window_size``, or ``overlay_label_custom_column``.
        Returns one column for each value of the ``label_col``.

        If True and the ``label_col`` is not defined, returns the raw values within
        each group. Values across groups are put into columns by their position in
        the group (1st element in group, 2nd, 3rd, etc.). Positional order in a group
        is not guaranteed to correspond to anything meaningful, so the items within a
        column may not have anything in common. It is better to specify one of ``overlay_*``
        to explicitly define the overlay labels.

        If an integer, the number of overlays to randomly sample. The same as True,
        then randomly samples up to `int` columns. This is useful if there are too many values.

        If a list [int], a list of column indices (int type). The same as True,
        then selects the specified columns by index.

        If a list [str], a list of column names. Column names are matched by their
        string representation to the names in this list. The same as True,
        then selects the specified columns by name.
    overlay_label_time_feature : `str` or None, default None
        If ``show_overlays`` is True, can be used to define ``label_col``,
        i.e. which dimension to show separately as overlays.
        If provided, uses a column generated by
        `~greykite.common.features.timeseries_features.build_time_features_df`.
        See that function for valid values.
    overlay_label_sliding_window_size : `int` or None, default None
        If ``show_overlays`` is True, can be used to define ``label_col``,
        i.e. which dimension to show separately as overlays.
        If provided, uses a column that sequentially partitions data into groups
        of size ``groupby_sliding_window_size``.
    overlay_label_custom_column : `pandas.Series` or None, default None
        If ``show_overlays`` is True, can be used to define ``label_col``,
        i.e. which dimension to show separately as overlays.
        If provided, uses this column value. Should be same length as the DataFrame.
    value_col : `str`, default VALUE_COL
        The column name for the value column. By default,
        shows the univariate time series value, but it can be any
        other column in ``self.df``.
    mean_col_name : `str`, default "mean"
        The name to use for the mean column in the output.
        Applies if ``show_mean=True``.
    quantile_col_prefix : `str`, default "Q"
        The prefix to use for quantile column names in the output.
        Columns are named with this prefix followed by the quantile,
        rounded to 2 decimal places.
    center_values : `bool`, default False
        Whether to center the return values.
        If True, shifts each overlay so its average value is centered at 0.
        Shifts mean and quantiles by a constant to center the mean at 0, while
        preserving their relative values.
        If False, values are not centered.
    mean_style: `dict` or None, default None
        How to style the mean line, passed as keyword arguments to
        `plotly.graph_objects.Scatter`. If None, the default is::

            mean_style = {
                "line": dict(
                    width=2,
                    color="#595959"),  # gray
                "legendgroup": MEAN_COL_GROUP}
    quantile_style: `dict` or None, default None
        How to style the quantile lines, passed as keyword arguments to
        `plotly.graph_objects.Scatter`. If None, the default is::

            quantile_style = {
                "line": dict(
                    width=2,
                    color="#1F9AFF",  # blue
                    dash="solid"),
                "legendgroup": QUANTILE_COL_GROUP,  # show/hide them together
                "fill": "tonexty"}

        Note that fill style is removed from to the first quantile line, to
        fill only between items in the same category.
    overlay_style: `dict` or None, default None
        How to style the overlay lines, passed as keyword arguments to
        `plotly.graph_objects.Scatter`. If None, the default is::

            overlay_style = {
                "opacity": 0.5,  # makes it easier to see density
                "line": dict(
                    width=1,
                    color="#B3B3B3",  # light gray
                    dash="solid"),
                "legendgroup": OVERLAY_COL_GROUP}
    xlabel : `str`, optional, default None
        X-axis label of the plot.
    ylabel : `str`, optional, default None
        Y-axis label of the plot. If None, uses ``value_col``.
    title : `str` or None, default None
        Plot title. If None, default is based on axis labels.
    showlegend : `bool`, default True
        Whether to show the legend.
    overlay_pivot_table_kwargs : additional parameters
        Additional keyword parameters to pass to `pandas.DataFrame.pivot_table`,
        used in generating the overlays.
        See `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries.get_quantiles_and_overlays`
        description for details.

    Returns
    -------
    fig : `plotly.graph_objects.Figure`
        plotly graph object showing the mean, quantiles, and overlays.

    See Also
    --------
    `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries.get_quantiles_and_overlays`
        To get the mean, quantiles, and overlays as a `pandas.DataFrame` without plotting.
    """
    if ylabel is None:
        ylabel = value_col
    # Computes the data to plot: a DataFrame indexed by the groupby column,
    # whose columns carry a (category, name) MultiIndex with categories
    # MEAN_COL_GROUP / QUANTILE_COL_GROUP / OVERLAY_COL_GROUP.
    grouped_df = self.get_quantiles_and_overlays(
        groupby_time_feature=groupby_time_feature,
        groupby_sliding_window_size=groupby_sliding_window_size,
        groupby_custom_column=groupby_custom_column,
        show_mean=show_mean,
        show_quantiles=show_quantiles,
        show_overlays=show_overlays,
        overlay_label_time_feature=overlay_label_time_feature,
        overlay_label_sliding_window_size=overlay_label_sliding_window_size,
        overlay_label_custom_column=overlay_label_custom_column,
        center_values=center_values,
        value_col=value_col,
        mean_col_name=mean_col_name,
        quantile_col_prefix=quantile_col_prefix,
        **overlay_pivot_table_kwargs)
    # Default line styles, applied per category unless the caller overrides them.
    if mean_style is None:
        mean_style = {
            "line": dict(
                width=2,
                color="#595959"),  # gray
            "legendgroup": MEAN_COL_GROUP}
    if quantile_style is None:
        quantile_style = {
            "line": dict(
                width=2,
                color="#1F9AFF",  # blue
                dash="solid"),
            "legendgroup": QUANTILE_COL_GROUP,  # show/hide them together
            "fill": "tonexty"}
    if overlay_style is None:
        overlay_style = {
            "opacity": 0.5,  # makes it easier to see density
            "line": dict(
                width=1,
                color="#B3B3B3",  # light gray
                dash="solid"),
            "legendgroup": OVERLAY_COL_GROUP}
    style_dict = {
        MEAN_COL_GROUP: mean_style,
        QUANTILE_COL_GROUP: quantile_style,
        OVERLAY_COL_GROUP: overlay_style}
    # Maps each output column name to its plotly style. Insertion order matters:
    # plotly's "tonexty" fill applies between a trace and the previously added one.
    y_col_style_dict = {}
    # All categories in grouped_df. Reverses the order so the first category is plotted last (on top).
    categories = grouped_df.columns.get_level_values(0).unique()[::-1]
    for category in categories:
        style = style_dict.get(category, {})
        if "fill" in style:
            # If fill is part of the style, plotly fills the area between this line and
            # the previous line added to the plot.
            # Since we only want to fill between lines in the same category (e.g. between quantiles),
            # we remove the "fill" from the first line within each category. Otherwise the first
            # line in this category would fill to the last line in the previous category.
            category_style_dict = {grouped_df[category].columns[0]: {k: v for k, v in style.items() if k != "fill"}}
            category_style_dict.update({col: style for col in grouped_df[category].columns[1:]})
        else:
            category_style_dict = {col: style for col in grouped_df[category].columns}
        y_col_style_dict.update(category_style_dict)
    grouped_df.columns = list(grouped_df.columns.get_level_values(1))  # MultiIndex is not needed for plotting
    x_col = grouped_df.index.name
    # Moves the groupby index into a regular column so it can serve as the x-axis.
    grouped_df.reset_index(inplace=True)
    fig = plot_multivariate(
        grouped_df,
        x_col=x_col,
        y_col_style_dict=y_col_style_dict,
        xlabel=xlabel,
        ylabel=ylabel,
        title=title,
        showlegend=showlegend)
    return fig
The provided code snippet includes necessary dependencies for implementing the `get_exploratory_plots` function. Write a Python function `def get_exploratory_plots( df, time_col, value_col, freq=None, anomaly_info=None, output_path=None)` to solve the following problem:
Computes multiple exploratory data analysis (EDA) plots to visualize the metric in ``value_col``and aid in modeling. The EDA plots are written in an `html` file at ``output_path``. For details on how to interpret these EDA plots, check the tutorials. Parameters ---------- df : `pandas.DataFrame` Input timeseries. A data frame which includes the timestamp column as well as the value column. time_col : `str` The column name in ``df`` representing time for the time series data. The time column can be anything that can be parsed by pandas DatetimeIndex. value_col: `str` The column name which has the value of interest to be forecasted. freq : `str` or None, default None Timeseries frequency, DateOffset alias, If None automatically inferred. anomaly_info : `dict` or `list` [`dict`] or None, default None Anomaly adjustment info. Anomalies in ``df`` are corrected before any plotting is done. output_path : `str` or None, default None Path where the `html` file is written. If None, it is set to "EDA_{value_col}.html". Returns ------- eda.html : `html` file An html file containing the EDA plots is written at ``output_path``.
Here is the function:
def get_exploratory_plots(
        df,
        time_col,
        value_col,
        freq=None,
        anomaly_info=None,
        output_path=None):
    """Computes multiple exploratory data analysis (EDA) plots to visualize the
    metric in ``value_col`` and aid in modeling. The EDA plots are written in
    an `html` file at ``output_path``.

    For details on how to interpret these EDA plots, check the tutorials.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Input timeseries. A data frame which includes the timestamp column
        as well as the value column.
    time_col : `str`
        The column name in ``df`` representing time for the time series data.
        The time column can be anything that can be parsed by pandas DatetimeIndex.
    value_col: `str`
        The column name which has the value of interest to be forecasted.
    freq : `str` or None, default None
        Timeseries frequency, DateOffset alias, If None automatically inferred.
    anomaly_info : `dict` or `list` [`dict`] or None, default None
        Anomaly adjustment info. Anomalies in ``df``
        are corrected before any plotting is done.
    output_path : `str` or None, default None
        Path where the `html` file is written.
        If None, it is set to "EDA_{value_col}.html".

    Returns
    -------
    eda.html : `html` file
        An html file containing the EDA plots is written at ``output_path``.
    """
    # General styles
    style = """
        <style>
            caption {
                text-align: left;
                margin-top: 5px;
                margin-bottom: 0px;
                font-size: 120%;
                padding: 5px;
                font-weight: bold;
            }
            td, th {
                border: 1px solid black;
                padding: 3px;
                text-align: left;
            }
            div {
                white-space: pre-wrap;
            }
        </style>
        """
    # Generates the HTML
    html_header = f"Exploratory plots for {value_col}"
    html_str = f"<!DOCTYPE html><html><h1>{html_header}</h1><body>"
    html_str += style
    html_str += "<h3>Please see the " \
                "<a href='https://linkedin.github.io/greykite/docs/0.4.0/html/gallery/tutorials/0100_forecast_tutorial.html#sphx-glr-gallery-tutorials-0100-forecast-tutorial-py'>tutorials</a> to learn how to interpret these plots.</h3>"  # noqa: E501
    html_str += "<h4>Most of these plots have multiple overlays and traces. Feel free to toggle these on and off by " \
                "clicking the legends to the right of the plots.</h4>"
    # Loads the data; anomalies (if any) are corrected during loading.
    ts = UnivariateTimeSeries()
    ts.load_data(
        df=df,
        time_col=time_col,
        value_col=value_col,
        freq=freq,
        anomaly_info=anomaly_info
    )
    # Determines which seasonal cycles are meaningful for this data frequency.
    period = min_gap_in_seconds(df=ts.df, time_col=TIME_COL)
    simple_freq = get_simple_time_frequency_from_period(period)
    valid_seas = simple_freq.value.valid_seas
    # Metric plot
    html_str += "We first plot the raw timeseries. If 'anomaly_info' is provided, the anomalous data " \
                "is removed before plotting. Be careful of anomalies and missing values, as these can lead " \
                "to sharp drop(s) in the plots."
    fig = ts.plot(title="Raw metric value")
    html_str += fig.to_html(full_html=False, include_plotlyjs=True)
    # Changepoints
    html_str += "<h2>Changepoints</h2>"
    html_str += "Let's look at few changepoint plots to identify significant systemic changes in the metric value. Please see " \
                "<a href='https://linkedin.github.io/greykite/docs/0.4.0/html/gallery/quickstart/0200_changepoint_detection.html#sphx-glr-gallery-quickstart-0200-changepoint-detection-py'>Changepoint tutorial</a> to learn more."  # noqa: E501
    model = ChangepointDetector()
    # The fit result is stored on the model; the return value is not needed here.
    model.find_trend_changepoints(
        df=ts.df,
        time_col=TIME_COL,
        value_col=VALUE_COL
    )
    fig = model.plot(plot=False)
    fig.update_layout(title_text="changepoints - default configuration", title_x=0.5)
    html_str += fig.to_html(full_html=False, include_plotlyjs=True)
    # Custom changepoints with less regularization
    model.find_trend_changepoints(
        df=ts.df,
        time_col=TIME_COL,
        value_col=VALUE_COL,
        yearly_seasonality_order=15,
        regularization_strength=0.4,
        resample_freq="7D",
        potential_changepoint_n=25,
        no_changepoint_distance_from_end="60D"
    )
    fig = model.plot(plot=False)
    fig.update_layout(title_text="changepoints - less regularization", title_x=0.5)
    html_str += fig.to_html(full_html=False, include_plotlyjs=True)
    # Holidays
    html_str += "<h2>Holiday Effects</h2>"
    html_str += "Now let's look at how different holidays affect the metric value."
    model = HolidayInferrer()
    res = model.infer_holidays(
        df=ts.df,
        countries=("US",),
        pre_search_days=2,
        post_search_days=2,
        baseline_offsets=(-7, 7),
        plot=True
    )
    fig = res["fig"]
    html_str += fig.to_html(full_html=False, include_plotlyjs=True)
    # Trend
    html_str += "<h2>Trend</h2>"
    html_str += "If trend exists, expect to see a gentle upward / downward slope in the plot. "
    fig = ts.plot_quantiles_and_overlays(
        groupby_time_feature="year_woy",
        show_mean=True,
        show_quantiles=False,
        show_overlays=False,
        overlay_label_time_feature="year",
        overlay_style={"line": {"width": 1}, "opacity": 0.5},
        center_values=False,
        ylabel=ts.original_value_col
    )
    html_str += fig.to_html(full_html=False, include_plotlyjs=True)
    # Seasonalities
    if valid_seas:
        html_str += "<h2>Seasonalities</h2>"
        html_str += "To assess seasonal patterns we"
        html_str += "<ol>" \
                    "<li> Start with the longest seasonal cycles to see the big picture, then proceed to shorter " \
                    "cycles e.g. yearly -> quarterly -> monthly -> weekly -> daily." \
                    "<li> First check for seasonal effect over the entire timeseries (main effect)." \
                    "<li> If the quantiles are large in the overlay plots we check for interaction effects." \
                    "</ol>"
        html_str += "Seasonality overlay plots are centered to remove the effect of trend and longer seasonal cycles. " \
                    "This helps in isolating the effect against the selected groupby feature. Please see " \
                    "<a href='https://linkedin.github.io/greykite/docs/0.4.0/html/gallery/quickstart/0300_seasonality.html#sphx-glr-gallery-quickstart-0300-seasonality-py'>Seasonality tutorial</a> to learn more."  # noqa: E501
        html_str += "<h4>Note that partial (incomplete) seasonal periods can throw off the mean and should be ignored.</h4>"
        # Yearly Seasonality
        if SeasonalityEnum.YEARLY_SEASONALITY.name in valid_seas:
            html_str += "<h3>Yearly Seasonality (main effect)</h3>"
            html_str += "To check for overall yearly seasonality, we group by day of year (`doy`). Different years are overlaid " \
                        "to provide a sense of consistency in the seasonal pattern between years."
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="doy",
                show_mean=True,
                show_quantiles=True,
                show_overlays=True,
                overlay_label_time_feature="year",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                center_values=True,
                xlabel="day of year",
                ylabel=ts.original_value_col,
                title="yearly seasonality for each year (centered)",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            html_str += "<h3>Yearly Seasonality (interaction effect)</h3>"
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="month_dom",
                show_mean=True,
                show_quantiles=True,
                show_overlays=True,
                overlay_label_time_feature="year",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                center_values=True,
                xlabel="month_day of month",
                ylabel=ts.original_value_col,
                title="yearly and monthly seasonality for each year",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="woy_dow",  # week of year and day of week
                show_mean=True,
                show_quantiles=True,
                show_overlays=True,
                overlay_label_time_feature="year",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                center_values=True,
                xlabel="week of year_day of week",
                ylabel=ts.original_value_col,
                title="yearly and weekly seasonality for each year",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
        # Quarterly Seasonality
        if SeasonalityEnum.QUARTERLY_SEASONALITY.name in valid_seas:
            html_str += "<h3>Quarterly Seasonality (main effect)</h3>"
            html_str += "To check for overall quarterly seasonality, we group by day of quarter (`doq`). " \
                        "Quarterly pattern for up to 20 randomly selected quarters are shown. In the legend, each overlay " \
                        "is labeled by the first date in the sliding window."
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="doq",
                show_mean=True,
                show_quantiles=True,
                show_overlays=20,  # randomly selects up to 20 overlays
                overlay_label_time_feature="quarter_start",
                center_values=True,
                xlabel="day of quarter",
                ylabel=ts.original_value_col,
                title="quarterly seasonality with overlays"
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            html_str += "<h3>Quarterly Seasonality (interaction effect)</h3>"
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="doq",
                show_mean=True,
                show_quantiles=False,
                show_overlays=True,
                center_values=True,
                overlay_label_time_feature="year",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                xlabel="day of quarter",
                ylabel=ts.original_value_col,
                title="quarterly seasonality by year"
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
        # Monthly Seasonality
        if SeasonalityEnum.MONTHLY_SEASONALITY.name in valid_seas:
            html_str += "<h3>Monthly Seasonality (main effect)</h3>"
            html_str += "To check overall monthly seasonality, we group by day of month (`dom`). " \
                        "Monthly pattern for up to 20 randomly selected months are shown. In the legend, each overlay " \
                        "is labeled by the first date in the sliding window."
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="dom",
                show_mean=True,
                show_quantiles=True,
                show_overlays=20,
                overlay_label_time_feature="year_month",
                center_values=True,
                xlabel="day of month",
                ylabel=ts.original_value_col,
                title="monthly seasonality with overlays"
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            html_str += "<h3>Monthly Seasonality (interaction effect)</h3>"
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="dom",
                show_mean=True,
                show_quantiles=False,
                show_overlays=True,
                center_values=True,
                overlay_label_time_feature="year",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                xlabel="day of month",
                ylabel=ts.original_value_col,
                title="monthly seasonality by year"
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
        # Weekly Seasonality
        if SeasonalityEnum.WEEKLY_SEASONALITY.name in valid_seas:
            html_str += "<h3>Weekly Seasonality (main effect)</h3>"
            html_str += "To check overall weekly seasonality, we group by day of week (`str_dow`). " \
                        "Weekly pattern for up to 20 randomly selected weeks are shown. In the legend, each overlay " \
                        "is labeled by the first date in the sliding window."
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="str_dow",
                show_mean=True,
                show_quantiles=True,
                show_overlays=20,  # randomly selects up to 20 overlays
                overlay_label_time_feature="year_woy",
                center_values=True,
                xlabel="day of week",
                ylabel=ts.original_value_col,
                title="weekly seasonality with overlays"
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            html_str += "<h3>Weekly Seasonality (interaction effect)</h3>"
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="str_dow",
                show_mean=True,
                show_quantiles=False,
                show_overlays=True,
                center_values=True,
                overlay_label_time_feature="year",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                xlabel="day of week",
                ylabel=ts.original_value_col,
                title="weekly seasonality by year",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="str_dow",
                show_mean=True,
                show_quantiles=False,
                show_overlays=True,
                center_values=True,
                overlay_label_time_feature="month",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                xlabel="day of week",
                ylabel=ts.original_value_col,
                title="weekly seasonality by month",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
        # Daily Seasonality
        if SeasonalityEnum.DAILY_SEASONALITY.name in valid_seas:
            html_str += "<h3>Daily Seasonality (main effect)</h3>"
            html_str += "To check overall daily seasonality, we group by time of day (`tod`). " \
                        "Daily pattern for up to 20 randomly selected days are shown."
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="tod",
                show_mean=True,
                show_quantiles=True,
                show_overlays=20,
                overlay_label_time_feature="year_woy_dow",
                center_values=True,
                xlabel="time of day",
                ylabel=ts.original_value_col,
                title="daily seasonality with overlays"
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            html_str += "<h3>Daily Seasonality (interaction effect)</h3>"
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="tod",
                show_mean=True,
                show_quantiles=False,
                show_overlays=True,
                center_values=True,
                overlay_label_time_feature="year",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                xlabel="time of day",
                ylabel=ts.original_value_col,
                title="daily seasonality by year",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="tod",
                show_mean=True,
                show_quantiles=False,
                show_overlays=True,
                center_values=True,
                overlay_label_time_feature="month",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                xlabel="time of day",
                ylabel=ts.original_value_col,
                title="daily seasonality by month",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
            fig = ts.plot_quantiles_and_overlays(
                groupby_time_feature="tod",
                show_mean=True,
                show_quantiles=False,
                show_overlays=True,
                center_values=True,
                overlay_label_time_feature="week",
                overlay_style={"line": {"width": 1}, "opacity": 0.5},
                xlabel="time of day",
                ylabel=ts.original_value_col,
                title="daily seasonality by week",
            )
            html_str += fig.to_html(full_html=False, include_plotlyjs=True)
    # Auto-correlation
    html_str += "<h2>Auto-correlation</h2>"
    html_str += "Partial auto-correlation plot can be a good guide to choose appropriate auto-regression lag terms. " \
                "Use large spikes to model individual lag terms (`lag_dict`). " \
                "Smaller but significant spikes can be grouped under `agg_lag_dict`."
    # Fills missing values with the median so the (P)ACF can be computed.
    # Direct assignment is used instead of chained `fillna(..., inplace=True)`,
    # which is a pandas chained-assignment anti-pattern (deprecated under Copy-on-Write).
    ts.df[VALUE_COL] = ts.df[VALUE_COL].fillna(ts.df[VALUE_COL].median())
    fig, ax = plt.subplots(1, 2, figsize=(20, 10))
    plot_pacf(ts.df[VALUE_COL].values, lags=40, ax=ax[0])
    plot_acf(ts.df[VALUE_COL].values, lags=40, ax=ax[1])
    # Embeds the matplotlib figure as a base64-encoded PNG.
    tmpfile = BytesIO()
    fig.savefig(tmpfile, format='png')
    plt.close(fig)  # Releases the figure to avoid accumulating open matplotlib figures.
    encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')
    html_str += '<img src=\'data:image/png;base64,{}\'>'.format(encoded)
    # Writes the HTML
    if not output_path:
        output_path = f"EDA_{value_col}.html"
    # "w" (not "w+") since the file is only written; utf-8 so the HTML renders
    # correctly regardless of the platform's default encoding.
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(html_str)
import warnings
from functools import partial
import numpy as np
from greykite.common.constants import PREDICTED_COL
The provided code snippet includes necessary dependencies for implementing the `_cached_call` function. Write a Python function `def _cached_call(cache, estimator, method, *args, **kwargs)` to solve the following problem:
Call estimator with method and args and kwargs. This code is private in scikit-learn 0.24, so it is copied here.
Here is the function:
def _cached_call(cache, estimator, method, *args, **kwargs):
"""Call estimator with method and args and kwargs.
This code is private in scikit-learn 0.24, so it is copied here.
"""
if cache is None:
return getattr(estimator, method)(*args, **kwargs)
try:
return cache[method]
except KeyError:
result = getattr(estimator, method)(*args, **kwargs)
cache[method] = result
return result | Call estimator with method and args and kwargs. This code is private in scikit-learn 0.24, so it is copied here. |
import datetime
import numpy as np
import pandas as pd
import plotly
import plotly.express as px
from greykite.common.constants import ANOMALY_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.testing_utils import generate_df_for_tests
from greykite.common.testing_utils_anomalies import contaminate_df_with_anomalies
from greykite.common.viz.timeseries_annotate import plot_lines_markers
from greykite.detection.common.ad_evaluation import f1_score
from greykite.detection.common.ad_evaluation import precision_score
from greykite.detection.common.ad_evaluation import recall_score
from greykite.detection.detector.ad_utils import partial_return
from greykite.detection.detector.config import ADConfig
from greykite.detection.detector.data import DetectorData
from greykite.detection.detector.greykite import GreykiteDetector
from greykite.detection.detector.reward import Reward
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import MetadataParam
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
f1 = partial_return(f1_score, True)
def f1_reward(data):
    """Reward function computing the F1 score of predicted anomaly labels.

    Parameters
    ----------
    data : object
        Must expose ``y_true`` (actual labels) and ``y_pred`` (predicted labels),
        e.g. a `~greykite.detection.detector.data.DetectorData` instance.

    Returns
    -------
    score
        F1 score of ``data.y_pred`` against ``data.y_true``, as returned by ``f1``.
    """
    labels_true = data.y_true
    labels_pred = data.y_pred
    return f1(y_true=labels_true, y_pred=labels_pred)
import datetime
import numpy as np
import pandas as pd
import plotly
import plotly.express as px
from greykite.common.constants import ANOMALY_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.testing_utils import generate_df_for_tests
from greykite.common.testing_utils_anomalies import contaminate_df_with_anomalies
from greykite.common.viz.timeseries_annotate import plot_lines_markers
from greykite.detection.common.ad_evaluation import f1_score
from greykite.detection.common.ad_evaluation import precision_score
from greykite.detection.common.ad_evaluation import recall_score
from greykite.detection.detector.ad_utils import partial_return
from greykite.detection.detector.config import ADConfig
from greykite.detection.detector.data import DetectorData
from greykite.detection.detector.greykite import GreykiteDetector
from greykite.detection.detector.reward import Reward
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import MetadataParam
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
precision = partial_return(precision_score, True)
def precision_func(data):
    """Reward function computing the precision of predicted anomaly labels.

    Parameters
    ----------
    data : object
        Must expose ``y_true`` (actual labels) and ``y_pred`` (predicted labels),
        e.g. a `~greykite.detection.detector.data.DetectorData` instance.

    Returns
    -------
    score
        Precision of ``data.y_pred`` against ``data.y_true``, as returned by ``precision``.
    """
    labels_true = data.y_true
    labels_pred = data.y_pred
    return precision(y_true=labels_true, y_pred=labels_pred)
import datetime
import numpy as np
import pandas as pd
import plotly
import plotly.express as px
from greykite.common.constants import ANOMALY_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.testing_utils import generate_df_for_tests
from greykite.common.testing_utils_anomalies import contaminate_df_with_anomalies
from greykite.common.viz.timeseries_annotate import plot_lines_markers
from greykite.detection.common.ad_evaluation import f1_score
from greykite.detection.common.ad_evaluation import precision_score
from greykite.detection.common.ad_evaluation import recall_score
from greykite.detection.detector.ad_utils import partial_return
from greykite.detection.detector.config import ADConfig
from greykite.detection.detector.data import DetectorData
from greykite.detection.detector.greykite import GreykiteDetector
from greykite.detection.detector.reward import Reward
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import MetadataParam
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
# Reward metric: recall restricted to the anomaly (True) class.
recall = partial_return(recall_score, True)


def recall_func(data):
    """Returns the recall of the predicted anomaly labels in ``data``
    against the true labels (anomaly class only)."""
    return recall(y_true=data.y_true, y_pred=data.y_pred)
167,514 | import warnings
from collections import defaultdict
import plotly
import pandas as pd
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.framework.benchmark.data_loader_ts import DataLoader
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import MetadataParam
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.utils.result_summary import summarize_grid_search_results
print(ts.df.head())
print(ts.describe_time_col())
print(ts.describe_value_col())
def summarize_grid_search_results(
        grid_search,
        only_changing_params=True,
        combine_splits=True,
        decimals=None,
        score_func=EvaluationMetricEnum.MeanAbsolutePercentError.name,
        score_func_greater_is_better=False,
        cv_report_metrics=CV_REPORT_METRICS_ALL,
        column_order=None):
    """Summarizes CV results for each grid search parameter combination.

    Compared to importing ``grid_search.cv_results_`` directly into a
    `pandas.DataFrame`, this function:

    - returns the correct ranks based on each metric's greater_is_better direction.
    - summarizes the hyperparameter space, only showing the parameters that change.
    - combines split scores into a tuple to save table width.
    - rounds the values to the specified number of decimals.
    - orders columns by type (test score, train score, metric, etc.).

    Parameters
    ----------
    grid_search : `~sklearn.model_selection.RandomizedSearchCV`
        Grid search output (fitted RandomizedSearchCV object).
    only_changing_params : `bool`, default True
        If True, only shows parameters with multiple values in the
        hyperparameter grid.
    combine_splits : `bool`, default True
        Whether to report split scores as a tuple in a single column.
        If True, adds a ``"split_test_{name}"`` column per requested metric
        (and ``"split_train_{name}"`` when train scores are available), e.g.
        (split1_train_score, split2_train_score, split3_train_score) as a tuple.
        The original per-split columns are available either way.
    decimals : `int` or None, default None
        Number of decimal places to round to. If negative, rounds to the left
        of the decimal point. If None, does not round.
    score_func : `str` or callable, default ``EvaluationMetricEnum.MeanAbsolutePercentError.name``
        Score function used to select the optimal model in CV.
        Either a callable taking ``y_true``, ``y_pred`` and returning a float,
        an `~greykite.common.evaluation.EvaluationMetricEnum` member name,
        or `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`.
        Used here to fix the ``"rank_test_score"`` column when
        ``score_func_greater_is_better=False``. Should match what was passed
        to the forecast pipeline / hyperparameter searcher.
    score_func_greater_is_better : `bool`, default False
        True if ``score_func`` is a score (higher is better), False if a loss
        (lower is better). Must be provided for a callable ``score_func``;
        ignored for a string, because the direction is then known.
    cv_report_metrics : `~greykite.framework.constants.CV_REPORT_METRICS_ALL`, or `list` [`str`], or None, default `~greykite.common.constants.CV_REPORT_METRICS_ALL` # noqa: E501
        Additional metrics to show in the summary, besides ``score_func``.
        A warning is given for requested metrics that are not available.
        Valid list entries are `greykite.common.evaluation.EvaluationMetricEnum`
        member names and `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`.
    column_order : `list` [`str`] or None, default None
        A list of regex used to greedily order the column names: columns
        matching the first item are placed first, then the second among the
        remainder, etc. Use ".*" last to include all remaining columns.
        If None, uses the default ordering::

            column_order = ["rank_test", "mean_test", "split_test", "mean_train",
                            "params", "param", "split_train", "time", ".*"]

    Notes
    -----
    Metrics are named in ``grid_search.cv_results_`` according to the
    ``scoring`` parameter passed to `~sklearn.model_selection.RandomizedSearchCV`
    (``"score"`` for single-metric evaluation; the dict keys otherwise).
    The keys are derived from ``score_func`` and ``cv_report_metrics``
    in `~greykite.framework.pipeline.utils.get_scoring_and_refit`:
    `~greykite.common.constants.CUSTOM_SCORE_FUNC_NAME` for a callable,
    the short name from ``.get_metric_name()`` for an enum member name, and
    `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE_NAME` for
    `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`.

    Returns
    -------
    cv_results : `pandas.DataFrame`
        Cross-validation summary with one row per parameter combination.
        Columns follow the pattern below, where ``name`` is the metric's
        canonical short name: ``"rank_test_{name}"`` (1 is best),
        ``"mean_test_{name}"``, ``"split_test_{name}"``, ``"std_test_{name}"``,
        ``"mean_train_{name}"``, ``"split_train_{name}"``, ``"std_train_{name}"``,
        fit/score timing columns (``"mean_fit_time"``, ``"std_fit_time"``,
        ``"mean_score_time"``, ``"std_score_time"``), ``"params"`` (only the
        varying parameters when ``only_changing_params=True``), and
        ``"param_{pipeline__param__name}"`` columns with individual values.
    """
    if column_order is None:
        column_order = ["rank_test", "mean_test", "split_test", "mean_train", "params", "param", "split_train", "time", ".*"]
    cv_results = grid_search.cv_results_.copy()
    # Overwrites the params
    selected_params = []
    if only_changing_params:
        # Removes keys that don't vary
        keep_params = set()
        seen_params = {}
        for params in cv_results['params']:
            for k, v in params.items():
                if k in seen_params:
                    try:
                        # `assert_equal` handles values that are not directly
                        # comparable with `==` (e.g. arrays, nested objects).
                        assert_equal(v, seen_params[k])
                    except AssertionError:
                        # the values are different
                        keep_params.add(k)
                else:
                    seen_params[k] = v
        for params in cv_results['params']:
            explore_params = [(k, v) for k, v in params.items() if k in keep_params]
            selected_params.append(explore_params)
        cv_results['params'] = selected_params
    # Overwrites the ranks and computes combined split score columns
    # for the requested metrics.
    metric_list = [(score_func, score_func_greater_is_better, True)]
    if cv_report_metrics == CV_REPORT_METRICS_ALL:
        cv_report_metrics = EvaluationMetricEnum.__dict__["_member_names_"].copy()
        # Computes `FRACTION_OUTSIDE_TOLERANCE` if `relative_error_tolerance` is specified
        cv_report_metrics.append(FRACTION_OUTSIDE_TOLERANCE)
        # warn_metric=False: missing metrics are expected when requesting all
        metric_list += [(metric, None, False) for metric in cv_report_metrics]
    elif cv_report_metrics is not None:
        # greater_is_better is derived from the metric name
        metric_list += [(metric, None, True) for metric in cv_report_metrics]
    keep_metrics = set()
    for metric, greater_is_better, warn_metric in metric_list:
        ranks_and_splits = get_ranks_and_splits(
            grid_search=grid_search,
            score_func=metric,
            greater_is_better=greater_is_better,
            combine_splits=combine_splits,
            decimals=decimals,
            warn_metric=warn_metric)
        short_name = ranks_and_splits["short_name"]
        if ranks_and_splits["ranks"] is not None:
            cv_results[f"rank_test_{short_name}"] = ranks_and_splits["ranks"]
        if ranks_and_splits["split_train"] is not None:
            cv_results[f"split_train_{short_name}"] = ranks_and_splits["split_train"]
        if ranks_and_splits["split_test"] is not None:
            cv_results[f"split_test_{short_name}"] = ranks_and_splits["split_test"]
        keep_metrics.add(short_name)
    # Creates DataFrame and orders the columns.
    # Dictionary keys are unordered, but appears to follow insertion order.
    cv_results_df = pd.DataFrame(cv_results)
    available_cols = list(cv_results_df.columns)
    # Removes metrics not selected
    all_metrics = set(col.replace("mean_test_", "") for col in cv_results.keys()
                      if re.search("mean_test_", col))
    remove_metrics = all_metrics - keep_metrics
    remove_regex = "|".join(remove_metrics)
    if remove_regex:
        available_cols = [col for col in available_cols
                          if not re.search(remove_regex, col)]
    # Orders the columns
    ordered_cols = []
    for regex in column_order:
        selected_cols = [col for col in available_cols
                         if col not in ordered_cols and re.search(regex, col)]
        ordered_cols += selected_cols
    cv_results_df = cv_results_df[ordered_cols]
    if decimals is not None:
        cv_results_df = cv_results_df.round(decimals)
    return cv_results_df
The provided code snippet includes necessary dependencies for implementing the `get_model_results_summary` function. Write a Python function `def get_model_results_summary(result)` to solve the following problem:
Generates model results summary. Parameters ---------- result : `ForecastResult` See :class:`~greykite.framework.pipeline.pipeline.ForecastResult` for documentation. Returns ------- Prints out model coefficients, cross-validation results, overall train/test evaluations.
Here is the function:
def get_model_results_summary(result):
    """Generates a model results summary.

    Prints the model summary (coefficients / p-values), the
    cross-validation results, and the overall train/test evaluations.

    Parameters
    ----------
    result : `ForecastResult`
        See :class:`~greykite.framework.pipeline.pipeline.ForecastResult` for documentation.
    """
    # Model coefficients / variables with p-values.
    estimator = result.model[-1]
    print(estimator.summary())
    # Cross-validation results, transposed to save space in the printed output.
    cv_summary = summarize_grid_search_results(
        grid_search=result.grid_search,
        decimals=2,
        cv_report_metrics=None,
        column_order=[
            "rank", "mean_test", "split_test", "mean_train", "split_train",
            "mean_fit_time", "mean_score_time", "params"])
    print("================================= CV Results ==================================")
    print(cv_summary.transpose())
    # Historical evaluation metrics on the training/test sets of the backtest.
    backtest = result.backtest
    evaluations = defaultdict(list)
    for metric_name, train_value in backtest.train_evaluation.items():
        evaluations[metric_name].append(train_value)
        evaluations[metric_name].append(backtest.test_evaluation[metric_name])
    summary_table = pd.DataFrame(evaluations, index=["train", "test"]).T
    print("=========================== Train/Test Evaluation =============================")
    print(summary_table)
167,515 | import plotly
import warnings
import pandas as pd
from greykite.framework.benchmark.data_loader_ts import DataLoaderTS
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import MetadataParam
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
from greykite.framework.utils.result_summary import summarize_grid_search_results
from greykite.common.viz.timeseries_plotting import plot_multivariate
df = data["train_df"]
value_col = "count"
forecast_horizon = 7
print("\n Model Summary:")
print(trained_estimator.summary())
test_df = data["test_df"].reset_index()
test_df[value_col] = None
print(f"\n test_df: \n {test_df}")
test_df["regressor_bin_pn"] = [0, 1, 0, 0, 0, 0, 0]
test_df["regressor_bin_heavy_pn"] = [0, 1, 0, 0, 0, 0, 0]
test_df["regressor_tmin"] = [15, 0, 15, 15, 15, 15, 15]
print(f"altered test_df: \n {test_df}")
class DataLoaderTS(DataLoader):
    """Returns datasets included in the library in `pandas.DataFrame` or
    `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries`
    format.

    Extends `~greykite.common.data_loader.DataLoader`.
    """
    def __init__(self):
        super().__init__()

    def load_peyton_manning_ts(self):
        """Loads the daily Peyton Manning dataset (log daily page views of
        the Wikipedia page for Peyton Manning), one of the primary
        demonstration datasets of the Facebook ``Prophet`` algorithm.

        Source: https://github.com/facebook/prophet/blob/master/examples/example_wp_log_peyton_manning.csv

        Returns
        -------
        ts : `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries`
            Peyton Manning page views data. Time column "ts" (date of the
            page view), value column "y" (log of the number of page views).
        """
        series = UnivariateTimeSeries()
        series.load_data(
            df=self.load_peyton_manning(),
            time_col=TIME_COL,
            value_col=VALUE_COL,
            freq="1D")
        return series

    def load_parking_ts(self, system_code_number=None):
        """Loads the hourly Birmingham parking dataset: occupancy rates
        (8:00 to 16:30) from 2016/10/04 to 2016/12/19 for NCP-operated car
        parks, from Birmingham City Council.

        Source: https://archive.ics.uci.edu/ml/datasets/Parking+Birmingham
        (UK Open Government Licence).

        Parameters
        ----------
        system_code_number : `str` or None, default None
            Car park ID. If None, the occupancy rate is averaged across all
            ``SystemCodeNumber`` values; otherwise only the occupancy rate of
            the given park is returned.

        Returns
        -------
        ts : `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries`
            Parking data. Time column "LastUpdated" (measurement time,
            rounded to the nearest half hour), value column "OccupancyRatio"
            (``Occupancy`` divided by ``Capacity``).
        """
        series = UnivariateTimeSeries()
        series.load_data(
            df=self.load_parking(system_code_number=system_code_number),
            time_col="LastUpdated",
            value_col="OccupancyRatio",
            freq="30min")
        return series

    def load_bikesharing_ts(self):
        """Loads the hourly bike-sharing count dataset: aggregated hourly
        counts of rented bikes (Capital Bikeshare) joined with weather data
        from the Baltimore-Washington INTL Airport station.

        Sources:
        https://www.capitalbikeshare.com/system-data and
        https://www.ncdc.noaa.gov/data-access/land-based-station-data

        Returns
        -------
        ts : `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries`
            Bike sharing count data. Time column "ts" (hour and date),
            value column "count" (rented bikes across Washington DC).
            Additional regressors: "tmin" (minimum daily temperature),
            "tmax" (maximum daily temperature), "pn" (precipitation).
        """
        series = UnivariateTimeSeries()
        series.load_data(
            df=self.load_bikesharing(),
            time_col="ts",
            value_col="count",
            freq="H",
            regressor_cols=["tmin", "tmax", "pn"])
        return series

    def load_beijing_pm_ts(self):
        """Loads the Beijing Particulate Matter (PM2.5) dataset: hourly
        PM2.5 readings from the US Embassy in Beijing plus meteorological
        data from Beijing Capital International Airport, Jan 1st, 2010 to
        Dec 31st, 2014. Missing data are denoted as NA.

        Source: https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data

        Returns
        -------
        ts : `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries`
            Beijing PM2.5 data. Time column TIME_COL (hourly timestamp),
            value column "pm" (PM2.5 concentration, ug/m^3).
            Additional regressors: "dewp" (dew point, celsius),
            "temp" (temperature, celsius), "pres" (pressure, hPa),
            "cbwd" (combined wind direction), "iws" (cumulated wind
            speed, m/s), "is" (cumulated hours of snow), "ir" (cumulated
            hours of rain).
        """
        series = UnivariateTimeSeries()
        series.load_data(
            df=self.load_beijing_pm(),
            time_col=TIME_COL,
            value_col="pm",
            freq="H",
            regressor_cols=["dewp", "temp", "pres", "cbwd", "iws", "is", "ir"])
        return series

    def load_data_ts(self, data_name, **kwargs):
        """Loads a dataset by name from the internal data library.

        Parameters
        ----------
        data_name : `str`
            Dataset to load. One of "daily_peyton_manning",
            "hourly_parking", "hourly_bikesharing", "hourly_beijing_pm".

        Returns
        -------
        ts : `~greykite.framework.input.univariate_time_series.UnivariateTimeSeries`
            Has the requested ``data_name``.

        Raises
        ------
        ValueError
            If ``data_name`` is not one of the recognized datasets.
        """
        if data_name == "daily_peyton_manning":
            return self.load_peyton_manning_ts()
        if data_name == "hourly_parking":
            return self.load_parking_ts(**kwargs)
        if data_name == "hourly_bikesharing":
            return self.load_bikesharing_ts()
        if data_name == "hourly_beijing_pm":
            return self.load_beijing_pm_ts()
        data_inventory = self.get_data_inventory()
        raise ValueError(f"Input data name '{data_name}' is not recognized. "
                         f"Must be one of {data_inventory}.")
The provided code snippet includes necessary dependencies for implementing the `prepare_bikesharing_data` function. Write a Python function `def prepare_bikesharing_data()` to solve the following problem:
Loads bike-sharing data and adds proper regressors.
Here is the function:
def prepare_bikesharing_data():
    """Loads bike-sharing data and adds proper regressors.

    Returns
    -------
    result : `dict`
        Dictionary with "train_df" (the full data, with the last 7 response
        values blanked out to mimic an unknown forecast period) and
        "test_df" (the last 7 rows, with actual values).
    """
    value_col = "count"
    forecast_horizon = 7
    loader = DataLoaderTS()
    df = loader.load_bikesharing(
        agg_freq="daily",
        agg_func={"count": "sum", "tmin": "mean", "tmax": "mean", "pn": "mean"})
    # Shifts all values by a small amount so that zero counts
    # do not cause issues for MAPE.
    df[value_col] += 10
    # The last aggregated value might be incorrect since the
    # original data is hourly; drop it.
    df.drop(df.tail(1).index, inplace=True)
    # Only uses data from 2018 onwards for demonstration purposes
    # (shorter run time).
    df = df.loc[df["ts"] > "2018-01-01"]
    df.reset_index(drop=True, inplace=True)
    print(f"\n df.tail(): \n {df.tail()}")
    # Creates useful binary regressors from the raw precipitation.
    df["bin_pn"] = (df["pn"] > 5).map(float)
    df["bin_heavy_pn"] = (df["pn"] > 20).map(float)
    df.columns = [
        "ts",
        value_col,
        "regressor_tmin",
        "regressor_tmax",
        "regressor_pn",
        "regressor_bin_pn",
        "regressor_bin_heavy_pn"]
    train_df = df.copy()
    test_df = df.tail(forecast_horizon).reset_index(drop=True)
    # When using the pipeline (``fit_forecast``), fitting and prediction
    # happen in one step, and future regressor values must be present in
    # ``df``. Mimic the unknown forecast period by removing the response
    # values of the last ``forecast_horizon`` days.
    train_df.loc[(len(train_df) - forecast_horizon):len(train_df), value_col] = None
    print(f"train_df shape: \n {train_df.shape}")
    print(f"test_df shape: \n {test_df.shape}")
    print(f"train_df.tail(14): \n {train_df.tail(14)}")
    print(f"test_df: \n {test_df}")
    return {
        "train_df": train_df,
        "test_df": test_df}
167,516 | import plotly
import warnings
import pandas as pd
from greykite.framework.benchmark.data_loader_ts import DataLoaderTS
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import MetadataParam
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.model_templates import ModelTemplateEnum
from greykite.framework.utils.result_summary import summarize_grid_search_results
from greykite.common.viz.timeseries_plotting import plot_multivariate
forecast_horizon = 7
result = fit_forecast(
df=df,
time_col=time_col,
value_col=value_col)
print("\n Model Summary:")
print(trained_estimator.summary())
print(f"\n test_df: \n {test_df}")
print(f"altered test_df: \n {test_df}")
class EvaluationPeriodParam:
    """How to split data for evaluation.

    Each attribute mirrors a parameter of
    :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`;
    None means "use the pipeline default".
    """
    cv_expanding_window: Optional[bool] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_horizon: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_max_splits: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_min_train_periods: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_periods_between_splits: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_periods_between_train_test: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    cv_use_most_recent_splits: Optional[bool] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""
    periods_between_train_test: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`"""
    test_horizon: Optional[int] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`."""

    # NOTE(review): defined without a visible `@staticmethod` decorator
    # (possibly stripped during extraction) — presumably intended as a
    # static factory; call it on the class, not on an instance. Confirm
    # against the upstream autogen source.
    def from_dict(obj: Any) -> 'EvaluationPeriodParam':
        """Builds an ``EvaluationPeriodParam`` from a plain `dict`,
        validating each field as bool/int or None via the module-level
        ``from_union`` helpers."""
        assert isinstance(obj, dict)
        cv_expanding_window = from_union([from_bool, from_none], obj.get("cv_expanding_window"))
        cv_horizon = from_union([from_int, from_none], obj.get("cv_horizon"))
        cv_max_splits = from_union([from_int, from_none], obj.get("cv_max_splits"))
        cv_min_train_periods = from_union([from_int, from_none], obj.get("cv_min_train_periods"))
        cv_periods_between_splits = from_union([from_int, from_none], obj.get("cv_periods_between_splits"))
        cv_periods_between_train_test = from_union([from_int, from_none], obj.get("cv_periods_between_train_test"))
        cv_use_most_recent_splits = from_union([from_bool, from_none], obj.get("cv_use_most_recent_splits"))
        periods_between_train_test = from_union([from_int, from_none], obj.get("periods_between_train_test"))
        test_horizon = from_union([from_int, from_none], obj.get("test_horizon"))
        return EvaluationPeriodParam(
            cv_expanding_window=cv_expanding_window,
            cv_horizon=cv_horizon,
            cv_max_splits=cv_max_splits,
            cv_min_train_periods=cv_min_train_periods,
            cv_periods_between_splits=cv_periods_between_splits,
            cv_periods_between_train_test=cv_periods_between_train_test,
            cv_use_most_recent_splits=cv_use_most_recent_splits,
            periods_between_train_test=periods_between_train_test,
            test_horizon=test_horizon)

    def to_dict(self) -> dict:
        """Serializes this object to a plain `dict`, validating each field
        as bool/int or None via the module-level ``from_union`` helpers."""
        result: dict = {}
        result["cv_expanding_window"] = from_union([from_bool, from_none], self.cv_expanding_window)
        result["cv_horizon"] = from_union([from_int, from_none], self.cv_horizon)
        result["cv_max_splits"] = from_union([from_int, from_none], self.cv_max_splits)
        result["cv_min_train_periods"] = from_union([from_int, from_none], self.cv_min_train_periods)
        result["cv_periods_between_splits"] = from_union([from_int, from_none], self.cv_periods_between_splits)
        result["cv_periods_between_train_test"] = from_union([from_int, from_none], self.cv_periods_between_train_test)
        result["cv_use_most_recent_splits"] = from_union([from_bool, from_none], self.cv_use_most_recent_splits)
        result["periods_between_train_test"] = from_union([from_int, from_none], self.periods_between_train_test)
        result["test_horizon"] = from_union([from_int, from_none], self.test_horizon)
        return result
class MetadataParam:
    """Properties of the input data.

    Each attribute mirrors a parameter of
    :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`;
    None means "use the pipeline default".
    """
    anomaly_info: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None
    """Anomaly adjustment info. Anomalies in ``df`` are corrected before any forecasting is
    done. If None, no adjustments are made.

    See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
    """
    date_format: Optional[str] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`"""
    freq: Optional[str] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`"""
    time_col: Optional[str] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`"""
    train_end_date: Optional[str] = None
    """Last date to use for fitting the model. Forecasts are generated after this date.
    If None, it is set to the last date with a non-null value in value_col df.

    See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
    """
    value_col: Optional[str] = None
    """See :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`"""

    # NOTE(review): defined without a visible `@staticmethod` decorator
    # (possibly stripped during extraction) — presumably intended as a
    # static factory; call it on the class, not on an instance.
    def from_dict(obj: Any) -> 'MetadataParam':
        """Builds a ``MetadataParam`` from a plain `dict`."""
        assert isinstance(obj, dict)
        anomaly_info = from_union([
            # `from_dict` below resolves to the module-level helper, not this
            # method (function bodies do not see the enclosing class scope).
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict(lambda x: x, x),
            from_none], obj.get("anomaly_info"))
        date_format = from_union([from_str, from_none], obj.get("date_format"))
        freq = from_union([from_str, from_none], obj.get("freq"))
        time_col = from_union([from_str, from_none], obj.get("time_col"))
        train_end_date = from_union([from_str, from_none], obj.get("train_end_date"))
        value_col = from_union([from_str, from_none], obj.get("value_col"))
        return MetadataParam(anomaly_info, date_format, freq, time_col, train_end_date, value_col)

    def to_dict(self) -> dict:
        """Serializes this object to a plain `dict`."""
        result: dict = {}
        result["anomaly_info"] = from_union([
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict(lambda x: x, x),
            from_none], self.anomaly_info)
        result["date_format"] = from_union([from_str, from_none], self.date_format)
        result["freq"] = from_union([from_str, from_none], self.freq)
        result["time_col"] = from_union([from_str, from_none], self.time_col)
        result["train_end_date"] = from_union([from_str, from_none], self.train_end_date)
        result["value_col"] = from_union([from_str, from_none], self.value_col)
        return result
class ModelComponentsParam:
    """Parameters to tune the model.

    Each attribute is a template-specific dictionary of options for one
    model component; None means "use the template default".
    """
    autoregression: Optional[Dict[str, Any]] = None
    """For modeling autoregression, see template for details"""
    changepoints: Optional[Dict[str, Any]] = None
    """For modeling changepoints, see template for details"""
    custom: Optional[Dict[str, Any]] = None
    """Additional parameters used by template, see template for details"""
    events: Optional[Dict[str, Any]] = None
    """For modeling events, see template for details"""
    growth: Optional[Dict[str, Any]] = None
    """For modeling growth (trend), see template for details"""
    hyperparameter_override: Optional[Union[Dict, List[Optional[Dict]]]] = None
    """After the above model components are used to create a hyperparameter grid,
    the result is updated by this dictionary, to create new keys or override existing ones.
    Allows for complete customization of the grid search.
    """
    regressors: Optional[Dict[str, Any]] = None
    """For modeling regressors, see template for details"""
    lagged_regressors: Optional[Dict[str, Any]] = None
    """For modeling lagged regressors, see template for details"""
    seasonality: Optional[Dict[str, Any]] = None
    """For modeling seasonality, see template for details"""
    uncertainty: Optional[Dict[str, Any]] = None
    """For modeling uncertainty, see template for details"""

    # NOTE(review): defined without a visible `@staticmethod` decorator
    # (possibly stripped during extraction) — presumably intended as a
    # static factory; call it on the class, not on an instance.
    def from_dict(obj: Any) -> 'ModelComponentsParam':
        """Builds a ``ModelComponentsParam`` from a plain `dict`.

        Note: `from_dict` in the body resolves to the module-level helper,
        not this method (function bodies do not see the class scope).
        """
        assert isinstance(obj, dict)
        autoregression = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("autoregression"))
        changepoints = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("changepoints"))
        custom = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("custom"))
        events = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("events"))
        growth = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("growth"))
        hyperparameter_override = from_union([
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict_or_none(lambda x: x, x),
            from_none], obj.get("hyperparameter_override"))
        regressors = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("regressors"))
        lagged_regressors = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("lagged_regressors"))
        seasonality = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("seasonality"))
        uncertainty = from_union([lambda x: from_dict(lambda x: x, x), from_none], obj.get("uncertainty"))
        return ModelComponentsParam(
            autoregression=autoregression,
            changepoints=changepoints,
            custom=custom,
            events=events,
            growth=growth,
            hyperparameter_override=hyperparameter_override,
            regressors=regressors,
            lagged_regressors=lagged_regressors,
            seasonality=seasonality,
            uncertainty=uncertainty)

    def to_dict(self) -> dict:
        """Serializes this object to a plain `dict`."""
        result: dict = {}
        result["autoregression"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.autoregression)
        result["changepoints"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.changepoints)
        result["custom"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.custom)
        result["events"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.events)
        result["growth"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.growth)
        result["hyperparameter_override"] = from_union([
            lambda x: from_dict(lambda x: x, x),
            lambda x: from_list_dict_or_none(lambda x: x, x),
            from_none], self.hyperparameter_override)
        result["regressors"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.regressors)
        result["lagged_regressors"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.lagged_regressors)
        result["seasonality"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.seasonality)
        result["uncertainty"] = from_union([lambda x: from_dict(lambda x: x, x), from_none], self.uncertainty)
        return result
class ForecastConfig:
    """Config for providing parameters to the Forecast library"""
    computation_param: Optional[ComputationParam] = None
    """How to compute the result. See
    :class:`~greykite.framework.templates.autogen.forecast_config.ComputationParam`.
    """
    coverage: Optional[float] = None
    """Intended coverage of the prediction bands (0.0 to 1.0).
    If None, the upper/lower predictions are not returned.
    """
    evaluation_metric_param: Optional[EvaluationMetricParam] = None
    """What metrics to evaluate. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationMetricParam`.
    """
    evaluation_period_param: Optional[EvaluationPeriodParam] = None
    """How to split data for evaluation. See
    :class:`~greykite.framework.templates.autogen.forecast_config.EvaluationPeriodParam`.
    """
    forecast_horizon: Optional[int] = None
    """Number of periods to forecast into the future. Must be > 0.
    If None, default is determined from input data frequency.
    """
    forecast_one_by_one: Optional[Union[bool, int, List[int]]] = None
    """The options to activate the forecast one-by-one algorithm.
    See :class:`~greykite.sklearn.estimator.one_by_one_estimator.OneByOneEstimator`.
    Can be boolean, int, or list of int.
    If int, it has to be less than or equal to the forecast horizon.
    If list of int, the sum has to be the forecast horizon.
    """
    metadata_param: Optional[MetadataParam] = None
    """Information about the input data. See
    :class:`~greykite.framework.templates.autogen.forecast_config.MetadataParam`.
    """
    model_components_param: Optional[Union[ModelComponentsParam, List[Optional[ModelComponentsParam]]]] = None
    """Parameters to tune the model. Typically a single ModelComponentsParam, but the `SimpleSilverkiteTemplate`
    template also allows a list of ModelComponentsParam for grid search. A single ModelComponentsParam
    corresponds to one grid, and a list corresponds to a list of grids.
    See :class:`~greykite.framework.templates.autogen.forecast_config.ModelComponentsParam`.
    """
    model_template: Optional[Union[str, dataclass, List[Union[str, dataclass]]]] = None
    """Name of the model template. Typically a single string, but the `SimpleSilverkiteTemplate`
    template also allows a list of string for grid search.
    See :class:`~greykite.framework.templates.model_templates.ModelTemplateEnum`
    for valid names.
    """
    def from_dict(obj: Any) -> 'ForecastConfig':
        """Builds a ``ForecastConfig`` from a plain dictionary.

        Call as ``ForecastConfig.from_dict(d)``.
        ``model_components_param`` and ``model_template`` may each be given
        as a single value or as a list; both forms are preserved.
        """
        assert isinstance(obj, dict)
        computation_param = from_union([ComputationParam.from_dict, from_none], obj.get("computation_param"))
        coverage = from_union([from_float, from_none], obj.get("coverage"))
        evaluation_metric_param = from_union([EvaluationMetricParam.from_dict, from_none], obj.get("evaluation_metric_param"))
        evaluation_period_param = from_union([EvaluationPeriodParam.from_dict, from_none], obj.get("evaluation_period_param"))
        forecast_horizon = from_union([from_int, from_none], obj.get("forecast_horizon"))
        forecast_one_by_one = from_union([from_int, from_bool, from_none, from_list_int], obj.get("forecast_one_by_one"))
        metadata_param = from_union([MetadataParam.from_dict, from_none], obj.get("metadata_param"))
        if not isinstance(obj.get("model_components_param"), list):
            model_components_param = from_union([ModelComponentsParam.from_dict, from_none], obj.get("model_components_param"))
        else:
            model_components_param = [from_union([ModelComponentsParam.from_dict, from_none], mcp) for mcp in obj.get("model_components_param")]
        if not isinstance(obj.get("model_template"), list):
            model_template = from_union([from_str, from_none], obj.get("model_template"))
        else:
            model_template = [from_union([from_str, from_none], mt) for mt in obj.get("model_template")]
        return ForecastConfig(
            computation_param=computation_param,
            coverage=coverage,
            evaluation_metric_param=evaluation_metric_param,
            evaluation_period_param=evaluation_period_param,
            forecast_horizon=forecast_horizon,
            forecast_one_by_one=forecast_one_by_one,
            metadata_param=metadata_param,
            model_components_param=model_components_param,
            model_template=model_template)
    def to_dict(self) -> dict:
        """Serializes this ``ForecastConfig`` into a plain dictionary.

        ``model_components_param`` and ``model_template`` are always emitted
        as lists (a single value is wrapped in a one-element list).
        Does not modify ``self``.
        """
        result: dict = {}
        result["computation_param"] = from_union([lambda x: to_class(ComputationParam, x), from_none], self.computation_param)
        result["coverage"] = from_union([to_float, from_none], self.coverage)
        result["evaluation_metric_param"] = from_union([lambda x: to_class(EvaluationMetricParam, x), from_none], self.evaluation_metric_param)
        result["evaluation_period_param"] = from_union([lambda x: to_class(EvaluationPeriodParam, x), from_none], self.evaluation_period_param)
        result["forecast_horizon"] = from_union([from_int, from_none], self.forecast_horizon)
        result["forecast_one_by_one"] = from_union([from_int, from_bool, from_none, from_list_int], self.forecast_one_by_one)
        result["metadata_param"] = from_union([lambda x: to_class(MetadataParam, x), from_none], self.metadata_param)
        # Normalize to lists on local variables only, so that serialization
        # has no side effect on `self` (the previous implementation mutated
        # `self.model_components_param` / `self.model_template` in place).
        model_components_param = (self.model_components_param
                                  if isinstance(self.model_components_param, list)
                                  else [self.model_components_param])
        result["model_components_param"] = [from_union([lambda x: to_class(ModelComponentsParam, x), from_none], mcp) for mcp in model_components_param]
        model_template = (self.model_template
                          if isinstance(self.model_template, list)
                          else [self.model_template])
        result["model_template"] = [from_union([from_str, from_none], mt) for mt in model_template]
        return result
    def from_json(obj: Any) -> 'ForecastConfig':
        """Converts a json string to the corresponding instance of the `ForecastConfig` class.
        Raises ValueError if the input is not a json string.
        """
        try:
            forecast_dict = json.loads(obj)
        except Exception as exc:
            # Chain the original decoding error so the root cause stays in the traceback.
            raise ValueError(f"The input ({obj}) is not a json string.") from exc
        return ForecastConfig.from_dict(forecast_dict)
class Forecaster:
    """The main entry point to create a forecast.
    Call the :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`
    method to create a forecast. It takes a dataset and forecast configuration parameters.
    Notes
    -----
    This class can create forecasts using any of the model templates in
    `~greykite.framework.templates.model_templates.ModelTemplateEnum`.
    Model templates provide suitable default values for the available
    forecast estimators depending on the data characteristics.
    The model template is selected via the ``config.model_template``
    parameter to :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
    To add your own custom algorithms or template classes in our framework,
    pass ``model_template_enum`` and ``default_model_template_name``
    to the constructor.
    """
    def __init__(
            self,
            model_template_enum: Type[Enum] = ModelTemplateEnum,
            default_model_template_name: str = ModelTemplateEnum.AUTO.name):
        # Optional user input
        self.model_template_enum: Type[Enum] = model_template_enum
        """The available template names. An Enum class where names are template names, and values are of type
        `~greykite.framework.templates.model_templates.ModelTemplate`.
        """
        self.default_model_template_name: str = default_model_template_name
        """The default template name if not provided by ``config.model_template``.
        Should be a name in ``model_template_enum`` or "auto".
        Used by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.__get_template_class`.
        """
        # The following are set by `self.run_forecast_config`.
        self.template_class: Optional[Type[TemplateInterface]] = None
        """Template class used. Must implement
        `~greykite.framework.templates.template_interface.TemplateInterface`
        and be one of the classes in ``self.model_template_enum``.
        Available for debugging purposes.
        Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
        """
        self.template: Optional[TemplateInterface] = None
        """Instance of ``template_class`` used to run the forecast.
        See the docstring of the specific template class used.
        - `~greykite.framework.templates.simple_silverkite_template.SimpleSilverkiteTemplate`
        - `~greykite.framework.templates.silverkite_template.SilverkiteTemplate`
        - `~greykite.framework.templates.prophet_template.ProphetTemplate`
        - etc.
        Available for debugging purposes.
        Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
        """
        self.config: Optional[ForecastConfig] = None
        """`~greykite.framework.templates.autogen.forecast_config.ForecastConfig`
        passed to the template class.
        Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
        """
        self.pipeline_params: Optional[Dict] = None
        """Parameters used to call :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
        Available for debugging purposes.
        Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
        """
        self.forecast_result: Optional[ForecastResult] = None
        """The forecast result, returned by :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
        Set by :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`.
        """
    def __get_config_with_default_model_template_and_components(self, config: Optional[ForecastConfig] = None) -> ForecastConfig:
        """Gets config with default value for `model_template` and `model_components_param` if not provided.
        - model_template : default value is ``self.default_model_template_name``.
        - model_components_param : default value is an empty ModelComponentsParam().
        Parameters
        ----------
        config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None
            Config object for template class to use.
            See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
            If None, uses an empty ForecastConfig.
        Returns
        -------
        config : :class:`~greykite.framework.templates.model_templates.ForecastConfig`
            Input ``config`` with default ``model_template`` populated.
            If ``config.model_template`` is None, it is set to ``self.default_model_template_name``.
            If ``config.model_components_param`` is None, it is set to ``ModelComponentsParam()``.
        """
        # Deep copy so the caller's config object is never modified in place.
        config = deepcopy(config) if config is not None else ForecastConfig()
        # Unpacks list of a single element and sets default value if None.
        # NB: Does not call `apply_forecast_config_defaults`.
        # Only sets `model_template` and `model_components_param`.
        # The template class may have its own implementation of forecast config defaults.
        forecast_config_defaults = ForecastConfigDefaults()
        forecast_config_defaults.DEFAULT_MODEL_TEMPLATE = self.default_model_template_name
        config.model_template = forecast_config_defaults.apply_model_template_defaults(config.model_template)
        config.model_components_param = forecast_config_defaults.apply_model_components_defaults(config.model_components_param)
        return config
    def __get_template_class(self, config: Optional[ForecastConfig] = None) -> Type[TemplateInterface]:
        """Extracts template class (e.g. `SimpleSilverkiteTemplate`) from the config.
        Parameters
        ----------
        config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None
            Config object for template class to use.
            See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
        Returns
        -------
        template_class : Type[`~greykite.framework.templates.template_interface.TemplateInterface`]
            An implementation of `~greykite.framework.templates.template_interface.TemplateInterface`.
        """
        config = self.__get_config_with_default_model_template_and_components(config)
        if isinstance(config.model_template, list):
            # Parses `config.model_template` to extract the template class, with validation.
            # Handles a list of model templates.
            # Resolves each name recursively, then requires all names to map to
            # the same template class.
            template_classes = [self.__get_template_class(config=ForecastConfig(model_template=mt))
                                for mt in config.model_template]
            for tc in template_classes:
                if tc != template_classes[0]:
                    raise ValueError("All model templates must use the same template class. "
                                     f"Found {template_classes}")
            template_class = template_classes[0]
            if not template_class().allow_model_template_list:
                raise ValueError(f"The template class {template_class} does not allow `model_template` to be a list. "
                                 f"Pass a string instead.")
        else:
            # Handles other situations (string, data class).
            try:
                # Tries to look up in `self.model_template_enum`.
                template_class = self.model_template_enum[config.model_template].value.template_class
            except (KeyError, TypeError):
                # Template is not found in the enum.
                # NB: The logic in this clause is written for the default `self.model_template_enum`,
                # which contains only one template class that is a subclass of SimpleSilverkiteTemplate.
                # If a custom `self.model_template_enum` is provided it may be useful to override this logic.
                valid_names = ", ".join(self.model_template_enum.__dict__["_member_names_"])
                # Checks if template enum has a template class that supports generic naming
                # i.e. a subclass of `SimpleSilverkiteTemplate`.
                subclass_simple_silverkite = [mte for mte in self.model_template_enum
                                              if issubclass(mte.value.template_class, SimpleSilverkiteTemplate)]
                if len(subclass_simple_silverkite) > 0:
                    try:
                        log_message(f"Model template {config.model_template} is not found in the template enum. "
                                    f"Checking if model template is suitable for `SimpleSilverkiteTemplate`.", LoggingLevelEnum.DEBUG)
                        # Raises ValueError if the name does not follow the generic naming rules.
                        SimpleSilverkiteTemplate().check_template_type(config.model_template)
                        possible_template_classes = unique_elements_in_list([mte.value.template_class
                                                                            for mte in subclass_simple_silverkite])
                        if len(possible_template_classes) > 1:
                            log_message(f"Multiple template classes could be used for the model "
                                        f"template {config.model_template}: {possible_template_classes}", LoggingLevelEnum.DEBUG)
                        # arbitrarily take a class that supports generic naming
                        template_class = subclass_simple_silverkite[0].value.template_class
                        log_message(f"Using template class {template_class} for the model "
                                    f"template {config.model_template}", LoggingLevelEnum.DEBUG)
                    except ValueError:
                        raise ValueError(f"Model Template '{config.model_template}' is not recognized! Must be one of: {valid_names}"
                                         " or satisfy the `SimpleSilverkiteTemplate` rules.")
                else:
                    raise ValueError(f"Model Template '{config.model_template}' is not recognized! Must be one of: {valid_names}.")
        # Validates `model_components_param` compatibility with the template
        if not template_class().allow_model_components_param_list and isinstance(config.model_components_param, list):
            raise ValueError(f"Model template {config.model_template} does not support a list of `ModelComponentsParam`.")
        return template_class
    def __apply_forecast_one_by_one_to_pipeline_parameters(self):
        """If forecast_one_by_one is activated,
        1. replaces the estimator with ``OneByOneEstimator`` in pipeline.
        2. Adds one by one estimator's parameters to ``hyperparameter_grid``.
        """
        # NB: membership test uses `==`, so `0` compares equal to `False` and
        # also leaves one-by-one deactivated.
        if self.config.forecast_one_by_one not in (None, False):
            pipeline = get_basic_pipeline(
                estimator=OneByOneEstimator(
                    estimator=self.template.estimator.__class__.__name__,
                    forecast_horizon=self.config.forecast_horizon),
                score_func=self.template.score_func,
                score_func_greater_is_better=self.template.score_func_greater_is_better,
                agg_periods=self.template.config.evaluation_metric_param.agg_periods,
                agg_func=self.template.config.evaluation_metric_param.agg_func,
                relative_error_tolerance=self.template.config.evaluation_metric_param.relative_error_tolerance,
                coverage=self.template.config.coverage,
                null_model_params=self.template.config.evaluation_metric_param.null_model_params,
                regressor_cols=self.template.regressor_cols)
            self.pipeline_params["pipeline"] = pipeline
            # The grid may be a single dict or a list of dicts (multiple grids);
            # the one-by-one estimator parameters are added to every grid.
            if isinstance(self.pipeline_params["hyperparameter_grid"], list):
                for i in range(len(self.pipeline_params["hyperparameter_grid"])):
                    self.pipeline_params["hyperparameter_grid"][i]["estimator__forecast_horizon"] = [
                        self.config.forecast_horizon]
                    self.pipeline_params["hyperparameter_grid"][i]["estimator__estimator_map"] = [
                        self.config.forecast_one_by_one]
            else:
                self.pipeline_params["hyperparameter_grid"]["estimator__forecast_horizon"] = [
                    self.config.forecast_horizon]
                self.pipeline_params["hyperparameter_grid"]["estimator__estimator_map"] = [
                    self.config.forecast_one_by_one]
    def __get_model_template(
            self,
            df: pd.DataFrame,
            config: ForecastConfig) -> str:
        """Gets the default model template when "auto" is given.
        This is called after ``config`` has been filled with the default values
        and all fields are not None.
        Parameters
        ----------
        df : `pandas.DataFrame`
            Timeseries data to forecast.
            Contains columns [`time_col`, `value_col`], and optional regressor columns
            Regressor columns should include future values for prediction
        config : :class:`~greykite.framework.templates.model_templates.ForecastConfig`
            Config object for template class to use.
            Must be an instance with all fields not None.
            See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
        Returns
        -------
        model_template : `str`
            The corresponding model template.
        """
        # Gets the model template from config.
        # Model template should already be a string when this function is called,
        # which is handled by `self.__get_config_with_default_model_template_and_components`.
        model_template = config.model_template
        # Returns the model template if it's not "auto".
        if not isinstance(model_template, str) or model_template.lower() != "auto":
            return model_template
        # Handles the "auto" case.
        # Since `get_auto_silverkite_model_template` resolves "AUTO" to
        # a specific SILVERKITE template, the fallback template passed to it cannot be "AUTO".
        # We use SILVERKITE if `self.default_model_template_name` is "AUTO".
        default_template_for_auto = (self.default_model_template_name
                                     if self.default_model_template_name.lower() != "auto"
                                     else ModelTemplateEnum.SILVERKITE.name)
        model_template = get_auto_silverkite_model_template(
            df=df,
            default_model_template_name=default_template_for_auto,
            config=config
        )
        return model_template
    def apply_forecast_config(
            self,
            df: pd.DataFrame,
            config: Optional[ForecastConfig] = None) -> Dict:
        """Fetches pipeline parameters from the ``df`` and ``config``,
        but does not run the pipeline to generate a forecast.
        :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`
        calls this function and also runs the forecast pipeline.
        Available for debugging purposes to check pipeline parameters before
        running a forecast. Sets these attributes for debugging:
        - ``pipeline_params`` : the parameters passed to
          :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
        - ``template_class``, ``template`` : the template class used to generate the
          pipeline parameters.
        - ``config`` : the :class:`~greykite.framework.templates.model_templates.ForecastConfig`
          passed as input to template class, to translate into pipeline parameters.
        Provides basic validation on the compatibility of ``config.model_template``
        with ``config.model_components_param``.
        Parameters
        ----------
        df : `pandas.DataFrame`
            Timeseries data to forecast.
            Contains columns [`time_col`, `value_col`], and optional regressor columns
            Regressor columns should include future values for prediction
        config : :class:`~greykite.framework.templates.model_templates.ForecastConfig` or None
            Config object for template class to use.
            See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
        Returns
        -------
        pipeline_params : `dict` [`str`, `any`]
            Input to :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
        """
        # Resolution order: fill defaults, resolve "auto" against the data,
        # pick the template class, translate config into pipeline parameters,
        # then optionally swap in the one-by-one estimator.
        self.config = self.__get_config_with_default_model_template_and_components(config)
        self.config.model_template = self.__get_model_template(df=df, config=self.config)
        self.template_class = self.__get_template_class(self.config)
        self.template = self.template_class()
        self.pipeline_params = self.template.apply_template_for_pipeline_params(df=df, config=self.config)
        self.__apply_forecast_one_by_one_to_pipeline_parameters()
        return self.pipeline_params
    def run_forecast_config(
            self,
            df: pd.DataFrame,
            config: Optional[ForecastConfig] = None) -> ForecastResult:
        """Creates a forecast from input data and config.
        The result is also stored as ``self.forecast_result``.
        Parameters
        ----------
        df : `pandas.DataFrame`
            Timeseries data to forecast.
            Contains columns [`time_col`, `value_col`], and optional regressor columns
            Regressor columns should include future values for prediction
        config : :class:`~greykite.framework.templates.model_templates.ForecastConfig`
            Config object for template class to use.
            See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
        Returns
        -------
        forecast_result : :class:`~greykite.framework.pipeline.pipeline.ForecastResult`
            Forecast result, an object of type
            :class:`~greykite.framework.pipeline.pipeline.ForecastResult`.
            The output of :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`,
            according to the ``df`` and ``config`` configuration parameters.
        """
        # Translate config into pipeline parameters, then run the pipeline.
        pipeline_parameters = self.apply_forecast_config(
            df=df,
            config=config)
        self.forecast_result = forecast_pipeline(**pipeline_parameters)
        return self.forecast_result
    def run_forecast_json(
            self,
            df: pd.DataFrame,
            json_str: str = "{}") -> ForecastResult:
        """Calls :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`
        according to the ``json_str`` configuration parameters.
        Parameters
        ----------
        df : `pandas.DataFrame`
            Timeseries data to forecast.
            Contains columns [`time_col`, `value_col`], and optional regressor columns
            Regressor columns should include future values for prediction
        json_str : `str`
            Json string of the config object for Forecast to use.
            See :class:`~greykite.framework.templates.model_templates.ForecastConfig`.
        Returns
        -------
        forecast_result : :class:`~greykite.framework.pipeline.pipeline.ForecastResult`
            Forecast result.
            The output of :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`,
            called using the template class with specified configuration.
            See :class:`~greykite.framework.pipeline.pipeline.ForecastResult`
            for details.
        """
        # Parse json into a ForecastConfig, then delegate to `run_forecast_config`.
        config_dict = json.loads(json_str)
        config = forecast_config_from_dict(config_dict)
        self.run_forecast_config(
            df=df,
            config=config)
        return self.forecast_result
    def dump_forecast_result(
            self,
            destination_dir,
            object_name="object",
            dump_design_info=True,
            overwrite_exist_dir=False):
        """Dumps ``self.forecast_result`` to local pickle files.
        Parameters
        ----------
        destination_dir : `str`
            The pickle destination directory.
        object_name : `str`
            The stored file name.
        dump_design_info : `bool`, default True
            Whether to dump design info.
            Design info is a patsy class that includes the design matrix information.
            It takes longer to dump design info.
        overwrite_exist_dir : `bool`, default False
            What to do when ``destination_dir`` already exists.
            Removes the original directory when exists, if set to True.
        Raises
        ------
        ValueError
            If there is no forecast result to dump (``self.forecast_result`` is None).
        Returns
        -------
        This function writes to local files and does not return anything.
        """
        if self.forecast_result is None:
            raise ValueError("self.forecast_result is None, nothing to dump.")
        dump_obj(
            obj=self.forecast_result,
            dir_name=destination_dir,
            obj_name=object_name,
            dump_design_info=dump_design_info,
            overwrite_exist_dir=overwrite_exist_dir
        )
    def load_forecast_result(
            self,
            source_dir,
            load_design_info=True):
        """Loads ``self.forecast_result`` from local files created by ``self.dump_forecast_result``.
        Parameters
        ----------
        source_dir : `str`
            The source file directory.
        load_design_info : `bool`, default True
            Whether to load design info.
            Design info is a patsy class that includes the design matrix information.
            It takes longer to load design info.
        Raises
        ------
        ValueError
            If ``self.forecast_result`` is already populated (loading would overwrite it).
        """
        # Refuses to overwrite an existing result; use a fresh Forecaster to load.
        if self.forecast_result is not None:
            raise ValueError("self.forecast_result is not None, please create a new instance.")
        self.forecast_result = load_obj(
            dir_name=source_dir,
            obj=None,
            load_design_info=load_design_info
        )
class ModelTemplateEnum(Enum):
"""Available model templates.
Enumerates the possible values for the ``model_template`` attribute of
:class:`~greykite.framework.templates.model_templates.ForecastConfig`.
The value has type `~greykite.framework.templates.model_templates.ModelTemplate` which contains:
- the template class that recognizes the model_template. Template classes implement the
`~greykite.framework.templates.template_interface.TemplateInterface` interface.
- a plain-text description of what the model_template is for,
The description should be unique across enum members. The template class
can be shared, because a template class can recognize multiple model templates.
For example, the same template class may use different default values for
``ForecastConfig.model_components_param`` depending on ``ForecastConfig.model_template``.
Notes
-----
The template classes
`~greykite.framework.templates.silverkite_template.SilverkiteTemplate`
and `~greykite.framework.templates.prophet_template.ProphetTemplate`
recognize only the model templates explicitly enumerated here.
However, the `~greykite.framework.templates.simple_silverkite_template.SimpleSilverkiteTemplate`
template class allows additional model templates to be specified generically.
Any object of type `~greykite.framework.templates.simple_silverkite_template_config.SimpleSilverkiteTemplateOptions`
can be used as the model_template.
These generic model templates are valid but not enumerated here.
"""
SILVERKITE = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Silverkite model with automatic growth, seasonality, holidays, "
"automatic autoregression, normalization "
"and interactions. Best for hourly and daily frequencies."
"Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model with automatic growth, seasonality, holidays,
automatic autoregression, normalization
and interactions. Best for hourly and daily frequencies.
Uses `SimpleSilverkiteEstimator`.
"""
SILVERKITE_DAILY_1_CONFIG_1 = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Config 1 in template ``SILVERKITE_DAILY_1``. "
"Compared to ``SILVERKITE``, it uses parameters "
"specifically tuned for daily data and 1-day forecast.")
"""Config 1 in template ``SILVERKITE_DAILY_1``.
Compared to ``SILVERKITE``, it uses parameters
specifically tuned for daily data and 1-day forecast.
"""
SILVERKITE_DAILY_1_CONFIG_2 = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Config 2 in template ``SILVERKITE_DAILY_1``. "
"Compared to ``SILVERKITE``, it uses parameters "
"specifically tuned for daily data and 1-day forecast.")
"""Config 2 in template ``SILVERKITE_DAILY_1``.
Compared to ``SILVERKITE``, it uses parameters
specifically tuned for daily data and 1-day forecast.
"""
SILVERKITE_DAILY_1_CONFIG_3 = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Config 3 in template ``SILVERKITE_DAILY_1``. "
"Compared to ``SILVERKITE``, it uses parameters "
"specifically tuned for daily data and 1-day forecast.")
"""Config 3 in template ``SILVERKITE_DAILY_1``.
Compared to ``SILVERKITE``, it uses parameters
specifically tuned for daily data and 1-day forecast.
"""
SILVERKITE_DAILY_1 = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Silverkite model specifically tuned for daily data and 1-day forecast. "
"Contains 3 candidate configs for grid search, "
"optimized the seasonality and changepoint parameters.")
"""Silverkite model specifically tuned for daily data and 1-day forecast.
Contains 3 candidate configs for grid search,
optimized the seasonality and changepoint parameters.
"""
SILVERKITE_DAILY_90 = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Silverkite model specifically tuned for daily data with 90 days forecast horizon. "
"Contains 4 hyperparameter combinations for grid search. "
"Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model specifically tuned for daily data with 90 days forecast horizon.
Contains 4 hyperparameter combinations for grid search.
Uses `SimpleSilverkiteEstimator`.
"""
SILVERKITE_WEEKLY = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Silverkite model specifically tuned for weekly data. "
"Contains 4 hyperparameter combinations for grid search. "
"Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model specifically tuned for weekly data.
Contains 4 hyperparameter combinations for grid search.
Uses `SimpleSilverkiteEstimator`.
"""
SILVERKITE_MONTHLY = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Silverkite model specifically tuned for monthly data. "
"Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model specifically tuned for monthly data.
Uses `SimpleSilverkiteEstimator`.
"""
SILVERKITE_HOURLY_1 = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Silverkite model specifically tuned for hourly data with 1 hour forecast horizon. "
"Contains 3 hyperparameter combinations for grid search. "
"Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model specifically tuned for hourly data with 1 hour forecast horizon.
Contains 4 hyperparameter combinations for grid search.
Uses `SimpleSilverkiteEstimator`."""
SILVERKITE_HOURLY_24 = ModelTemplate(
template_class=SimpleSilverkiteTemplate,
description="Silverkite model specifically tuned for hourly data with 24 hours (1 day) forecast horizon. "
"Contains 4 hyperparameter combinations for grid search. "
"Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model specifically tuned for hourly data with 24 hours (1 day) forecast horizon.
Contains 4 hyperparameter combinations for grid search.
Uses `SimpleSilverkiteEstimator`."""
# Model template definitions: each assignment pairs a template class with a
# human-readable description; the bare string literal after each assignment is
# the existing attribute docstring and is kept as-is.
SILVERKITE_HOURLY_168 = ModelTemplate(
    template_class=SimpleSilverkiteTemplate,
    description="Silverkite model specifically tuned for hourly data with 168 hours (1 week) forecast horizon. "
                "Contains 4 hyperparameter combinations for grid search. "
                "Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model specifically tuned for hourly data with 168 hours (1 week) forecast horizon.
Contains 4 hyperparameter combinations for grid search.
Uses `SimpleSilverkiteEstimator`."""
SILVERKITE_HOURLY_336 = ModelTemplate(
    template_class=SimpleSilverkiteTemplate,
    description="Silverkite model specifically tuned for hourly data with 336 hours (2 weeks) forecast horizon. "
                "Contains 4 hyperparameter combinations for grid search. "
                "Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model specifically tuned for hourly data with 336 hours (2 weeks) forecast horizon.
Contains 4 hyperparameter combinations for grid search.
Uses `SimpleSilverkiteEstimator`.
"""
SILVERKITE_EMPTY = ModelTemplate(
    template_class=SimpleSilverkiteTemplate,
    description="Silverkite model with no component included by default. Fits only a constant intercept. "
                "Select and customize this template to add only the terms you want. "
                "Uses `SimpleSilverkiteEstimator`.")
"""Silverkite model with no component included by default. Fits only a constant intercept.
Select and customize this template to add only the terms you want.
Uses `SimpleSilverkiteEstimator`.
"""
SK = ModelTemplate(
    template_class=SilverkiteTemplate,
    description="Silverkite model with low-level interface. For flexible model tuning "
                "if SILVERKITE template is not flexible enough. Not for use out-of-the-box: "
                "customization is needed for good performance. Uses `SilverkiteEstimator`.")
"""Silverkite model with low-level interface. For flexible model tuning
if SILVERKITE template is not flexible enough. Not for use out-of-the-box:
customization is needed for good performance. Uses `SilverkiteEstimator`.
"""
PROPHET = ModelTemplate(
    template_class=ProphetTemplate,
    description="Prophet model with growth, seasonality, holidays, additional regressors "
                "and prediction intervals. Uses `ProphetEstimator`.")
"""Prophet model with growth, seasonality, holidays, additional regressors
and prediction intervals. Uses `ProphetEstimator`."""
AUTO_ARIMA = ModelTemplate(
    template_class=AutoArimaTemplate,
    description="Auto ARIMA model with fit and prediction intervals. "
                "Uses `AutoArimaEstimator`.")
"""ARIMA model with automatic order selection. Uses `AutoArimaEstimator`."""
SILVERKITE_TWO_STAGE = ModelTemplate(
template_class=MultistageForecastTemplate,
description="MultistageForecastTemplate's default model template. A two-stage model. "
"The first step takes a longer history and learns the long-term effects, "
"while the second step takes a shorter history and learns the short-term residuals."
)
"""Multistage forecast model's default model template. A two-stage model. "
"The first step takes a longer history and learns the long-term effects, "
"while the second step takes a shorter history and learns the short-term residuals.
"""
MULTISTAGE_EMPTY = ModelTemplate(
    template_class=MultistageForecastTemplate,
    description="Empty configuration for Multistage Forecast. "
                "All parameters will be exactly what user inputs. "
                # The original line ended with `."""` — a stray implicitly
                # concatenated empty string literal; removed (value unchanged).
                "Not to be used without overriding."
)
"""Empty configuration for Multistage Forecast.
All parameters will be exactly what user inputs.
Not to be used without overriding.
"""
AUTO = ModelTemplate(
    template_class=SimpleSilverkiteTemplate,
    description="Automatically selects the SimpleSilverkite model template that corresponds to the forecast "
                "problem. Selection is based on data frequency, forecast horizon, and CV configuration."
)
"""Automatically selects the SimpleSilverkite model template that corresponds to the forecast problem.
Selection is based on data frequency, forecast horizon, and CV configuration.
"""
LAG_BASED = ModelTemplate(
    template_class=LagBasedTemplate,
    description="Uses aggregated past observations as predictions. Examples are "
                "past day, week-over-week, week-over-3-week median, etc."
)
"""Uses aggregated past observations as predictions. Examples are
past day, week-over-week, week-over-3-week median, etc.
"""
SILVERKITE_WOW = ModelTemplate(
    template_class=MultistageForecastTemplate,
    description="The Silverkite+WOW model uses Silverkite to model yearly/quarterly/monthly seasonality, "
                "growth and holiday effects first, then uses week over week to estimate the residuals. "
                "The final prediction is the total of the two models. "
                "This avoids the normal week over week (WOW) estimation's weakness in capturing "
                "growth and holidays."
)
"""The Silverkite+WOW model uses Silverkite to model yearly/quarterly/monthly seasonality,
growth and holiday effects first, then uses week over week to estimate the residuals.
The final prediction is the total of the two models.
This avoids the normal week over week (WOW) estimation's weakness in capturing growth and holidays.
"""
def summarize_grid_search_results(
        grid_search,
        only_changing_params=True,
        combine_splits=True,
        decimals=None,
        score_func=EvaluationMetricEnum.MeanAbsolutePercentError.name,
        score_func_greater_is_better=False,
        cv_report_metrics=CV_REPORT_METRICS_ALL,
        column_order=None):
    """Summarizes CV results for each grid search parameter combination.

    While ``grid_search.cv_results_`` could be imported into a `pandas.DataFrame`
    without this function, the following conveniences are provided:

        - returns the correct ranks based on each metric's greater_is_better direction.
        - summarizes the hyperparameter space, only showing the parameters that change
        - combines split scores into a tuple to save table width
        - rounds the values to specified decimals
        - orders columns by type (test score, train score, metric, etc.)

    Parameters
    ----------
    grid_search : `~sklearn.model_selection.RandomizedSearchCV`
        Grid search output (fitted RandomizedSearchCV object).
    only_changing_params : `bool`, default True
        If True, only show parameters with multiple values in
        the hyperparameter_grid.
    combine_splits : `bool`, default True
        Whether to report split scores as a tuple in a single column.

        - If True, adds a column for the test splits scores for each
          requested metric. Adds a column with train split scores if those
          are available.
          For example, "split_train_score" would contain the values
          (split1_train_score, split2_train_score, split3_train_score)
          as a tuple.
        - If False, this summary column is not added.

        The original split columns are available either way.
    decimals : `int` or None, default None
        Number of decimal places to round to.
        If decimals is negative, it specifies the number of
        positions to the left of the decimal point.
        If None, does not round.
    score_func : `str` or callable, default ``EvaluationMetricEnum.MeanAbsolutePercentError.name``
        Score function used to select optimal model in CV.
        If a callable, takes arrays ``y_true``, ``y_pred`` and returns a float.
        If a string, must be either a
        `~greykite.common.evaluation.EvaluationMetricEnum` member name
        or `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`.
        Used in this function to fix the ``"rank_test_score"`` column if
        ``score_func_greater_is_better=False``.
        Should be the same as what was passed to
        :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`,
        or `~greykite.framework.pipeline.pipeline.forecast_pipeline`,
        or `~greykite.framework.pipeline.utils.get_hyperparameter_searcher`.
    score_func_greater_is_better : `bool`, default False
        True if ``score_func`` is a score function, meaning higher is better,
        and False if it is a loss function, meaning lower is better.
        Must be provided if ``score_func`` is a callable (custom function).
        Ignored if ``score_func`` is a string, because the direction is known.
        Used in this function to fix the ``"rank_test_score"`` column if
        ``score_func_greater_is_better=False``.
        Should be the same as what was passed to
        :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`,
        or `~greykite.framework.pipeline.pipeline.forecast_pipeline`,
        or `~greykite.framework.pipeline.utils.get_hyperparameter_searcher`.
    cv_report_metrics : `~greykite.framework.constants.CV_REPORT_METRICS_ALL`, or `list` [`str`], or None, default `~greykite.common.constants.CV_REPORT_METRICS_ALL`  # noqa: E501
        Additional metrics to show in the summary, besides the one specified by ``score_func``.
        If a metric is specified but not available, a warning will be given.
        Should be the same as what was passed to
        :py:meth:`~greykite.framework.templates.forecaster.Forecaster.run_forecast_config`,
        or `~greykite.framework.pipeline.pipeline.forecast_pipeline`,
        or `~greykite.framework.pipeline.utils.get_hyperparameter_searcher`,
        or a subset of computed metrics to show.
        If a list of strings, valid strings are
        `greykite.common.evaluation.EvaluationMetricEnum` member names
        and `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`.
    column_order : `list` [`str`] or None, default None
        How to order the columns.
        A list of regex to order column names, in greedy fashion. Column names
        matching the first item are placed first. Among remaining items, those
        matching the second item are placed next, etc.
        Use "*" as the last element to select all available columns, if desired.
        If None, uses default ordering::

            column_order = ["rank_test", "mean_test", "split_test", "mean_train",
                            "params", "param", "split_train", "time", ".*"]

    Notes
    -----
    Metrics are named in ``grid_search.cv_results_`` according to the ``scoring``
    parameter passed to `~sklearn.model_selection.RandomizedSearchCV`.
    ``"score"`` is the default used by sklearn for single metric evaluation.

    If a dictionary is provided to ``scoring``, as is the case through
    templates, then the metrics are named by its keys, and the
    metric used for selection is defined by ``refit``. The keys
    are derived from ``score_func`` and ``cv_report_metrics``
    in `~greykite.framework.pipeline.utils.get_scoring_and_refit`.

        - The key for ``score_func`` if it is a callable is
          `~greykite.common.constants.CUSTOM_SCORE_FUNC_NAME`.
        - The key for ``EvaluationMetricEnum`` member name is the short name
          from ``.get_metric_name()``.
        - The key for `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE`
          is `~greykite.common.constants.FRACTION_OUTSIDE_TOLERANCE_NAME`.

    Returns
    -------
    cv_results : `pandas.DataFrame`
        A summary of cross-validation results in tabular format.
        Each row corresponds to a set of parameters used in the grid search.
        The columns have the following format, where name is the canonical
        short name for the metric.

        ``"rank_test_{name}"`` : `int`
            The params ranked by mean_test_score (1 is best).
        ``"mean_test_{name}"`` : `float`
            Average test score.
        ``"split_test_{name}"`` : `list` [`float`]
            Test score on each split. [split 0, split 1, ...]
        ``"std_test_{name}"`` : `float`
            Standard deviation of test scores.
        ``"mean_train_{name}"`` : `float`
            Average train score.
        ``"split_train_{name}"`` : `list` [`float`]
            Train score on each split. [split 0, split 1, ...]
        ``"std_train_{name}"`` : `float`
            Standard deviation of train scores.
        ``"mean_fit_time"`` : `float`
            Average time to fit each CV split (in seconds).
        ``"std_fit_time"`` : `float`
            Std of time to fit each CV split (in seconds).
        ``"mean_score_time"`` : `float`
            Average time to score each CV split (in seconds).
        ``"std_score_time"`` : `float`
            Std of time to score each CV split (in seconds).
        ``"params"`` : `dict`
            The parameters used. If ``only_changing_params=True``,
            only shows the parameters which are not identical
            across all CV splits.
        ``"param_{pipeline__param__name}"`` : Any
            The value of pipeline parameter ``pipeline__param__name``
            for each row.
    """
    if column_order is None:
        column_order = ["rank_test", "mean_test", "split_test", "mean_train",
                        "params", "param", "split_train", "time", ".*"]
    cv_results = grid_search.cv_results_.copy()
    if only_changing_params:
        # Identifies the parameters that take more than one value in the grid.
        keep_params = set()
        seen_params = {}
        for params in cv_results["params"]:
            for k, v in params.items():
                if k in seen_params:
                    try:
                        assert_equal(v, seen_params[k])
                    except AssertionError:
                        # The values differ, so the parameter is part of the search space.
                        keep_params.add(k)
                else:
                    seen_params[k] = v
        # Overwrites `params` to show only the varying (name, value) pairs.
        selected_params = []
        for params in cv_results["params"]:
            explore_params = [(k, v) for k, v in params.items() if k in keep_params]
            selected_params.append(explore_params)
        cv_results["params"] = selected_params
    # Overwrites the ranks and computes combined split score columns
    # for the requested metrics.
    metric_list = [(score_func, score_func_greater_is_better, True)]
    if cv_report_metrics == CV_REPORT_METRICS_ALL:
        cv_report_metrics = EvaluationMetricEnum.__dict__["_member_names_"].copy()
        # `FRACTION_OUTSIDE_TOLERANCE` is computed only if `relative_error_tolerance`
        # is specified, so do not warn when it is unavailable (warn_metric=False).
        cv_report_metrics.append(FRACTION_OUTSIDE_TOLERANCE)
        metric_list += [(metric, None, False) for metric in cv_report_metrics]
    elif cv_report_metrics is not None:
        # greater_is_better is derived from the metric name.
        metric_list += [(metric, None, True) for metric in cv_report_metrics]
    keep_metrics = set()
    for metric, greater_is_better, warn_metric in metric_list:
        ranks_and_splits = get_ranks_and_splits(
            grid_search=grid_search,
            score_func=metric,
            greater_is_better=greater_is_better,
            combine_splits=combine_splits,
            decimals=decimals,
            warn_metric=warn_metric)
        short_name = ranks_and_splits["short_name"]
        if ranks_and_splits["ranks"] is not None:
            cv_results[f"rank_test_{short_name}"] = ranks_and_splits["ranks"]
        if ranks_and_splits["split_train"] is not None:
            cv_results[f"split_train_{short_name}"] = ranks_and_splits["split_train"]
        if ranks_and_splits["split_test"] is not None:
            cv_results[f"split_test_{short_name}"] = ranks_and_splits["split_test"]
        keep_metrics.add(short_name)
    # Creates DataFrame and orders the columns
    # (dicts preserve insertion order in Python 3.7+).
    cv_results_df = pd.DataFrame(cv_results)
    available_cols = list(cv_results_df.columns)
    # Removes columns of metrics that were not selected.
    all_metrics = set(col.replace("mean_test_", "") for col in cv_results.keys()
                      if re.search("mean_test_", col))
    remove_metrics = all_metrics - keep_metrics
    # `re.escape` prevents metric names containing regex special characters
    # (e.g. a custom score function name) from corrupting the filter pattern.
    remove_regex = "|".join(re.escape(metric) for metric in remove_metrics)
    if remove_regex:
        available_cols = [col for col in available_cols
                          if not re.search(remove_regex, col)]
    # Orders the columns greedily according to `column_order`.
    ordered_cols = []
    for regex in column_order:
        selected_cols = [col for col in available_cols
                         if col not in ordered_cols and re.search(regex, col)]
        ordered_cols += selected_cols
    cv_results_df = cv_results_df[ordered_cols]
    if decimals is not None:
        cv_results_df = cv_results_df.round(decimals)
    return cv_results_df
The provided code snippet includes the dependencies needed to implement the `fit_forecast` function. Write a Python function `def fit_forecast(df, time_col, value_col)` that solves the following problem:
Fit a daily model for this use case. The daily model is a generic Silverkite model with regressors.
Here is the function:
def fit_forecast(
        df,
        time_col,
        value_col,
        forecast_horizon=None):
    """Fits a daily model for this use case.

    The daily model is a generic Silverkite model with regressors
    (minimum temperature and precipitation indicators), automatic trend and
    seasonality changepoints, aggregated autoregression with fast simulation,
    US holidays, and ridge regularization.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Input time series. Must contain ``time_col``, ``value_col`` and the
        regressor columns ``"regressor_tmin"``, ``"regressor_bin_pn"``,
        ``"regressor_bin_heavy_pn"``.
    time_col : `str`
        Name of the timestamp column in ``df``.
    value_col : `str`
        Name of the value column in ``df``.
    forecast_horizon : `int` or None, default None
        Number of days to forecast; also used as the CV test horizon.
        If None, falls back to a module-level ``forecast_horizon`` variable
        (the original implementation read an undeclared global and raised
        ``NameError`` when it was missing), or 30 if no such global exists.

    Returns
    -------
    result
        Output of ``Forecaster.run_forecast_config`` (fitted model, forecast
        and grid search). A two-column CV summary is printed as a side effect.
    """
    if forecast_horizon is None:
        # Backward compatibility: prefer the module-level global when defined.
        forecast_horizon = globals().get("forecast_horizon", 30)
    meta_data_params = MetadataParam(
        time_col=time_col,
        value_col=value_col,
        freq="D",
    )
    # Autoregression: short lags plus weekly aggregated lags; fast simulation
    # keeps multi-step-ahead prediction cheap.
    autoregression = {
        "autoreg_dict": {
            "lag_dict": {"orders": [1, 2, 3]},
            "agg_lag_dict": {
                "orders_list": [[7, 7*2, 7*3]],
                "interval_list": [(1, 7), (8, 7*2)]},
            "series_na_fill_func": lambda s: s.bfill().ffill()},
        "fast_simulation": True
    }
    # Changepoints configuration.
    # The config includes changepoints both in trend and seasonality.
    changepoints = {
        "changepoints_dict": {
            "method": "auto",
            "yearly_seasonality_order": 15,
            "resample_freq": "2D",
            "actual_changepoint_min_distance": "100D",
            "potential_changepoint_distance": "50D",
            "no_changepoint_distance_from_end": "50D"},
        "seasonality_changepoints_dict": {
            "method": "auto",
            "yearly_seasonality_order": 15,
            "resample_freq": "2D",
            "actual_changepoint_min_distance": "100D",
            "potential_changepoint_distance": "50D",
            "no_changepoint_distance_from_end": "50D"}
    }
    regressor_cols = [
        "regressor_tmin",
        "regressor_bin_pn",
        "regressor_bin_heavy_pn",
    ]
    # Model parameters.
    model_components = ModelComponentsParam(
        growth=dict(growth_term="linear"),
        seasonality=dict(
            yearly_seasonality=[15],
            quarterly_seasonality=[False],
            monthly_seasonality=[False],
            weekly_seasonality=[7],
            daily_seasonality=[False]
        ),
        custom=dict(
            fit_algorithm_dict=dict(fit_algorithm="ridge"),
            extra_pred_cols=None,
            normalize_method="statistical"
        ),
        regressors=dict(regressor_cols=regressor_cols),
        autoregression=autoregression,
        uncertainty=dict(uncertainty_dict=None),
        events=dict(holiday_lookup_countries=["US"]),
        changepoints=changepoints
    )
    # Evaluation is done on the same ``forecast_horizon`` as desired for output.
    evaluation_period_param = EvaluationPeriodParam(
        test_horizon=None,
        cv_horizon=forecast_horizon,
        cv_min_train_periods=365*2,
        cv_expanding_window=True,
        cv_use_most_recent_splits=False,
        cv_periods_between_splits=None,
        cv_periods_between_train_test=0,
        cv_max_splits=5,
    )
    # Runs the forecast model using the "SILVERKITE" template.
    forecaster = Forecaster()
    result = forecaster.run_forecast_config(
        df=df,
        config=ForecastConfig(
            model_template=ModelTemplateEnum.SILVERKITE.name,
            coverage=0.95,
            forecast_horizon=forecast_horizon,
            metadata_param=meta_data_params,
            evaluation_period_param=evaluation_period_param,
            model_components_param=model_components
        )
    )
    # Reshapes the CV summary into a two-column (err_name, err_value) table.
    grid_search = result.grid_search
    cv_results = summarize_grid_search_results(
        grid_search=grid_search,
        decimals=2,
        cv_report_metrics=None)
    # `transpose()` already returns a DataFrame; no re-wrapping needed.
    cv_results = cv_results.transpose()
    cv_results.columns = ["err_value"]
    cv_results["err_name"] = cv_results.index
    cv_results = cv_results.reset_index(drop=True)
    cv_results = cv_results[["err_name", "err_value"]]
    print(f"\n cv_results: \n {cv_results}")
    return result
167,517 | import sys
from py12306.app import *
from py12306.helpers.cdn import Cdn
from py12306.log.common_log import CommonLog
from py12306.query.query import Query
from py12306.user.user import User
from py12306.web.web import Web
def test():
    """
    Run the feature self-check.

    Covers:
        account/password verification (captcha)
        seat verification
        passenger verification
        voice verification-code check
        notification check
    :return:
    """
    Const.IS_TEST = True
    Config.OUT_PUT_LOG_TO_FILE_ENABLED = False
    # Notification tests are opt-in via a dedicated flag.
    if any(flag in sys.argv for flag in ('--test-notification', '-n')):
        Const.IS_TEST_NOTIFICATION = True
def load_argvs():
    """Parse command-line flags: `--test`/`-t` runs the self-check,
    `--config <path>`/`-c <path>` overrides the config file location.

    Fixes over the original: `if config_index:` is now an explicit
    `is not None` check, and a flag given without a following value no
    longer raises IndexError (previously `.pop()` on an empty slice).
    """
    if '--test' in sys.argv or '-t' in sys.argv:
        test()
    config_index = None
    if '--config' in sys.argv:
        config_index = sys.argv.index('--config')
    if '-c' in sys.argv:
        config_index = sys.argv.index('-c')
    if config_index is not None and config_index + 1 < len(sys.argv):
        Config.CONFIG_FILE = sys.argv[config_index + 1]
167,518 | import signal
import sys
from py12306.helpers.func import *
from py12306.config import Config
from py12306.helpers.notification import Notification
from py12306.log.common_log import CommonLog
from py12306.log.order_log import OrderLog
Config:
IS_DEBUG = False
# 查询任务
# 查询间隔
# 查询重试次数
# 用户心跳检测间隔
# 多线程查询
# 打码平台账号
#用户打码平台地址
# 输出日志到文件
# Query
# 语音验证码
# 集群配置
# 钉钉配置
# Telegram推送配置
# Bark 推送配置
# ServerChan和PushBear配置
# 邮箱配置
# CDN
# Default time out
# @classmethod
# def keep_work(cls):
# self = cls()
class CommonLog(BaseLog):
    """Application-wide log: welcome banner, configuration summary and
    notification-test messages.

    NOTE(review): the ``cls``-style methods below (``print_welcome`` and
    friends) carried no ``@classmethod`` decorator in this chunk, yet each
    instantiates via ``self = cls()`` and is called on the class elsewhere —
    the decorators are restored here.
    """
    # Re-declared here; without the re-declaration, messages were printed
    # repeatedly (root cause unknown — behavior kept from the original).
    logs = []
    thread_logs = {}
    quick_log = []

    # User-facing message templates (kept verbatim).
    MESSAGE_12306_IS_CLOSED = '当前时间: {} | 12306 休息时间,程序将在明天早上 6 点自动运行'
    MESSAGE_RETRY_AUTH_CODE = '{} 秒后重新获取验证码'
    MESSAGE_EMPTY_APP_CODE = '无法发送语音消息,未填写验证码接口 appcode'
    MESSAGE_VOICE_API_FORBID = '语音消息发送失败,请检查 appcode 是否填写正确或 套餐余额是否充足'
    MESSAGE_VOICE_API_SEND_FAIL = '语音消息发送失败,错误原因 {}'
    MESSAGE_VOICE_API_SEND_SUCCESS = '语音消息发送成功! 接口返回信息 {} '
    MESSAGE_CHECK_AUTO_CODE_FAIL = '请配置打码账号的账号密码'
    MESSAGE_CHECK_EMPTY_USER_ACCOUNT = '请配置 12306 账号密码'
    MESSAGE_TEST_SEND_VOICE_CODE = '正在测试发送语音验证码...'
    MESSAGE_TEST_SEND_EMAIL = '正在测试发送邮件...'
    MESSAGE_TEST_SEND_DINGTALK = '正在测试发送钉钉消息...'
    MESSAGE_TEST_SEND_TELEGRAM = '正在测试推送到Telegram...'
    MESSAGE_TEST_SEND_SERVER_CHAN = '正在测试发送ServerChan消息...'
    MESSAGE_TEST_SEND_PUSH_BEAR = '正在测试发送PushBear消息...'
    MESSAGE_TEST_SEND_PUSH_BARK = '正在测试发送Bark消息...'
    MESSAGE_CONFIG_FILE_DID_CHANGED = '配置文件已修改,正在重新加载中\n'
    MESSAGE_API_RESPONSE_CAN_NOT_BE_HANDLE = '接口返回错误'
    MESSAGE_SEND_EMAIL_SUCCESS = '邮件发送成功,请检查收件箱'
    MESSAGE_SEND_EMAIL_FAIL = '邮件发送失败,请手动检查配置,错误原因 {}'
    MESSAGE_SEND_EMAIL_WITH_QRCODE_SUCCESS = '二维码邮件发送成功,请检查收件箱扫描登陆'
    MESSAGE_SEND_TELEGRAM_SUCCESS = 'Telegram推送成功'
    MESSAGE_SEND_TELEGRAM_FAIL = 'Telegram推送失败,错误原因 {}'
    MESSAGE_SEND_SERVER_CHAN_SUCCESS = '发送成功,请检查微信'
    MESSAGE_SEND_SERVER_CHAN_FAIL = 'ServerChan发送失败,请检查KEY'
    MESSAGE_SEND_PUSH_BEAR_SUCCESS = '发送成功,请检查微信'
    MESSAGE_SEND_PUSH_BEAR_FAIL = 'PushBear发送失败,请检查KEY'
    MESSAGE_SEND_BARK_SUCCESS = 'Bark推送成功'
    MESSAGE_SEND_BARK_FAIL = 'Bark推送失败,错误原因 {}'
    MESSAGE_OUTPUT_TO_FILE_IS_UN_ENABLE = '请先打开配置项中的:OUT_PUT_LOG_TO_FILE_ENABLED ( 输出到文件 )'
    MESSAGE_GET_RESPONSE_FROM_FREE_AUTO_CODE = '从免费打码获取结果失败'
    MESSAGE_RESPONSE_EMPTY_ERROR = '网络错误'
    MESSAGE_CDN_START_TO_CHECK = '正在筛选 {} 个 CDN...'
    MESSAGE_CDN_START_TO_RECHECK = '正在重新筛选 {} 个 CDN...当前时间 {}\n'
    MESSAGE_CDN_RESTORE_SUCCESS = 'CDN 恢复成功,上次检测 {}\n'
    MESSAGE_CDN_CHECKED_SUCCESS = '# CDN 检测完成,可用 CDN {} #\n'
    MESSAGE_CDN_CLOSED = '# CDN 已关闭 #'

    def __init__(self):
        super().__init__()
        self.init_data()

    def init_data(self):
        # No extra per-instance state; hook kept for interface symmetry.
        pass

    @classmethod
    def print_welcome(cls):
        """Prints the startup banner and basic runtime-mode notices."""
        self = cls()
        self.add_quick_log('######## py12306 购票助手,本程序为开源工具,请勿用于商业用途 ########')
        if Const.IS_TEST:
            self.add_quick_log()
            self.add_quick_log('当前为测试模式,程序运行完成后自动结束')
        if not Const.IS_TEST and Config().OUT_PUT_LOG_TO_FILE_ENABLED:
            self.add_quick_log()
            self.add_quick_log('日志已输出到文件中: {}'.format(Config().OUT_PUT_LOG_TO_FILE_PATH))
        if Config().WEB_ENABLE:
            self.add_quick_log()
            self.add_quick_log('WEB 管理页面已开启,请访问 主机地址 + 端口 {} 进行查看'.format(Config().WEB_PORT))
        self.add_quick_log()
        self.flush(file=False, publish=False)
        return self

    @classmethod
    def print_configs(cls):
        """Prints the active configuration summary."""
        self = cls()
        enable = '已开启'
        disable = '未开启'
        self.add_quick_log('**** 当前配置 ****')
        self.add_quick_log('多线程查询: {}'.format(get_true_false_text(Config().QUERY_JOB_THREAD_ENABLED, enable, disable)))
        self.add_quick_log('CDN 状态: {}'.format(get_true_false_text(Config().CDN_ENABLED, enable, disable))).flush()
        self.add_quick_log('通知状态:')
        if Config().NOTIFICATION_BY_VOICE_CODE:
            self.add_quick_log(
                '语音验证码: {}'.format(get_true_false_text(Config().NOTIFICATION_BY_VOICE_CODE, enable, disable)))
        if Config().EMAIL_ENABLED:
            self.add_quick_log('邮件通知: {}'.format(get_true_false_text(Config().EMAIL_ENABLED, enable, disable)))
        if Config().DINGTALK_ENABLED:
            self.add_quick_log('钉钉通知: {}'.format(get_true_false_text(Config().DINGTALK_ENABLED, enable, disable)))
        if Config().TELEGRAM_ENABLED:
            self.add_quick_log('Telegram通知: {}'.format(get_true_false_text(Config().TELEGRAM_ENABLED, enable, disable)))
        if Config().SERVERCHAN_ENABLED:
            self.add_quick_log(
                'ServerChan通知: {}'.format(get_true_false_text(Config().SERVERCHAN_ENABLED, enable, disable)))
        if Config().BARK_ENABLED:
            self.add_quick_log('Bark通知: {}'.format(get_true_false_text(Config().BARK_ENABLED, enable, disable)))
        if Config().PUSHBEAR_ENABLED:
            self.add_quick_log(
                'PushBear通知: {}'.format(get_true_false_text(Config().PUSHBEAR_ENABLED, enable, disable)))
        self.add_quick_log().flush(sep='\t\t')
        self.add_quick_log('查询间隔: {} 秒'.format(Config().QUERY_INTERVAL))
        self.add_quick_log('用户心跳检测间隔: {} 秒'.format(Config().USER_HEARTBEAT_INTERVAL))
        self.add_quick_log('WEB 管理页面: {}'.format(get_true_false_text(Config().WEB_ENABLE, enable, disable)))
        if Config().is_cluster_enabled():
            from py12306.cluster.cluster import Cluster
            self.add_quick_log('分布式查询: {}'.format(get_true_false_text(Config().is_cluster_enabled(), enable, enable)))
            self.add_quick_log('节点名称: {}'.format(Cluster().node_name))
            self.add_quick_log('节点是否主节点: {}'.format(get_true_false_text(Config().is_master(), '是', '否')))
            self.add_quick_log(
                '子节点提升为主节点: {}'.format(get_true_false_text(Config().NODE_SLAVE_CAN_BE_MASTER, enable, disable)))
        self.add_quick_log()
        self.flush()
        return self

    @classmethod
    def print_test_complete(cls):
        """Prints the end-of-self-test marker."""
        self = cls()
        self.add_quick_log('# 测试完成,请检查输出是否正确 #')
        self.flush(publish=False)
        return self

    @classmethod
    def print_auto_code_fail(cls, reason):
        """Logs a captcha-recognition failure with its reason."""
        self = cls()
        self.add_quick_log('打码失败: 错误原因 {reason}'.format(reason=reason))
        self.flush()
        return self

    @classmethod
    def print_auth_code_info(cls, reason):
        """Logs informational output from the captcha service."""
        self = cls()
        self.add_quick_log('打码信息: {reason}'.format(reason=reason))
        self.flush()
        return self
def app_available_check():
    """Sleeps through 12306's nightly maintenance window; returns True once open.

    Returns True immediately in debug mode. Otherwise, when the current time
    falls in the closed window, logs a notice and blocks until the reopening
    time before returning True.

    Bug fix: the first branch previously tested ``now.hour > 23``, which can
    never be true (datetime hours are 0-23), so the 23:30-24:00 window was
    never detected; it is now ``now.hour == 23 and now.minute >= 30``.
    """
    if Config().IS_DEBUG:
        return True
    now = time_now()
    # NOTE(review): the user-facing message announces reopening at 6:00 while
    # the code resumes at 5:00, and the window only applies on weekday()==1
    # (Tuesday) — confirm the intended schedule.
    if now.weekday() == 1 and (now.hour == 23 and now.minute >= 30 or now.hour < 5):
        CommonLog.add_quick_log(CommonLog.MESSAGE_12306_IS_CLOSED.format(time_now())).flush()
        open_time = datetime.datetime(now.year, now.month, now.day, 5)
        if open_time < now:
            # Already past today's opening hour, so wait for tomorrow's.
            open_time += datetime.timedelta(1)
        sleep((open_time - now).seconds)
    elif 1 < now.hour < 5:
        CommonLog.add_quick_log(CommonLog.MESSAGE_12306_IS_CLOSED.format(time_now())).flush()
        open_time = datetime.datetime(now.year, now.month, now.day, 5)
        sleep((open_time - now).seconds)
    return True
167,519 | from flask import Blueprint, request
from flask.json import jsonify
from flask_jwt_extended import (jwt_required)
from py12306.config import Config
from py12306.query.query import Query
from py12306.user.user import User
Config:
IS_DEBUG = False
# 查询任务
# 查询间隔
# 查询重试次数
# 用户心跳检测间隔
# 多线程查询
# 打码平台账号
#用户打码平台地址
# 输出日志到文件
# Query
# 语音验证码
# 集群配置
# 钉钉配置
# Telegram推送配置
# Bark 推送配置
# ServerChan和PushBear配置
# 邮箱配置
# CDN
# Default time out
# @classmethod
# def keep_work(cls):
# self = cls()
class Query:
    """
    Remaining-ticket query scheduler.

    Holds the query jobs, the shared HTTP session and the 12306 device
    fingerprint cookies, and runs the jobs either sequentially or on
    threads depending on configuration.

    NOTE(review): ``run``, ``check_before_run``, ``wait_for_ready``,
    ``job_by_name``, ``job_by_account_key`` and ``get_query_api_type`` take
    ``cls`` and use ``self = cls()`` but carry no ``@classmethod`` decorator
    in this chunk — presumably stripped during extraction; verify against
    the original file.
    NOTE(review): ``job_by_name`` is defined twice; the second definition
    silently shadows the first.
    NOTE(review): nesting below was reconstructed from a flattened source —
    verify against the original file.
    """
    jobs = []
    query_jobs = []
    session = {}
    # Query interval (per-job sleep configuration).
    interval = {}
    cluster = None
    is_in_thread = False
    retry_time = 3
    is_ready = False
    api_type = None  # Query api url, Current know value leftTicket/queryX | leftTicket/queryZ

    def __init__(self):
        self.session = Request()
        self.request_device_id()
        self.cluster = Cluster()
        self.update_query_interval()
        self.update_query_jobs()
        self.get_query_api_type()

    def update_query_interval(self, auto=False):
        # Rebuilds the interval from config; when triggered by a config
        # change (auto=True), pushes the new interval to every running job.
        self.interval = init_interval_by_number(Config().QUERY_INTERVAL)
        if auto:
            jobs_do(self.jobs, 'update_interval')

    def update_query_jobs(self, auto=False):
        # Reloads job definitions from config; on auto-reload also refreshes
        # running jobs and re-validates passengers (master node only).
        self.query_jobs = Config().QUERY_JOBS
        if auto:
            QueryLog.add_quick_log(QueryLog.MESSAGE_JOBS_DID_CHANGED).flush()
            self.refresh_jobs()
            if not Config().is_slave():
                jobs_do(self.jobs, 'check_passengers')

    def run(cls):
        self = cls()
        app_available_check()
        self.start()
        pass

    def check_before_run(cls):
        self = cls()
        self.init_jobs()
        self.is_ready = True

    def start(self):
        # return # DEBUG
        QueryLog.init_data()
        stay_second(3)
        # Multi-threaded vs sequential job execution.
        while True:
            if Config().QUERY_JOB_THREAD_ENABLED:  # multi-threaded
                if not self.is_in_thread:
                    self.is_in_thread = True
                    create_thread_and_run(jobs=self.jobs, callback_name='run', wait=Const.IS_TEST)
                    if Const.IS_TEST: return
                stay_second(self.retry_time)
            else:
                if not self.jobs: break
                self.is_in_thread = False
                jobs_do(self.jobs, 'run')
                if Const.IS_TEST: return
        # while True:
        #     app_available_check()
        #     if Config().QUERY_JOB_THREAD_ENABLED:  # multi-threaded
        #         create_thread_and_run(jobs=self.jobs, callback_name='run')
        #     else:
        #         for job in self.jobs: job.run()
        #     if Const.IS_TEST: return
        #     self.refresh_jobs()  # refresh the jobs

    def refresh_jobs(self):
        """
        Re-syncs running jobs with the configured job list: keeps unchanged
        jobs, creates new ones, and destroys jobs removed from config.
        :return:
        """
        allow_jobs = []
        for job in self.query_jobs:
            id = md5(job)
            job_ins = objects_find_object_by_key_value(self.jobs, 'id', id)  # [1 ,2]
            if not job_ins:
                job_ins = self.init_job(job)
                if Config().QUERY_JOB_THREAD_ENABLED:  # re-attach new job to its own thread
                    create_thread_and_run(jobs=job_ins, callback_name='run', wait=Const.IS_TEST)
            allow_jobs.append(job_ins)
        for job in self.jobs:  # stop jobs that were removed from config
            if job not in allow_jobs: job.destroy()
        QueryLog.print_init_jobs(jobs=self.jobs)

    def init_jobs(self):
        for job in self.query_jobs:
            self.init_job(job)
        QueryLog.print_init_jobs(jobs=self.jobs)

    def init_job(self, job):
        job = Job(info=job, query=self)
        self.jobs.append(job)
        return job

    def request_device_id(self, force_renew=False):
        """
        Fetches the encrypted browser-fingerprint ID (the RAIL_DEVICEID /
        RAIL_EXPIRATION cookies 12306 requires), skipping the request while
        the cached cookie is still valid.
        :return:
        """
        expire_time = self.session.cookies.get('RAIL_EXPIRATION')
        if not force_renew and expire_time and int(expire_time) - time_int_ms() > 0:
            return
        if 'pjialin' not in API_GET_BROWSER_DEVICE_ID:
            return self.request_device_id2()
        response = self.session.get(API_GET_BROWSER_DEVICE_ID)
        if response.status_code == 200:
            try:
                result = json.loads(response.text)
                response = self.session.get(b64decode(result['id']).decode())
                if response.text.find('callbackFunction') >= 0:
                    # Strip the JSONP wrapper before parsing.
                    result = response.text[18:-2]
                    result = json.loads(result)
                if not Config().is_cache_rail_id_enabled():
                    self.session.cookies.update({
                        'RAIL_EXPIRATION': result.get('exp'),
                        'RAIL_DEVICEID': result.get('dfp'),
                    })
                else:
                    self.session.cookies.update({
                        'RAIL_EXPIRATION': Config().RAIL_EXPIRATION,
                        'RAIL_DEVICEID': Config().RAIL_DEVICEID,
                    })
            except:  # NOTE(review): bare except retries on ANY error, incl. KeyboardInterrupt
                return self.request_device_id()
        else:
            return self.request_device_id()

    def request_device_id2(self):
        # Fallback fingerprint fetch that talks to the endpoint directly
        # with a desktop browser User-Agent.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36"
        }
        self.session.headers.update(headers)
        response = self.session.get(API_GET_BROWSER_DEVICE_ID)
        if response.status_code == 200:
            try:
                if response.text.find('callbackFunction') >= 0:
                    # Strip the JSONP wrapper before parsing.
                    result = response.text[18:-2]
                    result = json.loads(result)
                if not Config().is_cache_rail_id_enabled():
                    self.session.cookies.update({
                        'RAIL_EXPIRATION': result.get('exp'),
                        'RAIL_DEVICEID': result.get('dfp'),
                    })
                else:
                    self.session.cookies.update({
                        'RAIL_EXPIRATION': Config().RAIL_EXPIRATION,
                        'RAIL_DEVICEID': Config().RAIL_DEVICEID,
                    })
            except:  # NOTE(review): bare except retries on ANY error
                return self.request_device_id2()
        else:
            return self.request_device_id2()

    def wait_for_ready(cls):
        # Blocks (via recursion) until `check_before_run` has initialized jobs.
        self = cls()
        if self.is_ready: return self
        stay_second(self.retry_time)
        return self.wait_for_ready()

    def job_by_name(cls, name) -> Job:
        # NOTE(review): dead code — immediately shadowed by the next definition.
        self = cls()
        for job in self.jobs:
            if job.job_name == name: return job
        return None

    def job_by_name(cls, name) -> Job:
        # Looks up a job by its job_name.
        self = cls()
        return objects_find_object_by_key_value(self.jobs, 'job_name', name)

    def job_by_account_key(cls, account_key) -> Job:
        # Looks up a job by the user-account key bound to it.
        self = cls()
        return objects_find_object_by_key_value(self.jobs, 'account_key', account_key)

    def get_query_api_type(cls):
        # Scrapes the left-ticket query endpoint path from the query init page;
        # retries (renewing the device id) until it is obtained.
        import re
        self = cls()
        if self.api_type:
            return self.api_type
        response = self.session.get(API_QUERY_INIT_PAGE)
        if response.status_code == 200:
            res = re.search(r'var CLeftTicketUrl = \'(.*)\';', response.text)
            try:
                self.api_type = res.group(1)
            except Exception:
                pass
        if not self.api_type:
            QueryLog.add_quick_log('查询地址获取失败, 正在重新获取...').flush()
            sleep(get_interval_num(self.interval))
            self.request_device_id(True)
            return cls.get_query_api_type()
class User:
    """
    Maintains the pool of 12306 user sessions — one ``UserJob`` per
    configured account — and keeps them alive on background threads.

    NOTE(review): ``run``, ``is_empty``, ``get_user`` and
    ``get_passenger_for_members`` take ``cls`` with no ``@classmethod``
    decorator in this chunk — presumably stripped during extraction;
    verify against the original file.
    """
    users = []
    user_accounts = []
    retry_time = 3
    cluster = None

    def __init__(self):
        self.cluster = Cluster()
        self.update_interval()
        self.update_user_accounts()

    def update_user_accounts(self, auto=False, old=None):
        # Reloads accounts from config; on auto-reload re-syncs running users.
        self.user_accounts = Config().USER_ACCOUNTS
        if auto:
            UserLog.add_quick_log(UserLog.MESSAGE_USERS_DID_CHANGED).flush()
            self.refresh_users(old)

    def update_interval(self, auto=False):
        self.interval = Config().USER_HEARTBEAT_INTERVAL
        if auto: jobs_do(self.users, 'update_user')

    def run(cls):
        self = cls()
        # app_available_check()  # the user subsystem does not rest at night
        self.start()
        pass

    def start(self):
        self.init_users()
        UserLog.print_init_users(users=self.users)
        # Keep each user session alive on its own thread.
        create_thread_and_run(jobs=self.users, callback_name='run', wait=Const.IS_TEST)

    def init_users(self):
        for account in self.user_accounts:
            self.init_user(account)

    def init_user(self, info):
        user = UserJob(info=info)
        self.users.append(user)
        return user

    def refresh_users(self, old):
        # Re-syncs user jobs after a config change: re-initializes modified
        # accounts, spawns threads for new ones, destroys removed ones.
        for account in self.user_accounts:
            key = account.get('key')
            old_account = array_dict_find_by_key_value(old, 'key', key)
            if old_account and account != old_account:
                user = self.get_user(key)
                user.init_data(account)
            elif not old_account:  # new account: add it to the thread pool
                new_user = self.init_user(account)
                create_thread_and_run(jobs=new_user, callback_name='run', wait=Const.IS_TEST)
        for account in old:  # destroy users removed from config
            if not array_dict_find_by_key_value(self.user_accounts, 'key', account.get('key')):
                Event().user_job_destroy({'key': account.get('key')})

    def is_empty(cls):
        self = cls()
        return not bool(self.users)

    def get_user(cls, key) -> UserJob:
        self = cls()
        for user in self.users:
            if user.key == key: return user
        return None

    def get_passenger_for_members(cls, members, key):
        """
        Resolves passenger info for the given members through the ready
        user session matching ``key``.
        :param members: passenger names to resolve
        :return: passenger info from the matching user, or None if no user matches
        """
        self = cls()
        for user in self.users:
            assert isinstance(user, UserJob)
            if user.key == key and user.wait_for_ready():
                return user.get_passengers_by_members(members)
class QueryLog(BaseLog):
    # Re-declared here; without the re-declaration, messages were printed
    # repeatedly (root cause unknown — behavior kept from the original).
    logs = []
    thread_logs = {}
    quick_log = []

    # Aggregate query statistics persisted across restarts
    # (file-based or cluster-based, see `init_data`).
    data = {
        'query_count': 0,
        'last_time': '',
    }
    # Path of the on-disk status file; set in __init__.
    data_path = None

    LOG_INIT_JOBS = ''
    MESSAGE_GIVE_UP_CHANCE_CAUSE_TICKET_NUM_LESS_THAN_SPECIFIED = '余票数小于乘车人数,放弃此次提交机会'
    MESSAGE_QUERY_LOG_OF_EVERY_TRAIN = '{}'
    MESSAGE_QUERY_LOG_OF_TRAIN_INFO = '{} {}'
    MESSAGE_QUERY_START_BY_DATE = '出发日期 {}: {} - {}'
    MESSAGE_JOBS_DID_CHANGED = '任务已更新,正在重新加载...\n'
    MESSAGE_SKIP_ORDER = '跳过本次请求,节点 {} 用户 {} 正在处理该订单\n'
    MESSAGE_QUERY_JOB_BEING_DESTROY = '查询任务 {} 已结束\n'
    MESSAGE_INIT_PASSENGERS_SUCCESS = '初始化乘客成功'
    MESSAGE_CHECK_PASSENGERS = '查询任务 {} 正在验证乘客信息'
    MESSAGE_USER_IS_EMPTY_WHEN_DO_ORDER = '未配置自动下单账号,{} 秒后继续查询\n'
    MESSAGE_ORDER_USER_IS_EMPTY = '未找到下单账号,{} 秒后继续查询'
    cluster = None
def __init__(self):
super().__init__()
self.data_path = Config().QUERY_DATA_DIR + 'status.json'
self.cluster = Cluster()
    def init_data(cls):
        # NOTE(review): written in the `cls` style of the other classmethods;
        # the @classmethod decorator appears stripped in this chunk — verify.
        self = cls()
        # Restore the stats recorded by the previous run.
        result = False
        if not Config.is_cluster_enabled() and path.exists(self.data_path):
            with open(self.data_path, encoding='utf-8') as f:
                result = f.read()
                try:
                    result = json.loads(result)
                except json.JSONDecodeError as e:
                    # Corrupt status file: start from empty stats.
                    result = {}
                    # self.add_quick_log('加载status.json失败, 文件内容为: {}.'.format(repr(result)))
                    # self.flush()  # a notice here is optional
        if Config.is_cluster_enabled():
            result = self.get_data_from_cluster()
        if result:
            # Merge restored values over the defaults and announce the restore.
            self.data = {**self.data, **result}
            self.print_data_restored()
def get_data_from_cluster(self):
query_count = self.cluster.session.get(Cluster.KEY_QUERY_COUNT, 0)
last_time = self.cluster.session.get(Cluster.KEY_QUERY_LAST_TIME, '')
if query_count and last_time:
return {'query_count': query_count, 'last_time': last_time}
return False
def refresh_data_of_cluster(self):
return {
'query_count': self.cluster.session.incr(Cluster.KEY_QUERY_COUNT),
'last_time': self.cluster.session.set(Cluster.KEY_QUERY_LAST_TIME, time_now()),
}
def print_init_jobs(cls, jobs):
"""
输出初始化信息
:return:
"""
self = cls()
self.add_log('# 发现 {} 个任务 #'.format(len(jobs)))
index = 1
for job in jobs:
self.add_log('================== 任务 {} =================='.format(index))
for station in job.stations:
self.add_log('出发站:{} 到达站:{}'.format(station.get('left'), station.get('arrive')))
self.add_log('乘车日期:{}'.format(job.left_dates))
self.add_log('坐席:{}'.format(','.join(job.allow_seats)))
self.add_log('乘车人:{}'.format(','.join(job.members)))
if job.except_train_numbers:
train_number_message = '排除 ' + ','.join(job.allow_train_numbers)
else:
train_number_message = ','.join(job.allow_train_numbers if job.allow_train_numbers else ['不筛选'])
self.add_log('筛选车次:{}'.format(train_number_message))
self.add_log('任务名称:{}'.format(job.job_name))
# 乘车日期:['2019-01-24', '2019-01-25', '2019-01-26', '2019-01-27']
self.add_log('')
index += 1
self.flush()
return self
def print_ticket_num_less_than_specified(cls, rest_num, job):
self = cls()
self.add_quick_log(
'余票数小于乘车人数,当前余票数: {rest_num}, 实际人数 {actual_num}, 删减人车人数到: {take_num}'.format(rest_num=rest_num,
actual_num=job.member_num,
take_num=job.member_num_take))
self.flush()
return self
def print_ticket_seat_available(cls, left_date, train_number, seat_type, rest_num):
self = cls()
self.add_quick_log(
'[ 查询到座位可用 出发时间 {left_date} 车次 {train_number} 座位类型 {seat_type} 余票数量 {rest_num} ]'.format(
left_date=left_date,
train_number=train_number,
seat_type=seat_type,
rest_num=rest_num))
self.flush()
return self
def print_ticket_available(cls, left_date, train_number, rest_num):
self = cls()
self.add_quick_log('检查完成 开始提交订单 '.format())
self.notification('查询到可用车票', '时间 {left_date} 车次 {train_number} 余票数量 {rest_num}'.format(left_date=left_date,
train_number=train_number,
rest_num=rest_num))
self.flush()
return self
def print_query_error(cls, reason, code=None):
self = cls()
self.add_quick_log('查询余票请求失败')
if code:
self.add_quick_log('状态码 {} '.format(code))
if reason:
self.add_quick_log('错误原因 {} '.format(reason))
self.flush(sep='\t')
return self
def print_job_start(cls, job_name):
self = cls()
message = '>> 第 {query_count} 次查询 {job_name} {time}'.format(
query_count=int(self.data.get('query_count', 0)) + 1,
job_name=job_name, time=time_now().strftime("%Y-%m-%d %H:%M:%S"))
self.add_log(message)
self.refresh_data()
if is_main_thread():
self.flush(publish=False)
return self
def add_query_time_log(cls, time, is_cdn):
return cls().add_log(('*' if is_cdn else '') + '耗时 %.2f' % time)
def add_stay_log(cls, second):
self = cls()
self.add_log('停留 {}'.format(second))
return self
def print_data_restored(self):
self.add_quick_log('============================================================')
self.add_quick_log('|=== 查询记录恢复成功 上次查询 {last_date} ===|'.format(last_date=self.data.get('last_time')))
self.add_quick_log('============================================================')
self.add_quick_log('')
self.flush(publish=False)
return self
def refresh_data(self):
if Config.is_cluster_enabled():
self.data = {**self.data, **self.refresh_data_of_cluster()}
else:
self.data['query_count'] += 1
self.data['last_time'] = str(datetime.datetime.now())
self.save_data()
def save_data(self):
with open(self.data_path, 'w') as file:
file.write(json.dumps(self.data))
class Cdn:
    """
    CDN management: loads candidate hosts, probes their availability with a
    small thread pool, persists results to disk, and re-checks periodically.
    """
    items = []  # all candidate CDN hosts loaded from CDN_ITEM_FILE
    available_items = []
    unavailable_items = []
    # Staging lists used while a periodic re-check pass is running; swapped in
    # wholesale when the pass finishes.
    recheck_available_items = []
    recheck_unavailable_items = []
    retry_time = 3
    is_ready = False
    is_finished = False
    is_ready_num = 10  # considered ready once more than 10 hosts are available
    is_alive = True
    is_recheck = False
    safe_stay_time = 0.2
    retry_num = 1
    thread_num = 5
    check_time_out = 3
    last_check_at = 0
    save_second = 5
    check_keep_second = 60 * 60 * 24  # re-check the full host list every 24 hours

    def __init__(self):
        self.cluster = Cluster()
        self.init_config()
        create_thread_and_run(self, 'watch_cdn', False)

    def init_data(self):
        """Reset all probe state (used when the CDN feature is turned off)."""
        self.items = []
        self.available_items = []
        self.unavailable_items = []
        self.is_finished = False
        self.is_ready = False
        self.is_recheck = False

    def init_config(self):
        self.check_time_out = Config().CDN_CHECK_TIME_OUT

    def update_cdn_status(self, auto=False):
        """React to a config change: start probing when enabled, tear down otherwise."""
        if auto:
            self.init_config()
        if Config().is_cdn_enabled():
            self.run()
        else:
            self.destroy()

    # NOTE(review): takes `cls` — likely an @classmethod whose decorator was
    # lost in extraction (also get_cdn below); confirm against the original module.
    def run(cls):
        self = cls()
        app_available_check()
        self.is_alive = True
        self.start()
        pass

    def start(self):
        """Load the host list, restore cached results, then fan out probe workers."""
        if not Config.is_cdn_enabled(): return
        self.load_items()
        CommonLog.add_quick_log(CommonLog.MESSAGE_CDN_START_TO_CHECK.format(len(self.items))).flush()
        self.restore_items()
        for i in range(self.thread_num):  # probe concurrently
            create_thread_and_run(jobs=self, callback_name='check_available', wait=False)

    def load_items(self):
        """Read candidate hosts, one per line, from CDN_ITEM_FILE."""
        with open(Config().CDN_ITEM_FILE, encoding='utf-8') as f:
            for line, val in enumerate(f):
                self.items.append(val.rstrip('\n'))

    def restore_items(self):
        """
        Restore previously saved probe results from disk.
        :return: bool — True when a saved result was loaded
        """
        result = False
        if path.exists(Config().CDN_ENABLED_AVAILABLE_ITEM_FILE):
            with open(Config().CDN_ENABLED_AVAILABLE_ITEM_FILE, encoding='utf-8') as f:
                result = f.read()
            try:
                result = json.loads(result)
            except json.JSONDecodeError as e:
                result = {}
        # if Config.is_cluster_enabled():  # the cluster does not need to sync CDN state
        #     result = self.get_data_from_cluster()
        if result:
            self.last_check_at = result.get('last_check_at', '')
            if self.last_check_at: self.last_check_at = str_to_time(self.last_check_at)
            self.available_items = result.get('items', [])
            self.unavailable_items = result.get('fail_items', [])
            CommonLog.add_quick_log(CommonLog.MESSAGE_CDN_RESTORE_SUCCESS.format(self.last_check_at)).flush()
            return True
        return False

    # def get_data_from_cluster(self):
    #     available_items = self.cluster.session.smembers(Cluster.KEY_CDN_AVAILABLE_ITEMS)
    #     last_time = self.cluster.session.get(Cluster.KEY_CDN_LAST_CHECK_AT, '')
    #     if available_items and last_time:
    #         return {'items': available_items, 'last_check_at': last_time}
    #     return False

    def is_need_to_recheck(self):
        """
        Whether the cached results are stale and the CDN list should be probed again.
        :return:
        """
        if self.last_check_at and (
                time_now() - self.last_check_at).seconds > self.check_keep_second:
            return True
        return False

    def get_unchecked_item(self):
        """Pick a random host not yet probed in the current pass, or None when done."""
        if not self.is_recheck:
            items = list(set(self.items) - set(self.available_items) - set(self.unavailable_items))
        else:
            items = list(set(self.items) - set(self.recheck_available_items) - set(self.recheck_unavailable_items))
        if items: return random.choice(items)
        return None

    def check_available(self):
        """Worker loop: probe unchecked hosts until none remain or the manager stops."""
        while True and self.is_alive:
            item = self.get_unchecked_item()
            if not item: return self.check_did_finished()
            self.check_item_available(item)

    def watch_cdn(self):
        """
        Background monitor: when results go stale, kick off a re-check pass.
        """
        while True:
            if self.is_alive and not self.is_recheck and self.is_need_to_recheck():  # start a re-check
                self.is_recheck = True
                self.is_finished = False
                CommonLog.add_quick_log(
                    CommonLog.MESSAGE_CDN_START_TO_RECHECK.format(len(self.items), time_now())).flush()
                for i in range(self.thread_num):  # probe concurrently
                    create_thread_and_run(jobs=self, callback_name='check_available', wait=False)
            stay_second(self.retry_num)

    def destroy(self):
        """
        Shut down the CDN subsystem and clear its state.
        """
        CommonLog.add_quick_log(CommonLog.MESSAGE_CDN_CLOSED).flush()
        self.is_alive = False
        self.init_data()

    def check_item_available(self, item, try_num=0):
        """Probe one host (with up to retry_num retries) and file it as available/unavailable."""
        session = Request()
        response = session.get(API_CHECK_CDN_AVAILABLE.format(item), headers={'Host': HOST_URL_OF_12306},
                               timeout=self.check_time_out,
                               verify=False)
        if response.status_code == 200:
            if not self.is_recheck:
                self.available_items.append(item)
            else:
                self.recheck_available_items.append(item)
            if not self.is_ready: self.check_is_ready()
        elif try_num < self.retry_num:  # retry
            stay_second(self.safe_stay_time)
            return self.check_item_available(item, try_num + 1)
        else:
            if not self.is_recheck:
                self.unavailable_items.append(item)
            else:
                self.recheck_unavailable_items.append(item)
        # Periodically persist progress (at most once per save_second).
        if not self.is_recheck and (
                not self.last_check_at or (time_now() - self.last_check_at).seconds > self.save_second):
            self.save_available_items()
        stay_second(self.safe_stay_time)

    def check_did_finished(self):
        """A worker ran out of items: mark the pass finished and commit re-check results."""
        self.is_ready = True
        if not self.is_finished:
            self.is_finished = True
            if self.is_recheck:
                self.is_recheck = False
                self.available_items = self.recheck_available_items
                self.unavailable_items = self.recheck_unavailable_items
                self.recheck_available_items = []
                self.recheck_unavailable_items = []
            CommonLog.add_quick_log(CommonLog.MESSAGE_CDN_CHECKED_SUCCESS.format(len(self.available_items))).flush()
            self.save_available_items()

    def save_available_items(self):
        """Persist the current probe results (and the check timestamp) to disk."""
        self.last_check_at = time_now()
        data = {'items': self.available_items, 'fail_items': self.unavailable_items,
                'last_check_at': str(self.last_check_at)}
        with open(Config().CDN_ENABLED_AVAILABLE_ITEM_FILE, 'w') as f:
            f.write(json.dumps(data))
        # if Config.is_master():
        #     self.cluster.session.sadd(Cluster.KEY_CDN_AVAILABLE_ITEMS, self.available_items)
        #     self.cluster.session.set(Cluster.KEY_CDN_LAST_CHECK_AT, time_now())

    def check_is_ready(self):
        """Flip is_ready once strictly more than is_ready_num hosts are available."""
        if len(self.available_items) > self.is_ready_num:
            self.is_ready = True
        else:
            self.is_ready = False

    def get_cdn(cls):
        """Return a random available CDN host, or None when none are ready."""
        self = cls()
        if self.is_ready and self.available_items:
            return random.choice(self.available_items)
        return None
The snippet above provides the dependencies required to implement the `dashboard` function. Write a Python function `def dashboard()` that solves the following problem:
Status summary — the number of query jobs, the number of user jobs, and the total query count; node information (TODO). :return:
The function follows:
def dashboard():
    """
    Status summary endpoint.

    Reports the number of query jobs, user jobs and total queries performed;
    node information is still a TODO.
    :return: JSON response with the counters.
    """
    from py12306.log.query_log import QueryLog
    res = {
        'query_job_count': len(Query().jobs),
        'user_job_count': len(User().users),
        'query_count': QueryLog().data.get('query_count'),
    }
    if Config().CDN_ENABLED:
        from py12306.helpers.cdn import Cdn
        res['cdn_count'] = len(Cdn().available_items)
    return jsonify(res)
167,520 | from flask import Blueprint, request
from flask.json import jsonify
from flask_jwt_extended import (jwt_required)
from py12306.config import Config
from py12306.query.query import Query
from py12306.user.user import User
class Cluster():
    """
    Redis-backed cluster coordination: node registry, master election,
    pub/sub log and event channels, distributed locks, and shared user state.
    """
    KEY_PREFIX = 'py12306_'  # currently can only be changed manually
    KEY_QUERY_COUNT = KEY_PREFIX + 'query_count'
    KEY_QUERY_LAST_TIME = KEY_PREFIX + 'query_last_time'
    KEY_CONFIGS = KEY_PREFIX + 'configs'
    KEY_NODES = KEY_PREFIX + 'nodes'
    KEY_CHANNEL_LOG = KEY_PREFIX + 'channel_log'
    KEY_CHANNEL_EVENT = KEY_PREFIX + 'channel_even'
    KEY_USER_COOKIES = KEY_PREFIX + 'user_cookies'
    KEY_USER_INFOS = KEY_PREFIX + 'user_infos'
    KEY_USER_LAST_HEARTBEAT = KEY_PREFIX + 'user_last_heartbeat'
    KEY_NODES_ALIVE_PREFIX = KEY_PREFIX + 'nodes_alive_'
    KEY_CDN_AVAILABLE_ITEMS = KEY_PREFIX + 'cdn_available_items'
    KEY_CDN_LAST_CHECK_AT = KEY_PREFIX + 'cdn_last_check_at'
    # Locks
    KEY_LOCK_INIT_USER = KEY_PREFIX + 'lock_init_user'  # not used yet
    KEY_LOCK_DO_ORDER = KEY_PREFIX + 'lock_do_order'  # order lock
    lock_do_order_time = 60 * 1  # order-lock timeout (seconds)
    lock_prefix = KEY_PREFIX + 'lock_'  # prefix of all lock keys
    lock_info_prefix = KEY_PREFIX + 'info_'
    KEY_MASTER = 1
    KEY_SLAVE = 0
    session: Redis = None
    pubsub: PubSub = None
    refresh_channel_time = 0.5
    retry_time = 2
    keep_alive_time = 3  # heartbeat-report interval (seconds)
    lost_alive_time = keep_alive_time * 2
    nodes = {}
    node_name = None
    is_ready = False
    is_master = False

    def __init__(self, *args):
        if Config.is_cluster_enabled():
            self.session = Redis()
        # NOTE(review): returning non-None from __init__ raises TypeError on a
        # plain instantiation — presumably this class is wrapped by a singleton
        # decorator elsewhere; confirm against the original module.
        return self

    # NOTE(review): takes `cls` — likely an @classmethod whose decorator was
    # lost in extraction (same for the get/set user helpers below); confirm.
    def run(cls):
        self = cls()
        self.start()

    def start(self):
        """Subscribe to the channels, join the cluster, and spawn the background loops."""
        self.pubsub = self.session.pubsub()
        self.pubsub.subscribe(self.KEY_CHANNEL_LOG, self.KEY_CHANNEL_EVENT)
        create_thread_and_run(self, 'subscribe', wait=False)
        self.is_ready = True
        self.get_nodes()  # pre-fetch the node list
        self.check_nodes()  # in case the node list was not cleaned up
        self.join_cluster()
        create_thread_and_run(self, 'keep_alive', wait=False)
        create_thread_and_run(self, 'refresh_data', wait=False)

    def join_cluster(self):
        """
        Register this node in the cluster (handles master/slave rules and name clashes).
        :return:
        """
        self.node_name = node_name = Config().NODE_NAME
        if Config().NODE_IS_MASTER:
            if self.node_name in self.nodes:  # a master with this name is already running
                ClusterLog.add_quick_log(ClusterLog.MESSAGE_MASTER_NODE_ALREADY_RUN.format(node_name)).flush(
                    publish=False)
                os._exit(1)
            if self.have_master():  # a slave was promoted; the real master takes control back
                message = ClusterLog.MESSAGE_NODE_BECOME_MASTER_AGAIN.format(node_name)
                self.publish_log_message(message)
                self.make_nodes_as_slave()
        elif not self.have_master():  # the cluster can only be bootstrapped by a master
            ClusterLog.add_quick_log(ClusterLog.MESSAGE_MASTER_NODE_NOT_FOUND).flush(publish=False)
            os._exit(1)
        if node_name in self.nodes:
            # Name clash between slaves: suffix with a running count.
            self.node_name = node_name = node_name + '_' + str(dict_count_key_num(self.nodes, node_name))
            ClusterLog.add_quick_log(ClusterLog.MESSAGE_NODE_ALREADY_IN_CLUSTER.format(node_name)).flush()
        self.session.hset(self.KEY_NODES, node_name, Config().NODE_IS_MASTER)
        message = ClusterLog.MESSAGE_JOIN_CLUSTER_SUCCESS.format(self.node_name, ClusterLog.get_print_nodes(
            self.get_nodes()))  # manual get_nodes to refresh the registry
        self.publish_log_message(message)

    def left_cluster(self, node_name=None):
        """Remove a node (default: this one) from the registry and announce it."""
        node_name = node_name if node_name else self.node_name
        self.session.hdel(self.KEY_NODES, node_name)
        message = ClusterLog.MESSAGE_LEFT_CLUSTER.format(node_name, ClusterLog.get_print_nodes(self.get_nodes()))
        self.publish_log_message(message, node_name)

    def make_nodes_as_slave(self):
        """
        Mark every known node as a slave.
        :return:
        """
        for node in self.nodes:
            self.session.hset(self.KEY_NODES, node, self.KEY_SLAVE)

    def publish_log_message(self, message, node_name=None):
        """
        Publish a log line to the shared log channel.
        :return:
        """
        node_name = node_name if node_name else self.node_name
        message = ClusterLog.MESSAGE_SUBSCRIBE_NOTIFICATION.format(node_name, message)
        self.session.publish(self.KEY_CHANNEL_LOG, message)

    # NOTE(review): mutable default argument (data={}) — kept as-is, but the
    # dict is only read/serialised here, never mutated.
    def publish_event(self, name, data={}):
        """
        Publish an event message to the shared event channel.
        :return:
        """
        data = {'event': name, 'data': data}
        self.session.publish(self.KEY_CHANNEL_EVENT, json.dumps(data))

    def get_nodes(self) -> dict:
        """Refresh and return the node registry (name -> master flag)."""
        res = self.session.hgetall(self.KEY_NODES)
        res = res if res else {}
        self.nodes = res
        return res

    def refresh_data(self):
        """
        Dedicated loop that keeps locks, node list and master status in sync.
        :return:
        """
        while True:
            self.get_nodes()
            self.check_locks()
            self.check_nodes()
            self.check_master()
            stay_second(self.retry_time)

    def check_master(self):
        """
        Verify a master exists; promote a slave or terminate when it is lost.
        :return:
        """
        master = self.have_master()
        if master == self.node_name:  # this node was dynamically promoted
            self.is_master = True
        else:
            self.is_master = False
        if not master:
            if Config().NODE_SLAVE_CAN_BE_MASTER:
                # Promote the first slave to master.
                slave = list(self.nodes)[0]
                self.session.hset(self.KEY_NODES, slave, self.KEY_MASTER)
                self.publish_log_message(ClusterLog.MESSAGE_ASCENDING_MASTER_NODE.format(slave,
                                                                                        ClusterLog.get_print_nodes(
                                                                                            self.get_nodes())))
                return True
            else:
                self.publish_log_message(ClusterLog.MESSAGE_MASTER_DID_LOST.format(self.retry_time))
                stay_second(self.retry_time)
                os._exit(1)  # terminate the whole program

    def have_master(self):
        """Return the master node's name, or False when no node holds the master flag."""
        return dict_find_key_by_value(self.nodes, str(self.KEY_MASTER), False)

    def check_nodes(self):
        """
        Evict nodes whose keep-alive key has expired.
        :return:
        """
        for node in self.nodes:
            if not self.session.exists(self.KEY_NODES_ALIVE_PREFIX + node):
                self.left_cluster(node)

    # def kick_out_from_nodes(self, node_name):
    #     pass

    def keep_alive(self):
        """Heartbeat loop: refresh this node's alive key; re-join if we were evicted."""
        while True:
            if self.node_name not in self.get_nodes():  # we were kicked out — re-join
                self.join_cluster()
            self.session.set(self.KEY_NODES_ALIVE_PREFIX + self.node_name, Config().NODE_IS_MASTER, ex=self.lost_alive_time)
            stay_second(self.keep_alive_time)

    def subscribe(self):
        """Consume the log/event pub-sub channels; re-subscribes when the connection drops."""
        # NOTE(review): if the RuntimeError message does not match, `message`
        # is left unbound and the next `if message:` raises NameError — confirm
        # whether that path can occur in practice.
        while True:
            try:
                message = self.pubsub.get_message()
            except RuntimeError as err:
                if 'args' in dir(err) and err.args[0].find('pubsub connection not set') >= 0:  # connection lost: re-subscribe
                    self.pubsub.subscribe(self.KEY_CHANNEL_LOG, self.KEY_CHANNEL_EVENT)
                    continue
            if message:
                if message.get('type') == 'message' and message.get('channel') == self.KEY_CHANNEL_LOG and message.get(
                        'data'):
                    msg = message.get('data')
                    if self.node_name:
                        msg = msg.replace(ClusterLog.MESSAGE_SUBSCRIBE_NOTIFICATION_PREFIX.format(self.node_name), '')
                    ClusterLog.add_quick_log(msg).flush(publish=False)
                elif message.get('channel') == self.KEY_CHANNEL_EVENT:
                    create_thread_and_run(self, 'handle_events', args=(message,))
            stay_second(self.refresh_channel_time)

    def handle_events(self, message):
        """Dispatch an event message to the matching Event() method in a new thread."""
        # Should be split per event type; handled in one place for now.
        if message.get('type') != 'message': return
        result = json.loads(message.get('data', {}))
        event_name = result.get('event')
        data = result.get('data')
        from py12306.helpers.event import Event
        method = getattr(Event(), event_name)
        if method:
            create_thread_and_run(Event(), event_name, Const.IS_TEST, kwargs={'data': data, 'callback': True})

    def get_lock(self, key: str, timeout=1, info={}):
        """Try to acquire a distributed lock; True on success. *info* is stored alongside."""
        timeout = int(time.time()) + timeout
        res = self.session.setnx(key, timeout)
        if res:
            if info: self.session.set_dict(self.lock_info_prefix + key.replace(self.KEY_PREFIX, ''), info)  # store extra info
            return True
        return False

    def get_lock_info(self, key, default={}):
        """Return the extra info stored with a lock."""
        return self.session.get_dict(self.lock_info_prefix + key.replace(self.KEY_PREFIX, ''), default=default)

    def release_lock(self, key):
        """Delete a lock and its associated info."""
        self.session.delete(key)
        self.session.delete(self.lock_info_prefix + key.replace(self.KEY_PREFIX, ''))

    def check_locks(self):
        """Release locks whose stored expiry timestamp has passed."""
        locks = self.session.keys(self.lock_prefix + '*')
        for key in locks:
            val = self.session.get(key)
            if val and int(val) <= time_int():
                self.release_lock(key)

    def get_user_cookie(cls, key, default=None):
        # NOTE: pickle is used for (de)serialisation — only safe because the
        # data comes from this application's own Redis instance.
        self = cls()
        res = self.session.hget(Cluster.KEY_USER_COOKIES, key)
        return pickle.loads(res.encode()) if res else default

    def set_user_cookie(cls, key, value):
        self = cls()
        return self.session.hset(Cluster.KEY_USER_COOKIES, key, pickle.dumps(value, 0).decode())

    def set_user_info(cls, key, info):
        self = cls()
        return self.session.hset(Cluster.KEY_USER_INFOS, key, pickle.dumps(info, 0).decode())

    def get_user_info(cls, key, default=None):
        self = cls()
        res = self.session.hget(Cluster.KEY_USER_INFOS, key)
        return pickle.loads(res.encode()) if res else default
The snippet above provides the dependencies required to implement the `clusters` function. Write a Python function `def clusters()` that solves the following problem:
Cluster summary — the node count, the master node, and the list of nodes. :return:
The function follows:
def clusters():
    """
    Cluster summary endpoint.

    Reports the node count, the current master node and the node list.
    :return: JSON response.
    """
    from py12306.cluster.cluster import Cluster
    nodes = Cluster().nodes
    # Nodes whose value equals KEY_MASTER hold the master role.
    masters = [name for name, role in nodes.items() if int(role) == Cluster.KEY_MASTER]
    return jsonify({
        'master': masters[0] if masters else '',
        'count': len(nodes),
        'node_lists': ', '.join(list(nodes))
    })
167,521 | import json
import re
from flask import Blueprint, request, send_file
from flask.json import jsonify
from flask_jwt_extended import (jwt_required)
from py12306.config import Config
from py12306.query.query import Query
from py12306.user.user import User
Config:
IS_DEBUG = False
# 查询任务
# 查询间隔
# 查询重试次数
# 用户心跳检测间隔
# 多线程查询
# 打码平台账号
#用户打码平台地址
# 输出日志到文件
# Query
# 语音验证码
# 集群配置
# 钉钉配置
# Telegram推送配置
# Bark 推送配置
# ServerChan和PushBear配置
# 邮箱配置
# CDN
# Default time out
# @classmethod
# def keep_work(cls):
# self = cls()
def index():
    """Serve the web UI entry HTML with the runtime config injected into its <script> tag."""
    html_path = Config().WEB_ENTER_HTML_PATH
    with open(html_path, 'r', encoding='utf-8') as f:
        page = f.read()
    config = {
        'API_BASE_URL': ''  # TODO custom host
    }
    injected = '<script>window.config={}</script>'.format(json.dumps(config))
    page = re.sub(r'<script>[\s\S]*?<\/script>', injected, page)
    return page
167,522 | import json
import re
from flask import Blueprint, request, send_file
from flask.json import jsonify
from flask_jwt_extended import (jwt_required)
from py12306.config import Config
from py12306.query.query import Query
from py12306.user.user import User
The provided code snippet includes necessary dependencies for implementing the `menus` function. Write a Python function `def menus()` to solve the following problem:
菜单列表
Here is the function:
def menus():
    """
    Sidebar menu entries for the web UI.
    """
    entries = [
        (10, "首页", "/", "fa fa-tachometer-alt"),
        (20, "用户管理", "/user", "fa fa-user"),
        (30, "查询任务", "/query", "fa fa-infinity"),
        (40, "实时日志", "/log/realtime", "fa fa-signature"),
        (50, "帮助", "/help", "fa fa-search"),
    ]
    return jsonify([
        {"id": mid, "name": name, "url": url, "icon": icon}
        for mid, name, url, icon in entries
    ])
167,523 | import json
import re
from flask import Blueprint, request, send_file
from flask.json import jsonify
from flask_jwt_extended import (jwt_required)
from py12306.config import Config
from py12306.query.query import Query
from py12306.user.user import User
The provided code snippet includes necessary dependencies for implementing the `actions` function. Write a Python function `def actions()` to solve the following problem:
操作列表
Here is the function:
def actions():
    """
    Header action entries for the web UI.
    """
    logout = {
        "text": "退出登录",
        "key": 'logout',
        "link": "",
        "icon": "fa fa-sign-out-alt",
    }
    return jsonify([logout])
167,524 | from flask import Blueprint, request
from flask.json import jsonify
from flask_jwt_extended import (jwt_required, create_access_token)
from py12306.config import Config
from py12306.helpers.func import str_to_time, timestamp_to_time
from py12306.user.job import UserJob
from py12306.user.user import User
Config:
IS_DEBUG = False
# 查询任务
# 查询间隔
# 查询重试次数
# 用户心跳检测间隔
# 多线程查询
# 打码平台账号
#用户打码平台地址
# 输出日志到文件
# Query
# 语音验证码
# 集群配置
# 钉钉配置
# Telegram推送配置
# Bark 推送配置
# ServerChan和PushBear配置
# 邮箱配置
# CDN
# Default time out
# @classmethod
# def keep_work(cls):
# self = cls()
The provided code snippet includes necessary dependencies for implementing the `login` function. Write a Python function `def login()` to solve the following problem:
用户登录 :return:
Here is the function:
def login():
    """
    Authenticate the web user and issue a JWT access token.

    Reads ``username``/``password`` from the JSON request body and compares
    them against the credentials configured in ``Config().WEB_USER``.
    Comparison uses ``hmac.compare_digest`` so the check runs in constant
    time (the endpoint handles untrusted input).
    :return: ``{"access_token": ...}`` on success, otherwise 422 with an error message.
    """
    import hmac  # local import: constant-time credential comparison

    username = request.json.get('username', None)
    password = request.json.get('password', None)
    configured = Config().WEB_USER
    # `or ''` guards against a missing configured credential ever matching
    # (str(None) would otherwise equal a literal "None" input).
    expected_user = str(configured.get('username') or '')
    expected_pass = str(configured.get('password') or '')
    if (username and password
            and hmac.compare_digest(str(username), expected_user)
            and hmac.compare_digest(str(password), expected_pass)):
        access_token = create_access_token(identity=username)
        return jsonify(access_token=access_token)
    return jsonify({"msg": "用户名或密码错误"}), 422
167,525 | from flask import Blueprint, request
from flask.json import jsonify
from flask_jwt_extended import (jwt_required, create_access_token)
from py12306.config import Config
from py12306.helpers.func import str_to_time, timestamp_to_time
from py12306.user.job import UserJob
from py12306.user.user import User
def convert_job_to_info(job: UserJob):
    """Flatten a UserJob into a plain dict for the JSON API."""
    info = {}
    info['key'] = job.key
    info['user_name'] = job.user_name
    info['name'] = job.get_name()
    info['is_ready'] = job.is_ready
    # user_loaded: account loaded successfully; is_ready: currently usable.
    info['is_loaded'] = job.user_loaded
    if job.last_heartbeat:
        info['last_heartbeat'] = timestamp_to_time(job.last_heartbeat)
    else:
        info['last_heartbeat'] = '-'
    info['login_num'] = job.login_num
    return info
class User:
    """Owns the pool of 12306 account jobs (UserJob) and keeps them in sync with config."""
    users = []
    user_accounts = []
    retry_time = 3
    cluster = None

    def __init__(self):
        self.cluster = Cluster()
        self.update_interval()
        self.update_user_accounts()

    def update_user_accounts(self, auto=False, old=None):
        """Reload account config; on an auto (hot) reload, reconcile running jobs."""
        self.user_accounts = Config().USER_ACCOUNTS
        if auto:
            UserLog.add_quick_log(UserLog.MESSAGE_USERS_DID_CHANGED).flush()
            self.refresh_users(old)

    def update_interval(self, auto=False):
        """Reload the heartbeat interval; on auto reload, push it into running jobs."""
        self.interval = Config().USER_HEARTBEAT_INTERVAL
        if auto: jobs_do(self.users, 'update_user')

    # NOTE(review): takes `cls` — likely an @classmethod whose decorator was
    # lost in extraction (same for is_empty/get_user/get_passenger_for_members); confirm.
    def run(cls):
        self = cls()
        # app_available_check()  # the user subsystem never sleeps
        self.start()
        pass

    def start(self):
        """Create jobs from config and maintain each one on its own thread."""
        self.init_users()
        UserLog.print_init_users(users=self.users)
        # Maintain users on worker threads.
        create_thread_and_run(jobs=self.users, callback_name='run', wait=Const.IS_TEST)

    def init_users(self):
        for account in self.user_accounts:
            self.init_user(account)

    def init_user(self, info):
        user = UserJob(info=info)
        self.users.append(user)
        return user

    def refresh_users(self, old):
        """Reconcile running jobs against new config: update changed, add new, destroy removed."""
        for account in self.user_accounts:
            key = account.get('key')
            old_account = array_dict_find_by_key_value(old, 'key', key)
            if old_account and account != old_account:
                user = self.get_user(key)
                user.init_data(account)
            elif not old_account:  # new user — add to the thread pool
                new_user = self.init_user(account)
                create_thread_and_run(jobs=new_user, callback_name='run', wait=Const.IS_TEST)
        for account in old:  # tear down users removed from config
            if not array_dict_find_by_key_value(self.user_accounts, 'key', account.get('key')):
                Event().user_job_destroy({'key': account.get('key')})

    def is_empty(cls):
        self = cls()
        return not bool(self.users)

    def get_user(cls, key) -> UserJob:
        """Return the user job with the given key, or None."""
        self = cls()
        for user in self.users:
            if user.key == key: return user
        return None

    def get_passenger_for_members(cls, members, key):
        """
        Resolve passenger records for *members* on the account identified by *key*.
        :param members: member names to resolve
        :return: passenger list from the matching, ready user job; None when no user matches
        """
        self = cls()
        for user in self.users:
            assert isinstance(user, UserJob)
            if user.key == key and user.wait_for_ready():
                return user.get_passengers_by_members(members)
The provided code snippet includes necessary dependencies for implementing the `users` function. Write a Python function `def users()` to solve the following problem:
用户任务列表 :return:
Here is the function:
def users():
    """
    List the configured user jobs.
    :return: JSON array of user-job summaries.
    """
    summaries = [convert_job_to_info(job) for job in User().users]
    return jsonify(summaries)
167,526 | from flask import Blueprint, request
from flask.json import jsonify
from flask_jwt_extended import (jwt_required, create_access_token)
from py12306.config import Config
from py12306.helpers.func import str_to_time, timestamp_to_time
from py12306.user.job import UserJob
from py12306.user.user import User
Config:
IS_DEBUG = False
# 查询任务
# 查询间隔
# 查询重试次数
# 用户心跳检测间隔
# 多线程查询
# 打码平台账号
#用户打码平台地址
# 输出日志到文件
# Query
# 语音验证码
# 集群配置
# 钉钉配置
# Telegram推送配置
# Bark 推送配置
# ServerChan和PushBear配置
# 邮箱配置
# CDN
# Default time out
# @classmethod
# def keep_work(cls):
# self = cls()
The provided code snippet includes necessary dependencies for implementing the `user_info` function. Write a Python function `def user_info()` to solve the following problem:
获取用户信息 :return:
Here is the function:
def user_info():
    """
    Profile of the currently configured web user.
    :return: JSON with the configured username.
    """
    return jsonify({'name': Config().WEB_USER.get('username')})
167,527 | from flask import Blueprint, request
from flask.json import jsonify
from flask_jwt_extended import (jwt_required)
from py12306.config import Config
from py12306.query.job import Job
from py12306.query.query import Query
def convert_job_to_info(job: Job):
    """Flatten a query Job into a plain dict for the JSON API."""
    pairs = (
        ('name', job.job_name),
        ('left_dates', job.left_dates),
        ('stations', job.stations),
        ('members', job.members),
        ('member_num', job.member_num),
        ('allow_seats', job.allow_seats),
        ('allow_train_numbers', job.allow_train_numbers),
        ('except_train_numbers', job.except_train_numbers),
        ('allow_less_member', job.allow_less_member),
        ('passengers', job.passengers),
    )
    return dict(pairs)
class Query:
    """
    余票查询 — left-ticket query manager: owns the query jobs, the shared
    session (device-ID cookies) and the discovered query endpoint.
    """
    jobs = []
    query_jobs = []
    session = {}
    # Query interval (normalised structure from init_interval_by_number).
    interval = {}
    cluster = None
    is_in_thread = False
    retry_time = 3
    is_ready = False
    api_type = None  # Query api url, currently known values: leftTicket/queryX | leftTicket/queryZ

    def __init__(self):
        self.session = Request()
        self.request_device_id()
        self.cluster = Cluster()
        self.update_query_interval()
        self.update_query_jobs()
        self.get_query_api_type()

    def update_query_interval(self, auto=False):
        """Reload the query interval; on auto (hot) reload push it into running jobs."""
        self.interval = init_interval_by_number(Config().QUERY_INTERVAL)
        if auto:
            jobs_do(self.jobs, 'update_interval')

    def update_query_jobs(self, auto=False):
        """Reload job config; on auto reload reconcile jobs and re-verify passengers."""
        self.query_jobs = Config().QUERY_JOBS
        if auto:
            QueryLog.add_quick_log(QueryLog.MESSAGE_JOBS_DID_CHANGED).flush()
            self.refresh_jobs()
            if not Config().is_slave():
                jobs_do(self.jobs, 'check_passengers')

    # NOTE(review): takes `cls` — likely an @classmethod whose decorator was
    # lost in extraction (same for check_before_run/wait_for_ready/job_by_* /
    # get_query_api_type); confirm against the original module.
    def run(cls):
        self = cls()
        app_available_check()
        self.start()
        pass

    def check_before_run(cls):
        self = cls()
        self.init_jobs()
        self.is_ready = True

    def start(self):
        """Main loop: run jobs on worker threads or sequentially, per config."""
        # return  # DEBUG
        QueryLog.init_data()
        stay_second(3)
        # Threaded mode spawns one worker per job; single-thread mode runs them in turn.
        while True:
            if Config().QUERY_JOB_THREAD_ENABLED:  # threaded
                if not self.is_in_thread:
                    self.is_in_thread = True
                    create_thread_and_run(jobs=self.jobs, callback_name='run', wait=Const.IS_TEST)
                if Const.IS_TEST: return
                stay_second(self.retry_time)
            else:
                if not self.jobs: break
                self.is_in_thread = False
                jobs_do(self.jobs, 'run')
                if Const.IS_TEST: return
        # while True:
        #     app_available_check()
        #     if Config().QUERY_JOB_THREAD_ENABLED:  # threaded
        #         create_thread_and_run(jobs=self.jobs, callback_name='run')
        #     else:
        #         for job in self.jobs: job.run()
        #     if Const.IS_TEST: return
        #     self.refresh_jobs()  # reload jobs

    def refresh_jobs(self):
        """
        Reconcile running jobs with the current config (add new, keep unchanged, destroy removed).
        :return:
        """
        allow_jobs = []
        for job in self.query_jobs:
            id = md5(job)
            job_ins = objects_find_object_by_key_value(self.jobs, 'id', id)
            if not job_ins:
                job_ins = self.init_job(job)
                if Config().QUERY_JOB_THREAD_ENABLED:  # re-add to the thread pool
                    create_thread_and_run(jobs=job_ins, callback_name='run', wait=Const.IS_TEST)
            allow_jobs.append(job_ins)
        for job in self.jobs:  # destroy jobs removed from config
            if job not in allow_jobs: job.destroy()
        QueryLog.print_init_jobs(jobs=self.jobs)

    def init_jobs(self):
        for job in self.query_jobs:
            self.init_job(job)
        QueryLog.print_init_jobs(jobs=self.jobs)

    def init_job(self, job):
        job = Job(info=job, query=self)
        self.jobs.append(job)
        return job

    def request_device_id(self, force_renew=False):
        """
        Fetch the encrypted browser-fingerprint cookies (RAIL_DEVICEID / RAIL_EXPIRATION).
        Skipped while the cached expiry is still in the future, unless *force_renew*.
        :return:
        """
        # NOTE(review): bare except + unconditional recursion on failure means
        # a persistent upstream outage recurses without backoff — confirm intent.
        expire_time = self.session.cookies.get('RAIL_EXPIRATION')
        if not force_renew and expire_time and int(expire_time) - time_int_ms() > 0:
            return
        if 'pjialin' not in API_GET_BROWSER_DEVICE_ID:
            return self.request_device_id2()
        response = self.session.get(API_GET_BROWSER_DEVICE_ID)
        if response.status_code == 200:
            try:
                result = json.loads(response.text)
                response = self.session.get(b64decode(result['id']).decode())
                if response.text.find('callbackFunction') >= 0:
                    result = response.text[18:-2]
                    result = json.loads(result)
                    if not Config().is_cache_rail_id_enabled():
                        self.session.cookies.update({
                            'RAIL_EXPIRATION': result.get('exp'),
                            'RAIL_DEVICEID': result.get('dfp'),
                        })
                    else:
                        self.session.cookies.update({
                            'RAIL_EXPIRATION': Config().RAIL_EXPIRATION,
                            'RAIL_DEVICEID': Config().RAIL_DEVICEID,
                        })
            except:
                return self.request_device_id()
        else:
            return self.request_device_id()

    def request_device_id2(self):
        """Fallback fingerprint fetch hitting the endpoint directly with a browser UA."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36"
        }
        self.session.headers.update(headers)
        response = self.session.get(API_GET_BROWSER_DEVICE_ID)
        if response.status_code == 200:
            try:
                if response.text.find('callbackFunction') >= 0:
                    result = response.text[18:-2]
                    result = json.loads(result)
                    if not Config().is_cache_rail_id_enabled():
                        self.session.cookies.update({
                            'RAIL_EXPIRATION': result.get('exp'),
                            'RAIL_DEVICEID': result.get('dfp'),
                        })
                    else:
                        self.session.cookies.update({
                            'RAIL_EXPIRATION': Config().RAIL_EXPIRATION,
                            'RAIL_DEVICEID': Config().RAIL_DEVICEID,
                        })
            except:
                return self.request_device_id2()
        else:
            return self.request_device_id2()

    def wait_for_ready(cls):
        """Block (polling every retry_time seconds) until init_jobs has completed."""
        self = cls()
        if self.is_ready: return self
        stay_second(self.retry_time)
        return self.wait_for_ready()

    # NOTE(review): job_by_name is defined twice — this first definition is
    # shadowed by the one below; confirm which was intended.
    def job_by_name(cls, name) -> Job:
        self = cls()
        for job in self.jobs:
            if job.job_name == name: return job
        return None

    def job_by_name(cls, name) -> Job:
        """Return the job with the given name, or None."""
        self = cls()
        return objects_find_object_by_key_value(self.jobs, 'job_name', name)

    def job_by_account_key(cls, account_key) -> Job:
        """Return the job bound to the given account key, or None."""
        self = cls()
        return objects_find_object_by_key_value(self.jobs, 'account_key', account_key)

    def get_query_api_type(cls):
        """Scrape the left-ticket endpoint path (queryX/queryZ) from the query init page; retries until found."""
        import re
        self = cls()
        if self.api_type:
            return self.api_type
        response = self.session.get(API_QUERY_INIT_PAGE)
        if response.status_code == 200:
            res = re.search(r'var CLeftTicketUrl = \'(.*)\';', response.text)
            try:
                self.api_type = res.group(1)
            except Exception:
                pass
        if not self.api_type:
            QueryLog.add_quick_log('查询地址获取失败, 正在重新获取...').flush()
            sleep(get_interval_num(self.interval))
            # Renew the device fingerprint before retrying.
            self.request_device_id(True)
            return cls.get_query_api_type()
The provided code snippet includes necessary dependencies for implementing the `query_lists` function. Write a Python function `def query_lists()` to solve the following problem:
查询任务列表 :return:
Here is the function:
def query_lists():
    """
    List the configured query jobs.
    :return: JSON array of query-job summaries.
    """
    summaries = [convert_job_to_info(job) for job in Query().jobs]
    return jsonify(summaries)
167,528 | import linecache
from flask import Blueprint, request
from flask.json import jsonify
from flask_jwt_extended import (jwt_required)
from py12306.config import Config
from py12306.helpers.func import get_file_total_line_num, pick_file_lines
from py12306.log.common_log import CommonLog
from py12306.query.query import Query
from py12306.user.user import User
Config:
IS_DEBUG = False
# 查询任务
# 查询间隔
# 查询重试次数
# 用户心跳检测间隔
# 多线程查询
# 打码平台账号
#用户打码平台地址
# 输出日志到文件
# Query
# 语音验证码
# 集群配置
# 钉钉配置
# Telegram推送配置
# Bark 推送配置
# ServerChan和PushBear配置
# 邮箱配置
# CDN
# Default time out
# @classmethod
# def keep_work(cls):
# self = cls()
:
def get_file_total_line_num(file, encoding='utf-8'):
    """Return the number of lines in *file*.

    Fix: the original materialized the whole file via ``readlines()``; this
    streams the file and counts lines, keeping memory flat for large logs.

    :param file: path to a text file
    :param encoding: text encoding used to open the file
    :return: line count as an int
    """
    with open(file, 'r', encoding=encoding) as f:
        return sum(1 for _ in f)
def pick_file_lines(file, lines):
    """Return the items of iterable *file* whose 0-based index is in *lines*.

    Fix: membership was tested against *lines* directly (O(len(lines)) per
    item, and broken if *lines* is a one-shot generator); the indices are now
    snapshot into a set for O(1) lookups.

    :param file: any iterable (typically an open file object)
    :param lines: iterable of wanted line indices
    :return: list of selected items, in original order
    """
    wanted = set(lines)
    return [item for index, item in enumerate(file) if index in wanted]
class CommonLog(BaseLog):
    """Program-wide log messages and console banners for py12306.

    NOTE(review): the ``cls``-first methods below are presumably decorated
    with ``@classmethod`` in the upstream source — decorators appear stripped
    in this extract; confirm against the original file.
    """
    # (Translated author note) Without re-declaring these class attributes
    # here, log lines are printed twice; the root cause is unknown.
    logs = []
    thread_logs = {}
    quick_log = []
    # User-facing message templates (mostly Chinese) — kept verbatim.
    MESSAGE_12306_IS_CLOSED = '当前时间: {} | 12306 休息时间,程序将在明天早上 6 点自动运行'
    MESSAGE_RETRY_AUTH_CODE = '{} 秒后重新获取验证码'
    MESSAGE_EMPTY_APP_CODE = '无法发送语音消息,未填写验证码接口 appcode'
    MESSAGE_VOICE_API_FORBID = '语音消息发送失败,请检查 appcode 是否填写正确或 套餐余额是否充足'
    MESSAGE_VOICE_API_SEND_FAIL = '语音消息发送失败,错误原因 {}'
    MESSAGE_VOICE_API_SEND_SUCCESS = '语音消息发送成功! 接口返回信息 {} '
    MESSAGE_CHECK_AUTO_CODE_FAIL = '请配置打码账号的账号密码'
    MESSAGE_CHECK_EMPTY_USER_ACCOUNT = '请配置 12306 账号密码'
    MESSAGE_TEST_SEND_VOICE_CODE = '正在测试发送语音验证码...'
    MESSAGE_TEST_SEND_EMAIL = '正在测试发送邮件...'
    MESSAGE_TEST_SEND_DINGTALK = '正在测试发送钉钉消息...'
    MESSAGE_TEST_SEND_TELEGRAM = '正在测试推送到Telegram...'
    MESSAGE_TEST_SEND_SERVER_CHAN = '正在测试发送ServerChan消息...'
    MESSAGE_TEST_SEND_PUSH_BEAR = '正在测试发送PushBear消息...'
    MESSAGE_TEST_SEND_PUSH_BARK = '正在测试发送Bark消息...'
    MESSAGE_CONFIG_FILE_DID_CHANGED = '配置文件已修改,正在重新加载中\n'
    MESSAGE_API_RESPONSE_CAN_NOT_BE_HANDLE = '接口返回错误'
    MESSAGE_SEND_EMAIL_SUCCESS = '邮件发送成功,请检查收件箱'
    MESSAGE_SEND_EMAIL_FAIL = '邮件发送失败,请手动检查配置,错误原因 {}'
    MESSAGE_SEND_EMAIL_WITH_QRCODE_SUCCESS = '二维码邮件发送成功,请检查收件箱扫描登陆'
    MESSAGE_SEND_TELEGRAM_SUCCESS = 'Telegram推送成功'
    MESSAGE_SEND_TELEGRAM_FAIL = 'Telegram推送失败,错误原因 {}'
    MESSAGE_SEND_SERVER_CHAN_SUCCESS = '发送成功,请检查微信'
    MESSAGE_SEND_SERVER_CHAN_FAIL = 'ServerChan发送失败,请检查KEY'
    MESSAGE_SEND_PUSH_BEAR_SUCCESS = '发送成功,请检查微信'
    MESSAGE_SEND_PUSH_BEAR_FAIL = 'PushBear发送失败,请检查KEY'
    MESSAGE_SEND_BARK_SUCCESS = 'Bark推送成功'
    MESSAGE_SEND_BARK_FAIL = 'Bark推送失败,错误原因 {}'
    MESSAGE_OUTPUT_TO_FILE_IS_UN_ENABLE = '请先打开配置项中的:OUT_PUT_LOG_TO_FILE_ENABLED ( 输出到文件 )'
    MESSAGE_GET_RESPONSE_FROM_FREE_AUTO_CODE = '从免费打码获取结果失败'
    MESSAGE_RESPONSE_EMPTY_ERROR = '网络错误'
    MESSAGE_CDN_START_TO_CHECK = '正在筛选 {} 个 CDN...'
    MESSAGE_CDN_START_TO_RECHECK = '正在重新筛选 {} 个 CDN...当前时间 {}\n'
    MESSAGE_CDN_RESTORE_SUCCESS = 'CDN 恢复成功,上次检测 {}\n'
    MESSAGE_CDN_CHECKED_SUCCESS = '# CDN 检测完成,可用 CDN {} #\n'
    MESSAGE_CDN_CLOSED = '# CDN 已关闭 #'

    def __init__(self):
        super().__init__()
        self.init_data()

    def init_data(self):
        # Hook for subclass/extension initialization; intentionally empty here.
        pass

    def print_welcome(cls):
        """Print the startup banner and mode/feature notices (console only)."""
        self = cls()
        self.add_quick_log('######## py12306 购票助手,本程序为开源工具,请勿用于商业用途 ########')
        if Const.IS_TEST:
            self.add_quick_log()
            self.add_quick_log('当前为测试模式,程序运行完成后自动结束')
        if not Const.IS_TEST and Config().OUT_PUT_LOG_TO_FILE_ENABLED:
            self.add_quick_log()
            self.add_quick_log('日志已输出到文件中: {}'.format(Config().OUT_PUT_LOG_TO_FILE_PATH))
        if Config().WEB_ENABLE:
            self.add_quick_log()
            self.add_quick_log('WEB 管理页面已开启,请访问 主机地址 + 端口 {} 进行查看'.format(Config().WEB_PORT))
        self.add_quick_log()
        # Banner is console-only: skip the log file and cluster publishing.
        self.flush(file=False, publish=False)
        return self

    def print_configs(cls):
        """Print the effective configuration (features, intervals, cluster)."""
        self = cls()
        enable = '已开启'
        disable = '未开启'
        self.add_quick_log('**** 当前配置 ****')
        self.add_quick_log('多线程查询: {}'.format(get_true_false_text(Config().QUERY_JOB_THREAD_ENABLED, enable, disable)))
        self.add_quick_log('CDN 状态: {}'.format(get_true_false_text(Config().CDN_ENABLED, enable, disable))).flush()
        self.add_quick_log('通知状态:')
        # Each channel is listed only when enabled, so the formatted value is
        # always `enable` in practice.
        if Config().NOTIFICATION_BY_VOICE_CODE:
            self.add_quick_log(
                '语音验证码: {}'.format(get_true_false_text(Config().NOTIFICATION_BY_VOICE_CODE, enable, disable)))
        if Config().EMAIL_ENABLED:
            self.add_quick_log('邮件通知: {}'.format(get_true_false_text(Config().EMAIL_ENABLED, enable, disable)))
        if Config().DINGTALK_ENABLED:
            self.add_quick_log('钉钉通知: {}'.format(get_true_false_text(Config().DINGTALK_ENABLED, enable, disable)))
        if Config().TELEGRAM_ENABLED:
            self.add_quick_log('Telegram通知: {}'.format(get_true_false_text(Config().TELEGRAM_ENABLED, enable, disable)))
        if Config().SERVERCHAN_ENABLED:
            self.add_quick_log(
                'ServerChan通知: {}'.format(get_true_false_text(Config().SERVERCHAN_ENABLED, enable, disable)))
        if Config().BARK_ENABLED:
            self.add_quick_log('Bark通知: {}'.format(get_true_false_text(Config().BARK_ENABLED, enable, disable)))
        if Config().PUSHBEAR_ENABLED:
            self.add_quick_log(
                'PushBear通知: {}'.format(get_true_false_text(Config().PUSHBEAR_ENABLED, enable, disable)))
        self.add_quick_log().flush(sep='\t\t')
        self.add_quick_log('查询间隔: {} 秒'.format(Config().QUERY_INTERVAL))
        self.add_quick_log('用户心跳检测间隔: {} 秒'.format(Config().USER_HEARTBEAT_INTERVAL))
        self.add_quick_log('WEB 管理页面: {}'.format(get_true_false_text(Config().WEB_ENABLE, enable, disable)))
        if Config().is_cluster_enabled():
            from py12306.cluster.cluster import Cluster
            # NOTE(review): both branches pass `enable` below — looks like a
            # copy/paste slip for (enable, disable); confirm upstream intent.
            self.add_quick_log('分布式查询: {}'.format(get_true_false_text(Config().is_cluster_enabled(), enable, enable)))
            self.add_quick_log('节点名称: {}'.format(Cluster().node_name))
            self.add_quick_log('节点是否主节点: {}'.format(get_true_false_text(Config().is_master(), '是', '否')))
            self.add_quick_log(
                '子节点提升为主节点: {}'.format(get_true_false_text(Config().NODE_SLAVE_CAN_BE_MASTER, enable, disable)))
        self.add_quick_log()
        self.flush()
        return self

    def print_test_complete(cls):
        """Print the end-of-test marker (not published to the cluster)."""
        self = cls()
        self.add_quick_log('# 测试完成,请检查输出是否正确 #')
        self.flush(publish=False)
        return self

    def print_auto_code_fail(cls, reason):
        """Log a captcha-solving failure with its reason."""
        self = cls()
        self.add_quick_log('打码失败: 错误原因 {reason}'.format(reason=reason))
        self.flush()
        return self

    def print_auth_code_info(cls, reason):
        """Log informational output from the captcha-solving service."""
        self = cls()
        self.add_quick_log('打码信息: {reason}'.format(reason=reason))
        self.flush()
        return self
The provided code snippet includes necessary dependencies for implementing the `log_output` function. Write a Python function `def log_output()` to solve the following problem:
日志 :return:
Here is the function:
def log_output():
    """Tail the program's log file over HTTP.

    Query args:
        line:  last line index the client has seen; -1 means "jump near the
               end of the file".
        limit: number of additional lines to return (default 10).

    :return: JSON ``{'last_line': <new cursor>, 'data': <lines>}``.
        NOTE(review): when file logging is disabled, ``data`` is a plain
        string rather than a list — confirm the frontend handles both shapes.
    """
    last_line = int(request.args.get('line', 0))
    limit = int(request.args.get('limit', 10))
    max_old = 200  # when jumping to the newest, also read this many lines back
    file = Config().OUT_PUT_LOG_TO_FILE_PATH
    res = []
    if last_line == -1:
        total_line = get_file_total_line_num(file)
        # Start the window max_old lines before EOF (or at the top of the file).
        last_line = total_line - max_old if total_line > max_old else 0
        ranges = range(last_line, last_line + max_old + limit)
        # limit = max_old + limit
    else:
        ranges = range(last_line, last_line + limit)
    if Config().OUT_PUT_LOG_TO_FILE_ENABLED:
        with open(Config().OUT_PUT_LOG_TO_FILE_PATH, 'r', encoding='utf-8') as f:
            res = pick_file_lines(f, ranges)
        # linecache.updatecache(file)  # linecache had encoding issues on Windows; disabled for now
        # for i in ranges:
        #     tmp = linecache.getline(file, last_line + i)
        #     if tmp != '': res.append(tmp)
        # Advance the cursor by the number of lines actually returned.
        last_line += len(res)
    else:
        res = CommonLog.MESSAGE_OUTPUT_TO_FILE_IS_UN_ENABLE
    return jsonify({
        'last_line': last_line,
        'data': res
    })
167,529 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
The provided code snippet includes necessary dependencies for implementing the `singleton` function. Write a Python function `def singleton(cls)` to solve the following problem:
将一个类作为单例 来自 https://wiki.python.org/moin/PythonDecoratorLibrary#Singleton
Here is the function:
def singleton(cls):
    """Class decorator that turns *cls* into a singleton.

    Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary#Singleton
    The first instantiation is cached on the class as ``__it__``; subsequent
    calls return the cached instance without re-running ``__init__``.
    """
    cls.__new_original__ = cls.__new__

    @functools.wraps(cls.__new__)
    def singleton_new(klass, *args, **kw):
        cached = klass.__dict__.get('__it__')
        if cached is not None:
            return cached
        instance = klass.__new_original__(klass, *args, **kw)
        klass.__it__ = instance
        # Run the original initializer exactly once, on first creation.
        instance.__init_original__(*args, **kw)
        return instance

    cls.__new__ = singleton_new
    cls.__init_original__ = cls.__init__
    # Neutralize __init__ so repeated calls do not re-initialize the instance.
    cls.__init__ = object.__init__
    return cls
167,530 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
if isinstance(number, dict):
min = float(number.get('min'))
max = float(number.get('max'))
else:
min = number / 2
max = number
return {
'min': min,
'max': max
def init_interval_by_number(number):
    """Normalize *number* into an interval dict with 'min'/'max' bounds.

    A dict input supplies explicit bounds (coerced to float); a scalar input
    becomes the interval [number / 2, number].
    """
    if isinstance(number, dict):
        lower = float(number.get('min'))
        upper = float(number.get('max'))
    else:
        lower, upper = number / 2, number
    return {'min': lower, 'max': upper}
167,531 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
return round(random.uniform(interval.get('min'), interval.get('max')), decimal)
def get_interval_num(interval, decimal=2):
    """Draw a uniform random value from *interval*, rounded to *decimal* places.

    :param interval: dict with 'min' and 'max' numeric bounds
    :param decimal: number of decimal places to round to
    """
    lower = interval.get('min')
    upper = interval.get('max')
    return round(random.uniform(lower, upper), decimal)
167,532 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
sleep(second)
def stay_second(second, call_back=None):
    """Sleep for *second* seconds, then invoke *call_back* if one was given.

    :return: the callback's result, or None when no callback is supplied
    """
    sleep(second)
    return call_back() if call_back else None
167,533 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def current_thread_id():
    """Return the identifier of the calling thread."""
    return threading.get_ident()
167,534 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def time_now():
    """Return the current local time as a naive ``datetime``."""
    now = datetime.datetime.now()
    return now
167,535 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def timestamp_to_time(timestamp):
    """Format a Unix *timestamp* as 'YYYY-MM-DD HH:MM:SS' in local time."""
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp))
def get_file_modify_time(filePath):
    """Return the file's mtime formatted as local 'YYYY-MM-DD HH:MM:SS'."""
    return timestamp_to_time(os.path.getmtime(filePath))
167,536 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def touch_file(path):
    """Create *path* if it does not exist; append mode leaves existing content intact."""
    open(path, 'a').close()
167,537 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def str_to_time(str):
    """Parse a 'YYYY-MM-DD HH:MM:SS.ffffff' string into a ``datetime``."""
    fmt = '%Y-%m-%d %H:%M:%S.%f'
    return datetime.datetime.strptime(str, fmt)
167,538 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def time_int():
    """Return the current Unix time truncated to whole seconds."""
    seconds = time.time()
    return int(seconds)
167,539 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def time_int_ms():
    """Return the current Unix time in whole milliseconds."""
    millis = time.time() * 1000
    return int(millis)
167,540 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
if isinstance(number, dict):
min = float(number.get('min'))
max = float(number.get('max'))
else:
min = number / 2
max = number
def is_number(val):
    """Loose numeric test: return *val* itself for ints, ``str.isdigit()``
    for strings, and False for anything else (floats included)."""
    if isinstance(val, int):
        return val
    return val.isdigit() if isinstance(val, str) else False
167,541 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
if isinstance(number, dict):
min = float(number.get('min'))
max = float(number.get('max'))
else:
min = number / 2
max = number
def create_thread_and_run(jobs, callback_name, wait=True, daemon=True, args=(), kwargs=None):
    """Spawn one thread per job, each running ``job.<callback_name>(*args, **kwargs)``.

    Fixes: the original used a mutable default ``kwargs={}`` (shared across
    calls) and the ``setDaemon`` setter, deprecated since Python 3.10.

    :param jobs: a single object or a list of objects
    :param callback_name: name of the method to run on each job
    :param wait: join all threads before returning when True
    :param daemon: mark the threads as daemon threads
    :param args: positional arguments forwarded to the callback
    :param kwargs: keyword arguments forwarded to the callback (default: none)
    """
    if kwargs is None:
        kwargs = {}
    if not isinstance(jobs, list):
        jobs = [jobs]
    threads = []
    for job in jobs:
        thread = threading.Thread(target=getattr(job, callback_name), args=args, kwargs=kwargs)
        thread.daemon = daemon  # setDaemon() is deprecated since Python 3.10
        thread.start()
        threads.append(thread)
    if wait:
        for thread in threads:
            thread.join()
167,542 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
if isinstance(number, dict):
min = float(number.get('min'))
max = float(number.get('max'))
else:
min = number / 2
max = number
def jobs_do(jobs, do):
    """Invoke the zero-argument method named *do* on each job (single object or list)."""
    for job in (jobs if isinstance(jobs, list) else [jobs]):
        getattr(job, do)()
167,543 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def dict_find_key_by_value(data, value, default=None):
    """Reverse lookup: return the last key in *data* mapped to *value*, else *default*."""
    matches = [key for key, val in data.items() if val == value]
    return matches[-1] if matches else default
167,544 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def objects_find_object_by_key_value(objects, key, value, default=None):
    """Return the last object in *objects* whose attribute *key* equals *value*, else *default*."""
    hits = [obj for obj in objects if getattr(obj, key) == value]
    return hits[-1] if hits else default
167,545 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def dict_count_key_num(data: dict, key, like=False):
    """Count keys of *data* equal to *key*, or merely containing it when *like* is True."""
    if like:
        return sum(1 for k in data if k.find(key) >= 0)
    return sum(1 for k in data if k == key)
167,546 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def array_dict_find_by_key_value(data, key, value, default=None):
    """Return the last dict in *data* where ``d[key] == value``, else *default*."""
    hits = [item for item in data if key in item and item[key] == value]
    return hits[-1] if hits else default
167,547 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def get_true_false_text(value, true='', false=''):
    """Select *true* when *value* is truthy, otherwise *false*."""
    return true if value else false
167,548 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def sleep_forever():
    """Park the calling thread indefinitely when it is not the main thread.

    (Translated from the original Chinese docstring) Gives the appearance of
    stopping: worker threads block forever while the main thread continues.
    :return: None
    """
    if not is_main_thread():
        while True: sleep(10000000)
class Const:
    """Process-wide runtime flags.

    NOTE(review): presumably toggled by the program's test entry point at
    startup — confirm against the caller.
    """
    # True when running in self-test mode
    IS_TEST = False
    # True when notification channels should also be exercised in test mode
    IS_TEST_NOTIFICATION = False
def sleep_forever_when_in_test():
    """In test mode, park the current (non-main) thread forever."""
    if not Const.IS_TEST:
        return
    sleep_forever()
167,549 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def expand_class(cls, key, value, keep_old=True):
    """Replace attribute *key* on *cls* with *value* bound to the class.

    When *keep_old* is true, the previous attribute is preserved under
    ``old_<key>`` before being overwritten.

    :return: the modified class
    """
    if keep_old:
        setattr(cls, f'old_{key}', getattr(cls, key))
    setattr(cls, key, MethodType(value, cls))
    return cls
167,550 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
if isinstance(number, dict):
min = float(number.get('min'))
max = float(number.get('max'))
else:
min = number / 2
max = number
def available_value(value):
    """Pass str/bytes through unchanged; stringify everything else."""
    return value if isinstance(value, (str, bytes)) else str(value)
167,551 | import datetime
import hashlib
import json
import os
import random
import threading
import functools
import time
from time import sleep
from types import MethodType
def md5(value):
    """Return the MD5 hex digest of the JSON encoding of *value*."""
    encoded = json.dumps(value).encode()
    return hashlib.md5(encoded).hexdigest()
167,552 | import png
The provided code snippet includes necessary dependencies for implementing the `print_qrcode` function. Write a Python function `def print_qrcode(path)` to solve the following problem:
将二维码输出到控制台 需要终端尺寸足够大才能显示 :param path: 二维码图片路径 (PNG 格式) :return: None
Here is the function:
def print_qrcode(path):
    """Render a QR code PNG to the console using half-block characters.

    The terminal must be large enough to show the whole code.

    :param path: path to the QR code image (PNG format)
    :return: None

    NOTE(review): indentation in this extract was reconstructed from a
    whitespace-stripped copy — confirm the exact nesting against upstream.
    """
    reader = png.Reader(path)
    width, height, rows, info = reader.read()
    lines = list(rows)
    planes = info['planes']  # channel count per pixel
    threshold = (2 ** info['bitdepth']) / 2  # brightness threshold: below => black
    # Detect the QR geometry: margins and module (cell) size.
    x_flag = -1   # x margin marker (first black column)
    y_flag = -1   # y margin marker (first black row)
    x_white = -1  # x of the first white pixel inside the finder pattern
    y_white = -1  # y of the first white pixel inside the finder pattern
    i = y_flag
    while i < height:
        if y_white > 0 and x_white > 0:
            break
        j = x_flag
        while j < width:
            # Average all channels of pixel (i, j) to decide black vs white.
            total = 0
            for k in range(planes):
                px = lines[i][j * planes + k]
                total += px
            avg = total / planes
            black = avg < threshold
            if y_white > 0 and x_white > 0:
                break
            if x_flag > 0 > x_white and not black:
                x_white = j
            if x_flag == -1 and black:
                x_flag = j
            if y_flag > 0 > y_white and not black:
                y_white = i
            if y_flag == -1 and black:
                y_flag = i
            if x_flag > 0 and y_flag > 0:
                # Corner found: step diagonally to measure the module size.
                i += 1
            j += 1
        i += 1
    # The diagonal run through the finder pattern must be square.
    assert y_white - y_flag == x_white - x_flag
    scale = y_white - y_flag  # pixels per QR module
    assert width - x_flag == height - y_flag
    module_count = int((width - x_flag * 2) / scale)
    # Half-block glyphs: each printed row encodes two module rows.
    whole_white = '█'
    whole_black = ' '
    down_black = '▀'
    up_black = '▄'
    dual_flag = False   # True on every second module row (emit combined glyphs)
    last_line = []      # black/white flags of the previous module row
    output = '\n'
    # Top border.
    for i in range(module_count + 2):
        output += up_black
    output += '\n'
    i = y_flag
    while i < height - y_flag:
        if dual_flag:
            output += whole_white  # left border
        t = 0
        j = x_flag
        while j < width - x_flag:
            total = 0
            for k in range(planes):
                px = lines[i][j * planes + k]
                total += px
            avg = total / planes
            black = avg < threshold
            if dual_flag:
                # Combine this row with the buffered previous row.
                last_black = last_line[t]
                if black and last_black:
                    output += whole_black
                elif black and not last_black:
                    output += down_black
                elif not black and last_black:
                    output += up_black
                elif not black and not last_black:
                    output += whole_white
            else:
                # Buffer this row; it is emitted together with the next one.
                last_line[t:t+1] = [black]
            t = t + 1
            j += scale
        if dual_flag:
            output += whole_white + '\n'  # right border + newline
        dual_flag = not dual_flag
        i += scale
    # Flush a buffered final row (odd module count) plus the bottom border.
    output += whole_white
    for i in range(module_count):
        output += up_black if last_line[i] else whole_white
    output += whole_white + '\n'
    print(output, flush=True)
167,553 | import base64
import logging
import os
import json
import boto3
import urllib3
import uuid
def lambda_handler(event, context):
    """AWS Lambda: download 10 random cat PNGs onto the EFS mount.

    Fixes: the query string fused two parameters ('size=medformat=src');
    the missing '&' is restored. The unused ``urls`` list was removed.

    :param event: Lambda event (unused)
    :param context: Lambda context (unused)
    :return: dict with a 200 status code
    """
    http = urllib3.PoolManager()
    # SECURITY: the API key is hard-coded; consider moving it to an
    # environment variable or Secrets Manager.
    url = ('http://thecatapi.com/api/images/get'
           '?size=med&format=src&type=png'
           '&api_key=8f7dc437-0b9b-47b8-a2c0-65925d593acf')
    for _ in range(10):
        r = http.request('GET', url)
        dest = '/mnt/efs/' + str(uuid.uuid1()) + '.png'
        with open(dest, 'wb') as png:
            png.write(r.data)
    return {
        'statusCode': 200,
    }
167,554 | import os
import json
import uuid
import boto3
from PIL import Image
# Destination bucket for processed images, injected via the Lambda environment.
processed_bucket=os.environ['processed_bucket']
# S3 client created once at import time and reused across warm invocations.
s3_client = boto3.client('s3')
def pixelate(pixelsize, image_path, pixelated_img_path):
    """Pixelate the image at *image_path* and save it to *pixelated_img_path*.

    Shrinks the image to *pixelsize* (bilinear), then scales it back to the
    original size with nearest-neighbour sampling to produce large pixels.
    """
    original = Image.open(image_path)
    shrunk = original.resize(pixelsize, Image.BILINEAR)
    shrunk.resize(original.size, Image.NEAREST).save(pixelated_img_path)
def lambda_handler(event, context):
    """S3-triggered Lambda: pixelate an uploaded image at five resolutions.

    Downloads the object named in the event, writes 8x8..64x64 pixelated
    variants to /tmp, and uploads each as 'pixelated-<s>x<s>-<key>' to the
    processed bucket.

    Fixes: the five copy-pasted pixelate/upload call pairs are replaced by
    loops over the size list; the unused ``upload_key`` variable is removed.
    Call order (all pixelations, then all uploads) is preserved.
    """
    print(event)
    # Bucket and object key from the triggering S3 event.
    source_bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']
    # Unique temp name for the original image inside this execution environment.
    object_key = str(uuid.uuid4()) + '-' + key
    img_download_path = '/tmp/{}'.format(object_key)
    # Download the source image from S3 into /tmp.
    with open(img_download_path, 'wb') as img_file:
        s3_client.download_fileobj(source_bucket, key, img_file)
    sizes = (8, 16, 32, 48, 64)
    # Generate every pixelated variant first (matches the original call order).
    for s in sizes:
        pixelate((s, s), img_download_path, '/tmp/pixelated-{0}x{0}-{1}'.format(s, object_key))
    # Then upload each variant to the destination bucket.
    for s in sizes:
        s3_client.upload_file('/tmp/pixelated-{0}x{0}-{1}'.format(s, object_key),
                              processed_bucket, 'pixelated-{0}x{0}-{1}'.format(s, key))
167,555 | from . import Image, ImageFile
from ._binary import i32be as i32
def _accept(prefix):
    """Header sniff: accept buffers whose first big-endian 32-bit word is
    >= 20 and whose second word is 1 or 2."""
    if len(prefix) < 8:
        return False
    return i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2)
167,556 | from . import Image, ImageFile
from ._binary import o8
from ._binary import o16be as o16b
# 8-bit Palm OS system colormap: 256 (r, g, b) entries. The leading entries
# walk a web-safe color cube (component values 255/204/153/102/51/0), followed
# by a grey ramp and a few Windows-style system colors; the tail is padded
# with black. Values are kept verbatim — they must match the Palm firmware
# palette exactly for saved images to render correctly.
_Palm8BitColormapValues = (
    (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255),
    (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204),
    (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204),
    (255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153),
    (255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255),
    (204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255),
    (204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204),
    (204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153),
    (204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153),
    (153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255),
    (153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204),
    (153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204),
    (153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153),
    (153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255),
    (102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255),
    (102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204),
    (102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153),
    (102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153),
    (51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255),
    (51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204),
    (51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204),
    (51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153),
    (51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255),
    (0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255),
    (0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204),
    (0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153),
    (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153),
    (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102),
    (255, 51, 102), (255, 0, 102), (255, 255, 51), (255, 204, 51),
    (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51),
    (255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0),
    (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102),
    (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102),
    (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51),
    (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0),
    (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0),
    (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102),
    (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51),
    (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51),
    (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0),
    (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102),
    (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102),
    (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51),
    (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0),
    (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0),
    (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102),
    (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51),
    (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51),
    (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0),
    (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102),
    (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102),
    (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51),
    (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0),
    (0, 153, 0), (0, 102, 0), (0, 51, 0), (17, 17, 17),
    (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119),
    (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221),
    (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128),
    (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
    (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0))
# Register the Palm driver with Pillow's plugin machinery. Save-only: there
# is no register_open call here, so Pillow can write but not read .palm files.
Image.register_save("Palm", _save)
Image.register_extension("Palm", ".palm")
Image.register_mime("Palm", "image/palm")
class Image:
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
_close_exclusive_fp_after_loading = True
def __init__(self):
# FIXME: take "new" parameters / other image?
# FIXME: turn mode and size into delegating properties?
self.im = None
self.mode = ""
self._size = (0, 0)
self.palette = None
self.info = {}
self._category = 0
self.readonly = 0
self.pyaccess = None
self._exif = None
def __getattr__(self, name):
if name == "category":
warnings.warn(
"Image categories are deprecated and will be removed in Pillow 10 "
"(2023-07-01). Use is_animated instead.",
DeprecationWarning,
stacklevel=2,
)
return self._category
raise AttributeError(name)
def width(self):
return self.size[0]
def height(self):
return self.size[1]
def size(self):
return self._size
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new._size = im.size
if im.mode in ("P", "PA"):
if self.palette:
new.palette = self.palette.copy()
else:
from . import ImagePalette
new.palette = ImagePalette.ImagePalette()
new.info = self.info.copy()
return new
# Context manager support
def __enter__(self):
return self
def __exit__(self, *args):
if hasattr(self, "fp") and getattr(self, "_exclusive_fp", False):
if hasattr(self, "_close__fp"):
self._close__fp()
if self.fp:
self.fp.close()
self.fp = None
def close(self):
"""
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is required to close images that have multiple frames or
have not had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
more information.
"""
try:
if hasattr(self, "_close__fp"):
self._close__fp()
if self.fp:
self.fp.close()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
if getattr(self, "map", None):
self.map = None
# Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image
# object is gone.
self.im = deferred_error(ValueError("Operation on closed image"))
def _copy(self):
self.load()
self.im = self.im.copy()
self.pyaccess = None
self.readonly = 0
def _ensure_mutable(self):
if self.readonly:
self._copy()
else:
self.load()
def _dump(self, file=None, format=None, **options):
suffix = ""
if format:
suffix = "." + format
if not file:
f, filename = tempfile.mkstemp(suffix)
os.close(f)
else:
filename = file
if not filename.endswith(suffix):
filename = filename + suffix
self.load()
if not format or format == "PPM":
self.im.save_ppm(filename)
else:
self.save(filename, format, **options)
return filename
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.mode == other.mode
and self.size == other.size
and self.info == other.info
and self._category == other._category
and self.readonly == other.readonly
and self.getpalette() == other.getpalette()
and self.tobytes() == other.tobytes()
)
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__,
self.__class__.__name__,
self.mode,
self.size[0],
self.size[1],
id(self),
)
def _repr_png_(self):
"""iPython display hook support
:returns: png version of the image as bytes
"""
b = io.BytesIO()
try:
self.save(b, "PNG")
except Exception as e:
raise ValueError("Could not save to PNG for display") from e
return b.getvalue()
class _ArrayData:
def __init__(self, new):
self.__array_interface__ = new
def __array__(self, dtype=None):
# numpy array interface support
import numpy as np
new = {}
shape, typestr = _conv_type_shape(self)
new["shape"] = shape
new["typestr"] = typestr
new["version"] = 3
if self.mode == "1":
# Binary images need to be extended from bits to bytes
# See: https://github.com/python-pillow/Pillow/issues/350
new["data"] = self.tobytes("raw", "L")
else:
new["data"] = self.tobytes()
return np.array(self._ArrayData(new), dtype)
def __getstate__(self):
return [self.info, self.mode, self.size, self.getpalette(), self.tobytes()]
def __setstate__(self, state):
Image.__init__(self)
self.tile = []
info, mode, size, palette, data = state
self.info = info
self.mode = mode
self._size = size
self.im = core.new(mode, size)
if mode in ("L", "LA", "P", "PA") and palette:
self.putpalette(palette)
self.frombytes(data)
def tobytes(self, encoder_name="raw", *args):
"""
Return image as a bytes object.
.. warning::
This method returns the raw image data from the internal
storage. For compressed image data (e.g. PNG, JPEG) use
:meth:`~.save`, with a BytesIO parameter for in-memory
data.
:param encoder_name: What encoder to use. The default is to
use the standard "raw" encoder.
:param args: Extra arguments to the encoder.
:returns: A :py:class:`bytes` object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
data = []
while True:
l, s, d = e.encode(bufsize)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError(f"encoder error {s} in tobytes")
return b"".join(data)
def tobitmap(self, name="image"):
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tobytes("xbm")
return b"".join(
[
f"#define {name}_width {self.size[0]}\n".encode("ascii"),
f"#define {name}_height {self.size[1]}\n".encode("ascii"),
f"static char {name}_bits[] = {{\n".encode("ascii"),
data,
b"};",
]
)
def frombytes(self, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
    def load(self):
        """
        Allocates storage for the image and loads the pixel data. In
        normal cases, you don't need to call this method, since the
        Image class automatically loads an opened image when it is
        accessed for the first time.

        If the file associated with the image was opened by Pillow, then this
        method will close it. The exception to this is if the image has
        multiple frames, in which case the file will be left open for seek
        operations. See :ref:`file-handling` for more information.

        :returns: An image access object.
        :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess`
        """
        if self.im and self.palette and self.palette.dirty:
            # realize palette: push pending palette edits down into the C
            # core image before any pixel access happens
            mode, arr = self.palette.getdata()
            if mode == "RGBA":
                # the core palette is RGB; split the alpha bytes out into
                # the "transparency" info key (every 4th byte is alpha)
                mode = "RGB"
                self.info["transparency"] = arr[3::4]
                arr = bytes(
                    value for (index, value) in enumerate(arr) if index % 4 != 3
                )
            palette_length = self.im.putpalette(mode, arr)
            self.palette.dirty = 0
            self.palette.rawmode = None
            if "transparency" in self.info and mode in ("LA", "PA"):
                # apply transparency as per-entry alpha in the core palette
                if isinstance(self.info["transparency"], int):
                    self.im.putpalettealpha(self.info["transparency"], 0)
                else:
                    self.im.putpalettealphas(self.info["transparency"])
                self.palette.mode = "RGBA"
            else:
                # read back the palette the core actually stored
                self.palette.mode = "RGB"
                self.palette.palette = self.im.getpalette()[: palette_length * 3]
        if self.im:
            # prefer the cffi-based access object when available
            if cffi and USE_CFFI_ACCESS:
                if self.pyaccess:
                    return self.pyaccess
                from . import PyAccess
                self.pyaccess = PyAccess.new(self, self.readonly)
                if self.pyaccess:
                    return self.pyaccess
            return self.im.pixel_access(self.readonly)
    def verify(self):
        """
        Verifies the contents of a file. For data read from a file, this
        method attempts to determine if the file is broken, without
        actually decoding the image data. If this method finds any
        problems, it raises suitable exceptions. If you need to load
        the image after using this method, you must reopen the image
        file.
        """
        # Intentionally a no-op here; format-specific subclasses are
        # expected to override this with real integrity checks.
        pass
    def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256):
        """
        Returns a converted copy of this image. For the "P" mode, this
        method translates pixels through the palette. If mode is
        omitted, a mode is chosen so that all information in the image
        and the palette can be represented without a palette.

        The current version supports all possible conversions between
        "L", "RGB" and "CMYK." The ``matrix`` argument only supports "L"
        and "RGB".

        When translating a color image to greyscale (mode "L"),
        the library uses the ITU-R 601-2 luma transform::

            L = R * 299/1000 + G * 587/1000 + B * 114/1000

        The default method of converting a greyscale ("L") or "RGB"
        image into a bilevel (mode "1") image uses Floyd-Steinberg
        dither to approximate the original image luminosity levels. If
        dither is :data:`NONE`, all values larger than 127 are set to 255 (white),
        all other values to 0 (black). To use other thresholds, use the
        :py:meth:`~PIL.Image.Image.point` method.

        When converting from "RGBA" to "P" without a ``matrix`` argument,
        this passes the operation to :py:meth:`~PIL.Image.Image.quantize`,
        and ``dither`` and ``palette`` are ignored.

        :param mode: The requested mode. See: :ref:`concept-modes`.
        :param matrix: An optional conversion matrix. If given, this
           should be 4- or 12-tuple containing floating point values.
        :param dither: Dithering method, used when converting from
           mode "RGB" to "P" or from "RGB" or "L" to "1".
           Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default).
           Note that this is not used when ``matrix`` is supplied.
        :param palette: Palette to use when converting from mode "RGB"
           to "P". Available palettes are :data:`WEB` or :data:`ADAPTIVE`.
        :param colors: Number of colors to use for the :data:`ADAPTIVE` palette.
           Defaults to 256.
        :rtype: :py:class:`~PIL.Image.Image`
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        self.load()
        has_transparency = self.info.get("transparency") is not None
        if not mode and self.mode == "P":
            # determine default mode from the attached palette
            if self.palette:
                mode = self.palette.mode
            else:
                mode = "RGB"
            if mode == "RGB" and has_transparency:
                mode = "RGBA"
        if not mode or (mode == self.mode and not matrix):
            # nothing to convert; hand back an independent copy
            return self.copy()
        if matrix:
            # matrix conversion (linear color transform in the C core)
            if mode not in ("L", "RGB"):
                raise ValueError("illegal conversion")
            im = self.im.convert_matrix(mode, matrix)
            new = self._new(im)
            if has_transparency and self.im.bands == 3:
                # run the transparency color through the same matrix
                transparency = new.info["transparency"]
                def convert_transparency(m, v):
                    # one row of the matrix applied to the color, rounded
                    # and clamped into the 0..255 byte range
                    v = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5
                    return max(0, min(255, int(v)))
                if mode == "L":
                    transparency = convert_transparency(matrix, transparency)
                elif len(mode) == 3:
                    transparency = tuple(
                        convert_transparency(matrix[i * 4 : i * 4 + 4], transparency)
                        for i in range(0, len(transparency))
                    )
                new.info["transparency"] = transparency
            return new
        if mode == "P" and self.mode == "RGBA":
            # delegate to quantize(); dither/palette args are ignored here
            return self.quantize(colors)
        trns = None
        delete_trns = False
        # transparency handling
        if has_transparency:
            if self.mode in ("1", "L", "I", "RGB") and mode == "RGBA":
                # Use transparent conversion to promote from transparent
                # color to an alpha channel.
                new_im = self._new(
                    self.im.convert_transparent(mode, self.info["transparency"])
                )
                del new_im.info["transparency"]
                return new_im
            elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"):
                t = self.info["transparency"]
                if isinstance(t, bytes):
                    # Dragons. This can't be represented by a single color
                    warnings.warn(
                        "Palette images with Transparency expressed in bytes should be "
                        "converted to RGBA images"
                    )
                    delete_trns = True
                else:
                    # get the new transparency color.
                    # use existing conversions on a 1x1 probe image
                    trns_im = Image()._new(core.new(self.mode, (1, 1)))
                    if self.mode == "P":
                        trns_im.putpalette(self.palette)
                        if isinstance(t, tuple):
                            err = "Couldn't allocate a palette color for transparency"
                            try:
                                t = trns_im.palette.getcolor(t, self)
                            except ValueError as e:
                                if str(e) == "cannot allocate more than 256 colors":
                                    # If all 256 colors are in use,
                                    # then there is no need for transparency
                                    t = None
                                else:
                                    raise ValueError(err) from e
                    if t is None:
                        trns = None
                    else:
                        trns_im.putpixel((0, 0), t)
                        if mode in ("L", "RGB"):
                            trns_im = trns_im.convert(mode)
                        else:
                            # can't just retrieve the palette number, got to do it
                            # after quantization.
                            trns_im = trns_im.convert("RGB")
                        trns = trns_im.getpixel((0, 0))
            elif self.mode == "P" and mode in ("LA", "PA", "RGBA"):
                # fold the transparency into the palette's alpha entries
                t = self.info["transparency"]
                delete_trns = True
                if isinstance(t, bytes):
                    self.im.putpalettealphas(t)
                elif isinstance(t, int):
                    self.im.putpalettealpha(t, 0)
                else:
                    raise ValueError("Transparency for P mode should be bytes or int")
        if mode == "P" and palette == ADAPTIVE:
            # adaptive palette: let the core quantizer pick the colors
            im = self.im.quantize(colors)
            new = self._new(im)
            from . import ImagePalette
            new.palette = ImagePalette.ImagePalette("RGB", new.im.getpalette("RGB"))
            if delete_trns:
                # This could possibly happen if we requantize to fewer colors.
                # The transparency would be totally off in that case.
                del new.info["transparency"]
            if trns is not None:
                try:
                    new.info["transparency"] = new.palette.getcolor(trns, new)
                except Exception:
                    # if we can't make a transparent color, don't leave the old
                    # transparency hanging around to mess us up.
                    del new.info["transparency"]
                    warnings.warn("Couldn't allocate palette entry for transparency")
            return new
        # colorspace conversion
        if dither is None:
            dither = FLOYDSTEINBERG
        try:
            im = self.im.convert(mode, dither)
        except ValueError:
            try:
                # normalize source image and try again
                im = self.im.convert(getmodebase(self.mode))
                im = im.convert(mode, dither)
            except KeyError as e:
                raise ValueError("illegal conversion") from e
        new_im = self._new(im)
        if mode == "P" and palette != ADAPTIVE:
            # WEB palette: fixed 256-entry greyscale-ramp placeholder
            from . import ImagePalette
            new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3)
        if delete_trns:
            # crash fail if we leave a bytes transparency in an rgb/l mode.
            del new_im.info["transparency"]
        if trns is not None:
            if new_im.mode == "P":
                try:
                    new_im.info["transparency"] = new_im.palette.getcolor(trns, new_im)
                except ValueError as e:
                    del new_im.info["transparency"]
                    if str(e) != "cannot allocate more than 256 colors":
                        # If all 256 colors are in use,
                        # then there is no need for transparency
                        warnings.warn(
                            "Couldn't allocate palette entry for transparency"
                        )
            else:
                new_im.info["transparency"] = trns
        return new_im
    def quantize(self, colors=256, method=None, kmeans=0, palette=None, dither=1):
        """
        Convert the image to 'P' mode with the specified number
        of colors.

        :param colors: The desired number of colors, <= 256
        :param method: :data:`MEDIANCUT` (median cut),
                       :data:`MAXCOVERAGE` (maximum coverage),
                       :data:`FASTOCTREE` (fast octree),
                       :data:`LIBIMAGEQUANT` (libimagequant; check support using
                       :py:func:`PIL.features.check_feature`
                       with ``feature="libimagequant"``).
                       By default, :data:`MEDIANCUT` will be used.
                       The exception to this is RGBA images. :data:`MEDIANCUT` and
                       :data:`MAXCOVERAGE` do not support RGBA images, so
                       :data:`FASTOCTREE` is used by default instead.
        :param kmeans: Integer
        :param palette: Quantize to the palette of given
                        :py:class:`PIL.Image.Image`.
        :param dither: Dithering method, used when converting from
           mode "RGB" to "P" or from "RGB" or "L" to "1".
           Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default).
           Default: 1 (legacy setting)
        :returns: A new image
        """
        self.load()
        if method is None:
            # defaults: MEDIANCUT, except for RGBA which it can't handle
            method = MEDIANCUT
            if self.mode == "RGBA":
                method = FASTOCTREE
        if self.mode == "RGBA" and method not in (FASTOCTREE, LIBIMAGEQUANT):
            # Caller specified an invalid mode.
            raise ValueError(
                "Fast Octree (method == 2) and libimagequant (method == 3) "
                "are the only valid methods for quantizing RGBA images"
            )
        if palette:
            # use palette from reference image; method/kmeans are ignored
            palette.load()
            if palette.mode != "P":
                raise ValueError("bad mode for palette image")
            if self.mode != "RGB" and self.mode != "L":
                raise ValueError(
                    "only RGB or L mode images can be quantized to a palette"
                )
            im = self.im.convert("P", dither, palette.im)
            new_im = self._new(im)
            new_im.palette = palette.palette.copy()
            return new_im
        im = self._new(self.im.quantize(colors, method, kmeans))
        from . import ImagePalette
        # attach the palette the quantizer produced, trimmed to the
        # requested number of colors
        mode = im.im.getpalettemode()
        palette = im.im.getpalette(mode, mode)[: colors * len(mode)]
        im.palette = ImagePalette.ImagePalette(mode, palette)
        return im
def copy(self):
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
return self._new(self.im.copy())
__copy__ = copy
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate. See :ref:`coordinate-system`.
Note: Prior to Pillow 3.4.0, this was a lazy operation.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if box is None:
return self.copy()
self.load()
return self._new(self._crop(self.im, box))
def _crop(self, im, box):
"""
Returns a rectangular region from the core image object im.
This is equivalent to calling im.crop((x0, y0, x1, y1)), but
includes additional sanity checks.
:param im: a core image object
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:returns: A core image object.
"""
x0, y0, x1, y1 = map(int, map(round, box))
absolute_values = (abs(x1 - x0), abs(y1 - y0))
_decompression_bomb_check(absolute_values)
return im.crop((x0, y0, x1, y1))
    def draft(self, mode, size):
        """
        Configures the image file loader so it returns a version of the
        image that as closely as possible matches the given mode and
        size. For example, you can use this method to convert a color
        JPEG to greyscale while loading it.

        If any changes are made, returns a tuple with the chosen ``mode`` and
        ``box`` with coordinates of the original image within the altered one.

        Note that this method modifies the :py:class:`~PIL.Image.Image` object
        in place. If the image has already been loaded, this method has no
        effect.

        Note: This method is not implemented for most images. It is
        currently implemented only for JPEG and MPO images.

        :param mode: The requested mode.
        :param size: The requested size.
        """
        # Base-class no-op; format plugins that support drafting override it.
        pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object."""
from . import ImageFilter
self.load()
if isinstance(filter, Callable):
filter = filter()
if not hasattr(filter, "filter"):
raise TypeError(
"filter argument should be ImageFilter.Filter instance or class"
)
multiband = isinstance(filter, ImageFilter.MultibandFilter)
if self.im.bands == 1 or multiband:
return self._new(filter.filter(self.im))
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
def getbands(self):
"""
Returns a tuple containing the name of each band in this image.
For example, ``getbands`` on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. See
:ref:`coordinate-system`. If the image is completely empty, this
method returns None.
"""
self.load()
return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
The colors will be in the image's mode. For example, an RGB image will
return a tuple of (red, green, blue) color values, and a P image will
return the index of the color in the palette.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
def getdata(self, band=None):
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use ``list(im.getdata())``.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self):
"""
Gets the the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
def _getxmp(self, xmp_tags):
def get_name(tag):
return tag.split("}")[1]
def get_value(element):
value = {get_name(k): v for k, v in element.attrib.items()}
children = list(element)
if children:
for child in children:
name = get_name(child.tag)
child_value = get_value(child)
if name in value:
if not isinstance(value[name], list):
value[name] = [value[name]]
value[name].append(child_value)
else:
value[name] = child_value
elif value:
if element.text:
value["text"] = element.text
else:
return element.text
return value
if ElementTree is None:
warnings.warn("XMP data cannot be read without defusedxml dependency")
return {}
else:
root = ElementTree.fromstring(xmp_tags)
return {get_name(root.tag): get_value(root)}
    def getexif(self):
        # Return this image's EXIF data as an Exif instance, building and
        # caching it on first use.
        if self._exif is None:
            self._exif = Exif()
        exif_info = self.info.get("exif")
        if exif_info is None:
            if "Raw profile type exif" in self.info:
                # PNG raw profile: hex-encoded dump whose first three lines
                # are header text, hence the [3:] slice before decoding
                exif_info = bytes.fromhex(
                    "".join(self.info["Raw profile type exif"].split("\n")[3:])
                )
            elif hasattr(self, "tag_v2"):
                # TIFF: reuse the already-parsed tag directory via the
                # open file pointer
                self._exif.endian = self.tag_v2._endian
                self._exif.load_from_fp(self.fp, self.tag_v2._offset)
        if exif_info is not None:
            self._exif.load(exif_info)
        # XMP tags
        if 0x0112 not in self._exif:
            # no EXIF orientation: fall back to the XMP packet's
            # tiff:Orientation attribute, if present
            xmp_tags = self.info.get("XML:com.adobe.xmp")
            if xmp_tags:
                match = re.search(r'tiff:Orientation="([0-9])"', xmp_tags)
                if match:
                    self._exif[0x0112] = int(match[1])
        return self._exif
def getim(self):
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self):
"""
Returns the image palette as a list.
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
return list(self.im.getpalette())
except ValueError:
return None # no palette
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y). See
:ref:`coordinate-system`.
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
if self.pyaccess:
return self.pyaccess.getpixel(xy)
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return list(x), list(y)
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:param extrema: An optional tuple of manually-specified extrema.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
def entropy(self, mask=None, extrema=None):
"""
Calculates and returns the entropy for the image.
A bilevel image (mode "1") is treated as a greyscale ("L")
image by this method.
If a mask is provided, the method employs the histogram for
those parts of the image where the mask image is non-zero.
The mask image must have the same size as the image, and be
either a bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:param extrema: An optional tuple of manually-specified extrema.
:returns: A float value representing the image entropy
"""
self.load()
if mask:
mask.load()
return self.im.entropy((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.entropy(extrema)
return self.im.entropy()
    def paste(self, im, box=None, mask=None):
        """
        Pastes another image into this image. The box argument is either
        a 2-tuple giving the upper left corner, a 4-tuple defining the
        left, upper, right, and lower pixel coordinate, or None (same as
        (0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size
        of the pasted image must match the size of the region.

        If the modes don't match, the pasted image is converted to the mode of
        this image (see the :py:meth:`~PIL.Image.Image.convert` method for
        details).

        Instead of an image, the source can be a integer or tuple
        containing pixel values. The method then fills the region
        with the given color. When creating RGB images, you can
        also use color strings as supported by the ImageColor module.

        If a mask is given, this method updates only the regions
        indicated by the mask. You can use either "1", "L" or "RGBA"
        images (in the latter case, the alpha band is used as mask).
        Where the mask is 255, the given image is copied as is. Where
        the mask is 0, the current value is preserved. Intermediate
        values will mix the two images together, including their alpha
        channels if they have them.

        See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
        combine images with respect to their alpha channels.

        :param im: Source image or pixel value (integer or tuple).
        :param box: An optional 4-tuple giving the region to paste into.
           If a 2-tuple is used instead, it's treated as the upper left
           corner. If omitted or None, the source is pasted into the
           upper left corner.

           If an image is given as the second argument and there is no
           third, the box defaults to (0, 0), and the second argument
           is interpreted as a mask image.
        :param mask: An optional mask image.
        :raises ValueError: If a 2-tuple box is given but the region size
           cannot be determined from the source or the mask.
        """
        if isImageType(box) and mask is None:
            # abbreviated paste(im, mask) syntax
            mask = box
            box = None
        if box is None:
            box = (0, 0)
        if len(box) == 2:
            # upper left corner given; get size from image or mask
            if isImageType(im):
                size = im.size
            elif isImageType(mask):
                size = mask.size
            else:
                # FIXME: use self.size here?
                raise ValueError("cannot determine region size; use 4-item box")
            # expand the 2-tuple corner into a full 4-tuple box
            box += (box[0] + size[0], box[1] + size[1])
        if isinstance(im, str):
            # color string: resolve it to a pixel value in our mode
            from . import ImageColor
            im = ImageColor.getcolor(im, self.mode)
        elif isImageType(im):
            im.load()
            if self.mode != im.mode:
                # RGBA/RGBa sources pasted onto RGB keep their alpha band
                # for the core paste; everything else is converted first
                if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
                    # should use an adapter for this!
                    im = im.convert(self.mode)
            im = im.im
        self._ensure_mutable()
        if mask:
            mask.load()
            self.im.paste(im, box, mask.im)
        else:
            self.im.paste(im, box)
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
"""'In-place' analog of Image.alpha_composite. Composites an image
onto this image.
:param im: image to composite over this one
:param dest: Optional 2 tuple (left, top) specifying the upper
left corner in this (destination) image.
:param source: Optional 2 (left, top) tuple for the upper left
corner in the overlay source image, or 4 tuple (left, top, right,
bottom) for the bounds of the source rectangle
Performance Note: Not currently implemented in-place in the core layer.
"""
if not isinstance(source, (list, tuple)):
raise ValueError("Source must be a tuple")
if not isinstance(dest, (list, tuple)):
raise ValueError("Destination must be a tuple")
if not len(source) in (2, 4):
raise ValueError("Source must be a 2 or 4-tuple")
if not len(dest) == 2:
raise ValueError("Destination must be a 2-tuple")
if min(source) < 0:
raise ValueError("Source must be non-negative")
if len(source) == 2:
source = source + im.size
# over image, crop if it's not the whole thing.
if source == (0, 0) + im.size:
overlay = im
else:
overlay = im.crop(source)
# target for the paste
box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)
# destination image. don't copy if we're using the whole image.
if box == (0, 0) + self.size:
background = self
else:
background = self.crop(box)
result = alpha_composite(background, overlay)
self.paste(result, box)
    def point(self, lut, mode=None):
        """
        Maps this image through a lookup table or function.

        :param lut: A lookup table, containing 256 (or 65536 if
           self.mode=="I" and mode == "L") values per band in the
           image.  A function can be used instead, it should take a
           single argument. The function is called once for each
           possible pixel value, and the resulting table is applied to
           all bands of the image.

           It may also be an :py:class:`~PIL.Image.ImagePointHandler`
           object::

               class Example(Image.ImagePointHandler):
                 def point(self, data):
                   # Return result
        :param mode: Output mode (default is same as input).  In the
           current version, this can only be used if the source image
           has mode "L" or "P", and the output has mode "1" or the
           source image mode is "I" and the output mode is "L".
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        self.load()
        if isinstance(lut, ImagePointHandler):
            # delegate entirely to the handler object
            return lut.point(self)
        if callable(lut):
            # if it isn't a list, it should be a function
            if self.mode in ("I", "I;16", "F"):
                # check if the function can be used with point_transform
                # UNDONE wiredfool -- I think this prevents us from ever doing
                # a gamma function point transform on > 8bit images.
                scale, offset = _getscaleoffset(lut)
                return self._new(self.im.point_transform(scale, offset))
            # for other modes, convert the function to a table by
            # evaluating it at every 8-bit value, replicated per band
            lut = [lut(i) for i in range(256)] * self.im.bands
        if self.mode == "F":
            # FIXME: _imaging returns a confusing error message for this case
            raise ValueError("point operation not supported for this mode")
        return self._new(self.im.point(lut, mode))
    def putalpha(self, alpha):
        """
        Adds or replaces the alpha layer in this image. If the image
        does not have an alpha layer, it's converted to "LA" or "RGBA".
        The new layer must be either "L" or "1".

        :param alpha: The new alpha layer.  This can either be an "L" or "1"
           image having the same size as this image, or an integer or
           other color value.
        :raises ValueError: If the image mode cannot be promoted to an
           alpha mode, or the alpha image has a mode other than "1" or "L".
        """
        self._ensure_mutable()
        if self.mode not in ("LA", "PA", "RGBA"):
            # attempt to promote self to a matching alpha mode
            try:
                mode = getmodebase(self.mode) + "A"
                try:
                    # fast path: flip the mode in place on the core image
                    self.im.setmode(mode)
                except (AttributeError, ValueError) as e:
                    # do things the hard way
                    im = self.im.convert(mode)
                    if im.mode not in ("LA", "PA", "RGBA"):
                        raise ValueError from e  # sanity check
                    self.im = im
                self.pyaccess = None
                self.mode = self.im.mode
            except KeyError as e:
                raise ValueError("illegal image mode") from e
        # index of the alpha band in the promoted mode
        if self.mode in ("LA", "PA"):
            band = 1
        else:
            band = 3
        if isImageType(alpha):
            # alpha layer supplied as an image
            if alpha.mode not in ("1", "L"):
                raise ValueError("illegal image mode")
            alpha.load()
            if alpha.mode == "1":
                alpha = alpha.convert("L")
        else:
            # constant alpha
            try:
                self.im.fillband(band, alpha)
            except (AttributeError, ValueError):
                # do things the hard way: build a solid "L" image
                alpha = new("L", self.size, alpha)
            else:
                # fillband succeeded; nothing more to do
                return
        self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
"""
Copies pixel data from a flattened sequence object into the image. The
values should start at the upper left corner (0, 0), continue to the
end of the line, followed directly by the first value of the second
line, and so on. Data will be read until either the image or the
sequence ends. The scale and offset values are used to adjust the
sequence values: **pixel = value*scale + offset**.
:param data: A flattened sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self._ensure_mutable()
self.im.putdata(data, scale, offset)
def putpalette(self, data, rawmode="RGB"):
"""
Attaches a palette to this image. The image must be a "P", "PA", "L"
or "LA" image.
The palette sequence must contain at most 256 colors, made up of one
integer value for each channel in the raw mode.
For example, if the raw mode is "RGB", then it can contain at most 768
values, made up of red, green and blue values for the corresponding pixel
index in the 256 colors.
If the raw mode is "RGBA", then it can contain at most 1024 values,
containing red, green, blue and alpha values.
Alternatively, an 8-bit string may be used instead of an integer sequence.
:param data: A palette sequence (either a list or a string).
:param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a
mode that can be transformed to "RGB" (e.g. "R", "BGR;15", "RGBA;L").
"""
from . import ImagePalette
if self.mode not in ("L", "LA", "P", "PA"):
raise ValueError("illegal image mode")
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
else:
if not isinstance(data, bytes):
data = bytes(data)
palette = ImagePalette.raw(rawmode, data)
self.mode = "PA" if "A" in self.mode else "P"
self.palette = palette
self.palette.mode = "RGB"
self.load() # install new palette
def putpixel(self, xy, value):
"""
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images. In addition to this, RGB and RGBA tuples are
accepted for P images.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
module instead.
See:
* :py:meth:`~PIL.Image.Image.paste`
* :py:meth:`~PIL.Image.Image.putdata`
* :py:mod:`~PIL.ImageDraw`
:param xy: The pixel coordinate, given as (x, y). See
:ref:`coordinate-system`.
:param value: The pixel value.
"""
if self.readonly:
self._copy()
self.load()
if self.pyaccess:
return self.pyaccess.putpixel(xy, value)
if (
self.mode == "P"
and isinstance(value, (list, tuple))
and len(value) in [3, 4]
):
# RGB or RGBA value for a P image
value = self.palette.getcolor(value, self)
return self.im.putpixel(xy, value)
    def remap_palette(self, dest_map, source_palette=None):
        """
        Rewrites the image to reorder the palette.

        :param dest_map: A list of indexes into the original palette.
           e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))``
           is the identity transform.
        :param source_palette: Bytes or None.
        :returns: An :py:class:`~PIL.Image.Image` object.
        :raises ValueError: If the image mode is not "L" or "P".
        """
        from . import ImagePalette
        if self.mode not in ("L", "P"):
            raise ValueError("illegal image mode")
        if source_palette is None:
            if self.mode == "P":
                self.load()
                source_palette = self.im.getpalette("RGB")[:768]
            else:  # L-mode
                # synthesize a greyscale ramp palette (i, i, i)
                source_palette = bytearray(i // 3 for i in range(768))
        palette_bytes = b""
        new_positions = [0] * 256
        # pick only the used colors from the palette
        for i, oldPosition in enumerate(dest_map):
            palette_bytes += source_palette[oldPosition * 3 : oldPosition * 3 + 3]
            new_positions[oldPosition] = i
        # replace the palette color id of all pixel with the new id
        # Palette images are [0..255], mapped through a 1 or 3
        # byte/color map.  We need to remap the whole image
        # from palette 1 to palette 2. New_positions is
        # an array of indexes into palette 1.  Palette 2 is
        # palette 1 with any holes removed.
        # We're going to leverage the convert mechanism to use the
        # C code to remap the image from palette 1 to palette 2,
        # by forcing the source image into 'L' mode and adding a
        # mapping 'L' mode palette, then converting back to 'L'
        # sans palette thus converting the image bytes, then
        # assigning the optimized RGB palette.
        # perf reference, 9500x4000 gif, w/~135 colors
        # 14 sec prepatch, 1 sec postpatch with optimization forced.
        mapping_palette = bytearray(new_positions)
        m_im = self.copy()
        m_im.mode = "P"
        m_im.palette = ImagePalette.ImagePalette("RGB", palette=mapping_palette * 3)
        # possibly set palette dirty, then
        # m_im.putpalette(mapping_palette, 'L')  # converts to 'P'
        # or just force it.
        # UNDONE -- this is part of the general issue with palettes
        m_im.im.putpalette("RGB;L", m_im.palette.tobytes())
        m_im = m_im.convert("L")
        # Internally, we require 768 bytes for a palette.
        new_palette_bytes = palette_bytes + (768 - len(palette_bytes)) * b"\x00"
        m_im.putpalette(new_palette_bytes)
        m_im.palette = ImagePalette.ImagePalette("RGB", palette=palette_bytes)
        return m_im
def _get_safe_box(self, size, resample, box):
"""Expands the box so it includes adjacent pixels
that may be used by resampling with the given resampling filter.
"""
filter_support = _filters_support[resample] - 0.5
scale_x = (box[2] - box[0]) / size[0]
scale_y = (box[3] - box[1]) / size[1]
support_x = filter_support * scale_x
support_y = filter_support * scale_y
return (
max(0, int(box[0] - support_x)),
max(0, int(box[1] - support_y)),
min(self.size[0], math.ceil(box[2] + support_x)),
min(self.size[1], math.ceil(box[3] + support_y)),
)
    def resize(self, size, resample=None, box=None, reducing_gap=None):
        """
        Returns a resized copy of this image.

        :param size: The requested size in pixels, as a 2-tuple:
           (width, height).
        :param resample: An optional resampling filter. This can be
           one of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`,
           :py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`,
           :py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`.
           If the image has mode "1" or "P", it is always set to
           :py:data:`PIL.Image.NEAREST`.
           If the image mode specifies a number of bits, such as "I;16", then the
           default filter is :py:data:`PIL.Image.NEAREST`.
           Otherwise, the default filter is :py:data:`PIL.Image.BICUBIC`.
           See: :ref:`concept-filters`.
        :param box: An optional 4-tuple of floats providing
           the source image region to be scaled.
           The values must be within (0, 0, width, height) rectangle.
           If omitted or None, the entire source is used.
        :param reducing_gap: Apply optimization by resizing the image
           in two steps. First, reducing the image by integer times
           using :py:meth:`~PIL.Image.Image.reduce`.
           Second, resizing using regular resampling. The last step
           changes size no less than by ``reducing_gap`` times.
           ``reducing_gap`` may be None (no first step is performed)
           or should be greater than 1.0. The bigger ``reducing_gap``,
           the closer the result to the fair resampling.
           The smaller ``reducing_gap``, the faster resizing.
           With ``reducing_gap`` greater or equal to 3.0, the result is
           indistinguishable from fair resampling in most cases.
           The default value is None (no optimization).
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        if resample is None:
            # Modes containing ";" (e.g. "I;16") store raw bit layouts;
            # only NEAREST is safe for them by default.
            type_special = ";" in self.mode
            resample = NEAREST if type_special else BICUBIC
        elif resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING):
            # Build a helpful error listing every accepted filter constant.
            message = f"Unknown resampling filter ({resample})."
            filters = [
                f"{filter[1]} ({filter[0]})"
                for filter in (
                    (NEAREST, "Image.NEAREST"),
                    (LANCZOS, "Image.LANCZOS"),
                    (BILINEAR, "Image.BILINEAR"),
                    (BICUBIC, "Image.BICUBIC"),
                    (BOX, "Image.BOX"),
                    (HAMMING, "Image.HAMMING"),
                )
            ]
            raise ValueError(
                message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
            )
        if reducing_gap is not None and reducing_gap < 1.0:
            raise ValueError("reducing_gap must be 1.0 or greater")
        size = tuple(size)
        if box is None:
            box = (0, 0) + self.size
        else:
            box = tuple(box)
        # Fast path: same size and whole-image box means nothing to do.
        if self.size == size and box == (0, 0) + self.size:
            return self.copy()
        # Interpolating palette indices or bilevel data is meaningless.
        if self.mode in ("1", "P"):
            resample = NEAREST
        if self.mode in ["LA", "RGBA"] and resample != NEAREST:
            # Resample in premultiplied-alpha modes ("La"/"RGBa") so fully
            # transparent pixels do not bleed colour into their neighbours.
            im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
            im = im.resize(size, resample, box)
            return im.convert(self.mode)
        self.load()
        if reducing_gap is not None and resample != NEAREST:
            # Two-step optimization: shrink by an integer factor with
            # reduce() first, leaving at least ``reducing_gap`` times the
            # target size for the final quality resampling pass.
            factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
            factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
            if factor_x > 1 or factor_y > 1:
                reduce_box = self._get_safe_box(size, resample, box)
                factor = (factor_x, factor_y)
                # NOTE(review): ``self.reduce`` appears to be shadowed by a
                # non-callable attribute in some subclasses, hence this
                # guard — confirm which plugins rely on it.
                if callable(self.reduce):
                    self = self.reduce(factor, box=reduce_box)
                else:
                    self = Image.reduce(self, factor, box=reduce_box)
                # Re-express the crop box in the reduced image's coordinates.
                box = (
                    (box[0] - reduce_box[0]) / factor_x,
                    (box[1] - reduce_box[1]) / factor_y,
                    (box[2] - reduce_box[0]) / factor_x,
                    (box[3] - reduce_box[1]) / factor_y,
                )
        return self._new(self.im.resize(size, resample, box))
def reduce(self, factor, box=None):
"""
Returns a copy of the image reduced ``factor`` times.
If the size of the image is not dividable by ``factor``,
the resulting size will be rounded up.
:param factor: A greater than 0 integer or tuple of two integers
for width and height separately.
:param box: An optional 4-tuple of ints providing
the source image region to be reduced.
The values must be within ``(0, 0, width, height)`` rectangle.
If omitted or ``None``, the entire source is used.
"""
if not isinstance(factor, (list, tuple)):
factor = (factor, factor)
if box is None:
box = (0, 0) + self.size
else:
box = tuple(box)
if factor == (1, 1) and box == (0, 0) + self.size:
return self.copy()
if self.mode in ["LA", "RGBA"]:
im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
im = im.reduce(factor, box)
return im.convert(self.mode)
self.load()
return self._new(self.im.reduce(factor, box))
    def rotate(
        self,
        angle,
        resample=NEAREST,
        expand=0,
        center=None,
        translate=None,
        fillcolor=None,
    ):
        """
        Returns a rotated copy of this image. This method returns a
        copy of this image, rotated the given number of degrees counter
        clockwise around its centre.

        :param angle: In degrees counter clockwise.
        :param resample: An optional resampling filter. This can be
           one of :py:data:`PIL.Image.NEAREST` (use nearest neighbour),
           :py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
           environment), or :py:data:`PIL.Image.BICUBIC`
           (cubic spline interpolation in a 4x4 environment).
           If omitted, or if the image has mode "1" or "P", it is
           set to :py:data:`PIL.Image.NEAREST`. See :ref:`concept-filters`.
        :param expand: Optional expansion flag. If true, expands the output
           image to make it large enough to hold the entire rotated image.
           If false or omitted, make the output image the same size as the
           input image. Note that the expand flag assumes rotation around
           the center and no translation.
        :param center: Optional center of rotation (a 2-tuple). Origin is
           the upper left corner. Default is the center of the image.
        :param translate: An optional post-rotate translation (a 2-tuple).
        :param fillcolor: An optional color for area outside the rotated image.
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        angle = angle % 360.0
        # Fast paths regardless of filter, as long as we're not
        # translating or changing the center.
        if not (center or translate):
            if angle == 0:
                return self.copy()
            if angle == 180:
                return self.transpose(ROTATE_180)
            # 90/270 only swap axes (lossless); without expand this is only
            # shape-preserving for square images.
            if angle in (90, 270) and (expand or self.width == self.height):
                return self.transpose(ROTATE_90 if angle == 90 else ROTATE_270)
        # Calculate the affine matrix. Note that this is the reverse
        # transformation (from destination image to source) because we
        # want to interpolate the (discrete) destination pixel from
        # the local area around the (floating) source pixel.
        # The matrix we actually want (note that it operates from the right):
        # (1, 0, tx)   (1, 0, cx)   ( cos a, sin a, 0)   (1, 0, -cx)
        # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy)
        # (0, 0,  1)   (0, 0,  1)   (     0,     0, 1)   (0, 0,   1)
        # The reverse matrix is thus:
        # (1, 0, cx)   ( cos -a, sin -a, 0)   (1, 0, -cx)   (1, 0, -tx)
        # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty)
        # (0, 0,  1)   (      0,      0, 1)   (0, 0,   1)   (0, 0,   1)
        # In any case, the final translation may be updated at the end to
        # compensate for the expand flag.
        w, h = self.size
        if translate is None:
            post_trans = (0, 0)
        else:
            post_trans = translate
        if center is None:
            # FIXME These should be rounded to ints?
            rotn_center = (w / 2.0, h / 2.0)
        else:
            rotn_center = center
        angle = -math.radians(angle)
        # Rounding to 15 decimals keeps right-angle rotations exact
        # (presumably avoiding float noise like cos(pi/2) != 0).
        matrix = [
            round(math.cos(angle), 15),
            round(math.sin(angle), 15),
            0.0,
            round(-math.sin(angle), 15),
            round(math.cos(angle), 15),
            0.0,
        ]

        def transform(x, y, matrix):
            # Apply the 2x3 affine matrix (a, b, c, d, e, f) to a point.
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f

        # Fold the rotation center and the post-rotate translation into the
        # matrix's translation terms.
        matrix[2], matrix[5] = transform(
            -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
        )
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        if expand:
            # calculate output size: map all four corners and take the
            # bounding box of the results.
            xx = []
            yy = []
            for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
                x, y = transform(x, y, matrix)
                xx.append(x)
                yy.append(y)
            nw = math.ceil(max(xx)) - math.floor(min(xx))
            nh = math.ceil(max(yy)) - math.floor(min(yy))
            # We multiply a translation matrix from the right. Because of its
            # special form, this is the same as taking the image of the
            # translation vector as new translation vector.
            matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
            w, h = nw, nh
        return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor)
    def save(self, fp, format=None, **params):
        """
        Saves this image under the given filename. If no format is
        specified, the format to use is determined from the filename
        extension, if possible.

        Keyword options can be used to provide additional instructions
        to the writer. If a writer doesn't recognise an option, it is
        silently ignored. The available options are described in the
        :doc:`image format documentation
        <../handbook/image-file-formats>` for each writer.

        You can use a file object instead of a filename. In this case,
        you must always specify the format. The file object must
        implement the ``seek``, ``tell``, and ``write``
        methods, and be opened in binary mode.

        :param fp: A filename (string), pathlib.Path object or file object.
        :param format: Optional format override. If omitted, the
           format to use is determined from the filename extension.
           If a file object was used instead of a filename, this
           parameter should always be used.
        :param params: Extra parameters to the image writer.
        :returns: None
        :exception ValueError: If the output format could not be determined
           from the file name. Use the format option to solve this.
        :exception OSError: If the file could not be written. The file
           may have been created, and may contain partial data.
        """
        filename = ""
        open_fp = False
        # Normalize the destination: Path and str are opened by us,
        # anything else is treated as an already-open binary stream.
        if isinstance(fp, Path):
            filename = str(fp)
            open_fp = True
        elif isPath(fp):
            filename = fp
            open_fp = True
        elif fp == sys.stdout:
            # Writing binary data to stdout requires the underlying buffer.
            try:
                fp = sys.stdout.buffer
            except AttributeError:
                pass
        if not filename and hasattr(fp, "name") and isPath(fp.name):
            # only set the name for metadata purposes
            filename = fp.name
        # may mutate self!
        self._ensure_mutable()
        save_all = params.pop("save_all", False)
        self.encoderinfo = params
        self.encoderconfig = ()
        # Register the cheap, common plugins first; fall back to the full
        # init() below only when the extension/format is not yet known.
        preinit()
        ext = os.path.splitext(filename)[1].lower()
        if not format:
            if ext not in EXTENSION:
                init()
            try:
                format = EXTENSION[ext]
            except KeyError as e:
                raise ValueError(f"unknown file extension: {ext}") from e
        if format.upper() not in SAVE:
            init()
        if save_all:
            save_handler = SAVE_ALL[format.upper()]
        else:
            save_handler = SAVE[format.upper()]
        if open_fp:
            if params.get("append", False):
                # Open also for reading ("+"), because TIFF save_all
                # writer needs to go back and edit the written data.
                fp = builtins.open(filename, "r+b")
            else:
                fp = builtins.open(filename, "w+b")
        try:
            save_handler(self, fp, filename)
        finally:
            # do what we can to clean up; only close files we opened here.
            if open_fp:
                fp.close()
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
``EOFError`` exception. When a sequence file is opened, the
library automatically seeks to frame 0.
See :py:meth:`~PIL.Image.Image.tell`.
If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
number of available frames.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
# overridden by file handlers
if frame != 0:
raise EOFError
    def show(self, title=None):
        """
        Displays this image. This method is mainly intended for debugging purposes.

        This method calls :py:func:`PIL.ImageShow.show` internally. You can use
        :py:func:`PIL.ImageShow.register` to override its default behaviour.

        The image is first saved to a temporary file. By default, it will be in
        PNG format.

        On Unix, the image is then opened using the **display**, **eog** or
        **xv** utility, depending on which one can be found.

        On macOS, the image is opened with the native Preview application.

        On Windows, the image is opened with the standard PNG display utility.

        :param title: Optional title to use for the image window, where possible.
        """
        # Delegates to the module-level _show() helper (ImageShow wrapper).
        _show(self, title=title)
def split(self):
"""
Split this image into individual bands. This method returns a
tuple of individual image bands from an image. For example,
splitting an "RGB" image creates three new images each
containing a copy of one of the original bands (red, green,
blue).
If you need only one band, :py:meth:`~PIL.Image.Image.getchannel`
method can be more convenient and faster.
:returns: A tuple containing bands.
"""
self.load()
if self.im.bands == 1:
ims = [self.copy()]
else:
ims = map(self._new, self.im.split())
return tuple(ims)
def getchannel(self, channel):
"""
Returns an image containing a single channel of the source image.
:param channel: What channel to return. Could be index
(0 for "R" channel of "RGB") or channel name
("A" for alpha channel of "RGBA").
:returns: An image in "L" mode.
.. versionadded:: 4.3.0
"""
self.load()
if isinstance(channel, str):
try:
channel = self.getbands().index(channel)
except ValueError as e:
raise ValueError(f'The image has no channel "{channel}"') from e
return self._new(self.im.getband(channel))
def tell(self):
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
number of available frames.
:returns: Frame number, starting with 0.
"""
return 0
    def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0):
        """
        Make this image into a thumbnail. This method modifies the
        image to contain a thumbnail version of itself, no larger than
        the given size. This method calculates an appropriate thumbnail
        size to preserve the aspect of the image, calls the
        :py:meth:`~PIL.Image.Image.draft` method to configure the file reader
        (where applicable), and finally resizes the image.

        Note that this function modifies the :py:class:`~PIL.Image.Image`
        object in place. If you need to use the full resolution image as well,
        apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
        image.

        :param size: Requested size.
        :param resample: Optional resampling filter. This can be one
           of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`,
           :py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`,
           :py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`.
           If omitted, it defaults to :py:data:`PIL.Image.BICUBIC`.
           (was :py:data:`PIL.Image.NEAREST` prior to version 2.5.0).
           See: :ref:`concept-filters`.
        :param reducing_gap: Apply optimization by resizing the image
           in two steps. First, reducing the image by integer times
           using :py:meth:`~PIL.Image.Image.reduce` or
           :py:meth:`~PIL.Image.Image.draft` for JPEG images.
           Second, resizing using regular resampling. The last step
           changes size no less than by ``reducing_gap`` times.
           ``reducing_gap`` may be None (no first step is performed)
           or should be greater than 1.0. The bigger ``reducing_gap``,
           the closer the result to the fair resampling.
           The smaller ``reducing_gap``, the faster resizing.
           With ``reducing_gap`` greater or equal to 3.0, the result is
           indistinguishable from fair resampling in most cases.
           The default value is 2.0 (very close to fair resampling
           while still being faster in many cases).
        :returns: None
        """
        x, y = map(math.floor, size)
        # Already within bounds on both axes — nothing to do.
        if x >= self.width and y >= self.height:
            return

        def round_aspect(number, key):
            # Pick the rounding (floor vs ceil) that better preserves the
            # aspect ratio, never going below 1 pixel.
            return max(min(math.floor(number), math.ceil(number), key=key), 1)

        # preserve aspect ratio: shrink whichever axis is proportionally
        # too large for the requested bounding box.
        aspect = self.width / self.height
        if x / y >= aspect:
            x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
        else:
            y = round_aspect(
                x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
            )
        size = (x, y)
        box = None
        if reducing_gap is not None:
            # Let the file reader pre-scale (e.g. JPEG DCT scaling) to at
            # least reducing_gap times the target size; draft() may return
            # a box describing the region that maps to the original image.
            res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap))
            if res is not None:
                box = res[1]
        if self.size != size:
            im = self.resize(size, resample, box=box, reducing_gap=reducing_gap)
            # In-place update: adopt the resized image's storage and mode.
            self.im = im.im
            self._size = size
            self.mode = self.im.mode
        self.readonly = 0
        self.pyaccess = None
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
    def transform(
        self, size, method, data=None, resample=NEAREST, fill=1, fillcolor=None
    ):
        """
        Transforms this image. This method creates a new image with the
        given size, and the same mode as the original, and copies data
        to the new image using the given transform.

        :param size: The output size.
        :param method: The transformation method. This is one of
          :py:data:`PIL.Image.EXTENT` (cut out a rectangular subregion),
          :py:data:`PIL.Image.AFFINE` (affine transform),
          :py:data:`PIL.Image.PERSPECTIVE` (perspective transform),
          :py:data:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
          :py:data:`PIL.Image.MESH` (map a number of source quadrilaterals
          in one operation).

          It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
          object::

            class Example(Image.ImageTransformHandler):
                def transform(self, size, data, resample, fill=1):
                    # Return result

          It may also be an object with a ``method.getdata`` method
          that returns a tuple supplying new ``method`` and ``data`` values::

            class Example:
                def getdata(self):
                    method = Image.EXTENT
                    data = (0, 0, 100, 100)
                    return method, data
        :param data: Extra data to the transformation method.
        :param resample: Optional resampling filter. It can be one of
           :py:data:`PIL.Image.NEAREST` (use nearest neighbour),
           :py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
           environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline
           interpolation in a 4x4 environment). If omitted, or if the image
           has mode "1" or "P", it is set to :py:data:`PIL.Image.NEAREST`.
           See: :ref:`concept-filters`.
        :param fill: If ``method`` is an
          :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
          the arguments passed to it. Otherwise, it is unused.
        :param fillcolor: Optional fill color for the area outside the
           transform in the output image.
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        # Transform in premultiplied-alpha modes ("La"/"RGBa") so fully
        # transparent pixels do not bleed colour into their neighbours.
        if self.mode in ("LA", "RGBA") and resample != NEAREST:
            return (
                self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
                .transform(size, method, data, resample, fill, fillcolor)
                .convert(self.mode)
            )
        if isinstance(method, ImageTransformHandler):
            # Handler object takes full control of the transformation.
            return method.transform(size, self, resample=resample, fill=fill)
        if hasattr(method, "getdata"):
            # compatibility w. old-style transform objects
            method, data = method.getdata()
        if data is None:
            raise ValueError("missing method data")
        im = new(self.mode, size, fillcolor)
        if self.mode == "P" and self.palette:
            im.palette = self.palette.copy()
        im.info = self.info.copy()
        if method == MESH:
            # list of quads: each entry maps a destination box to a source
            # quadrilateral, applied one at a time.
            for box, quad in data:
                im.__transformer(box, self, QUAD, quad, resample, fillcolor is None)
        else:
            im.__transformer(
                (0, 0) + size, self, method, data, resample, fillcolor is None
            )
        return im
    def __transformer(self, box, image, method, data, resample=NEAREST, fill=1):
        # Internal helper for transform(): normalizes ``method``/``data``
        # into one of the core-supported forms (AFFINE, PERSPECTIVE, QUAD)
        # and applies it to the region ``box`` of self, sampling ``image``.
        w = box[2] - box[0]
        h = box[3] - box[1]
        if method == AFFINE:
            data = data[0:6]
        elif method == EXTENT:
            # convert extent to an affine transform (pure scale + offset)
            x0, y0, x1, y1 = data
            xs = (x1 - x0) / w
            ys = (y1 - y0) / h
            method = AFFINE
            data = (xs, 0, x0, 0, ys, y0)
        elif method == PERSPECTIVE:
            data = data[0:8]
        elif method == QUAD:
            # quadrilateral warp.  data specifies the four corners
            # given as NW, SW, SE, and NE.
            nw = data[0:2]
            sw = data[2:4]
            se = data[4:6]
            ne = data[6:8]
            x0, y0 = nw
            As = 1.0 / w
            At = 1.0 / h
            # Bilinear-interpolation coefficients mapping a destination
            # (x, y) into the source quadrilateral.
            data = (
                x0,
                (ne[0] - x0) * As,
                (sw[0] - x0) * At,
                (se[0] - sw[0] - ne[0] + x0) * As * At,
                y0,
                (ne[1] - y0) * As,
                (sw[1] - y0) * At,
                (se[1] - sw[1] - ne[1] + y0) * As * At,
            )
        else:
            raise ValueError("unknown transformation method")
        # Only NEAREST/BILINEAR/BICUBIC are supported by transform2;
        # reject the resize-only filters with a targeted message.
        if resample not in (NEAREST, BILINEAR, BICUBIC):
            if resample in (BOX, HAMMING, LANCZOS):
                message = {
                    BOX: "Image.BOX",
                    HAMMING: "Image.HAMMING",
                    LANCZOS: "Image.LANCZOS/Image.ANTIALIAS",
                }[resample] + f" ({resample}) cannot be used."
            else:
                message = f"Unknown resampling filter ({resample})."
            filters = [
                f"{filter[1]} ({filter[0]})"
                for filter in (
                    (NEAREST, "Image.NEAREST"),
                    (BILINEAR, "Image.BILINEAR"),
                    (BICUBIC, "Image.BICUBIC"),
                )
            ]
            raise ValueError(
                message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
            )
        image.load()
        self.load()
        # Palette / bilevel sources cannot be interpolated.
        if image.mode in ("1", "P"):
            resample = NEAREST
        self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:data:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:data:`PIL.Image.FLIP_TOP_BOTTOM`, :py:data:`PIL.Image.ROTATE_90`,
:py:data:`PIL.Image.ROTATE_180`, :py:data:`PIL.Image.ROTATE_270`,
:py:data:`PIL.Image.TRANSPOSE` or :py:data:`PIL.Image.TRANSVERSE`.
:returns: Returns a flipped or rotated copy of this image.
"""
self.load()
return self._new(self.im.transpose(method))
def effect_spread(self, distance):
"""
Randomly spread pixels in an image.
:param distance: Distance to spread pixels.
"""
self.load()
return self._new(self.im.effect_spread(distance))
def toqimage(self):
"""Returns a QImage copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqimage(self)
def toqpixmap(self):
"""Returns a QPixmap copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqpixmap(self)
def build_prototype_image():
    """Build a 1-pixel-wide "L" image whose palette carries the standard
    Palm 8-bit colormap; used as a prototype for palette conversions.
    """
    n_colors = len(_Palm8BitColormapValues)
    image = Image.new("L", (1, n_colors))
    image.putdata(list(range(n_colors)))
    # Flatten the (r, g, b) tuples and pad the palette out to 256 entries.
    palettedata = ()
    for rgb in _Palm8BitColormapValues:
        palettedata += rgb
    palettedata += (0, 0, 0) * (256 - n_colors)
    image.putpalette(palettedata)
    return image
167,557 | from . import Image, ImageFile
from ._binary import o8
from ._binary import o16be as o16b
# Bit flags stored in the Palm bitmap header's flags field.
_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000}
# Values for the Palm bitmap header's compression-type byte.
_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00}
class ImageFile(Image.Image):
    """Base class for image file format handlers.

    Subclasses implement ``_open()`` to parse the header and populate
    ``self.tile`` with tile descriptors; ``load()`` then drives the
    decoders (or memory-maps raw data) to fill in the pixel storage.
    """

    def __init__(self, fp=None, filename=None):
        super().__init__()
        self._min_frame = 0
        self.custom_mimetype = None
        self.tile = None
        """ A list of tile descriptors, or ``None`` """
        self.readonly = 1  # until we know better
        self.decoderconfig = ()
        self.decodermaxblock = MAXBLOCK
        if isPath(fp):
            # filename: we own the file object and must close it ourselves.
            self.fp = open(fp, "rb")
            self.filename = fp
            self._exclusive_fp = True
        else:
            # stream: the caller owns it.
            self.fp = fp
            self.filename = filename
            # can be overridden
            self._exclusive_fp = None
        try:
            try:
                self._open()
            except (
                IndexError,  # end of data
                TypeError,  # end of data (ord)
                KeyError,  # unsupported mode
                EOFError,  # got header but not the first frame
                struct.error,
            ) as v:
                # Normalize parser failures into SyntaxError so callers can
                # distinguish "not this format" from real I/O errors.
                raise SyntaxError(v) from v
            if not self.mode or self.size[0] <= 0:
                raise SyntaxError("not identified by this driver")
        except BaseException:
            # close the file only if we have opened it this constructor
            if self._exclusive_fp:
                self.fp.close()
            raise

    def get_format_mimetype(self):
        # Prefer a plugin-supplied MIME type, else look up by format name.
        if self.custom_mimetype:
            return self.custom_mimetype
        if self.format is not None:
            return Image.MIME.get(self.format.upper())

    def verify(self):
        """Check file integrity"""
        # raise exception if something's wrong.  must be called
        # directly after open, and closes file when finished.
        if self._exclusive_fp:
            self.fp.close()
        self.fp = None

    def load(self):
        """Load image data based on tile list"""
        if self.tile is None:
            raise OSError("cannot load this image")
        pixel = Image.Image.load(self)
        if not self.tile:
            # No pending tiles: data was already decoded.
            return pixel
        self.map = None
        # mmap is only attempted for single-tile raw files we opened by name.
        use_mmap = self.filename and len(self.tile) == 1
        # As of pypy 2.1.0, memory mapping was failing here.
        use_mmap = use_mmap and not hasattr(sys, "pypy_version_info")
        readonly = 0
        # look for read/seek overrides
        try:
            read = self.load_read
            # don't use mmap if there are custom read/seek functions
            use_mmap = False
        except AttributeError:
            read = self.fp.read
        try:
            seek = self.load_seek
            use_mmap = False
        except AttributeError:
            seek = self.fp.seek
        if use_mmap:
            # try memory mapping
            decoder_name, extents, offset, args = self.tile[0]
            if (
                decoder_name == "raw"
                and len(args) >= 3
                and args[0] == self.mode
                and args[0] in Image._MAPMODES
            ):
                try:
                    # use mmap, if possible
                    import mmap

                    with open(self.filename) as fp:
                        self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
                    self.im = Image.core.map_buffer(
                        self.map, self.size, decoder_name, offset, args
                    )
                    readonly = 1
                    # After trashing self.im,
                    # we might need to reload the palette data.
                    if self.palette:
                        self.palette.dirty = 1
                except (AttributeError, OSError, ImportError):
                    # Fall back to the regular decoder path.
                    self.map = None
        self.load_prepare()
        err_code = -3  # initialize to unknown error
        if not self.map:
            # sort tiles in file order
            self.tile.sort(key=_tilesort)
            try:
                # FIXME: This is a hack to handle TIFF's JpegTables tag.
                prefix = self.tile_prefix
            except AttributeError:
                prefix = b""
            # Remove consecutive duplicates that only differ by their offset
            self.tile = [
                list(tiles)[-1]
                for _, tiles in itertools.groupby(
                    self.tile, lambda tile: (tile[0], tile[1], tile[3])
                )
            ]
            for decoder_name, extents, offset, args in self.tile:
                decoder = Image._getdecoder(
                    self.mode, decoder_name, args, self.decoderconfig
                )
                try:
                    seek(offset)
                    decoder.setimage(self.im, extents)
                    if decoder.pulls_fd:
                        # Decoder reads the file object directly.
                        decoder.setfd(self.fp)
                        status, err_code = decoder.decode(b"")
                    else:
                        # Feed the decoder in decodermaxblock-sized chunks.
                        b = prefix
                        while True:
                            try:
                                s = read(self.decodermaxblock)
                            except (IndexError, struct.error) as e:
                                # truncated png/gif
                                if LOAD_TRUNCATED_IMAGES:
                                    break
                                else:
                                    raise OSError("image file is truncated") from e
                            if not s:  # truncated jpeg
                                if LOAD_TRUNCATED_IMAGES:
                                    break
                                else:
                                    raise OSError(
                                        "image file is truncated "
                                        f"({len(b)} bytes not processed)"
                                    )
                            b = b + s
                            n, err_code = decoder.decode(b)
                            if n < 0:
                                break
                            b = b[n:]
                finally:
                    # Need to cleanup here to prevent leaks
                    decoder.cleanup()
        self.tile = []
        self.readonly = readonly
        self.load_end()
        if self._exclusive_fp and self._close_exclusive_fp_after_loading:
            self.fp.close()
        self.fp = None
        if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
            # still raised if decoder fails to return anything
            raise_oserror(err_code)
        return Image.Image.load(self)

    def load_prepare(self):
        # create image memory if necessary
        if not self.im or self.im.mode != self.mode or self.im.size != self.size:
            self.im = Image.core.new(self.mode, self.size)
        # create palette (optional)
        if self.mode == "P":
            Image.Image.load(self)

    def load_end(self):
        # may be overridden
        pass

    # may be defined for contained formats
    # def load_seek(self, pos):
    #     pass

    # may be defined for blocked formats (e.g. PNG)
    # def load_read(self, bytes):
    #     pass

    def _seek_check(self, frame):
        # Validate a frame index; returns True when an actual seek is
        # needed (i.e. we are not already positioned at ``frame``).
        if (
            frame < self._min_frame
            # Only check upper limit on frames if additional seek operations
            # are not required to do so
            or (
                not (hasattr(self, "_n_frames") and self._n_frames is None)
                and frame >= self.n_frames + self._min_frame
            )
        ):
            raise EOFError("attempt to seek outside sequence")
        return self.tell() != frame
def o8(i):
    """Return the low byte of *i* as a single-byte ``bytes`` object."""
    return (i & 0xFF).to_bytes(1, "little")
def _save(im, fp, filename):
    """Write *im* to *fp* in the Palm bitmap format.

    Supported modes: "P" (paletted, 8 bpp), "L" (greyscale, when a ``bpp``
    of 1/2/4 is supplied via ``encoderinfo`` or ``info``) and "1"
    (monochrome). Any other mode raises :py:exc:`OSError`.

    :param im: Source image.
    :param fp: Binary file object opened for writing.
    :param filename: Destination name (unused except by the raw writer).
    :raises OSError: If the image mode cannot be written as Palm.
    """
    if im.mode == "P":
        # we assume this is a color Palm image with the standard colormap,
        # unless the "info" dict has a "custom-colormap" field
        rawmode = "P"
        bpp = 8
        version = 1
    elif im.mode == "L":
        if im.encoderinfo.get("bpp") in (1, 2, 4):
            # this is 8-bit grayscale, so we shift it to get the high-order bits,
            # and invert it because
            # Palm does greyscale from white (0) to black (1)
            bpp = im.encoderinfo["bpp"]
            im = im.point(
                lambda x, shift=8 - bpp, maxval=(1 << bpp) - 1: maxval - (x >> shift)
            )
        elif im.info.get("bpp") in (1, 2, 4):
            # here we assume that even though the inherent mode is 8-bit grayscale,
            # only the lower bpp bits are significant.
            # We invert them to match the Palm.
            bpp = im.info["bpp"]
            im = im.point(lambda x, maxval=(1 << bpp) - 1: maxval - (x & maxval))
        else:
            raise OSError(f"cannot write mode {im.mode} as Palm")
        # we ignore the palette here; force the raw writer into "P;<bpp>"
        im.mode = "P"
        rawmode = "P;" + str(bpp)
        version = 1
    elif im.mode == "1":
        # monochrome -- write it inverted, as is the Palm standard
        rawmode = "1;I"
        bpp = 1
        version = 0
    else:
        raise OSError(f"cannot write mode {im.mode} as Palm")
    #
    # make sure image data is available
    im.load()
    # write header
    cols = im.size[0]
    rows = im.size[1]
    # Each row is padded out to a 16-bit boundary.
    rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2
    transparent_index = 0
    compression_type = _COMPRESSION_TYPES["none"]
    flags = 0
    if im.mode == "P" and "custom-colormap" in im.info:
        # Bug fix: this previously read ``flags = flags & _FLAGS[...]``,
        # which with flags == 0 could never set the bit, so the
        # custom-colormap flag was never written even though the colormap
        # itself was. OR the bit in instead.
        flags |= _FLAGS["custom-colormap"]
        colormapsize = 4 * 256 + 2
        colormapmode = im.palette.mode
        colormap = im.getdata().getpalette()
    else:
        colormapsize = 0
    if "offset" in im.info:
        # Offset to the pixel data, expressed in 4-byte words.
        offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4
    else:
        offset = 0
    fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags))
    fp.write(o8(bpp))
    fp.write(o8(version))
    fp.write(o16b(offset))
    fp.write(o8(transparent_index))
    fp.write(o8(compression_type))
    fp.write(o16b(0))  # reserved by Palm
    # now write colormap if necessary
    if colormapsize > 0:
        fp.write(o16b(256))
        for i in range(256):
            fp.write(o8(i))
            if colormapmode == "RGB":
                fp.write(
                    o8(colormap[3 * i])
                    + o8(colormap[3 * i + 1])
                    + o8(colormap[3 * i + 2])
                )
            elif colormapmode == "RGBA":
                fp.write(
                    o8(colormap[4 * i])
                    + o8(colormap[4 * i + 1])
                    + o8(colormap[4 * i + 2])
                )
    # now convert data to raw form
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))])
    if hasattr(fp, "flush"):
        fp.flush()
167,558 | from . import Image, ImageFile
from ._binary import i16le as word
from ._binary import i32le as dword
from ._binary import si16le as short
from ._binary import si32le as _long
# Currently installed application-specific WMF handler; None until one is
# registered via register_handler().
_handler = None
# register_handler() below installs an application-specific WMF image
# handler into the module-level ``_handler`` slot.
def register_handler(handler):
    """
    Install application-specific WMF image handler.

    :param handler: Handler object.
    """
    # Module-level registry: the most recently registered handler wins.
    global _handler
    _handler = handler
167,559 | from . import Image, ImageFile
from ._binary import i16le as word
from ._binary import i32le as dword
from ._binary import si16le as short
from ._binary import si32le as _long
def _accept(prefix):
return (
prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00"
) | null |
167,560 | from . import Image, ImageFile
from ._binary import i16le as word
from ._binary import i32le as dword
from ._binary import si16le as short
from ._binary import si32le as _long
# Currently installed WMF handler; None until register_handler() is called.
_handler = None
if hasattr(Image.core, "drawwmf"):
    # install default handler (windows only)
    register_handler(WmfHandler())
def _save(im, fp, filename):
    """Delegate saving to the registered WMF handler; raise if absent."""
    handler = _handler
    if handler is None or not hasattr(handler, "save"):
        raise OSError("WMF save handler not installed")
    handler.save(im, fp, filename)
167,561 | import struct
from io import BytesIO
from . import Image, ImageFile
MAGIC = b"FTEX"
def _accept(prefix):
return prefix[:4] == MAGIC | null |
167,562 | from . import Image
from ._binary import i32le as i32
from .PcxImagePlugin import PcxImageFile
# File magic for this format, stored as a little-endian dword.
MAGIC = 0x3ADE68B1
def _accept(prefix):
    """Return whether the first little-endian dword matches MAGIC."""
    if len(prefix) < 4:
        return False
    return i32(prefix) == MAGIC
167,563 | import calendar
import codecs
import collections
import mmap
import os
import re
import time
import zlib
# Code points where PDFDocEncoding differs from a plain chr(byte) mapping;
# any byte not listed here decodes to chr(byte).
PDFDocEncoding = {
    0x16: "\u0017",
    0x18: "\u02D8",
    0x19: "\u02C7",
    0x1A: "\u02C6",
    0x1B: "\u02D9",
    0x1C: "\u02DD",
    0x1D: "\u02DB",
    0x1E: "\u02DA",
    0x1F: "\u02DC",
    0x80: "\u2022",
    0x81: "\u2020",
    0x82: "\u2021",
    0x83: "\u2026",
    0x84: "\u2014",
    0x85: "\u2013",
    0x86: "\u0192",
    0x87: "\u2044",
    0x88: "\u2039",
    0x89: "\u203A",
    0x8A: "\u2212",
    0x8B: "\u2030",
    0x8C: "\u201E",
    0x8D: "\u201C",
    0x8E: "\u201D",
    0x8F: "\u2018",
    0x90: "\u2019",
    0x91: "\u201A",
    0x92: "\u2122",
    0x93: "\uFB01",
    0x94: "\uFB02",
    0x95: "\u0141",
    0x96: "\u0152",
    0x97: "\u0160",
    0x98: "\u0178",
    0x99: "\u017D",
    0x9A: "\u0131",
    0x9B: "\u0142",
    0x9C: "\u0153",
    0x9D: "\u0161",
    0x9E: "\u017E",
    0xA0: "\u20AC",
}
def decode_text(b):
    """Decode a PDF text string: UTF-16BE when it starts with a BOM,
    otherwise byte-wise PDFDocEncoding."""
    bom = codecs.BOM_UTF16_BE
    if b[: len(bom)] == bom:
        return b[len(bom) :].decode("utf_16_be")
    return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b)
167,564 | import calendar
import codecs
import collections
import mmap
import os
import re
import time
import zlib
class PdfFormatError(RuntimeError):
    """An error that probably indicates a syntactic or semantic error in the
    PDF file structure"""

    pass
def check_format_condition(condition, error_message):
    """Raise :class:`PdfFormatError` carrying *error_message* unless
    *condition* holds."""
    if condition:
        return
    raise PdfFormatError(error_message)
# NOTE(review): trailing dataset-viewer boilerplate ("Subsets and Splits",
# "No community queries yet", ...) was scraping residue, not code, and has
# been neutralized into this comment.