# NOTE(review): stray dataset-extraction artifact (table-header text) removed from the top of this file.
from bisect import bisect_left
import numpy as np
from PySide2 import QtCore, QtQml, QtQuick, QtGui
from PySide2.QtCore import Signal, Property, Slot
from PySide2.QtCharts import QtCharts
from wwb_scanner.file_handlers import BaseImporter
from wwb_scanner.scan_objects.spectrum import Spectrum
from wwb_scanner.ui.pyside.utils import IntervalTimer, is_pathlike
# Structured dtype for plotted graph data: one (x, y) pair per point
# (x = frequency in MHz, y = level in dbFS — see SpectrumGraphData below).
GRAPH_DTYPE = np.dtype([
    ('x', np.float64),
    ('y', np.float64),
])
class GraphPoint(QtCore.QPointF):
    """A ``QPointF`` that also remembers its position (index) in a series."""

    def __init__(self, *args):
        # The attribute must exist before QPointF.__init__ runs so property
        # access during construction cannot fail.
        self._index = 0
        super().__init__(*args)

    def _get_index(self):
        return self._index

    def _set_index(self, value):
        self._index = value

    # Exposed to QML as an int property.
    index = Property(int, _get_index, _set_index)
class GraphTableModel(QtCore.QAbstractTableModel):
    """Two-row table model backing the QML chart.

    Row 0 holds the x values, row 1 the y values; columns are points.
    """

    def __init__(self, *args):
        # Backing store, shape (2, N).
        self._data = np.zeros((2, 0), dtype=np.float64)
        super().__init__(*args)

    def columnCount(self, parent):
        return self._data.shape[1]

    def rowCount(self, parent):
        return self._data.shape[0]

    def flags(self, index):
        return QtCore.Qt.ItemFlags.ItemIsEnabled

    def data(self, index, role):
        if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
            return float(self._data[index.row(), index.column()])
        return QtCore.QVariant()

    def _reshape_data(self, d_arr):
        """Grow or shrink the column count to match ``d_arr.size``.

        Fix: the original remove branch passed (first, last) to
        beginRemoveColumns in the wrong order (cur-1, new-1 with cur > new)
        and used a non-root parent index; Qt requires first <= last and an
        invalid parent for top-level columns of a table model.
        """
        new_cols = d_arr.size
        cur_cols = self._data.shape[-1]
        # Columns are always top-level in a table model.
        parent = QtCore.QModelIndex()
        if cur_cols > new_cols:
            # Remove trailing columns new_cols .. cur_cols-1.
            self.beginRemoveColumns(parent, new_cols, cur_cols - 1)
            self._data = self._data[..., :new_cols]
            self.endRemoveColumns()
        elif cur_cols < new_cols:
            # Append columns cur_cols .. new_cols-1, seeded from d_arr.
            self.beginInsertColumns(parent, cur_cols, new_cols - 1)
            data = np.zeros((2, new_cols), dtype=np.float64)
            data[:, :cur_cols] = self._data[:, :]
            data[0, cur_cols:] = d_arr['x'][cur_cols:]
            data[1, cur_cols:] = d_arr['y'][cur_cols:]
            self._data = data
            self.endInsertColumns()

    def set_from_graph_dtype(self, d_arr):
        """Update the model from a GRAPH_DTYPE array.

        Emits ``dataChanged`` only over the span of columns whose values
        actually differ from the current contents.
        """
        if d_arr.size != self._data.shape[-1]:
            self._reshape_data(d_arr)
            x_changed = True
            y_changed = True
        else:
            x_changed = not np.array_equal(self._data[0], d_arr['x'])
            y_changed = not np.array_equal(self._data[1], d_arr['y'])
        if not x_changed and not y_changed:
            return
        data = self._data
        change_ix_x = np.flatnonzero(np.not_equal(data[0], d_arr['x']))
        change_ix_y = np.flatnonzero(np.not_equal(data[1], d_arr['y']))
        change_ix = set(change_ix_x) | set(change_ix_y)
        if not len(change_ix):
            return
        data[0] = d_arr['x']
        data[1] = d_arr['y']
        tl = self.index(0, min(change_ix))
        br = self.index(data.shape[0] - 1, max(change_ix))
        self.dataChanged.emit(tl, br)
class SpectrumGraphData(QtQuick.QQuickItem):
    """QQuickItem bridging a wwb_scanner ``Spectrum`` to a graph model.

    Holds (frequency, dbFS) pairs in ``xy_data`` (GRAPH_DTYPE), pushes them
    into an attached ``GraphTableModel`` and tracks min/max extents for
    axis scaling.  Exposes name/color/visibility as QML properties.
    """
    # Change-notification signals for the QML properties below.
    _n_model = Signal()
    _n_name = Signal()
    _n_min_value = Signal()
    _n_max_value = Signal()
    _n_spectrum = Signal()
    _n_color = Signal()
    _n_graphVisible = Signal()

    def __init__(self, *args):
        # All backing attributes must exist before QQuickItem.__init__
        # runs, since property reads may be triggered during construction.
        self.xy_data = np.zeros(0, dtype=GRAPH_DTYPE)
        self._min_value = QtCore.QPointF(0., 0.)
        self._max_value = QtCore.QPointF(0., 0.)
        self._model = None
        self._spectrum = None
        self._name = None
        self._color = None
        self._graphVisible = True
        super().__init__(*args)

    def _g_model(self):
        return self._model

    def _s_model(self, value):
        self._model = value
        self._n_model.emit()
        # Push any data we already have into the newly attached model.
        self._on_model_changed()
    # The attached GraphTableModel instance.
    model = Property(QtCore.QObject, _g_model, _s_model, notify=_n_model)

    def _g_name(self):
        name = self._name
        if name is not None:
            # If the name looks like a filesystem path, display only the
            # final path component.
            is_p, p = is_pathlike(name)
            if is_p:
                return p.name
        return name

    def _s_name(self, value):
        if value == self._name:
            return
        self._name = value
        self._n_name.emit()
    # Display name (path-like names are shortened to their basename).
    name = Property(str, _g_name, _s_name, notify=_n_name)

    def _g_color(self):
        return self._color

    def _s_color(self, value):
        if value == self._color:
            return
        self._color = value
        self._n_color.emit()
        # Keep the spectrum's stored color in sync with the UI color.
        if self.spectrum is not None:
            rgba = value.getRgbF()
            if rgba != self.spectrum.color:
                self.spectrum.color.from_list(rgba)
    # Plot color; mirrored to the underlying Spectrum object.
    color = Property(QtGui.QColor, _g_color, _s_color, notify=_n_color)

    def _g_graphVisible(self):
        return self._graphVisible

    def _s_graphVisible(self, value):
        if value == self._graphVisible:
            return
        self._graphVisible = value
        self._n_graphVisible.emit()
    graphVisible = Property(bool, _g_graphVisible, _s_graphVisible, notify=_n_graphVisible)

    def _g_min_value(self):
        return self._min_value

    def _s_min_value(self, value):
        self._min_value = value
        self._n_min_value.emit()
    # (min x, min y) of the current data, for axis scaling.
    minValue = Property(QtCore.QPointF, _g_min_value, _s_min_value, notify=_n_min_value)

    def _g_max_value(self):
        return self._max_value

    def _s_max_value(self, value):
        self._max_value = value
        self._n_max_value.emit()
    # (max x, max y) of the current data, for axis scaling.
    maxValue = Property(QtCore.QPointF, _g_max_value, _s_max_value, notify=_n_max_value)

    def _g_spectrum(self):
        return self._spectrum

    def _s_spectrum(self, value):
        if value == self._spectrum:
            return
        self._spectrum = value
        self._n_spectrum.emit()
        if value is not None:
            # Sync name and color between this item and the spectrum
            # (whichever side already has a value wins for the name).
            if self.name is None:
                self.name = value.name
            else:
                value.name = self.name
            if value.color != value.DEFAULT_COLOR:
                self.color = QtGui.QColor.fromRgbF(*value.color.to_list())
            self.update_spectrum_data()
    # The wwb_scanner Spectrum object being displayed.
    spectrum = Property(object, _g_spectrum, _s_spectrum, notify=_n_spectrum)

    def _set_from_graph_dtype(self, d_arr):
        if self.model is None:
            return
        self.model.set_from_graph_dtype(d_arr)

    def _on_model_changed(self):
        self._set_series_from_data()

    @Slot(float, result=QtCore.QPointF)
    def get_nearest_by_x(self, value):
        """Return the data point at (or just above) frequency *value*.

        Returns QPointF(-1, -1) when no data is loaded.  NOTE(review):
        searchsorted returns the insertion point, so the point at or after
        *value* is returned, not necessarily the truly nearest one.
        """
        xy_data = self.xy_data
        if not xy_data.size:
            return QtCore.QPointF(-1, -1)
        ix = np.searchsorted(xy_data['x'], value)
        if ix >= xy_data.size:
            ix = xy_data.size - 1
        x = xy_data['x'][ix]
        y = xy_data['y'][ix]
        return QtCore.QPointF(x, y)

    @Slot()
    def update_spectrum_data(self):
        """Refresh ``xy_data`` and the model when the spectrum has new data."""
        if self.spectrum is None:
            return
        # data_updated is an Event the spectrum sets when samples change.
        if not self.spectrum.data_updated.is_set():
            return
        self._update_data_from_spectrum()
        self._set_series_from_data()

    def _update_data_from_spectrum(self):
        """Copy (frequency, dbFS) out of the spectrum under its data lock."""
        spectrum = self.spectrum
        dtype = np.dtype(float)  # NOTE(review): unused local, kept as-is
        with spectrum.data_update_lock:
            xy_data = np.zeros(spectrum.sample_data.size, dtype=GRAPH_DTYPE)
            data = spectrum.sample_data.data
            xy_data['x'] = data['frequency']
            freqmin = xy_data['x'].min()   # NOTE(review): unused locals
            freqmax = xy_data['x'].max()
            xy_data['y'] = spectrum.sample_data.data['dbFS']
            # Replace NaN levels with a floor value so the chart can draw them.
            nan_ix = np.flatnonzero(np.isnan(xy_data['y']))
            xy_data['y'][nan_ix] = -110
            self.xy_data = xy_data
            spectrum.data_updated.clear()
        self._update_extents()

    def _update_extents(self):
        self.minValue = QtCore.QPointF(self.xy_data['x'].min(), self.xy_data['y'].min())
        self.maxValue = QtCore.QPointF(self.xy_data['x'].max(), self.xy_data['y'].max())

    def _set_series_from_data(self):
        if self.model is None:
            return
        self.model.set_from_graph_dtype(self.xy_data)

    @Slot(QtCore.QUrl)
    def load_from_file(self, uri):
        """Import a spectrum file (local URL) and display it."""
        filename = uri.toLocalFile()
        spectrum = BaseImporter.import_file(filename)
        spectrum.set_data_updated()
        self.spectrum = spectrum
        self.update_spectrum_data()

    @Slot(QtCore.QUrl)
    def save_to_file(self, uri):
        """Export the current spectrum to the given local URL."""
        filename = uri.toLocalFile()
        self.spectrum.export_to_file(filename=filename)
class LiveSpectrumGraphData(SpectrumGraphData):
    """SpectrumGraphData variant fed live by a running scanner.

    Polls the scanner's spectrum on an IntervalTimer while the scan runs,
    and extends the x-axis extents to the scanner's configured range.
    """
    _n_update_interval = Signal()
    _n_update_timer = Signal()
    _n_scanner = Signal()
    # Emitted by the timer thread; connected to update_spectrum_data so the
    # refresh runs through Qt's signal dispatch.
    updateSpectrumData = Signal()

    def __init__(self, *args):
        self._update_interval = None
        self._update_timer = None
        self._scanner = None
        super().__init__(*args)
        self.updateSpectrumData.connect(self.update_spectrum_data)

    def _g_scanner(self):
        return self._scanner

    def _s_scanner(self, value):
        if value == self._scanner:
            return
        if self._scanner is not None:
            self._scanner.disconnect(self)
        self._scanner = value
        self._n_scanner.emit()
        if self._scanner is not None:
            self.spectrum = self._scanner.spectrum
            self._scanner.scannerRunState.connect(self.on_scanner_run_state)
            # Start polling at 10 Hz while scanning.
            self.update_interval = .1
        else:
            # Negative interval stops the poll timer.
            self.update_interval = -1
    # The scanner QObject driving this live graph (or None).
    scanner = Property(QtCore.QObject, _g_scanner, _s_scanner, notify=_n_scanner)

    def on_scanner_run_state(self, state):
        """Stop polling and take a final snapshot when the scan ends."""
        if self.scanner is None:
            return
        if not self.scanner.running:
            self.update_interval = -1
            if self.spectrum is not None:
                with self.spectrum.data_update_lock:
                    self.spectrum.set_data_updated()
                self.update_spectrum_data()
            self.scanner = None

    def _g_update_interval(self):
        return self._update_interval

    def _s_update_interval(self, value):
        if value == self._update_interval:
            return
        self._update_interval = value
        if self.update_timer is not None:
            self.update_timer.stop.emit()
        if value is not None and value > 0:
            ms = int(round(value * 1000))
            if self.update_timer is None:
                self.update_timer = IntervalTimer(interval_ms=ms)
                self.update_timer.trigger.connect(self.on_update_timer_trigger)
            else:
                self.update_timer.interval_ms = ms
            self.update_timer.start.emit()
        self._n_update_interval.emit()
    # Poll interval in seconds; <= 0 (or None) disables polling.
    update_interval = Property(float, _g_update_interval, _s_update_interval, notify=_n_update_interval)

    def _g_update_timer(self):
        return self._update_timer

    def _s_update_timer(self, value):
        self._update_timer = value
        self._n_update_timer.emit()
    update_timer = Property(QtCore.QObject, _g_update_timer, _s_update_timer, notify=_n_update_timer)

    def on_update_timer_trigger(self, *args):
        self.updateSpectrumData.emit()

    def _update_extents(self):
        """Extend x extents to the scanner's configured start/end frequencies.

        Fix: the no-scanner branch read ``xy_data['y'].max()`` for *max_x*;
        it must read the 'x' field, mirroring *min_x* two lines above.
        """
        if self.scanner is not None:
            min_x = self.scanner.startFreq
            if self.xy_data.size and self.xy_data['x'].min() < min_x:
                min_x = self.xy_data['x'].min()
            max_x = self.scanner.endFreq
            if self.xy_data.size and self.xy_data['x'].max() > max_x:
                max_x = self.xy_data['x'].max()
        else:
            min_x = self.xy_data['x'].min()
            max_x = self.xy_data['x'].max()
        self.minValue = QtCore.QPointF(min_x, self.xy_data['y'].min())
        self.maxValue = QtCore.QPointF(max_x, self.xy_data['y'].max())
def register_qml_types():
    """Register the graph types with QML under ``GraphUtils 1.0``."""
    for cls in (GraphTableModel, SpectrumGraphData, LiveSpectrumGraphData):
        QtQml.qmlRegisterType(cls, 'GraphUtils', 1, 0, cls.__name__)
import threading
import numpy as np
from wwb_scanner.core import JSONMixin
from wwb_scanner.utils.dbstore import db_store
from wwb_scanner.scanner.sdrwrapper import SdrWrapper
from wwb_scanner.scanner.config import ScanConfig
from wwb_scanner.scanner.sample_processing import (
SampleCollection,
calc_num_samples,
WINDOW_TYPES,
)
from wwb_scanner.scan_objects import Spectrum
def mhz_to_hz(mhz):
    """Convert a frequency in megahertz to hertz."""
    return mhz * 1e6
def hz_to_mhz(hz):
    """Convert a frequency in hertz to megahertz."""
    return hz / 1e6
def get_freq_resolution(nfft, fs):
    """Return the (rounded) FFT bin spacing for *nfft* points at rate *fs*.

    Warns and falls back to the mean spacing when rounding does not yield
    a single unique bin width.
    """
    freqs = np.fft.fftshift(np.fft.fftfreq(nfft, 1 / fs))
    spacing = np.unique(np.diff(np.around(freqs)))
    if spacing.size != 1:
        print('!!! Not unique: ', spacing)
        return spacing.mean()
    return spacing[0]
def is_equal_spacing(nfft, fs, step_size):
    """Return True if two FFT grids offset by *step_size* align to one spacing.

    The bins of an nfft-point FFT at rate *fs* are merged with the same
    bins shifted by *step_size*; equal spacing means the shifted sweep
    lands exactly on (or interleaves evenly with) the original grid.
    """
    base = np.fft.fftshift(np.fft.fftfreq(nfft, 1 / fs))
    combined = np.unique(np.around(np.append(base, base + step_size)))
    diff = np.unique(np.diff(combined))
    print(diff)
    return diff.size == 1
class StopScanner(Exception):
    """Raised to abort a scan in progress."""
    pass
class ScannerBase(JSONMixin):
    """Base class driving a frequency sweep over ``config.scan_range``.

    Owns the ScanConfig, the target Spectrum and the SampleCollection.
    Subclasses provide device access and the ``step_size`` attribute used
    by build_sample_sets (defined on Scanner, not here).
    """
    WINDOW_TYPES = WINDOW_TYPES

    def __init__(self, **kwargs):
        self._running = threading.Event()
        self._stopped = threading.Event()
        self._current_freq = None
        self._progress = 0.
        # Config priority: explicit kwarg, then last stored config, then defaults.
        ckwargs = kwargs.get('config')
        if not ckwargs:
            ckwargs = db_store.get_scan_config()
        if not ckwargs:
            ckwargs = {}
        self.config = ScanConfig(ckwargs)
        self.device_config = self.config.device
        self.sampling_config = self.config.sampling
        if 'spectrum' in kwargs:
            self.spectrum = Spectrum.from_json(kwargs['spectrum'])
        else:
            self.spectrum = Spectrum()
        self.spectrum.scan_config = self.config
        if not kwargs.get('__from_json__'):
            # When deserializing, _deserialize() rebuilds the collection instead.
            self.sample_collection = SampleCollection(scanner=self)

    @property
    def current_freq(self):
        # Center frequency (MHz) currently being scanned, or None.
        return self._current_freq

    @current_freq.setter
    def current_freq(self, value):
        self._current_freq = value
        if value is not None:
            # Derive normalized progress (0..1) across the configured range.
            f_min, f_max = self.config.scan_range
            self.progress = (value - f_min) / (f_max - f_min)
        self.on_current_freq(value)

    def on_current_freq(self, value):
        """Subclass hook; called whenever current_freq changes."""
        pass

    @property
    def progress(self):
        # Scan progress as a fraction in [0, 1].
        return self._progress

    @progress.setter
    def progress(self, value):
        if value == self._progress:
            return
        self._progress = value
        self.on_progress(value)

    def on_progress(self, value):
        """Subclass hook; called whenever progress changes."""
        pass

    def build_sample_sets(self):
        """Create one SampleSet per center frequency across the scan range.

        NOTE(review): relies on ``self.step_size`` (MHz), which is defined
        by the Scanner subclass — confirm before using ScannerBase directly.
        """
        freq, end_freq = self.config.scan_range
        sample_collection = self.sample_collection
        while freq <= end_freq:
            sample_set = sample_collection.build_sample_set(mhz_to_hz(freq))
            freq += self.step_size

    def run_scan(self):
        """Run the full scan (blocking) and persist results on clean completion."""
        self.build_sample_sets()
        running = self._running
        running.set()
        self.sample_collection.scan_all_freqs()
        self.sample_collection.stopped.wait()
        # Only save if the scan was not cancelled via stop_scan().
        if running.is_set():
            self.save_to_dbstore()
        running.clear()
        self._stopped.set()

    def stop_scan(self):
        """Cancel a running scan and block until it has fully stopped."""
        self._running.clear()
        self.sample_collection.cancel()
        self._stopped.wait()

    def save_to_dbstore(self):
        self.spectrum.save_to_dbstore()

    def _serialize(self):
        d = dict(
            config=self.config._serialize(),
            spectrum=self.spectrum._serialize(),
            sample_collection=self.sample_collection._serialize(),
        )
        return d

    def _deserialize(self, **kwargs):
        data = kwargs.get('sample_collection')
        self.sample_collection = SampleCollection.from_json(data, scanner=self)
class Scanner(ScannerBase):
    '''Scanner backed by an RTL-SDR device (via SdrWrapper).

    params:
        scan_range: (list) frequency range to scan (in MHz)
        step_size: increment (in MHz) to return scan values
    '''
    def __init__(self, **kwargs):
        super(Scanner, self).__init__(**kwargs)
        self.sdr_wrapper = SdrWrapper(scanner=self)
        # Re-assign so the gain setter can now snap the configured value to
        # the nearest device-supported gain (sdr_wrapper exists at this point).
        self.gain = self.gain

    @property
    def sdr(self):
        # The underlying rtlsdr device object (may be None when closed).
        return self.sdr_wrapper.sdr

    @property
    def sample_rate(self):
        return self.sampling_config.get('sample_rate')

    @sample_rate.setter
    def sample_rate(self, value):
        self.sampling_config.sample_rate = value

    @property
    def freq_correction(self):
        return self.device_config.get('freq_correction')

    @freq_correction.setter
    def freq_correction(self, value):
        self.device_config.freq_correction = value

    @property
    def sweeps_per_scan(self):
        return self.sampling_config.sweeps_per_scan

    @sweeps_per_scan.setter
    def sweeps_per_scan(self, value):
        self.sampling_config.sweeps_per_scan = value

    @property
    def samples_per_sweep(self):
        return self.sampling_config.samples_per_sweep

    @samples_per_sweep.setter
    def samples_per_sweep(self, value):
        self.sampling_config.samples_per_sweep = value

    @property
    def step_size(self):
        """Scan step in MHz, derived from the device's actual sample rate.

        Computed lazily and cached in ``_step_size``.  NOTE(review): the
        first access has side effects — it programs the SDR's sample rate
        and writes the rounded rate back into the sampling config.
        """
        step_size = getattr(self, '_step_size', None)
        if step_size is not None:
            return step_size
        c = self.sampling_config
        overlap = c.sweep_overlap_ratio
        # Program the device, then read back the rate it actually accepted.
        self.sdr.sample_rate = c.sample_rate
        rs = int(round(self.sdr.sample_rate))
        self.sample_rate = rs
        nfft = self.window_size
        resolution = get_freq_resolution(nfft, rs)
        step_size = self._step_size = rs / 2. * overlap
        self._equal_spacing = is_equal_spacing(nfft, rs, step_size)
        if not self.equal_spacing:
            # Snap the step to a multiple of the bin resolution and retry.
            step_size -= step_size % resolution
            step_size = round(step_size)
            if step_size <= 0:
                step_size += resolution
            self._equal_spacing = is_equal_spacing(nfft, rs, step_size)
        step_size = hz_to_mhz(step_size)
        self._step_size = step_size
        print(f'step_size: {step_size!r}, equal_spacing: {self._equal_spacing}')
        return step_size

    @property
    def equal_spacing(self):
        # Whether successive sweeps land on a single uniform frequency grid.
        r = getattr(self, '_equal_spacing', None)
        if r is not None:
            return r
        # Computing step_size populates _equal_spacing as a side effect.
        _ = self.step_size
        return self._equal_spacing

    @property
    def window_size(self):
        c = self.config
        return c.sampling.get('window_size')

    @window_size.setter
    def window_size(self, value):
        if value == self.sampling_config.get('window_size'):
            return
        self.sampling_config.window_size = value

    @property
    def gain(self):
        return self.device_config.get('gain')

    @gain.setter
    def gain(self, value):
        # Snap to the nearest supported gain once the device wrapper exists.
        if value is not None and hasattr(self, 'sdr_wrapper'):
            value = self.get_nearest_gain(value)
        self.device_config.gain = value

    @property
    def gains(self):
        # Cached list of supported gains (dB), or None if unavailable.
        gains = getattr(self, '_gains', None)
        if gains is None:
            gains = self._gains = self.get_gains()
        return gains

    def get_gains(self):
        """Query the device for its supported gains (in dB), or None."""
        self.sdr_wrapper.enable_scanner_updates = False
        with self.sdr_wrapper:
            sdr = self.sdr
            if sdr is None:
                gains = None
            else:
                gains = self.sdr.get_gains()
        self.sdr_wrapper.enable_scanner_updates = True
        if gains is not None:
            # The driver reports tenths of a dB.
            gains = [gain / 10. for gain in gains]
        return gains

    def get_nearest_gain(self, gain):
        """Return the supported gain closest to *gain* (or *gain* if unknown)."""
        gains = self.gains
        if gains is None:
            return gain
        npgains = np.array(gains)
        return gains[np.abs(npgains - gain).argmin()]

    def run_scan(self):
        # Keep the device open for the duration of the scan.
        with self.sdr_wrapper:
            super(Scanner, self).run_scan()

    def on_sample_set_processed(self, sample_set):
        """Fold a processed SampleSet into the spectrum and update progress."""
        powers = sample_set.powers
        freqs = sample_set.frequencies
        spectrum = self.spectrum
        center_freq = sample_set.center_frequency
        if self.equal_spacing:
            force_lower_freq = True
        else:
            force_lower_freq = False
        spectrum.add_sample_set(
            frequency=freqs,
            magnitude=powers,
            center_frequency=center_freq,
            force_lower_freq=force_lower_freq,
        )
        self.progress = self.sample_collection.calc_progress()
class ThreadedScanner(threading.Thread, Scanner):
    """Scanner that runs its scan loop in a background thread.

    With ``run_once`` (default) a single scan is performed; otherwise the
    scan repeats, pausing ``scan_wait_timeout`` seconds between passes,
    until stop() is called.
    """
    def __init__(self, **kwargs):
        threading.Thread.__init__(self)
        Scanner.__init__(self, **kwargs)
        self.plot = kwargs.get('plot')
        self.run_once = kwargs.get('run_once', True)
        self.scan_wait_timeout = kwargs.get('scan_wait_timeout', 5.)
        # State events: scanning (a pass is active), waiting (between passes),
        # stopping (stop requested), stopped (thread finished).
        self.scanning = threading.Event()
        self.waiting = threading.Event()
        self.stopping = threading.Event()
        self.stopped = threading.Event()
        self.need_update = threading.Event()
        self.need_update_lock = threading.Lock()

    def on_current_freq(self, value):
        # Notify an attached plot (if any) and flag that a redraw is needed.
        if self.plot is not None:
            self.plot.update_plot()
        with self.need_update_lock:
            self.need_update.set()

    def run(self):
        scanning = self.scanning
        waiting = self.waiting
        stopping = self.stopping
        stopped = self.stopped
        scan_wait_timeout = self.scan_wait_timeout
        run_once = self.run_once
        run_scan = self.run_scan
        while True:
            if stopping.is_set():
                break
            scanning.set()
            run_scan()
            scanning.clear()
            if run_once:
                break
            # stop() sets `waiting` to cut this sleep short.
            waiting.wait(scan_wait_timeout)
        stopped.set()

    def stop(self):
        """Request the thread to stop and block until it has."""
        self.stopping.set()
        self.waiting.set()
        self.stopped.wait()
def scan_and_plot(**kwargs):
    """Run a complete blocking scan, show the spectrum plot, return the scanner."""
    scanner = Scanner(**kwargs)
    scanner.run_scan()
    scanner.spectrum.show_plot()
    return scanner
def scan_and_save(filename=None, frequency_format=None, **kwargs):
    """Run a complete blocking scan and export the spectrum to *filename*."""
    scanner = Scanner(**kwargs)
    scanner.run_scan()
    scanner.spectrum.export_to_file(
        filename=filename, frequency_format=frequency_format)
    return scanner
import time
import threading
import numpy as np
from scipy.signal.windows import __all__ as WINDOW_TYPES
from scipy.signal import welch, get_window, hilbert
from wwb_scanner.core import JSONMixin
# 'get_window' is scipy's factory function, not a window type — exclude it.
WINDOW_TYPES = [s for s in WINDOW_TYPES if s != 'get_window']
# Fallback Welch segment length when the scanner provides no window_size.
NPERSEG = 128
def next_2_to_pow(val):
    """Return the smallest power of two >= *val* (0 for val <= 0).

    The original bit-smearing trick (``v |= v >> 1`` ... ``v >> 16``) only
    covered 32-bit values; ``int.bit_length`` gives the same result for
    arbitrarily large Python ints.
    """
    val = int(val)
    if val <= 0:
        # Matches the original behavior for non-positive input.
        return 0
    return 1 << (val - 1).bit_length()
def calc_num_samples(num_samples):
    """Round *num_samples* up to the next power of two."""
    return next_2_to_pow(int(num_samples))
def sort_psd(f, Pxx, onesided=False):
    """fftshift frequency and PSD arrays so frequencies run low to high.

    ``onesided`` is accepted for interface compatibility but unused.
    """
    shift = np.fft.fftshift
    return shift(f), shift(Pxx)
class SampleSet(JSONMixin):
    """Raw IQ captures at a single center frequency, plus their PSD.

    ``read_samples`` fills ``raw`` asynchronously from the SDR; the sweeps
    are then processed with Welch's method into (frequencies, powers).
    """
    __slots__ = ('scanner', 'center_frequency', 'raw', 'current_sweep', 'complete',
                 '_frequencies', 'powers', 'collection', 'process_thread', 'samples_discarded')
    _serialize_attrs = ('center_frequency', '_frequencies', 'powers')

    def __init__(self, **kwargs):
        for key in self.__slots__:
            if key == '_frequencies':
                # Route through the `frequencies` property setter.
                key = 'frequencies'
            setattr(self, key, kwargs.get(key))
        self.complete = threading.Event()
        self.samples_discarded = False
        if self.scanner is None and self.collection is not None:
            self.scanner = self.collection.scanner

    @property
    def frequencies(self):
        # Lazily computed from the expected FFT grid until real data arrives.
        f = getattr(self, '_frequencies', None)
        if f is None:
            f = self._frequencies = self.calc_expected_freqs()
        return f

    @frequencies.setter
    def frequencies(self, value):
        self._frequencies = value

    @property
    def sweeps_per_scan(self):
        return self.scanner.sweeps_per_scan

    @property
    def samples_per_sweep(self):
        return self.scanner.samples_per_sweep

    @property
    def window_size(self):
        return getattr(self.scanner, 'window_size', NPERSEG)

    def read_samples(self):
        """Tune the SDR to this set's frequency and start async capture."""
        scanner = self.scanner
        freq = self.center_frequency
        sweeps_per_scan = scanner.sweeps_per_scan
        samples_per_sweep = scanner.samples_per_sweep
        sdr = scanner.sdr
        sdr.set_center_freq(freq)
        self.raw = np.zeros((sweeps_per_scan, samples_per_sweep), 'complex')
        self.powers = np.zeros((sweeps_per_scan, samples_per_sweep), 'float64')
        sdr.read_samples_async(self.samples_callback, num_samples=samples_per_sweep)

    def samples_callback(self, iq, context):
        """Async callback: store one sweep of IQ data.

        The first callback after tuning is discarded (settling samples).
        Fix: the original post-store completion check compared the stale
        pre-increment counter with ``>`` and could never fire, delaying
        completion by one extra callback.
        """
        samples_per_sweep = self.scanner.samples_per_sweep
        if not self.samples_discarded:
            self.samples_discarded = True
            return
        current_sweep = getattr(self, 'current_sweep', None)
        if current_sweep is None:
            current_sweep = self.current_sweep = 0
        if current_sweep >= self.raw.shape[0]:
            self.on_sample_read_complete()
            return
        self.raw[current_sweep] = iq
        self.current_sweep += 1
        if self.current_sweep >= self.raw.shape[0]:
            self.on_sample_read_complete()

    def on_sample_read_complete(self):
        """Stop the async read and kick off processing."""
        sdr = self.scanner.sdr
        if not sdr.read_async_canceling:
            sdr.cancel_read_async()
        self.process_samples()

    def translate_freq(self, samples, freq, rs):
        """Frequency-translate *samples* by *freq* Hz at sample rate *rs*.

        Adapted from
        https://github.com/vsergeev/luaradio/blob/master/radio/blocks/signal/frequencytranslator.lua
        Fix: ``dtype=np.float`` used the alias removed in NumPy 1.24;
        use np.float64 explicitly.
        """
        if not np.iscomplexobj(samples):
            samples = hilbert(samples)
        omega = 2 * np.pi * (freq / rs)

        def iter_phase():
            # The per-step -2*pi offset is harmless: it shifts the phase by a
            # whole number of turns per sample, leaving cos/sin unchanged.
            p = 0
            i = 0
            while i < samples.shape[-1]:
                yield p
                p += omega
                p -= 2 * np.pi
                i += 1

        phase_rot = np.fromiter(iter_phase(), dtype=np.float64)
        phase_rot = np.unwrap(phase_rot)
        xlator = np.zeros(phase_rot.size, dtype=samples.dtype)
        xlator.real = np.cos(phase_rot)
        xlator.imag = np.sin(phase_rot)
        samples *= xlator
        return samples

    def process_samples(self):
        """Compute the PSD of the captured sweeps and hand it to the collection."""
        rs = self.scanner.sample_rate
        fc = self.center_frequency
        samples = self.raw.flatten()
        win_size = self.window_size
        win = get_window(self.scanner.sampling_config.window_type, win_size)
        freqs, Pxx = welch(samples, fs=rs, window=win, detrend=False,
                           nperseg=win_size, scaling='density', return_onesided=False)
        # Round-trip through the time domain to apply the frequency translation.
        iPxx = np.fft.irfft(Pxx)
        iPxx = self.translate_freq(iPxx, fc, rs)
        Pxx = np.abs(np.fft.rfft(iPxx.real))
        freqs, Pxx = sort_psd(freqs, Pxx)
        # Convert baseband bins to absolute frequencies in MHz.
        freqs = np.around(freqs)
        freqs += fc
        freqs /= 1e6
        self.powers = Pxx
        if not np.array_equal(freqs, self.frequencies):
            print('freq not equal: %s, %s' % (self.frequencies.size, freqs.size))
            self.frequencies = freqs
        # Free the raw buffer; it is no longer needed.
        self.raw = None
        self.collection.on_sample_set_processed(self)
        self.complete.set()

    def calc_expected_freqs(self):
        """Predict the frequency grid (MHz) process_samples() will produce."""
        freq = self.center_frequency
        scanner = self.scanner
        rs = scanner.sample_rate
        win_size = self.window_size
        num_samples = scanner.samples_per_sweep * scanner.sweeps_per_scan
        fake_samples = np.zeros(num_samples, 'complex')
        f_expected, Pxx = welch(fake_samples.real, fs=rs, nperseg=win_size, return_onesided=False)
        f_expected, Pxx = sort_psd(f_expected, Pxx)
        f_expected = np.around(f_expected)
        f_expected += freq
        f_expected /= 1e6
        return f_expected

    def _serialize(self):
        return {k: getattr(self, k) for k in self._serialize_attrs}
class SampleCollection(JSONMixin):
    """All SampleSets for one scan, keyed by center frequency (Hz)."""
    def __init__(self, **kwargs):
        self.scanner = kwargs.get('scanner')
        self.scanning = threading.Event()
        self.stopped = threading.Event()
        # Maps center frequency (Hz) -> SampleSet.
        self.sample_sets = {}

    def calc_progress(self):
        """Return the fraction (0..1) of sample sets fully processed."""
        num_sets = len(self.sample_sets)
        if not num_sets:
            return 0
        num_complete = 0.
        for sample_set in self.sample_sets.values():
            if sample_set.complete.is_set():
                num_complete += 1
        return num_complete / num_sets

    def add_sample_set(self, sample_set):
        self.sample_sets[sample_set.center_frequency] = sample_set

    def build_sample_set(self, freq):
        """Create, register and return a SampleSet for center frequency *freq* (Hz)."""
        sample_set = SampleSet(collection=self, center_frequency=freq)
        self.add_sample_set(sample_set)
        return sample_set

    def scan_all_freqs(self):
        """Capture every sample set in frequency order, then wait for processing."""
        self.scanning.set()
        complete_events = set()
        for key in sorted(self.sample_sets.keys()):
            if not self.scanning.is_set():
                # Cancelled via stop()/cancel().
                break
            sample_set = self.sample_sets[key]
            sample_set.read_samples()
            if not sample_set.complete.is_set():
                complete_events.add(sample_set.complete)
        if self.scanning.is_set():
            # Wait for any sets still being processed in callbacks.
            for e in complete_events.copy():
                if e.is_set():
                    complete_events.discard(e)
                else:
                    e.wait()
        self.scanning.clear()
        self.stopped.set()

    def stop(self):
        # NOTE(review): identical to cancel(); kept for API compatibility.
        if self.scanning.is_set():
            self.scanning.clear()
        self.stopped.wait()

    def cancel(self):
        """Abort the scan loop and block until it reports stopped."""
        if self.scanning.is_set():
            self.scanning.clear()
        self.stopped.wait()

    def on_sample_set_processed(self, sample_set):
        # Forward to the scanner, which folds the data into the spectrum.
        self.scanner.on_sample_set_processed(sample_set)

    def _serialize(self):
        return {'sample_sets':
                {k: v._serialize() for k, v in self.sample_sets.items()},
                }
def _deserialize(self, **kwargs):
for key, val in kwargs.get('sample_sets', {}).items():
sample_set = SampleSet.from_json(val, collection=self)
self.sample_sets[key] = sample_set | /rtlsdr-wwb-scanner-0.0.1.tar.gz/rtlsdr-wwb-scanner-0.0.1/wwb_scanner/scanner/sample_processing.py | 0.672439 | 0.216125 | sample_processing.py | pypi |
import numpy as np
from scipy.interpolate import CubicSpline
import jsonfactory
from wwb_scanner.core import JSONMixin
from wwb_scanner.utils import dbmath
class SampleArray(JSONMixin):
    """Structured array of spectrum samples with field-level access.

    Each record holds frequency (MHz), complex iq, linear magnitude and
    dbFS.  With ``keep_sorted`` (default) the array stays ordered by
    frequency and inserts merge duplicate frequencies by averaging.
    """
    dtype = np.dtype([
        ('frequency', np.float64),
        ('iq', np.complex128),
        ('magnitude', np.float64),
        ('dbFS', np.float64)
    ])

    def __init__(self, data=None, keep_sorted=True):
        self.keep_sorted = keep_sorted
        if data is None:
            data = np.empty([0], dtype=self.dtype)
        self.data = data
        if keep_sorted:
            self.data = np.sort(self.data, order='frequency')

    @classmethod
    def create(cls, keep_sorted=True, **kwargs):
        """Build a SampleArray from a ready dtype array or from field kwargs."""
        data = kwargs.get('data')
        obj = cls(data, keep_sorted=keep_sorted)
        if not obj.data.size:
            obj.set_fields(**kwargs)
        return obj

    def set_fields(self, **kwargs):
        """Populate from per-field kwargs; derives missing magnitude/dbFS.

        'frequency' is required; scalars are promoted to 1-element arrays.
        """
        f = kwargs.get('frequency')
        if f is None:
            raise Exception('frequency array must be provided')
        if not isinstance(f, np.ndarray):
            f = np.array([f])
        data = np.zeros(f.size, dtype=self.dtype)
        data['frequency'] = f
        for key, val in kwargs.items():
            if key not in self.dtype.fields:
                continue
            if key == 'frequency':
                continue
            if not isinstance(val, np.ndarray):
                val = np.array([val])
            data[key] = val
        if data is None:
            # NOTE(review): unreachable — data was assigned above; kept as-is.
            return
        # Derive whichever of magnitude/dbFS was not supplied.
        iq = kwargs.get('iq')
        mag = kwargs.get('magnitude')
        dbFS = kwargs.get('dbFS')
        if iq is not None and mag is None:
            mag = data['magnitude'] = np.abs(iq)
        if dbFS is not None and mag is None:
            mag = data['magnitude'] = dbmath.from_dB(dbFS)
        if mag is not None and dbFS is None:
            data['dbFS'] = dbmath.to_dB(mag)
        self.append(data)

    def __getattr__(self, attr):
        # Expose dtype fields (frequency, iq, magnitude, dbFS) as attributes.
        if attr in self.dtype.fields.keys():
            return self.data[attr]
        raise AttributeError

    def __setattr__(self, attr, val):
        # Field names write into the structured array.
        # NOTE(review): the unconditional super() call also creates a real
        # instance attribute of the same name, which then shadows the field
        # on later reads — looks unintended; verify before relying on it.
        if attr in self.dtype.fields.keys():
            self.data[attr] = val
        super(SampleArray, self).__setattr__(attr, val)

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    @property
    def size(self):
        return self.data.size

    @property
    def shape(self):
        return self.data.shape

    def _check_obj_type(self, other):
        """Return the raw dtype array behind *other*, or raise."""
        if isinstance(other, SampleArray):
            data = other.data
        else:
            if isinstance(other, np.ndarray) and other.dtype == self.dtype:
                data = other
            else:
                raise Exception('Cannot extend this object type: {}'.format(other))
        return data

    def append(self, other):
        """Append samples, merging/sorting when keep_sorted is enabled."""
        if self.keep_sorted:
            self.insert_sorted(other)
        else:
            data = self._check_obj_type(other)
            self.data = np.append(self.data, data)

    def insert_sorted(self, other):
        """Merge *other* in: average values at duplicate frequencies,
        insert new frequencies in sorted order."""
        data = self._check_obj_type(other)
        # Indices of frequencies present in both arrays, on each side.
        in_ix_self = np.flatnonzero(np.in1d(self.frequency, data['frequency']))
        in_ix_data = np.flatnonzero(np.in1d(data['frequency'], self.frequency))
        if in_ix_self.size:
            # Duplicate frequencies: average old and new values per field.
            self.iq[in_ix_self] = np.mean([
                self.iq[in_ix_self], data['iq'][in_ix_data]
            ], axis=0)
            self.magnitude[in_ix_self] = np.mean([
                self.magnitude[in_ix_self], data['magnitude'][in_ix_data]
            ], axis=0)
            self.dbFS[in_ix_self] = np.mean([
                self.dbFS[in_ix_self], data['dbFS'][in_ix_data]
            ], axis=0)
        # Frequencies not yet present: append and re-sort.
        nin_ix = np.flatnonzero(np.in1d(data['frequency'], self.frequency, invert=True))
        if nin_ix.size:
            d = np.append(self.data, data[nin_ix])
            d = np.sort(d, order='frequency')
            self.data = d

    def smooth(self, window_size):
        """Smooth the magnitude with a Hanning window (in place).

        dbFS is recomputed from the smoothed magnitude.
        """
        x = self.magnitude
        w = np.hanning(window_size)
        # Mirror the edges so the convolution output keeps the input length.
        s = np.r_[x[window_size-1:0:-1], x, x[-2:-window_size-1:-1]]
        y = np.convolve(w/w.sum(), s, mode='valid')
        m = y[(window_size//2-1):-(window_size//2)]
        if m.size != x.size:
            raise Exception('Smooth result size {} != data size {}'.format(m.size, x.size))
        self.data['magnitude'] = m
        self.data['dbFS'] = dbmath.to_dB(m)

    def interpolate(self, spacing=0.025):
        """Resample magnitude onto a uniform frequency grid via cubic spline.

        Replaces the data; iq values are not interpolated (left at zero).
        """
        fmin = np.ceil(self.frequency.min())
        fmax = np.floor(self.frequency.max())
        x = self.frequency
        y = self.magnitude
        cs = CubicSpline(x, y)
        xs = np.arange(fmin, fmax+spacing, spacing)
        # Round the grid to the decimal precision implied by `spacing`.
        n_dec = len(str(spacing).split('.')[1])
        xs = np.around(xs, n_dec)
        ys = cs(xs)
        data = np.zeros(xs.size, dtype=self.dtype)
        data['frequency'] = xs
        data['magnitude'] = ys
        data['dbFS'] = dbmath.to_dB(ys)
        self.data = data

    def _serialize(self):
        return {'data': self.data, 'keep_sorted': self.keep_sorted}

    def __repr__(self):
        return '<{self.__class__.__name__}: {self}>'.format(self=self)

    def __str__(self):
        return str(self.data)
@jsonfactory.register
class JSONEncoder(object):
    """jsonfactory hooks to (de)serialize SampleArray instances."""

    def encode(self, o):
        if not isinstance(o, SampleArray):
            # Not ours — let other encoders handle it.
            return None
        d = o._serialize()
        d['__class__'] = o.__class__.__name__
        return d

    def decode(self, d):
        if d.get('__class__') != 'SampleArray':
            return d
        return SampleArray(d['data'], d['keep_sorted'])
import time
import numbers
import numpy as np
from wwb_scanner.core import JSONMixin
from wwb_scanner.utils import dbmath
class Sample(JSONMixin):
def __init__(self, **kwargs):
self.init_complete = kwargs.get('init_complete', False)
self.spectrum = kwargs.get('spectrum')
self.frequency = kwargs.get('frequency')
self.iq = iq = kwargs.get('iq')
self.magnitude = m = kwargs.get('magnitude')
self.power = kwargs.get('power')
self.dbFS = kwargs.get('dbFS')
self.init_complete = True
@property
def spectrum_index(self):
f = self.spectrum.sample_data['frequency']
if self.frequency not in f:
return None
return np.argwhere(f == self.frequency)[0][0]
@property
def frequency(self):
return getattr(self, '_frequency', None)
@frequency.setter
def frequency(self, value):
if not isinstance(value, numbers.Number):
return
if self.frequency == value:
return
if not isinstance(value, float):
value = float(value)
self._frequency = value
@property
def iq(self):
ix = self.spectrum_index
if ix is None:
return None
return self.spectrum.sample_data['iq'][ix]
@iq.setter
def iq(self, value):
if value is None:
return
if self.init_complete:
old = self.iq
if old == value:
return
if isinstance(value, (list, tuple)):
i, q = value
value = np.complex128(float(i) + 1j*float(q))
ix = self.spectrum_index
self.spectrum.sample_data['iq'][ix] = value
if not self.init_complete:
return
self.spectrum.on_sample_change(sample=self, iq=value, old=old)
@property
def magnitude(self):
ix = self.spectrum_index
if ix is None:
return None
return self.spectrum.sample_data['magnitude'][ix]
@magnitude.setter
def magnitude(self, value):
if value is None:
return
if not isinstance(value, numbers.Number):
return
if self.init_complete:
old = self.magnitude
if old == value:
return
if not isinstance(value, float):
value = float(value)
ix = self.spectrum_index
self.spectrum.sample_data['magnitude'][ix] = value
if not self.init_complete:
return
self.spectrum.on_sample_change(sample=self, magnitude=value, old=old)
@property
def dbFS(self):
ix = self.spectrum_index
if ix is None:
return None
return self.spectrum.sample_data['dbFS'][ix]
@dbFS.setter
def dbFS(self, value):
if value is None:
return
if self.init_complete:
old = self.dbFS
if old == value:
return
if not isinstance(value, numbers.Number):
return
m = dbmath.from_dB(value)
ix = self.spectrum_index
self.spectrum.sample_data['dbFS'][ix] = value
self.spectrum.sample_data['magnitude'][ix] = m
if not self.init_complete:
return
self.spectrum.on_sample_change(sample=self, dbFS=value, old=old)
@property
def formatted_frequency(self):
return '%07.4f' % (self.frequency)
@property
def formatted_magnitude(self):
return '%03.1f' % (self.magnitude)
@property
def formatted_dbFS(self):
return '%03.1f' % (self.dbFS)
def _serialize(self):
d = {'frequency':self.frequency}
if self.iq is not None:
d['iq'] = (str(self.iq.real), str(self.iq.imag))
elif self.magnitude is not None:
d['magnitude'] = self.magnitude
else:
d['dbFS'] = self.dbFS
return d
    def __repr__(self):
        # Delegate to __str__ so repr() and print() show the same summary.
        return str(self)
def __str__(self):
return '%s (%s dB)' % (self.formatted_frequency, self.dbFS)
class TimeBasedSample(Sample):
    """A :class:`Sample` that also records when it was created.

    The creation time may be supplied via the ``timestamp`` keyword;
    when absent (or None) the current wall-clock time is used.
    """
    def __init__(self, **kwargs):
        timestamp = kwargs.get('timestamp')
        self.timestamp = time.time() if timestamp is None else timestamp
        super().__init__(**kwargs)
class Color(dict):
    """A dict-backed RGBA color whose channels are floats in ``[0, 1]``.

    The channel keys are ``'r'``, ``'g'``, ``'b'`` and ``'a'``; any channel
    not supplied defaults to opaque green (r=0, g=1, b=0, a=1).
    """
    _color_keys = ['r', 'g', 'b', 'a']
    def __init__(self, initdict=None, **kwargs):
        if initdict is None:
            initdict = {}
        # fill any missing channel with the opaque-green defaults
        for key, default in zip(self._color_keys, (0., 1., 0., 1.)):
            initdict.setdefault(key, default)
        super().__init__(initdict, **kwargs)
    def copy(self):
        """Return an independent Color with identical channel values."""
        return Color(dict((key, self[key]) for key in self._color_keys))
    def from_list(self, l):
        """Fill channels from a sequence ordered r, g, b, a (may be shorter)."""
        for position, component in enumerate(l):
            self[self._color_keys[position]] = component
    def to_list(self):
        """Return the channel values as ``[r, g, b, a]``."""
        return list(map(self.__getitem__, self._color_keys))
    def to_hex(self, include_alpha=False):
        """Return an HTML-style hex string such as ``'#00ff00'``.

        ``include_alpha`` appends the alpha channel as a fourth byte.
        """
        keys = self._color_keys if include_alpha else self._color_keys[:3]
        return '#' + ''.join('%02x' % int(self[key] * 255) for key in keys)
    @classmethod
    def from_hex(cls, hexstr):
        """Build a Color from ``'#rrggbb'`` or ``'#rrggbbaa'``.

        Channels missing from the string fall back to the defaults.
        """
        digits = hexstr.split('#')[1]
        channels = {}
        for index, pos in enumerate(range(0, len(digits), 2)):
            channels[cls._color_keys[index]] = int(digits[pos:pos + 2], 16) / 255.
        return cls(channels)
    def __eq__(self, other):
        """Compare against another Color, a channel dict, or an r/g/b(/a)
        sequence; a 3-element sequence only matches when alpha is 1."""
        if isinstance(other, Color):
            other_values = other.to_list()
        elif isinstance(other, dict):
            other_values = Color(other).to_list()
        elif isinstance(other, (list, tuple)):
            other_values = list(other)
        else:
            return NotImplemented
        own_values = self.to_list()
        if len(other_values) < 3:
            return False
        if len(other_values) == 3:
            return self['a'] == 1 and own_values[:3] == other_values
        return own_values == other_values
    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result
    def __repr__(self):
        return '<{0.__class__}: {0}>'.format(self)
    def __str__(self):
        return '{}'.format(self.to_list())
import sys
import traceback
from DSCode.ds_code_registry import *
from DSCode.config_data_registry import config
def get_tool_object(region, test_id):
    """Return the FAST tool-flow object registered for a market.

    The market config must contain ``Constructors.Tool`` naming a class
    registered in this module's namespace, e.g.::

        US: {"Constructors": {"Sales": "FAST_US_Sales",
                              "Stores": "FAST_US_Stores",
                              "Tool": "Fast_US_Tool"}, ...}

    Parameters
    ----------
    region: region or market name by which DS developer has registered code
        to the library
    test_id: test_id of the current test

    Returns
    -------
    The instantiated tool-flow object.

    Raises
    ------
    Exception
        If no ``Tool`` constructor is registered for the market.
    """
    config_copy = config[region].copy() if region in config else config.copy()
    # .get avoids a bare KeyError when the market config has no Constructors key
    constructors = config_copy.get('Constructors', {})
    if 'Tool' in constructors:
        tool_class = getattr(sys.modules[__name__], constructors['Tool'])
        return tool_class(config=config_copy, region=region, test_id=test_id)
    # BUG FIX: the old message blamed the "stores object" and embedded
    # traceback.format_exc(), which is meaningless ("NoneType: None")
    # outside an except block.
    raise Exception(
        "An error has occurred while creating the tool object: no 'Tool' "
        "constructor is registered for region {!r}".format(region))
def get_tool_msrmt_object(region, test_id):
    """Return the FAST tool measurement-flow object for a market.

    The market config must contain ``Constructors.ToolMeasurement`` naming
    a class registered in this module's namespace; the measurement object
    wraps the planning object produced by :func:`get_tool_object`.

    Parameters
    ----------
    region: region or market name by which DS developer has registered code
        to the library
    test_id: test_id of the current test

    Returns
    -------
    The instantiated tool measurement-flow object.

    Raises
    ------
    Exception
        If no ``ToolMeasurement`` constructor is registered for the market.
    """
    config_copy = config[region].copy() if region in config else config.copy()
    constructors = config_copy.get('Constructors', {})
    # BUG FIX: previously this checked for the 'Tool' key but looked up
    # 'ToolMeasurement', raising a KeyError instead of the intended error
    # whenever only 'Tool' was configured.
    if 'ToolMeasurement' in constructors:
        msrmt_class = getattr(sys.modules[__name__], constructors['ToolMeasurement'])
        return msrmt_class(fast_tool_plan=get_tool_object(region, test_id),
                           config=config_copy,
                           region=region,
                           test_id=test_id)
    raise Exception(
        "An error has occurred while creating the tool measurement object: "
        "no 'ToolMeasurement' constructor is registered for region {!r}"
        .format(region))
from typing import Tuple
import pandas as pd
from DSCode.library.ds_code_test_plan import FastTool
from DSCode.library.ds_code_test_measurement import FastToolMeasurement
class FastToolUS(FastTool):
    """US-market FAST tool flow (test planning).

    Thin adapter over :class:`FastTool`: every public method injects the
    US channel-specific table names and week/comparison configuration into
    ``applicability_criteria`` before delegating to the library code.  All
    public signatures and return shapes match the library implementation.
    """

    def __init__(self, config, region, test_id):
        super().__init__(config=config, region=region, test_id=test_id)

    # -- internal helpers ---------------------------------------------------

    def _set_table_config(self, applicability_criteria):
        """Inject the channel-specific sales table plus the store and
        product master tables into *applicability_criteria* (in place)."""
        applicability_criteria['sales_table'] = self._config['tables'][
            'weekly_mstr'][applicability_criteria['channel']]
        applicability_criteria['store_table'] = self._config['tables']['store_mstr']
        applicability_criteria['product_table'] = self._config['tables']['weekly_data_table']

    def _set_week_config(self, applicability_criteria, lift=False, summary=False):
        """Inject the sales-week window (and optionally the lift/summary
        windows) into *applicability_criteria* (in place)."""
        applicability_criteria['sales_weeks'] = self.set_sales_weeks(applicability_criteria)
        if lift:
            applicability_criteria['sales_lifts_sales_weeks'] = \
                self.set_lift_sales_weeks(applicability_criteria)
        if summary:
            applicability_criteria['summary_sales_weeks'] = \
                self.set_summary_sales_weeks(applicability_criteria)

    # -- per-channel configuration accessors --------------------------------

    def set_sales_weeks(self, applicability_criteria):
        """Sales-week window configured for the given channel."""
        return self._config['metadata']['test_configuration'][
            'sales_weeks'][applicability_criteria['channel']]

    def set_lift_sales_weeks(self, applicability_criteria):
        """Sales-week window used for lift calculations for the channel."""
        return self._config['metadata']['test_configuration'][
            'sales_lifts_sales_weeks'][applicability_criteria['channel']]

    def set_summary_sales_weeks(self, applicability_criteria):
        """Sales-week window used for summary calculations for the channel."""
        return self._config['metadata']['test_planning'][
            'summary_sales_weeks'][applicability_criteria['channel']]

    def set_test_vs_pop_comp(self, applicability_criteria):
        """Store attributes used to compare test vs population stores
        (keyed by team, then channel)."""
        return self._config['metadata']['test_planning'][
            'test_vs_population_compare'][applicability_criteria['team']][
            applicability_criteria['channel']].copy()

    def set_test_vs_pop_comp_sum(self, applicability_criteria):
        """Store attributes for the test-vs-population summary page
        (keyed by channel)."""
        return self._config['metadata']['test_planning'][
            'test_vs_population_compare_summary'][
            applicability_criteria['channel']].copy()

    def set_test_vs_cntrl_comp(self, applicability_criteria):
        """Store attributes used to compare test vs control stores
        (keyed by team, then channel)."""
        return self._config['metadata']['test_planning'][
            'test_vs_control_compare'][applicability_criteria['team']][
            applicability_criteria['channel']].copy()

    def set_test_vs_cntrl_comp_sum(self, applicability_criteria):
        """Store attributes for the test-vs-control summary page
        (keyed by channel)."""
        return self._config['metadata']['test_planning'][
            'test_vs_control_compare_summary'][
            applicability_criteria['channel']].copy()

    # -- RSV estimation -----------------------------------------------------

    def calculate_rsv_estimate(self, target_variable, timeframestart, timeframeend,
                               storelist, applicability_criteria, uploaded_file_df=None):
        """Estimate the RSV/POS value and population store count."""
        if "channel" not in applicability_criteria:
            return -1, -1, "Pass channel in applicability criteria", False
        self._set_table_config(applicability_criteria)
        return super().calculate_rsv_estimate(
            target_variable=target_variable,
            timeframestart=timeframestart,
            timeframeend=timeframeend,
            storelist=storelist,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    # -- test-store planning ------------------------------------------------

    def get_test_parameter(self, confidence_level, margin_of_error, num_of_teststores,
                           target_variable, test_type, applicability_criteria,
                           uploaded_file_df=None) -> Tuple[float, str, bool]:
        """Compute the open test parameter (confidence / MoE / count)."""
        if "channel" not in applicability_criteria:
            return 0, "Pass channel", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        return super().get_test_parameter(
            confidence_level=confidence_level,
            margin_of_error=margin_of_error,
            num_of_teststores=num_of_teststores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    def power_marginoferror_calculation(self, num_of_teststores, target_variable,
                                        test_type, applicability_criteria,
                                        uploaded_file_df=None) \
            -> Tuple[float, float, float, str, bool]:
        """Compute power and margin of error for a given test-store count."""
        if "channel" not in applicability_criteria:
            return 0, 0, 0, "Pass channel", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        return super().power_marginoferror_calculation(
            num_of_teststores=num_of_teststores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    def teststores_sample_size(self, margin_of_error, target_variable, test_type,
                               applicability_criteria, uploaded_file_df=None) \
            -> Tuple[float, float, str, bool]:
        """Compute the required test-store sample size for a target MoE."""
        if "channel" not in applicability_criteria:
            # BUG FIX: previously returned a 5-tuple here while the success
            # path returns 4 values, breaking any caller that unpacks.
            return 0, pd.DataFrame(), "Pass channel", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        return super().teststores_sample_size(
            margin_of_error=margin_of_error,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    def identify_test_stores(self, num_of_teststores, target_variable, test_type,
                             applicability_criteria, stratification_variables,
                             uploaded_file_df=None) \
            -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame,
                     pd.DataFrame, pd.DataFrame, list, str, bool]:
        """Select test stores via stratified sampling."""
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                pd.DataFrame(), [], "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                pd.DataFrame(), [], "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        applicability_criteria['test_vs_population_compare'] = \
            self.set_test_vs_pop_comp(applicability_criteria)
        return super().identify_test_stores(
            num_of_teststores=num_of_teststores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            stratification_variables=stratification_variables,
            uploaded_file_df=uploaded_file_df)

    def test_population_mapping(self, teststores, target_variable, test_type,
                                applicability_criteria, uploaded_file_df=None) \
            -> Tuple[pd.DataFrame, str, bool]:
        """Map the chosen test stores against the population."""
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True, summary=True)
        applicability_criteria['test_vs_population_compare'] = \
            self.set_test_vs_pop_comp(applicability_criteria)
        return super().test_population_mapping(
            teststores=teststores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    def test_store_summary(self, teststores, target_variable, test_type,
                           applicability_criteria, uploaded_file_df=None) \
            -> Tuple[dict, dict, dict, str, bool]:
        """Build the test-store summary dictionaries."""
        if "channel" not in applicability_criteria:
            return {}, {}, {}, "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return {}, {}, {}, "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True, summary=True)
        applicability_criteria['test_vs_population_compare_summary'] = \
            self.set_test_vs_pop_comp_sum(applicability_criteria)
        return super().test_store_summary(
            teststores=teststores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    def test_store_comparison_summary(self, test_stores, target_variable, test_type,
                                      applicability_criteria, uploaded_file_df=None) \
            -> Tuple[pd.DataFrame, dict, str, bool]:
        """Build the test-store comparison summary (averages + metrics)."""
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), {}, "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), {}, "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        return super().test_store_comparison_summary(
            test_stores=test_stores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    def test_stores_format_check(self, target_variable, num_of_teststores, test_type,
                                 applicability_criteria, teststores_data,
                                 uploaded_file_df=None) \
            -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, int, bool]:
        """Validate manually uploaded test stores."""
        # BUG FIX: the error paths previously returned an 8-tuple (copied
        # from identify_test_stores) while the annotation and success path
        # use 7 values; the shapes now match.
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), [], \
                "Pass channel in applicability criteria", num_of_teststores, False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), [], \
                "Pass team in applicability criteria", num_of_teststores, False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        applicability_criteria['test_vs_population_compare'] = \
            self.set_test_vs_pop_comp(applicability_criteria)
        return super().test_stores_format_check(
            target_variable, num_of_teststores, test_type,
            applicability_criteria, teststores_data, uploaded_file_df)

    def manual_teststores_selection(self, test_type, target_variable,
                                    applicability_criteria, uploaded_file_df=None):
        """Support fully manual test-store selection."""
        # BUG FIX: the error paths previously returned an 8-tuple while the
        # success path returns 9 values; the shapes now match
        # (teststores, valid_sales_stores, consideryearweeks,
        #  num_of_teststores, margin_of_error, confidence_interval,
        #  power_of_test, message, success_flag).
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), [], 0, 0, 0, 0, \
                "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), [], 0, 0, 0, 0, \
                "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        applicability_criteria['test_vs_population_compare'] = \
            self.set_test_vs_pop_comp(applicability_criteria)
        return super().manual_teststores_selection(
            test_type, target_variable, applicability_criteria, uploaded_file_df)

    # -- control-store planning ---------------------------------------------

    def identify_control_stores(self, teststores, target_variable, applicability_criteria,
                                test_type, one_to_one=True, business_categories=None,
                                reqcontrolstores=1, control_store_pool=None,
                                len_control_pool=None, uploaded_file_df=None) \
            -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, str, bool]:
        """Select control stores matched to *teststores*."""
        # BUG FIX: business_categories previously defaulted to a shared
        # mutable list ([]); None is the sentinel and maps to a fresh list.
        business_categories = [] if business_categories is None else business_categories
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        applicability_criteria['test_vs_control_compare'] = \
            self.set_test_vs_cntrl_comp(applicability_criteria)
        return super().identify_control_stores(
            teststores=teststores,
            target_variable=target_variable,
            applicability_criteria=applicability_criteria.copy(),
            test_type=test_type, one_to_one=one_to_one,
            business_categories=business_categories,
            reqcontrolstores=reqcontrolstores,
            control_store_pool=control_store_pool,
            len_control_pool=len_control_pool,
            uploaded_file_df=uploaded_file_df)

    def average_weekly_target_similarity_correlation(self, test_control_data,
                                                     target_variable,
                                                     applicability_criteria,
                                                     business_categories=None) \
            -> Tuple[dict, pd.DataFrame, str, bool]:
        """Compute weekly similarity/correlation between test and control."""
        # BUG FIX: mutable default [] replaced by a None sentinel.
        business_categories = [] if business_categories is None else business_categories
        if "channel" not in applicability_criteria:
            return {}, pd.DataFrame(), "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return {}, pd.DataFrame(), "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        return super().average_weekly_target_similarity_correlation(
            test_control_data=test_control_data,
            target_variable=target_variable,
            applicability_criteria=applicability_criteria,
            business_categories=business_categories)

    def control_store_summary(self, test_type, test_control_mapping_stores,
                              business_categories, target_variable,
                              applicability_criteria, uploaded_file_df=None) \
            -> Tuple[dict, dict, dict, str, bool]:
        """Build the control-store summary dictionaries."""
        if "channel" not in applicability_criteria:
            return {}, {}, {}, "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return {}, {}, {}, "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, summary=True)
        applicability_criteria['test_vs_control_compare_summary'] = \
            self.set_test_vs_cntrl_comp_sum(applicability_criteria)
        return super().control_store_summary(
            test_type=test_type,
            test_control_mapping_stores=test_control_mapping_stores,
            business_categories=business_categories,
            target_variable=target_variable,
            applicability_criteria=applicability_criteria.copy(),
            uploaded_file_df=uploaded_file_df)

    def manual_upload_control_store_pool(self, control_store_pool_data,
                                         teststores, target_variable,
                                         applicability_criteria, test_type,
                                         business_categories=None,
                                         reqcontrolstores=1,
                                         one_to_one=True, uploaded_file_df=None) \
            -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, str, bool]:
        """Select control stores from a manually uploaded candidate pool."""
        # BUG FIX: mutable default [] replaced by a None sentinel.
        business_categories = [] if business_categories is None else business_categories
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria)
        applicability_criteria['test_vs_control_compare'] = \
            self.set_test_vs_cntrl_comp(applicability_criteria)
        return super().manual_upload_control_store_pool(
            control_store_pool_data=control_store_pool_data,
            teststores=teststores,
            test_type=test_type,
            target_variable=target_variable,
            applicability_criteria=applicability_criteria,
            business_categories=business_categories,
            reqcontrolstores=reqcontrolstores,
            one_to_one=one_to_one,
            uploaded_file_df=uploaded_file_df)

    def recompute_control_stores(self, test_control_stores, target_variable,
                                 business_categories, include_cbu_features,
                                 reqcontrolstores, applicability_criteria,
                                 test_type, uploaded_file_df=None) \
            -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, str, bool]:
        """Re-run the control-store match for an existing mapping."""
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True)
        applicability_criteria['test_vs_control_compare'] = \
            self.set_test_vs_cntrl_comp(applicability_criteria)
        return super().recompute_control_stores(
            test_control_stores=test_control_stores,
            target_variable=target_variable,
            business_categories=business_categories,
            include_cbu_features=include_cbu_features,
            reqcontrolstores=reqcontrolstores,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)

    def manual_teststore_controlstore_upload(self, target_variable, test_control_store_data,
                                             test_type, applicability_criteria,
                                             uploaded_file_df=None,
                                             business_categories=None) \
            -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, str, bool]:
        """Validate a manually uploaded test/control pairing and attach the
        US store identifiers (TDLinx_No / StoreNumber) to each pair."""
        if "channel" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), \
                "Pass team in applicability criteria", False
        self._set_table_config(applicability_criteria)
        self._set_week_config(applicability_criteria, lift=True, summary=True)
        applicability_criteria['test_vs_control_compare'] = \
            self.set_test_vs_cntrl_comp(applicability_criteria)
        control_test_pairs, stores_master_df, annualrsvlifts, message, success_flag = \
            super().manual_teststore_controlstore_upload(
                target_variable=target_variable,
                test_control_store_data=test_control_store_data,
                test_type=test_type,
                applicability_criteria=applicability_criteria,
                uploaded_file_df=uploaded_file_df,
                business_categories=business_categories)
        if success_flag is True:
            partner_col = self._storemstrmapping['partner_id']
            id_cols = stores_master_df[[partner_col, 'TDLinx_No', 'StoreNumber']]
            # attach the test store's identifiers (prefixed columns) ...
            control_test_pairs = control_test_pairs.merge(
                id_cols.rename(columns={
                    partner_col: 'Test_store_' + partner_col,
                    'StoreNumber': 'Test_store_StoreNumber',
                    'TDLinx_No': 'Test_store_TDLinx_No'}),
                on='Test_store_' + partner_col)
            # ... then the control store's identifiers (joined on the plain
            # partner-id column, which holds the control store's id)
            control_test_pairs = control_test_pairs.merge(
                id_cols.rename(columns={
                    'StoreNumber': 'Control_store_StoreNumber',
                    'TDLinx_No': 'Control_store_TDLinx_No'}),
                on=partner_col)
        return control_test_pairs, stores_master_df, annualrsvlifts, message, success_flag
class FastToolMsrmtUS(FastToolMeasurement):
    """US-market FAST measurement flow.

    Injects the US channel-specific table names into
    ``applicability_criteria`` before delegating to
    :class:`FastToolMeasurement`.
    """

    def __init__(self, fast_tool_plan, config, region, test_id):
        super().__init__(fast_tool_plan=fast_tool_plan, config=config,
                         region=region, test_id=test_id)

    def _set_table_config(self, applicability_criteria):
        """Inject the channel-specific sales table plus the store and
        product master tables into *applicability_criteria* (in place)."""
        applicability_criteria['sales_table'] = self._config['tables'][
            'weekly_mstr'][applicability_criteria['channel']]
        applicability_criteria['store_table'] = self._config['tables']['store_mstr']
        applicability_criteria['product_table'] = self._config['tables']['weekly_data_table']

    def get_test_vs_control_linegraph(self, teststores, target_variable, test_type,
                                      applicability_criteria, weeks_before=None,
                                      weeks_after=None,
                                      control_stores_sales_method='Approach1',
                                      business_categories=None) -> Tuple[dict, str, bool]:
        """Build the test-vs-control weekly line-graph data."""
        # BUG FIX: a missing 'businessType' previously produced the message
        # "Pass channel ..."; each missing key is now named correctly.
        for required_key in ('channel', 'businessType', 'team'):
            if required_key not in applicability_criteria:
                return dict(), "Pass {} in applicability criteria".format(required_key), False
        self._set_table_config(applicability_criteria)
        return super().get_test_vs_control_linegraph(
            teststores=teststores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            control_stores_sales_method=control_stores_sales_method,
            business_categories=business_categories,
            weeks_after=weeks_after,
            weeks_before=weeks_before)

    def _get_cost(self, test_master_table, population_store_weekly_sales=None,
                  target_variable=None) -> float:
        """Cost of the RTM activity implied by the recorded break-even lift
        and the population's total sales for *target_variable*."""
        rsv_estimate = population_store_weekly_sales[target_variable].sum()
        break_even_lift = float(test_master_table['break_even_lift'].values[0])
        return self._fast_tool_plan.get_cost(
            rsv_estimate=rsv_estimate,
            breakevenliftpercentage=break_even_lift)

    def get_target_variable_analysis_results(self, teststores, target_variable, test_type,
                                             applicability_criteria,
                                             control_stores_sales_method='Approach1',
                                             outlier_column=None, business_categories=None,
                                             uploaded_file_df=None) \
            -> Tuple[float, str, pd.DataFrame, pd.DataFrame, dict, dict, str, bool]:
        """Run the measurement analysis for the target variable."""
        # BUG FIX: as above, name the actually-missing key in the message.
        for required_key in ('channel', 'businessType', 'team'):
            if required_key not in applicability_criteria:
                return 0.0, "", pd.DataFrame(), pd.DataFrame(), dict(), dict(), \
                    "Pass {} in applicability criteria".format(required_key), False
        self._set_table_config(applicability_criteria)
        return super().get_target_variable_analysis_results(
            teststores=teststores,
            target_variable=target_variable,
            test_type=test_type,
            applicability_criteria=applicability_criteria,
            control_stores_sales_method=control_stores_sales_method,
            outlier_column=outlier_column,
            business_categories=business_categories,
            uploaded_file_df=uploaded_file_df)
from typing import Tuple
import pandas as pd
from DSCode.library.sql.sales_master import Sales
from DSCode.library.sql.stores_master import Stores
class FastSalesUS(Sales):
    """US-specific weekly-sales access layer.

    Extends the generic ``Sales`` SQL helper with the US queries for
    product-filtered ("CBU") weekly sales, overall weekly sales, and the
    config-master lookup used during measurement.

    Notes
    -----
    ``execute_sql_query`` (inherited) substitutes the ``{...}`` placeholders
    in the SQL strings from the supplied criteria dict.
    """

    # Product-filter keys that are rendered as SQL "IN (...)" tuples.
    _PRODUCT_FILTER_KEYS = ('consumption_value', 'seasonal_value',
                            'category_value', 'brands_value', 'pack_value')

    def __init__(self, config, test_id) -> None:
        super().__init__(config=config, test_id=test_id)
        # Caches of the most recent pulls (also returned to the caller).
        self._cbu_sales = pd.DataFrame()
        self._overall = pd.DataFrame()

    def get_cbu_sales(self, stores, applicability_criteria, weeks):
        """Weekly POS/Volume of the selected products at the given stores.

        Parameters
        ----------
        stores : iterable
            Store identifiers to include.
        applicability_criteria : dict
            Product/store filter selections from the tool; must contain
            ``businessType`` and the product-filter lists.  The filter keys
            are rewritten to SQL-ready tuples in the dict.
        weeks : iterable
            Week values to include.

        Returns
        -------
        pd.DataFrame
            Weekly sales per (Week, StoreId).
        """
        if "businessType" not in applicability_criteria:
            raise Exception("businessType not passed to applicability criteria")
        # An empty-string sentinel keeps each SQL "IN (...)" tuple valid even
        # when a filter list is empty or single-element.  BUG FIX: building a
        # fresh tuple (instead of list.append) leaves the caller's lists
        # untouched and keeps repeated calls safe — previously the second
        # call would crash appending to an already-converted tuple.
        for key in self._PRODUCT_FILTER_KEYS:
            applicability_criteria[key] = tuple(applicability_criteria[key]) + ("",)
        applicability_criteria['store_value'] = tuple(stores)
        applicability_criteria['week_value'] = tuple(weeks)
        if applicability_criteria['businessType'] == "MW":
            # MW (Mars Wrigley) sales are restricted to Mars products.
            query = """SELECT Week, StoreId,ROUND(SUM(POS),2) as POS,ROUND(SUM(Volume),2) as Volume,
                '{channel}' as StoreClassification
                FROM {sales_table} as sales JOIN {product_table} as products ON products.UPC=sales.UPC
                AND products.StoreClassification='{channel}'
                AND Category IN {category_value}
                AND brands IN {brands_value}
                AND PackType IN {pack_value}
                AND Consumption IN {consumption_value}
                AND seasonalPackaging IN {seasonal_value}
                AND IsMars = 1 AND Week IN {week_value}
                AND StoreId IN {store_value}
                GROUP BY Week, StoreId"""
        else:
            query = """SELECT Week, StoreId,ROUND(SUM(POS),2) as POS,ROUND(SUM(Volume),2) as Volume,
                '{channel}' as StoreClassification
                FROM {sales_table} as sales JOIN {product_table} as products ON products.UPC=sales.UPC
                AND products.StoreClassification='{channel}'
                AND Category IN {category_value}
                AND brands IN {brands_value}
                AND PackType IN {pack_value}
                AND Consumption IN {consumption_value}
                AND seasonalPackaging IN {seasonal_value}
                AND Week IN {week_value}
                AND StoreId IN {store_value}
                GROUP BY Week, StoreId"""
        self._cbu_sales = self.execute_sql_query(query, applicability_criteria)
        return self._cbu_sales

    def get_overall_sales(self, stores, weeks, applicability_criteria=None):
        """Weekly POS/Volume for the given stores across ALL products.

        BUG FIX: ``applicability_criteria`` defaults to None but was used
        unconditionally, raising a bare TypeError; it is now validated with
        a clear error message.
        """
        if applicability_criteria is None or "businessType" not in applicability_criteria:
            raise Exception("businessType not passed to applicability criteria")
        applicability_criteria['store_value'] = tuple(stores)
        applicability_criteria['week_value'] = tuple(weeks)
        if applicability_criteria['businessType'] == "MW":
            # MW overall sales still restrict to Mars products.
            query = """SELECT Week, StoreId, ROUND(SUM(POS),2) as POS,
                ROUND(SUM(Volume),2) as Volume,'{channel}' as StoreClassification
                FROM {sales_table} as sales JOIN {product_table} as products ON products.UPC=sales.UPC
                AND products.StoreClassification='{channel}'
                AND IsMars = 1 AND Week IN {week_value}
                AND StoreId IN {store_value}
                GROUP BY Week, StoreId"""
        else:
            query = """SELECT Week, StoreId, ROUND(SUM(POS), 2) as POS,
                ROUND(SUM(Volume), 2) as Volume, '{channel}' as StoreClassification
                FROM {sales_table} as sales
                WHERE Week IN {week_value} AND StoreId IN {store_value}
                GROUP BY Week, StoreId"""
        self._overall = self.execute_sql_query(query, applicability_criteria)
        return self._overall

    def get_total_weekly_target_data(self, test_master_df, stores_list, sales_week, target_variable,
                                     applicability_criteria, test_type,
                                     consideryearweeks=None) -> Tuple[pd.DataFrame, list, str, bool]:
        """Overall weekly target-variable data over the configured window.

        NOTE: the caller-supplied ``sales_week`` is intentionally overridden
        by the per-channel value from the region config before delegating
        to the generic implementation.

        Returns (weekly target data, weeks considered, message, success flag).
        """
        sales_week = self._config['metadata']['test_configuration']\
            ['sales_weeks'][applicability_criteria['channel']]
        return super().get_total_weekly_target_data(
            target_variable=target_variable,
            test_master_df=test_master_df,
            stores_list=stores_list,
            sales_week=sales_week,
            applicability_criteria=applicability_criteria,
            test_type=test_type,
            consideryearweeks=consideryearweeks)

    def get_max_week_config_master(self, applicability_criteria=None):
        """Return the maximum week of data available per the config master.

        Reads the region's config-master table and returns the 'week' value
        of the 'max_date' row.  BUG FIX: removed stray debug ``print`` calls.
        """
        config_master = self.execute_sql_query(
            query="SELECT * FROM {table_name}",
            data={"table_name": self._config['tables']['config_mstr'],
                  "channel": applicability_criteria["channel"]})
        return config_master[config_master['key'] == 'max_date']['week'].values[0]
class FastStoresUS(Stores):
    """US-specific store-master access layer.

    Extends the generic ``Stores`` SQL helper with the US store-master
    queries: population filtering, uploaded-store lookups, and validation
    of user-uploaded store lists against the store master.
    """

    def __init__(self, config, test_id) -> None:
        super().__init__(config=config, test_id=test_id)

    def get_filtered_stores(self, applicability_criteria):
        """Return store-master rows matching the tool's filter selections.

        Parameters
        ----------
        applicability_criteria : dict
            Filter selections made in the tool; must contain ``team`` plus
            the region/territory/store-name/segment value lists.

        Returns
        -------
        Store-attribute dataframe for the selected channel and filters.
        """
        if "team" not in applicability_criteria:
            raise Exception("team is not passed to applicability criteria")
        # Empty-string sentinel keeps each SQL "IN (...)" tuple valid when a
        # filter list is empty or single-element.
        # NOTE(review): these append() calls mutate the caller's lists in
        # place on every call — confirm callers do not reuse the dict.
        applicability_criteria['regions_value'].append("")
        applicability_criteria['territory_value'].append("")
        applicability_criteria['store_name_value'].append("")
        applicability_criteria['segments_value'].append("")
        applicability_criteria['store_name_value'] = \
            tuple(applicability_criteria['store_name_value'])
        applicability_criteria['segments_value'] = \
            tuple(applicability_criteria['segments_value'])
        applicability_criteria['regions_value'] = \
            tuple(applicability_criteria['regions_value'])
        applicability_criteria['territory_value'] = \
            tuple(applicability_criteria['territory_value'])
        if applicability_criteria['team'] == 'RTM':
            # The RTM team only sees covered stores (IsCovered = 1).
            query_store_filter = """SELECT *
                            FROM {store_table}
                            WHERE StoreClassification = '{channel}'
                            AND IsCovered = 1
                            AND RegionName IN {regions_value}
                            AND TerritoryName IN {territory_value}
                            AND StoreName IN {store_name_value}
                            AND MasterChain IN {segments_value}"""
        else:
            query_store_filter = """SELECT * FROM {store_table}
                            WHERE StoreClassification = '{channel}'
                            AND RegionName IN {regions_value}
                            AND TerritoryName IN {territory_value}
                            AND StoreName IN {store_name_value}
                            AND MasterChain IN {segments_value}"""
        return self.execute_sql_query(query_store_filter, applicability_criteria)

    def get_uploaded_stores_info(self, stores_list, applicability_criteria):
        """Return store-master rows for the uploaded StoreIds, restricted to
        the channel selected in ``applicability_criteria``.
        """
        applicability_criteria['store_value'] = tuple(stores_list)
        query_uploaded_population = """SELECT *
                        FROM {store_table}
                        WHERE StoreId IN {store_value}
                        AND StoreClassification = '{channel}'"""
        return self.execute_sql_query(query_uploaded_population, applicability_criteria)

    def check_details_uploaded_stores(self, stores_list, applicability_criteria):
        """Look up uploaded values against the identifier column chosen in
        the tool (StoreId, StoreNumber or TDLinxNo), taken from
        ``applicability_criteria['store_identifier_attribute']``.
        """
        if stores_list is None:
            stores_list = []
        # -1 sentinel keeps the SQL "IN (...)" tuple non-empty/valid.
        # NOTE(review): this appends to the caller's list — callers below
        # pass a copy (stores_list[:]) to avoid it.
        stores_list.append(-1)
        applicability_criteria['store_value'] = tuple(stores_list)
        query = "SELECT * FROM {store_table} WHERE {store_identifier_attribute} IN {store_value}"
        return self.execute_sql_query(query, stores=stores_list, data=applicability_criteria)

    def validate_uploaded_presence_store_master(self, uploaded_stores,
            store_identifier, applicability_criteria) -> Tuple[pd.DataFrame, str, bool]:
        """Validate an uploaded store list against the store master.

        Checks, in order: the identifier attribute was supplied; StoreNumber
        uploads carry MasterChain info; the stores exist in the store
        master; no store belongs to a different channel; for the RTM team no
        non-covered store is present; and, for StoreNumber uploads, that no
        store number maps to multiple StoreIds.

        Returns
        -------
        (validated store details, message, success flag); on failure the
        dataframe is empty and the flag is False.
        """
        if 'store_identifier_attribute' not in applicability_criteria:
            return pd.DataFrame(),\
                "Please pass the Store identifier attribute to the function", False
        store_idn_att = applicability_criteria['store_identifier_attribute']
        if (applicability_criteria['store_identifier_attribute'] == 'StoreNumber') \
                & (('segments_value' not in applicability_criteria) \
                   or (len(applicability_criteria['segments_value']) == 0)):
            return pd.DataFrame(), "Please pass the MasterChain info to the upload function", False
        stores_list = list(uploaded_stores[store_identifier].unique())
        # Pass a copy: check_details_uploaded_stores appends a -1 sentinel.
        upld_str_dtls = self.check_details_uploaded_stores(stores_list=stores_list[:],
            applicability_criteria=applicability_criteria)
        str_iden_clmn = self._storemstrmapping['partner_id']
        banner_clmn = self._config["store_mstr_columns"]["banner"]
        covered_clmn = self._storemstrmapping["is_covered"]
        if upld_str_dtls.shape[0] == 0:
            return upld_str_dtls, "All uploaded stores are not present in Store Master!!", False
        # Drop the -1 sentinel before counting uploaded stores.
        stores_list = list(set(stores_list) - set([-1]))
        message = "Out of {uploaded_stores} uploaded stores, {stores_present} in store master"\
            .format(uploaded_stores=len(stores_list),
                    stores_present=len(set(upld_str_dtls[store_idn_att].unique()) - set([-1])))
        # Reject stores belonging to a different channel.
        # NOTE(review): this branch only fires when NO store matches the
        # selected channel; a mixed upload (some matching, some not) passes
        # through and the mismatches are filtered out below — confirm this
        # is the intended behavior.
        if applicability_criteria["channel"] not in upld_str_dtls[banner_clmn].unique():
            diff_banner_str = upld_str_dtls[upld_str_dtls[banner_clmn] != applicability_criteria["channel"]]
            return pd.DataFrame(), "Please remove!! Stores belonging to other channels {} \n"\
                .format(diff_banner_str[store_idn_att].values.tolist()), False
        check_details = upld_str_dtls[upld_str_dtls[banner_clmn] == applicability_criteria["channel"]]
        message = "Out of {uploaded_stores} valid stores, {stores_present} in selected channel"\
            .format(uploaded_stores=len(set(upld_str_dtls[store_idn_att].unique()) - set([-1])),
                    stores_present=check_details[store_idn_att].nunique()) + '\n' + message
        upld_str_dtls = check_details
        # For the RTM team, any non-covered (IsCovered = 0) store is an error.
        if (applicability_criteria["team"] == "RTM") & (0 in upld_str_dtls[covered_clmn].unique()):
            return pd.DataFrame(), "Please remove!! NON-RTM stores found. List of {} that belongs to NON-RTM: {}\n"\
                .format(store_idn_att,
                        upld_str_dtls[upld_str_dtls[covered_clmn] == 0]\
                        [store_idn_att].unique()) + message, False
        if applicability_criteria["team"] == "RTM":
            check_details = upld_str_dtls[upld_str_dtls[covered_clmn] == 1]
            upld_str_dtls = check_details
        # For StoreNumber uploads, each store number must map to exactly one
        # StoreId in the store master.
        if store_identifier == 'StoreNumber':
            map_store_id = upld_str_dtls.groupby('StoreNumber').aggregate({str_iden_clmn: 'nunique'}).reset_index()
            if map_store_id[map_store_id[str_iden_clmn] > 1].shape[0] > 0:
                return pd.DataFrame(), \
                    "Please remove!!Some Store Numbers({}) are mapped to multiple StoreIds\n"\
                    .format(list(map_store_id[map_store_id[str_iden_clmn] > 1]\
                            ['StoreNumber'].unique())) + message, False
        return upld_str_dtls, message, True
# UK region configuration for the FAST tool.
# Table names point at the [FAST_UK] schema; column-name mappings translate
# the generic DS-code keys to the UK store/sales master column names.
config_uk = {
    # Region-specific class names instantiated by the factory.
    "Constructors": {
        "Sales": "FastSalesUK",
        "Stores": "FastStoresUK",
        "Tool": "FastToolUK",
    },
    "feature_parameter": {
        "is_product_present": 1,
        "active_store_filter_type": "test",
        "test_variable_dates": 0,
        "control_store_buffer": 1.2,
        "data_continuity_check": 1,
    },
    # Generic key -> UK store-master column name.
    "store_mstr_columns": {
        "banner": "Customer_Group",
        "segment": "Customer_Chain",
        "territory": "Territory",
        "storegrid": "Sub_Channel",
        "partner_id": "Customer_Number",
        "baycount": "",
        "partner_id_backup": "Customer_Number",
        "FSR": "Sales_Representative",
    },
    "Test_store_Partner_ID_backup": "Test_store_Customer_Number",
    # Display names used in the UI / reports.
    "heading_names": {
        "banner": "Banner",
        "segment": "Segment",
        "territory": "Territory",
        "storegrid": "Overall Segment",
        "partner_id": "Customer Name",
        "store_segment": "Store Segment",
        "currency": "£",
    },
    # Generic key -> UK weekly sales-master column name.
    "weekly_target_variable": {
        "banner": "Customer_Group",
        "banner_code": "Store_Number",
        "partner_id": "Customer_Number",
        "rsv": "RSV",
        "volume": "Volume",
        "week_no": "Week Number",
        "year": "Year",
        "segment": "Customer_Chain",
        "week": "Week",
        "overall_segment": "Sub_Channel",
        "territory": "Territory",
        "RSV": "RSV",
        "cbulvl1": "CBU_Lvl1",
        "packformat": "Pack_Format",
    },
    "tables": {
        "control_store_mstr": "[FAST_UK].[Tl_Controlstore_Mstr]",
        "measurement": "[FAST_UK].[Tl_Measurement_Tbl]",
        "record_mstr": "[FAST_UK].[Tl_RecordMstr]",
        "store_mstr": "[FAST_UK].[Tl_StoreMstr]",
        "test_mstr": "[FAST_UK].[Tl_TestMstr]",
        "test_store_map": "[FAST_UK].[Tl_Teststore_map]",
        "weekly_mstr": "[FAST_UK].[Tl_Weekly_target_mst]",
        "upload_stores": "[FAST_UK].[Tl_Upload_store_population]",
        "config_mstr": "[FAST_UK].[Tl_ConfigMstr]",
        "visit_mstr": "[FAST_UK].[Tl_Visit_Data]",
        "pack_mstr": "[FAST_UK].[Tl_Pack_Format]",
        "cbu_mstr": "[FAST_UK].[Tl_CBU_Lvl1]",
    },
    "metadata": {
        "test_configuration": {
            "sales_weeks": 104,
            "sales_lifts_sales_weeks": 52,
            "sales_diff_percentage": 10,
            "power_of_test": 0.7,
            "min_teststores": 30,
            # Raw-to-RSV conversion factors per banner.
            "rawconvfactors": {
                "CO-OP": 0.18,
                "ASDA": 0.25,
                "TESCO": 0.21,
                "POUNDLAND": 0.15,
                "SAINSBURY": 0.18,
                "MORRISONS": 0.21,
            },
        },
        "test_planning": {
            "default_stratification": ["Customer_Group"],
            "test_vs_population_compare": [
                "total_checkout_locations",
                "Store_Size_Sq_Ft",
                "Manned_Checkouts",
            ],
            "test_vs_population_compare_summary": [
                "Store_Size_Sq_Ft",
                "Manned_Checkouts",
            ],
            "sampling_iterations": 10,
            "test_vs_population_pvalue": 0.8,
            "test_vs_control_compare": ["Customer_Group", "Customer_Chain"],
            "test_vs_control_compare_summary": [],
            "business_category_specific_compare": [],
            "business_categories_count": 0,
            "test_vs_control_pvalue": 0.8,
            "test_vs_control_similaritymeasure_difference_threshold": 0.05,
            "summary_sales_weeks": 52,
            "validate_datapoints_multiplier": 2,
            "teststores_columns": [
                "Customer_Number",
                "Sales_Representative",
                "Customer_Group",
                "Territory",
                "Store_Size_Sq_Ft",
                "Customer_Chain",
            ],
            "upload_stores_identifier": "Customer_Number",
            "upload_teststores_identifier": "Test_store_Customer_Number",
            "upload_controlstores_identifier": "Control_store_Customer_Number",
            "user_populationstores_columns": {
                "Customer_Number": "int64"
            },
            "user_teststores_columns": {
                "Test_store_Customer_Number": "int64"
            },
            # BUG FIX: this key was declared twice with identical values;
            # the duplicate literal entry has been removed.
            "control_storespool_columns": {
                "Control_store_Customer_Number": "int64"
            },
            "confidence_level": 0.85,
            "similarity_measure": 0.7,
            "correlation": 0.4,
            "margin_of_error": 0.04,
            "power_of_test": 0.7,
            "power_values": [60, 65, 70, 75, 80, 85, 90, 95],
            "user_testcontrolstores_columns": {
                "Test_store_Customer_Number": "int64",
                "Control_store_Customer_Number": "int64",
            },
        },
        "test_measurement": {
            "probability_thresholds": [0.60, 0.85, 1],
            "testmeasurement_columns": [
                "Customer_Number",
                "Sales_Representative",
                "Customer_Group",
                "Territory",
                "Sub_Channel",
                "Store_Size_Sq_Ft",
                "Customer_Chain",
            ],
            "user_customgroup_columns": {
                "Test_store_Customer_Number": "int64",
                "Group": "object",
            },
        },
    },
    # Upload-template files served to the user.
    "filePath": {
        "TestStore": {
            "file_name": "/DSCode/regions/UK/upload_templates/Upload_Teststores_Template_UK.xlsx"
        },
        "controlStore": {
            "file_name": "/DSCode/regions/UK/upload_templates/Test_Control_Pairs_Upload_Template_UK.xlsx"
        },
        "controlStore_Pool": {
            "file_name": "/DSCode/regions/UK/upload_templates/Control_Pairs_Pool_Upload_Template_UK.xlsx"
        },
        "RSV_STORES": {
            "file_name": "/DSCode/regions/UK/upload_templates/Upload_Population_Template_UK.xlsx"
        },
    },
    "excel_header": {
        "test_store": "Test_store_Customer_Number",
        "control_store": "Control_store_Customer_Number",
    },
    "report_generate": {
        "common": {"region_name": "UNITED KINGDOM", "flag_name": "flag_UK.png"},
        "control_compare_variable": ["Touchability_Score", "Store_Size_Sq_Ft"],
        "test_compare_variable": ["Touchability_Score", "Store_Size_Sq_Ft"],
        "store_feature": ["Customer Group"],
        "row_span": 2,
        "matching_criteria": [
            "Customer_Group",
            "Territory",
            "Touchability_Score",
            "Store_Size_Sq_Ft",
        ],
    },
    "result_grid_excel": {
        "header_data": ["Week", "Category", "Metric", "Variable", "Value"],
        "category_format": {
            "Confectionary_Combination_1": "category1",
            "Confectionary_Combination_2": "category2",
            "Confectionary_Combination_3": "category3",
            "Confectionary_Combination_4": "category4",
        },
    },
}
from datetime import datetime
from typing import Tuple
import pandas as pd
from DSCode.library.sql.sales_master import Sales
from DSCode.library.sql.stores_master import Stores
class FastStoresUK(Stores):
    """UK-specific store-master access layer.

    Extends the generic ``Stores`` SQL helper with the UK population
    filters (Customer_Status handling) and the UK store-master queries.
    """

    def __init__(self, config, test_id) -> None:
        self._config = config
        super().__init__(config=self._config, test_id=test_id)

    def filter_population(self, applicability_criteria,
                          storelist=None, uploaded_file_df=None) -> pd.DataFrame:
        """Return the store population, restricted by Customer_Status.

        Delegates to the generic implementation, then — for every test type
        except "RTM Impact Test" — keeps only stores whose Customer_Status
        is among the statuses selected in the tool.
        """
        if storelist is None:
            storelist = []
        stores_master_df = super().filter_population(
            storelist=storelist,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)
        if applicability_criteria["test_type"] != "RTM Impact Test":
            stores_master_df = stores_master_df[
                stores_master_df['Customer_Status'].isin(applicability_criteria['Customer_Status'])
            ]
        return stores_master_df

    def get_filtered_stores(self, applicability_criteria) -> pd.DataFrame:
        """Return store-master rows matching the tool's filter selections.

        BUG FIX: the empty-string SQL sentinel used to be ``append``-ed to
        the caller's lists, so every call permanently grew the lists inside
        ``applicability_criteria``; the SQL tuples are now built without
        mutating the inputs.
        """
        def _sql_tuple(values):
            # The "" sentinel keeps "IN (...)" valid for empty or
            # single-element filter lists.
            return tuple(values) + ("",)

        filter_store_query = """Select * from {table}
                where Customer_Group IN {banners}
                and Sub_Channel IN {segments}
                and Customer_Chain IN {store_segments}
                and Territory IN {territories}
                and Customer_Status IN {Customer_Status}"""
        return self.execute_sql_query(filter_store_query, data={
            "table": self._config['tables']['store_mstr'],
            "banners": _sql_tuple(applicability_criteria["banners"]),
            "segments": _sql_tuple(applicability_criteria["segments"]),
            "store_segments": _sql_tuple(applicability_criteria["store_segments"]),
            "territories": _sql_tuple(applicability_criteria["territories"]),
            "Customer_Status": _sql_tuple(applicability_criteria["Customer_Status"]),
        })

    def get_uploaded_stores_info(self, stores_list, applicability_criteria) -> pd.DataFrame:
        """Return store-master rows for the uploaded Customer_Numbers."""
        query_uploaded_population = "SELECT * FROM {table} WHERE Customer_Number IN {store_value}"
        return self.execute_sql_query(query_uploaded_population, data={
            "table": self._config['tables']['store_mstr'],
            "store_value": tuple(stores_list)})
class FastSalesUK(Sales):
    """UK-specific weekly-sales access layer.

    Extends the generic ``Sales`` SQL helper with the UK queries for
    CBU/pack-format-filtered sales and overall sales, plus the week-window
    selection logic (including the POUNDLAND week-201739 exclusion) used
    for the lift calculations.
    """

    def __init__(self, config, test_id) -> None:
        self._config = config
        super().__init__(
            config=config, test_id=test_id
        )

    def get_cbu_sales(self, stores, applicability_criteria, weeks) -> pd.DataFrame:
        """Weekly RSV/Volume of the selected CBU level-1 categories and pack
        formats, aggregated per (Customer_Group, Customer_Number, Week).
        """
        # Translate the CBU level-1 selections into the mapping codes stored
        # on the weekly fact table.
        cbu_lvl_query = (
            "Select CBU_Lvl1_Mapping from {table} where CBU_Lvl1 IN {cat_list}"
        )
        data = {
            "cat_list": tuple(applicability_criteria["cbu_lvl1_categories"]),
            "table": self._config["tables"]["cbu_mstr"],
        }
        cbu_lvl1_mapping_list = pd.DataFrame(self.execute_sql_query(cbu_lvl_query, data=data))
        cbu_lvl1_mapping_list = cbu_lvl1_mapping_list["CBU_Lvl1_Mapping"].to_list(
        )
        # Same translation for the pack-format selections.
        pack_format_query = (
            "Select Pack_Format_Mapping from {table} where Pack_Format IN {cat_list}"
        )
        data = {
            "cat_list": tuple(applicability_criteria["pack_lvl_categories"]),
            "table": self._config["tables"]["pack_mstr"],
        }
        pack_format_mapping_list = pd.DataFrame(
            self.execute_sql_query(pack_format_query, data=data))
        pack_format_mapping_list = pack_format_mapping_list["Pack_Format_Mapping"].to_list()
        # -1 sentinel keeps the SQL "IN (...)" tuples valid when a mapping
        # list is empty or single-element.
        pack_format_mapping_list.append(-1)
        cbu_lvl1_mapping_list.append(-1)
        customer_list = stores
        sqlquery = """SELECT Customer_Group,Customer_Number, Week, SUM(RSV) as RSV, SUM(Volume) as Volume
                FROM {table}
                WHERE Week IN {weeks_val} AND CBU_Lvl1_Mapping IN {cbu_lvl_val}
                AND Pack_Format_Mapping IN {pack_format_val} AND Customer_Number IN {stores_val}
                GROUP By Customer_Group, Customer_Number, Week """
        data = {
            "table": self._config["tables"]["weekly_mstr"],
            "weeks_val": tuple(weeks),
            "cbu_lvl_val": tuple(cbu_lvl1_mapping_list),
            "pack_format_val": tuple(pack_format_mapping_list),
            "stores_val": tuple(customer_list),
        }
        return pd.DataFrame(self.execute_sql_query(sqlquery, data=data))

    def get_overall_sales(self, stores, applicability_criteria, weeks):
        """Weekly RSV/Volume across all products for the given stores,
        carrying through the 'imputed' flag from the weekly fact table.
        """
        sqlquery = """SELECT Customer_Group,Customer_Number, Week, SUM(RSV) as RSV,
                SUM(Volume) as Volume,imputed
                FROM {weekly_mstr_table}
                WHERE Week IN {weeks_val} AND Customer_Number IN {stores_val}
                GROUP By Customer_Group, Customer_Number, Week,imputed"""
        return pd.DataFrame(self.execute_sql_query(sqlquery,
            data={"weekly_mstr_table": self._config['tables']['weekly_mstr'],
                  "weeks_val": tuple(weeks), "stores_val": tuple(stores)}))

    def get_valid_weekly_target_data(self, stores, applicability_criteria,
            target_variable, test_master_df, test_type, sales_week,
            consideryearweeks=None) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
        """Select the week window for the test and pull the weekly data.

        For "RTM Impact Test" the window is the pre-period plus the
        post-period (capped at the latest week available per the config
        master); for all other test types it is the last ``sales_week``
        weeks before the pre-period end.  For a pure POUNDLAND selection,
        week 201739 is excluded (data gap for that banner).

        Returns (merged sales, overall sales, CBU sales, weeks considered,
        message, success flag) from the generic implementation.
        """
        # Window boundaries come from the test master as ISO date strings.
        prewindow_start = datetime.strptime(test_master_df["pre_start"]\
            .values[0], '%Y-%m-%d').date()
        prewindow_end = datetime.strptime(test_master_df["pre_end"]\
            .values[0], '%Y-%m-%d').date()
        postwindow_start = datetime.strptime(test_master_df["testwin_start"]\
            .values[0], '%Y-%m-%d').date()
        postwindow_end = datetime.strptime(test_master_df["testwin_end"]\
            .values[0], '%Y-%m-%d').date()
        if test_type == "RTM Impact Test":
            pre_window_yearweeks = self.find_weeks(prewindow_start,
                                                   prewindow_end)
            post_window_yearweeks = self.find_weeks(postwindow_start,
                                                    postwindow_end)
            # Cap the post window at the latest week of data available.
            max_week_data_available = self.get_max_week_config_master(
                applicability_criteria)
            post_window_yearweeks = [
                i for i in post_window_yearweeks if i <= int(max_week_data_available)]
            if len(applicability_criteria['banners']) == 1 and\
                    'POUNDLAND'.upper() in map(str.upper,
                                               applicability_criteria['banners']):
                if 201739 in pre_window_yearweeks:
                    pre_window_yearweeks.remove(201739)
            all_weeks = set(pre_window_yearweeks).union(post_window_yearweeks)
            # NOTE(review): all_weeks is a set, so the iteration order passed
            # to the base implementation is unspecified — confirm the base
            # class does not rely on week ordering here.
            weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, consideryearweeks, \
                message, success_flag = super().get_valid_weekly_target_data(
                    stores=stores,
                    applicability_criteria=applicability_criteria,
                    target_variable=target_variable,
                    test_master_df=test_master_df,
                    test_type=test_type,
                    sales_week=sales_week,
                    consideryearweeks=all_weeks)
            return weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, consideryearweeks, \
                message, success_flag
        else:
            # Non-RTM tests: take the most recent `sales_week` weeks from the
            # 104 weeks preceding the baseline end, then restore ascending order.
            yearweeks = self.find_last104_weeks_from_baseline_end(prewindow_end)
            if len(applicability_criteria['banners']) == 1 \
                    and 'POUNDLAND'.upper() in map(str.upper, applicability_criteria['banners']):
                if 201739 in yearweeks:
                    yearweeks.remove(201739)
            yearweeks.sort(reverse=True)
            consideryearweeks = yearweeks[:sales_week]
            consideryearweeks.sort(reverse=False)
            weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, _, \
                message, success_flag = super().get_valid_weekly_target_data(
                    stores=stores,
                    applicability_criteria=applicability_criteria,
                    target_variable=target_variable,
                    test_master_df=test_master_df,
                    test_type=test_type,
                    sales_week=sales_week,
                    consideryearweeks=consideryearweeks)
            return weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, consideryearweeks, \
                message, success_flag

    def get_annual_rsv_lifts(self, target_variable, test_master_df, stores,
            applicability_criteria, test_type) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
        """Annual lift of the target variable: pulls the weekly data, splits
        the considered weeks into two halves (or pre/post lists for an RTM
        Impact Test) and delegates the lift math to ``_lift_calculation_util``.

        Returns (annual lift data, merged sales, overall sales, CBU sales,
        weeks considered, message, success flag).
        """
        # Week-window sizes come from the region config.
        sales_week = self.get_sales_weeks(applicability_criteria)
        sales_lifts_sales_weeks = self.get_lift_sales_weeks(applicability_criteria)
        weekly_ovrl_cbu_sales, weekly_overall_sales, weekly_cbu_sales, consideryearweeks,\
            message, success_flag = self.get_valid_weekly_target_data(
                stores=stores,
                applicability_criteria=applicability_criteria,
                target_variable=target_variable,
                test_master_df=test_master_df,
                test_type=test_type,
                sales_week=sales_week)
        if success_flag is False:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), consideryearweeks, message, False
        if test_type == "RTM Impact Test":
            # NOTE(review): here consideryearweeks is assumed to be a pair of
            # week lists (pre, post) as returned by the base implementation
            # for this test type — confirm against the base class.
            weeks1 = consideryearweeks[0]
            weeks2 = consideryearweeks[1]
        else:
            # Split the flat week list into two consecutive halves.
            weeks1 = consideryearweeks[:sales_lifts_sales_weeks]
            weeks2 = consideryearweeks[sales_lifts_sales_weeks:]
        annualrsvdatamerged, _, success_flag = self\
            ._lift_calculation_util(weekly_sales=weekly_ovrl_cbu_sales,
                                    first_half_weeks=weeks1,
                                    second_half_weeks=weeks2,
                                    target_variable=target_variable)
        return annualrsvdatamerged, weekly_ovrl_cbu_sales, weekly_overall_sales, weekly_cbu_sales,\
            consideryearweeks, "Annual Lift calculated Successfully!", True
import numpy as np
from datetime import datetime
from scipy.sparse import issparse
def gower_matrix(data_x, data_y=None, weight=None, cat_features=None):
    """Compute the Gower distance matrix between the rows of two datasets.

    Gower distance handles mixed data: categorical columns contribute a 0/1
    mismatch term and numeric columns a range-normalised absolute difference.

    Parameters
    ----------
    data_x : array-like or DataFrame of shape (n_x, p)
    data_y : optional array-like with the same columns; defaults to ``data_x``
    weight : optional per-column weights (defaults to all ones)
    cat_features : optional boolean mask marking categorical columns; when
        omitted it is inferred from the dtypes.

    Returns
    -------
    numpy.ndarray of shape (n_x, n_y), float32 distances.

    Raises
    ------
    TypeError
        If X/Y columns differ or a sparse matrix is passed.
    """
    X = data_x
    Y = data_x if data_y is None else data_y
    if not isinstance(X, np.ndarray):
        # DataFrame input: columns must match by name.
        if not np.array_equal(X.columns, Y.columns):
            raise TypeError("X and Y must have same columns!")
    else:
        if not X.shape[1] == Y.shape[1]:
            raise TypeError("X and Y must have same y-dim!")
    if issparse(X) or issparse(Y):
        raise TypeError("Sparse matrices are not supported!")
    x_n_rows, x_n_cols = X.shape
    y_n_rows, y_n_cols = Y.shape
    if cat_features is None:
        if not isinstance(X, np.ndarray):
            # Columns with a non-numeric dtype are treated as categorical.
            is_not_number = np.vectorize(lambda dt: not np.issubdtype(dt, np.number))
            cat_features = is_not_number(X.dtypes)
        else:
            cat_features = np.zeros(x_n_cols, dtype=bool)
            for col in range(x_n_cols):
                if not np.issubdtype(type(X[0, col]), np.number):
                    cat_features[col] = True
    else:
        cat_features = np.array(cat_features)
    if not isinstance(X, np.ndarray):
        X = np.asarray(X)
    if not isinstance(Y, np.ndarray):
        Y = np.asarray(Y)
    # Stack both datasets so ranges/maxima are computed over the union.
    Z = np.concatenate((X, Y))
    x_index = range(0, x_n_rows)
    y_index = range(x_n_rows, x_n_rows + y_n_rows)
    Z_num = Z[:, np.logical_not(cat_features)]
    num_cols = Z_num.shape[1]
    num_ranges = np.zeros(num_cols)
    num_max = np.zeros(num_cols)
    for col in range(num_cols):
        col_array = Z_num[:, col].astype(np.float32)
        # Idiom fix: renamed from `max`/`min` so the builtins are not shadowed.
        col_max = np.nanmax(col_array)
        col_min = np.nanmin(col_array)
        if np.isnan(col_max):
            col_max = 0.0
        if np.isnan(col_min):
            col_min = 0.0
        num_max[col] = col_max
        # Relative range of the column; 0 when the column maximum is 0.
        num_ranges[col] = np.absolute(1 - col_min / col_max) if (col_max != 0) else 0.0
    # Normalise numeric values to [0, 1] (columns with max 0 stay 0).
    Z_num = np.divide(Z_num, num_max, out=np.zeros_like(Z_num), where=num_max != 0)
    Z_cat = Z[:, cat_features]
    if weight is None:
        weight = np.ones(Z.shape[1])
    weight_cat = weight[cat_features]
    weight_num = weight[np.logical_not(cat_features)]
    out = np.zeros((x_n_rows, y_n_rows), dtype=np.float32)
    weight_sum = weight.sum()
    X_cat = Z_cat[x_index, ]
    X_num = Z_num[x_index, ]
    Y_cat = Z_cat[y_index, ]
    Y_num = Z_num[y_index, ]
    for i in range(x_n_rows):
        j_start = i
        # Rectangular case: the full row must be computed (no symmetry).
        if x_n_rows != y_n_rows:
            j_start = 0
        res = gower_get(X_cat[i, :], X_num[i, :],
                        Y_cat[j_start:y_n_rows, :], Y_num[j_start:y_n_rows, :],
                        weight_cat, weight_num, weight_sum,
                        cat_features, num_ranges, num_max)
        out[i, j_start:] = res
        # Square (symmetric) case: mirror the row into the column.
        if x_n_rows == y_n_rows:
            out[i:, j_start] = res
    return out
def str_to_date(date_str):
    """Parse a ``YYYY-MM-DD`` string (or coercible value) into a date."""
    parsed = datetime.strptime(str(date_str), '%Y-%m-%d')
    return parsed.date()
def gower_get(xi_cat, xi_num, xj_cat, xj_num, feature_weight_cat,
              feature_weight_num, feature_weight_sum, categorical_features,
              ranges_of_numeric, max_of_numeric):
    """Weighted Gower distance between one row and a block of rows.

    Categorical features add their weight on mismatch; numeric features add
    their range-normalised absolute difference. The result is the weighted
    sum divided by the total feature weight.
    """
    # Categorical part: 1 where values differ, 0 where they are equal.
    cat_mismatch = np.where(xi_cat == xj_cat,
                            np.zeros_like(xi_cat),
                            np.ones_like(xi_cat))
    cat_total = np.multiply(feature_weight_cat, cat_mismatch).sum(axis=1)
    # Numeric part: |xi - xj| scaled by the column range (0 when range is 0).
    delta = np.absolute(xi_num - xj_num)
    num_scaled = np.divide(delta, ranges_of_numeric,
                           out=np.zeros_like(delta),
                           where=ranges_of_numeric != 0)
    num_total = np.multiply(feature_weight_num, num_scaled).sum(axis=1)
    combined = np.add(cat_total, num_total)
    return np.divide(combined, feature_weight_sum)
def smallest_indices(ary, n):
    """Return the indices and values of the ``n`` smallest entries of ``ary``.

    Documentation fix: the old docstring claimed "largest", but the function
    selects the smallest values. NaNs are replaced with 999 so they sort
    last. Indices refer to positions in the flattened array and are ordered
    by ascending value.
    """
    flat = np.nan_to_num(ary.flatten(), nan=999)
    # argpartition on the negated array places the n smallest values last.
    indices = np.argpartition(-flat, -n)[-n:]
    indices = indices[np.argsort(flat[indices])]
    values = flat[indices]
    return {'index': indices, 'values': values}
def gower_topn(data_x, data_y=None, weight=None, cat_features=None, n=5):
    """Return the ``n`` rows of ``data_y`` nearest to the single row ``data_x``.

    Parameters mirror :func:`gower_matrix`; NaN distances are treated as the
    maximum distance 1 before ranking.

    Raises
    ------
    TypeError
        If ``data_x`` has more than one row.
    """
    # Bug fix: the original constructed the TypeError without raising it,
    # so multi-row input was silently accepted.
    if data_x.shape[0] >= 2:
        raise TypeError("Only support `data_x` of 1 row. ")
    dm = gower_matrix(data_x, data_y, weight, cat_features)
    return smallest_indices(np.nan_to_num(dm[0], nan=1), n)
from datetime import datetime, timedelta
from typing import Tuple
import pandas as pd
from DSCode.library.ds_common_functions import str_to_date
from .utility.sql_utility import SqlUtility
class Sales (SqlUtility):
"""
A class to represent features of Sales.
...
Attributes
----------
Methods
-------
"""
def __init__(self, config, test_id=None):
    """Initialise the sales data-access object.

    Parameters
    ----------
    config : region (or overall) configuration from ``config_data``
    test_id : optional identifier of the current test
    """
    super().__init__(config)  # base class stores the config on self._config
    self._test_id = test_id
    cfg = self._config
    # Shortcuts into the configuration used throughout this class.
    self._tarvarmapping = cfg["weekly_target_variable"]
    self._metadata = cfg["metadata"]["test_configuration"]
def get_sales_weeks(self, applicability_criteria) -> int:
    """Number of weeks of sales to consider.

    The value in ``applicability_criteria['sales_weeks']`` (the tool's
    product/store filter selection) takes priority over the region config
    default — e.g. 104 (52 * 2) weeks when computing a yearly lift.
    """
    key = 'sales_weeks'
    if key in applicability_criteria:
        return applicability_criteria[key]
    return self._metadata[key]
def get_lift_sales_weeks(self, applicability_criteria) -> int:
    """Number of weeks per period used in lift calculation.

    ``applicability_criteria['sales_lifts_sales_weeks']`` takes priority
    over the region config default — e.g. 52 weeks per period for a
    yearly lift.
    """
    key = 'sales_lifts_sales_weeks'
    if key in applicability_criteria:
        return applicability_criteria[key]
    return self._metadata[key]
def get_summary_sales_weeks(self, applicability_criteria):
    """Number of weeks of sales used for summary calculations.

    ``applicability_criteria`` takes priority; otherwise the value comes
    from ``config['metadata']['test_planning']['summary_sales_weeks']``.
    """
    key = 'summary_sales_weeks'
    if key in applicability_criteria:
        return applicability_criteria[key]
    return self._config['metadata']['test_planning'][key]
def get_cbu_sales(self, stores, applicability_criteria, weeks) -> pd.DataFrame:
    """Weekly CBU (selected-product) sales and volume per store.

    Base-class stub: region implementations override this with a query
    against the weekly sales table. This version only logs its inputs and
    returns an empty frame.

    Parameters
    ----------
    stores : list of store identifiers
    applicability_criteria : dict of product/store filters from the tool
    weeks : list of week values to aggregate over
    """
    # Stub behaviour — log the call and return no rows.
    print("{} {} {} {}".format(self._test_id, stores, applicability_criteria, weeks))
    return pd.DataFrame()
def get_overall_sales(self, stores, weeks, applicability_criteria=None) -> pd.DataFrame:
    """Weekly overall sales and volume (no product filter) per store.

    Base-class stub: region implementations override this with a query
    against the weekly sales table. This version only logs its inputs and
    returns an empty frame.

    Parameters
    ----------
    stores : list of store identifiers
    weeks : list of week values to aggregate over
    applicability_criteria : dict of product/store filters from the tool
    """
    # Stub behaviour — log the call and return no rows.
    print("{} {} {} {} ".format(self._test_id, stores, applicability_criteria, weeks))
    return pd.DataFrame()
def get_max_week_config_master(self, applicability_criteria=None) -> str:
    """Return the ``max_date`` week maintained in the config master table.

    Parameters
    ----------
    applicability_criteria : dict, optional
        Product/store filters from the tool; only logged here.

    Returns
    -------
    The week value stored under ``key_name == 'max_date'``.
    """
    # Bug fix: the original called .keys() unconditionally, which raised
    # AttributeError whenever the default ``None`` was used.
    if applicability_criteria is not None:
        print(len(applicability_criteria.keys()))
    config_master = self.execute_sql_query(
        query="SELECT * FROM {table_name}",
        data={"table_name": self._config['tables']['config_mstr']})
    return config_master[config_master['key_name'] == 'max_date']['week'].values[0]
def get_valid_weekly_target_data(self, stores, applicability_criteria,
                                 target_variable, test_master_df, test_type,
                                 sales_week, consideryearweeks=None) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
    """Fetch weekly product and overall sales, merge them and validate.

    Computes the weeks to consider (unless ``consideryearweeks`` is given),
    pulls overall sales and — when ``feature_parameter.is_product_present``
    is 1 — CBU sales, merges the two at store/week level, drops stores with
    zero CBU sales and, when ``feature_parameter.data_continuity_check`` is
    1, stores lacking a full run of ``sales_week`` weeks.

    Parameters
    ----------
    stores : list of store identifiers (not mutated)
    applicability_criteria : dict of product/store filters from the tool
    target_variable : sales or volume column name
    test_master_df : dataframe holding the current test's record
    test_type : test type selected in the tool (logged only, here)
    sales_week : number of weeks to validate sales over
    consideryearweeks : optional pre-computed list of weeks

    Returns
    -------
    (merged sales, overall sales, cbu sales, weeks, message, success flag)
    """
    if target_variable is None:
        # Robustness fix: the original fell off the end and implicitly
        # returned ``None``, crashing every caller that unpacks the tuple.
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), [], \
            "No target variable provided", False
    if consideryearweeks is None:
        consideryearweeks = []
    if not consideryearweeks:
        # Derive the last `sales_week` weeks ending at the pre-period end.
        yearweeks = self.find_last104_weeks_from_baseline_end(
            str_to_date(test_master_df['pre_end'].values[0]))
        yearweeks.sort(reverse=True)
        consideryearweeks = yearweeks[:sales_week]
        consideryearweeks.sort(reverse=False)
    # Bug fix: copy before appending the sentinel store id so the caller's
    # list is not mutated.
    stores = list(stores) + [-1]
    print(test_type)
    # Overall (unfiltered) sales for the stores.
    weekly_overal_level_sales = self.get_overall_sales(
        stores=stores,
        applicability_criteria=applicability_criteria,
        weeks=consideryearweeks)
    if weekly_overal_level_sales.shape[0] == 0:
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), consideryearweeks, \
            "Overall sales for stores not found", False
    store_identifier = self._tarvarmapping['partner_id']
    weekly_cbu_level_sales = pd.DataFrame()
    if self._config["feature_parameter"]["is_product_present"] == 1:
        # CBU (selected-product) sales for the same stores/weeks.
        weekly_cbu_level_sales = self.get_cbu_sales(
            stores=stores,
            applicability_criteria=applicability_criteria,
            weeks=consideryearweeks)
        if weekly_cbu_level_sales.shape[0] == 0:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), consideryearweeks, \
                "CBU sales for stores not found", False
        rsv_lbl = self._tarvarmapping["rsv"]
        vol_lbl = self._tarvarmapping['volume']
        # Prefix the CBU target columns so they survive the merge.
        weekly_cbu_level_sales.rename(
            columns={rsv_lbl: "CBU_Category_" + rsv_lbl,
                     vol_lbl: 'CBU_Category_' + vol_lbl},
            inplace=True)
        weekly_merged_level_sales = weekly_overal_level_sales.merge(
            weekly_cbu_level_sales,
            on=[store_identifier,
                self._tarvarmapping["banner"],
                self._tarvarmapping['week']])
        # Drop stores with zero CBU sales for the chosen target variable.
        # (Bug fix: the original if/else here had two byte-identical
        # branches — collapsed into a single expression.)
        eliminatestores1 = weekly_merged_level_sales[weekly_merged_level_sales[
            "CBU_Category_" + target_variable] == 0][store_identifier].unique()
        weekly_merged_level_sales = weekly_merged_level_sales[~(
            weekly_merged_level_sales[store_identifier].isin(eliminatestores1))]
    else:
        weekly_merged_level_sales = weekly_overal_level_sales
    if self._config['feature_parameter']['data_continuity_check'] == 1:
        # Keep only stores that have data for every required week.
        weekcountsdf = weekly_merged_level_sales \
            .groupby(store_identifier)[self._tarvarmapping["week"]] \
            .nunique() \
            .reset_index() \
            .rename(columns={self._tarvarmapping["week"]: "Week_Count"})
        eliminatestores2 = weekcountsdf[weekcountsdf["Week_Count"]
                                        < sales_week][store_identifier].unique()
        if len(eliminatestores2) > 0:
            weekly_merged_level_sales = weekly_merged_level_sales[~(
                weekly_merged_level_sales[store_identifier].isin(eliminatestores2))]
        if weekly_merged_level_sales.shape[0] == 0:
            return weekly_merged_level_sales, weekly_overal_level_sales, \
                weekly_cbu_level_sales, consideryearweeks, \
                "No store match with continuity criteria! Modify parameter selected", \
                False
    if weekly_merged_level_sales.shape[0] == 0:
        return weekly_merged_level_sales, weekly_overal_level_sales, \
            weekly_cbu_level_sales, consideryearweeks, \
            "No common week-store pair found in overall and cbu sales", \
            False
    print("Unique weeks",
          weekly_merged_level_sales[self._tarvarmapping['week']].nunique())
    return weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, \
        consideryearweeks, "Valid Sales calculated Successfully!!", True
def get_sales_calculate_rsv(self, stores, target_variable,
                            applicability_criteria, consideryearweeks) -> Tuple[pd.DataFrame, list]:
    """Total sales over ``consideryearweeks`` for the population stores.

    Uses CBU sales when the region maintains product attributes
    (``feature_parameter.is_product_present == 1``), otherwise overall
    sales.

    Parameters
    ----------
    stores : list of store identifiers (not mutated)
    target_variable : sales/volume column; when ``None`` nothing is fetched
    applicability_criteria : dict of product/store filters from the tool
    consideryearweeks : weeks to aggregate over

    Returns
    -------
    (weekly sales dataframe, weeks used) — empty frame and empty list when
    ``target_variable`` is ``None``.
    """
    if target_variable is None:
        return pd.DataFrame(), []
    # NOTE(review): region implementations appear to read 'week_value' from
    # the criteria dict — confirm before removing this mutation.
    applicability_criteria['week_value'] = tuple(consideryearweeks)
    # Bug fix: copy before appending the sentinel store id so the caller's
    # list is not mutated.
    stores = list(stores) + [-1]
    start_time = datetime.now()
    if self._config["feature_parameter"]["is_product_present"] == 1:
        weekly_overal_level_sales = self.get_cbu_sales(
            stores=stores,
            applicability_criteria=applicability_criteria,
            weeks=consideryearweeks)
    else:
        weekly_overal_level_sales = self.get_overall_sales(
            stores=stores,
            applicability_criteria=applicability_criteria,
            weeks=consideryearweeks)
    print("Time taken (get_sales_calculate_rsv) sales: {} seconds".format(
        (datetime.now() - start_time).total_seconds()))
    return weekly_overal_level_sales, consideryearweeks
def _lift_calculation_util(self, weekly_sales, first_half_weeks,
                           second_half_weeks, target_variable) -> Tuple[pd.DataFrame, str, bool]:
    """Compute per-store lift (growth) between two sets of weeks.

    Aggregates sales/volume per store over each period, joins the two
    aggregates and derives ``<target> Lift`` (and the CBU equivalent when
    product attributes are maintained).

    Parameters
    ----------
    weekly_sales : dataframe of weekly store sales
    first_half_weeks / second_half_weeks : week values of the two periods
    target_variable : column on which the lift filter/ratio is computed

    Returns
    -------
    (lift dataframe, message, success flag)
    """
    week_lbl = self._tarvarmapping["week"]
    year_lbl = self._tarvarmapping["year"]
    rsv_lbl = self._tarvarmapping["rsv"]
    vol_lbl = self._tarvarmapping["volume"]
    # Bug fix: .copy() so the period-label assignments below do not hit
    # pandas' SettingWithCopyWarning on a boolean-mask slice.
    weekly_rsv_year1 = weekly_sales[weekly_sales[week_lbl].isin(first_half_weeks)].copy()
    weekly_rsv_year2 = weekly_sales[weekly_sales[week_lbl].isin(second_half_weeks)].copy()
    weekly_rsv_year1[year_lbl] = "Year1"
    weekly_rsv_year2[year_lbl] = "Year2"
    aggdict = {k: sum for k in [rsv_lbl, vol_lbl]}
    groupbycolumns = [self._tarvarmapping["partner_id"],
                      self._tarvarmapping["banner"],
                      year_lbl]
    if self._config['feature_parameter']["is_product_present"] == 1:
        aggdict.update({k: sum for k in ["CBU_Category_" + rsv_lbl,
                                         "CBU_Category_" + vol_lbl]})
    annualrsvdatayear1 = weekly_rsv_year1 \
        .groupby(groupbycolumns) \
        .agg(aggdict) \
        .reset_index()
    annualrsvdatayear2 = weekly_rsv_year2 \
        .groupby(groupbycolumns) \
        .agg(aggdict) \
        .reset_index()
    # Round the base measures to 2 decimals in both periods.
    for frame in (annualrsvdatayear1, annualrsvdatayear2):
        frame[rsv_lbl] = frame[rsv_lbl].round(2)
        frame[vol_lbl] = frame[vol_lbl].round(2)
    annualrsvdatayear1colsdict = {rsv_lbl: rsv_lbl + ' Year 1',
                                  vol_lbl: vol_lbl + ' Year 1'}
    annualrsvdatayear2colsdict = {rsv_lbl: rsv_lbl + ' Year 2',
                                  vol_lbl: vol_lbl + ' Year 2'}
    if self._config['feature_parameter']["is_product_present"] == 1:
        cbu_rsv_lbl = "CBU_Category_" + rsv_lbl
        cbu_vol_lbl = "CBU_Category_" + vol_lbl
        for frame in (annualrsvdatayear1, annualrsvdatayear2):
            frame[cbu_rsv_lbl] = frame[cbu_rsv_lbl].round(2)
            frame[cbu_vol_lbl] = frame[cbu_vol_lbl].round(2)
        annualrsvdatayear1colsdict.update({cbu_rsv_lbl: cbu_rsv_lbl + ' Year 1',
                                           cbu_vol_lbl: cbu_vol_lbl + " Year 1"})
        annualrsvdatayear2colsdict.update({cbu_rsv_lbl: cbu_rsv_lbl + ' Year 2',
                                           cbu_vol_lbl: cbu_vol_lbl + " Year 2"})
    annualrsvdatayear1.rename(columns=annualrsvdatayear1colsdict, inplace=True)
    annualrsvdatayear2.rename(columns=annualrsvdatayear2colsdict, inplace=True)
    mergecols = [self._tarvarmapping["partner_id"],
                 self._tarvarmapping["banner"]]
    annualrsvdatamerged = annualrsvdatayear1.merge(annualrsvdatayear2,
                                                   on=mergecols)
    # Drop the duplicated period-label columns produced by the merge.
    annualrsvdatamerged.drop(labels=[year_lbl + "_x", year_lbl + "_y"],
                             axis=1,
                             inplace=True)
    # Lift is defined only for stores with positive sales in both periods.
    salesfilter = ((annualrsvdatamerged[target_variable + " Year 1"] > 0)
                   & (annualrsvdatamerged[target_variable + " Year 2"] > 0))
    annualrsvdatamerged = annualrsvdatamerged[salesfilter].copy()
    trg_var_yr1_lbl = target_variable + " Year 1"
    trg_var_yr2_lbl = target_variable + " Year 2"
    trg_lift_lbl = target_variable + " Lift"
    annualrsvdatamerged[trg_lift_lbl] = (annualrsvdatamerged[trg_var_yr2_lbl]
                                         - annualrsvdatamerged[trg_var_yr1_lbl]) \
        / annualrsvdatamerged[trg_var_yr1_lbl]
    annualrsvdatamerged[trg_lift_lbl] = annualrsvdatamerged[trg_lift_lbl].round(2)
    if self._config['feature_parameter']["is_product_present"] == 1:
        cbu_year2_sales_lbl = "CBU_Category_" + target_variable + " Year 2"
        cbu_year1_sales_lbl = "CBU_Category_" + target_variable + " Year 1"
        cbu_lift_lbl = "CBU_Category_" + target_variable + " Lift"
        annualrsvdatamerged[cbu_lift_lbl] = (annualrsvdatamerged[cbu_year2_sales_lbl]
                                             - annualrsvdatamerged[cbu_year1_sales_lbl]) \
            / annualrsvdatamerged[cbu_year1_sales_lbl]
        annualrsvdatamerged[cbu_lift_lbl] = annualrsvdatamerged[cbu_lift_lbl].round(2)
    return annualrsvdatamerged, "Successfully calculated lift values", True
def get_annual_rsv_lifts(self, target_variable, test_master_df, stores,
                         applicability_criteria, test_type) \
        -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
    """Compute annual lift values for the given stores.

    Fetches validated weekly sales over ``sales_weeks`` weeks, splits the
    weeks into two periods of ``sales_lifts_sales_weeks`` each and delegates
    to ``_lift_calculation_util``.

    Returns
    -------
    (lift df, merged weekly sales, overall weekly sales, cbu weekly sales,
    weeks used, message, success flag)
    """
    sales_week = self.get_sales_weeks(applicability_criteria)
    sales_lifts_sales_weeks = self.get_lift_sales_weeks(applicability_criteria)
    weekly_ovrl_cbu_sales, weekly_overall_sales, weekly_cbu_sales, consideryearweeks, \
        message, success_flag = self.get_valid_weekly_target_data(
            stores=stores,
            applicability_criteria=applicability_criteria,
            target_variable=target_variable,
            test_master_df=test_master_df,
            test_type=test_type,
            sales_week=sales_week)
    if success_flag is False:
        # Bug fix: the original returned only 5 values here while the
        # declared contract (and the success path) has 7 — callers that
        # unpack the tuple crashed on every failure.
        return pd.DataFrame(), weekly_ovrl_cbu_sales, weekly_overall_sales, \
            weekly_cbu_sales, consideryearweeks, message, success_flag
    weeks1 = consideryearweeks[:sales_lifts_sales_weeks]
    weeks2 = consideryearweeks[sales_lifts_sales_weeks:]
    annualrsvdatamerged, _, success_flag = self._lift_calculation_util(
        weekly_sales=weekly_ovrl_cbu_sales,
        first_half_weeks=weeks1,
        second_half_weeks=weeks2,
        target_variable=target_variable)
    return annualrsvdatamerged, weekly_ovrl_cbu_sales, weekly_overall_sales, \
        weekly_cbu_sales, consideryearweeks, \
        "Annual Lift calculated Successfully!", True
def get_total_weekly_target_data(self, test_master_df, stores_list, sales_week, target_variable,
                                 applicability_criteria, test_type,
                                 consideryearweeks=None) \
        -> Tuple[pd.DataFrame, list, str, bool]:
    """Overall sales for ``stores_list`` over the considered weeks.

    When ``consideryearweeks`` is not supplied, the last ``sales_week``
    weeks ending at the test's pre-period end are used.

    Parameters
    ----------
    test_master_df : dataframe holding the current test's record
    stores_list : list of store identifiers (not mutated)
    sales_week : number of weeks of sales to fetch
    target_variable, test_type : logged only, here
    applicability_criteria : dict of product/store filters from the tool
    consideryearweeks : optional pre-computed list of weeks

    Returns
    -------
    (overall weekly sales, weeks used, message, success flag)
    """
    print(test_type, target_variable)
    if consideryearweeks is None:
        consideryearweeks = []
    if not consideryearweeks:
        # Consistency fix: reuse the shared str_to_date helper (as the
        # sibling methods do) instead of an inline strptime chain.
        yearweeks = self.find_last104_weeks_from_baseline_end(
            str_to_date(test_master_df['pre_end'].values[0]))
        yearweeks.sort(reverse=True)
        consideryearweeks = yearweeks[:sales_week]
        consideryearweeks.sort(reverse=False)
    # Bug fix: copy before appending the sentinel store id so the caller's
    # list is not mutated.
    stores_list = list(stores_list) + [-1]
    weekly_overal_level_sales = self.get_overall_sales(
        stores=stores_list,
        applicability_criteria=applicability_criteria,
        weeks=consideryearweeks)
    if weekly_overal_level_sales.shape[0] == 0:
        return weekly_overal_level_sales, consideryearweeks, "No Sales found", False
    return weekly_overal_level_sales, consideryearweeks, "Sales calculated successfully!", True
def get_weekly_targetvariables_data(self, target_variable, test_master_df,
                                    stores, applicability_criteria) \
        -> Tuple[pd.DataFrame, pd.DataFrame, list, list, str, bool]:
    """Fetch sales for the test's pre and post windows.

    Window boundaries are read from ``test_master_df``; CBU sales are used
    when the region maintains product attributes, overall sales otherwise.

    Returns
    -------
    (pre-window sales, post-window sales, pre weeks, post weeks, message,
    success flag)
    """
    pre_window_weeknumbers = self.find_weeks(
        str_to_date(test_master_df["pre_start"].values[0]),
        str_to_date(test_master_df["pre_end"].values[0]))
    pre_window_weeknumbers = list(map(int, pre_window_weeknumbers))
    post_window_weeknumbers = self.find_weeks(
        str_to_date(test_master_df["testwin_start"].values[0]),
        str_to_date(test_master_df["testwin_end"].values[0]))
    post_window_weeknumbers = list(map(int, post_window_weeknumbers))
    weeks_req = post_window_weeknumbers[:]
    weeks_req.extend(pre_window_weeknumbers)
    if self._config['feature_parameter']["is_product_present"] == 1:
        weekly_target_data = self.get_cbu_sales(
            stores=stores[:],
            applicability_criteria=applicability_criteria,
            weeks=weeks_req)
    else:
        weekly_target_data = self.get_overall_sales(
            stores=stores[:],
            applicability_criteria=applicability_criteria,
            weeks=weeks_req)
    if target_variable not in weekly_target_data.columns.tolist():
        # Bug fix: the original returned only 4 values here while the
        # declared contract (and the success path) has 6.
        return pd.DataFrame(), pd.DataFrame(), [], [], \
            "Target variable not found in weekly sales!", False
    # Columns kept for both windows.
    value_columns = [self._tarvarmapping["partner_id"],
                     self._tarvarmapping["banner"],
                     self._tarvarmapping["week"],
                     self._tarvarmapping['rsv'],
                     self._tarvarmapping['volume']]
    prewindow_filter = weekly_target_data[self._tarvarmapping["week"]] \
        .isin(pre_window_weeknumbers)
    postwindow_filter = weekly_target_data[self._tarvarmapping["week"]] \
        .isin(post_window_weeknumbers)
    prewindow_target_data = weekly_target_data[prewindow_filter][value_columns]
    postwindow_target_data = weekly_target_data[postwindow_filter][value_columns]
    return prewindow_target_data, postwindow_target_data, pre_window_weeknumbers, \
        post_window_weeknumbers, "Sales Calculated successfully!!", True
def get_pre_post_sales_test_measurement(self, target_variable, test_control_stores_with_time_period,
                                        applicability_criteria, stores_list, weeks_req=None,
                                        weeks_before=None, weeks_after=None) \
        -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
    """Pre/post window sales for mapped test and control stores.

    Each record of ``test_control_stores_with_time_period`` carries its own
    window dates; ``weeks_before``/``weeks_after`` widen the windows by a
    number of weeks.

    Returns
    -------
    (pre-period sales, post-period sales, raw weekly sales, per-store
    window info records, message, success flag)
    """
    if weeks_req is None:
        weeks_req = []
    if weeks_before is None:
        weeks_before = 0
    if weeks_after is None:
        weeks_after = 0
    partner_lbl = self._tarvarmapping['partner_id']
    banner_lbl = self._tarvarmapping['banner']
    columns_req = ['pre_start', 'pre_end', 'testwin_start', 'testwin_end',
                   'Test_store_' + partner_lbl, 'Test_store_' + banner_lbl,
                   partner_lbl, banner_lbl]
    if len(set(columns_req).intersection(set(test_control_stores_with_time_period.columns))) < len(columns_req):
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), list(), \
            "Either of these columns not passed {}".format(columns_req), False
    stores_date_info = test_control_stores_with_time_period.to_dict(orient='records')
    for record in stores_date_info:
        # Widen each store's windows by the requested number of weeks.
        pre_start = datetime.strptime(record['pre_start'], '%Y-%m-%d').date() \
            - timedelta(weeks=weeks_before)
        pre_end = datetime.strptime(record['pre_end'], '%Y-%m-%d').date() \
            - timedelta(weeks=weeks_before)
        testwin_start = datetime.strptime(record['testwin_start'], '%Y-%m-%d').date() \
            + timedelta(weeks=weeks_after)
        testwin_end = datetime.strptime(record['testwin_end'], '%Y-%m-%d').date() \
            + timedelta(weeks=weeks_after)
        pre_window_weeknumbers = list(map(int, self.find_weeks(pre_start, pre_end)))
        post_window_weeknumbers = list(map(int, self.find_weeks(testwin_start, testwin_end)))
        weeks_req.extend(pre_window_weeknumbers)
        weeks_req.extend(post_window_weeknumbers)
        record['pre_period_weeks_required'] = pre_window_weeknumbers
        record['post_period_weeks_required'] = post_window_weeknumbers
    weeks_req = list(set(weeks_req))
    stores_list = list(set(stores_list))
    # Bug fix: identity comparison `is 1` replaced with equality — `is` on
    # an int literal is implementation-dependent and a SyntaxWarning.
    if self._config["feature_parameter"]["is_product_present"] == 1:
        weekly_sales = self.get_cbu_sales(
            stores=stores_list[:],
            applicability_criteria=applicability_criteria,
            weeks=weeks_req[:])
    else:
        weekly_sales = self.get_overall_sales(
            stores=stores_list[:],
            applicability_criteria=applicability_criteria,
            weeks=weeks_req[:])
    if weekly_sales.shape[0] == 0:
        # Bug fix: the original returned only 3 values here while the
        # declared contract has 6.
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), list(), \
            "No sales found for the requested stores/weeks", False
    req_pre_sales, req_post_sales = self._convert_sales_pre_post(
        weekly_sales=weekly_sales.drop(banner_lbl, axis=1),
        stores_date_info_dict_list=stores_date_info,
        target_variable=target_variable,
        test_control_map_table=test_control_stores_with_time_period)
    map_columns = ['Test_store_' + partner_lbl, 'Test_store_' + banner_lbl,
                   partner_lbl, banner_lbl]
    join_keys = ['Test_store_' + partner_lbl, partner_lbl]
    req_post_sales = req_post_sales.merge(
        test_control_stores_with_time_period[map_columns], on=join_keys)
    req_pre_sales = req_pre_sales.merge(
        test_control_stores_with_time_period[map_columns], on=join_keys)
    return req_pre_sales, req_post_sales, weekly_sales, stores_date_info, \
        "sales computed successfully!!", True
from datetime import datetime
from typing import Tuple, final
import pandas as pd
from .utility.sql_utility import SqlUtility
class Stores(SqlUtility):
def __init__(self, config, test_id):
    """Initialise the store data-access object.

    Parameters
    ----------
    config : region (or overall) configuration from config_data
    test_id : identifier of the current test
    """
    super().__init__(config)
    self._test_id = test_id
    # Shortcuts into the configuration used throughout this class.
    self._metadata = self._config["metadata"]
    self._storemstrmapping = self._config["store_mstr_columns"]
def set_test_id(self, test_id):
    """Set the current test id used by subsequent store lookups."""
    self._test_id = test_id
def get_filtered_stores(self, applicability_criteria)->pd.DataFrame:
    """
    Return store-master records matching the applicability criteria.

    Abstract hook: region implementations must override this with a query
    against the store master table using the store filters selected in the
    tool. This base implementation does nothing and returns ``None``.

    Parameters
    ----------
    applicability_criteria: key-value pairs of the filters (product and
        store) selection made in the tool

    Return values
    -------
    store attributes dataframe (``None`` from this base stub)
    """
    pass
def get_uploaded_stores_info(self, stores_list, applicability_criteria)->pd.DataFrame:
    """
    Return store-master records for an explicit list of store identifiers.

    Abstract hook: region implementations must override this with a query
    against the store master table filtered on the identifier column
    (config[store_mstr_columns][partner_id]) values in ``stores_list``.
    This base implementation does nothing and returns ``None``.

    Parameters
    ----------
    stores_list: list of store identifier values to look up
    applicability_criteria: key-value pairs of the filters (product and
        store) selection made in the tool

    Return values
    -------
    store attributes dataframe (``None`` from this base stub)
    """
    pass
def filter_population_uploaded_stores(self, uploaded_file_df=None) -> list:
"""
About function
--------------
Once the uploaded population stores are stored in the database table 'config['tables']['upload_stores']' (storing of uploaded stores will be done by UI team)
now these stores will be considered as the population stores everytime (store filters in the applicability criteria will be considered)
Note: config['tables']['upload_stores'] this table will be maintaining store identifier refer config[store_mstr_columns][partner_id]
Parameters
----------
applicability_criteria: key-value pairs of the filters (product and store) selection made in the tool
uploaded_file_df: optional dataframe for DS person to work on upload functionality; it will have same attributes like config['tables']['upload_stores']
Return values
-------
list of store identifiers
"""
if (self._test_id is None) or ("upload_stores" not in self._config['tables']):
storelist = []
else:
if uploaded_file_df is None:
storelistquery = """SELECT store_id as {column_name} FROM {db}
WHERE test_id_id = {testid}"""
storelistqueryDf = self.execute_sql_query(query=storelistquery, data={"column_name": self._config["store_mstr_columns"]['partner_id'],
"db": self._config['tables']['upload_stores'],
"testid": self._test_id})
else:
storelistqueryDf = uploaded_file_df
storelistqueryDf = storelistqueryDf[storelistqueryDf['test_id_id'] == self._test_id]
storelist = [] if storelistqueryDf.shape[0] == 0 else list(storelistqueryDf[self._config["store_mstr_columns"]['partner_id']].unique())
return storelist
def filter_population(self, applicability_criteria, storelist=[], uploaded_file_df = None)->pd.DataFrame:
"""
About function
--------------
All the tool features depends on the population of stores, this function fetches the store records that meet either of the following:
1) Store that were uploaded by user
2) if stores were not uploaded then this function will use the store filter selected in applicability criteria to get stores
Parameters
----------
applicability_criteria: key-value pairs of the filters (product and store) selection made in the tool
storelist: optional parameter in case the developer needs to fetch the attributes of known store identifier
uploaded_file_df: optional dataframe for DS person to work on upload functionality; it will have same attributes like config['tables']['upload_stores']
Return values
-------
dataframe of store attributes
"""
if not storelist:
storelist = self.filter_population_uploaded_stores(uploaded_file_df=uploaded_file_df)
if not storelist:
stores_master_df = self.get_filtered_stores( applicability_criteria=applicability_criteria)
else:
storelist.append(-1)
stores_master_df = self.get_uploaded_stores_info(
stores_list = storelist,
applicability_criteria=applicability_criteria)
return stores_master_df
def read_test_master_table_by_test_ids(self, test_id) -> pd.DataFrame:
"""
About function
--------------
This function reads all the details regarding the test from test master table refer config[tables][test_mstr]
Parameters
----------
test_id: could be a list of test ids or a single test id
-------
dataframe of test master filtered for specified test_id/s
"""
if isinstance(test_id, list) ==False:
test_id = [test_id]
test_id.append(-1)
return self.execute_sql_query(query="SELECT * FROM {test_master_table} WHERE is_active=1 and is_deleted = 0 AND test_id IN {test_ids}", data={'test_master_table': self._config['tables']['test_mstr'], "test_ids":tuple(test_id)})
def read_test_measurement_table_by_test_ids(self, test_id):
"""
About function
--------------
This function reads all the details from the test measturement table refer config[tables][test_mstr]
Parameters
----------
test_id: could be a list of test ids or a single test id
-------
dataframe of test measurement filtered for specified test_id/s
"""
if isinstance(test_id, list) ==False:
test_id = [test_id]
test_id.append(-1)
temp = self.execute_sql_query(query="SELECT * FROM {test_measurement_table} WHERE is_active=1 and is_deleted = 0 AND test_id IN {test_ids}", data={'test_measurement_table': self._config['tables']['measurement'], "test_ids":tuple(test_id)})
if temp.shape[0] == 0:
temp = pd.DataFrame(columns=['test_id_id'])
return temp
def read_test_master_table_active_tests(self, date):
"""
About function
--------------
This function returns all tests records that have postwindow end date greater than input date. Table refer config[tables][test_mstr]
Parameters
----------
date: string value of format 'yyyy-mm-dd'
-------
dataframe of test measurement
"""
temp = self.execute_sql_query(query="SELECT * FROM {test_master_table} WHERE is_active=1 and is_deleted = 0 AND testwin_end> '{date}'", data={'test_master_table': self._config['tables']['test_mstr'], "date":date})
if temp.shape[0] == 0:
temp = pd.DataFrame(columns=['test_id'])
return temp
def read_test_map_table_by_test_ids(self, test_id)->pd.DataFrame:
"""
About function
--------------
This function reads all the details from the test store map table refer config[tables][test_store_map];
This table stores the list of stores uploaded for the test
Parameters
----------
test_id: could be a list of test ids or a single test id
-------
dataframe of test store map filtered for specified test_id/s
"""
if isinstance(test_id, list) ==False:
test_id = [test_id]
test_id.append(-1)
temp = self.execute_sql_query(query="SELECT * FROM {test_map_table} WHERE is_active=1 and is_deleted = 0 AND test_id_id IN {test_id_ids}", data={"test_map_table": self._config['tables']['test_store_map'], "test_id_ids":tuple(test_id)})
if temp.shape[0] == 0:
temp = pd.DataFrame(columns=['created_on','modified_on','is_active','deleted_at','is_deleted','storemap_id','teststore_id','created_by_id','test_id_id','updated_by_id'])
return temp
def read_test_map_table_active_test_variable_dates(self, date) -> pd.DataFrame:
"""
About function
--------------
This function reads all the details from the test store map table where postwindow end is greater than the date;
Table name refer config[tables][test_store_map];
This table stores the list of stores uploaded for the test
Parameters
----------
test_id: could be a list of test ids or a single test id
-------
dataframe of test store map filtered for specified test_id/s
"""
temp = self.execute_sql_query(query="SELECT * FROM {test_map_table} WHERE is_active=1 and is_deleted = 0 AND testwin_end > '{date}'", data={"test_map_table": self._config['tables']['test_store_map'], "date":date})
if temp.shape[0] == 0:
temp = pd.DataFrame(columns=['created_on','modified_on','is_active','deleted_at','is_deleted','storemap_id','teststore_id','created_by_id','test_id_id','updated_by_id'])
return temp
def read_control_store_by_test_ids(self, test_id)->Tuple[str, bool]:
"""
About function
--------------
This function reads all the details from the control store master table where test id is in the list/value passed;
Table name refer config[tables][control_store_mstr];
This table stores the list of stores uploaded for the test
Parameters
----------
test_id: could be a list of test ids or a single test id
-------
dataframe of test store map filtered for specified test_id/s
"""
if isinstance(test_id, list) ==False:
test_id = [test_id]
test_id.append(-1)
temp = self.execute_sql_query(query="SELECT * FROM {control_store_master} WHERE is_active = 1 and is_deleted = 0 AND test_id_id IN {test_id_ids}", data={"control_store_master": self._config['tables']['control_store_mstr'], "test_id_ids":tuple(test_id)})
if temp.shape[0] == 0:
temp = pd.DataFrame(columns=['is_active','is_deleted','constore_id','created_by_id','test_id_id','updated_by_id'])
return temp
@final
def filter_active_test_control_stores(self, stores_master_df=None, remove_type=None, max_week_data_available=None):
"""
About function
--------------
This function removes the test or control stores from the stores_master_df passed
Parameters
----------
stores_master_df: population stores or dataframe of the stores
remove_type: default is value set in config; based on that it either removes the active test stores or both active test and control stores,
max_week_data_available: string 'yyyy-mm-dd' maximum date for which data is available in the database
Return value
-------
message, success_flag
"""
# Need to make Change here as per interaction with Database (starting here)
storemstrmapping = self._config["store_mstr_columns"]
if self._config["feature_parameter"]["test_variable_dates"] == 1:
test_map_df = self.read_test_map_table_active_test_variable_dates(max_week_data_available)
active_test = list(test_map_df['test_id_id'].unique())
temp_test = list(self.read_test_master_table_by_test_ids(active_test)['test_id'].unique())
active_test = set(temp_test).intersection(set(active_test))
else:
print("Fetching active test from test master table")
active_test = list(self.read_test_master_table_active_tests(max_week_data_available)['test_id'].unique())
active_test = list(set(active_test) - set([self._test_id]))
if len(active_test)>0:
test_map_df = self.read_test_map_table_by_test_ids(active_test)
else:
return stores_master_df
active_test = list(set(active_test) - set([self._test_id]))
if len(active_test) != 0:
filtered_stores_df = stores_master_df[~stores_master_df[self._storemstrmapping['partner_id']].isin(
test_map_df[test_map_df['test_id_id'].isin(active_test)]['teststore_id'].unique())]
test_control_pair_df = self.read_control_store_by_test_ids(active_test)
if test_control_pair_df.shape[0] > 0 and remove_type == 'both':
"""get the control stores info from the active tests details"""
active_control_stores = test_control_pair_df[test_control_pair_df["test_id_id"].isin(
active_test)]
"""Filter the control stores"""
if active_control_stores.shape[0] != 0:
filtered_stores_df = filtered_stores_df[~filtered_stores_df[storemstrmapping["partner_id"]].isin(
active_control_stores[storemstrmapping['partner_id']].values.tolist())]
stores_master_df = filtered_stores_df
return stores_master_df
@final
def validate_uploaded_stores_format(self, reference_file, uploaded_file, columns) -> Tuple[str, bool]:
"""
About function
--------------
This function validates the uploaded store file with the columns passed
Parameters
----------
uploaded_file: the actual file user has uploaded
uploaded_file: template file
columns: by default it is extracted from the config file
-------
message, success_flag
"""
if reference_file.shape[1] == 0:
return "Reference file doesnt have any columns!!", False
if uploaded_file.shape[1] == 0:
return "Uploaded file doesnt have any columns!!", False
if uploaded_file.shape[0] == 0:
return "Uploaded file is empty!!", False
check_number_columns = uploaded_file.shape[1] == reference_file.shape[1]
if check_number_columns is False:
return "Please refer template. Number of columns in uploaded file does not match with template", False
check_column_names = sorted(
uploaded_file.columns) == sorted(reference_file.columns)
if check_column_names is False:
return "Please refer template. Name of columns in uploaded file does not match with template", False
actual_file_column_format = dict(
uploaded_file.loc[:, sorted(uploaded_file.columns)].dtypes)
reference_file_column_format = dict(sorted(columns))
if actual_file_column_format != reference_file_column_format:
return "Please ensure that uploaded matches the following datatypes: {}".format(reference_file_column_format), False
check_format = actual_file_column_format == reference_file_column_format
if check_number_columns & check_column_names & check_format:
return "Uploaded file follows the template!!", True
return "Uploaded file doesn't follow the template!!", False
def validate_uploaded_presence_store_master(self, uploaded_stores, store_identifier, applicability_criteria)->Tuple[pd.DataFrame, str, bool]:
"""
About function
--------------
This function validates the uploaded store from the store master table
Parameters
----------
uploaded_stores: the actual file user has uploaded
store_identifier:
applicability_criteria:
-------
dataframe of valid stores, message, success_flag
"""
uploaded_stores_list = list(uploaded_stores[store_identifier].unique())
total_stores = len(uploaded_stores_list)
temp_uploaded_stores_list = uploaded_stores_list[:]
temp_uploaded_stores_list.append(-1)
uploaded_stores_mapped = self.get_uploaded_stores_info(stores_list = temp_uploaded_stores_list, applicability_criteria=applicability_criteria)
if uploaded_stores_mapped.shape[0] == 0:
return uploaded_stores_mapped, "All uploaded stores are not present in Store Master!!", False
stores_not_mapped = set(uploaded_stores_list) - set(uploaded_stores_mapped[self._config["store_mstr_columns"]["partner_id"]].unique())
stores_mapped = uploaded_stores_mapped.shape[0]
percentage_mapped = round((uploaded_stores_mapped.shape[0])*100/total_stores, 2)
message = "Out of {total_stores}, {store_mapped} got mapped which is around {percentage_mapped}".format(total_stores=total_stores, store_mapped=stores_mapped, percentage_mapped=percentage_mapped)
return uploaded_stores_mapped, message, True
@final
def validate_uploaded_stores_active_stores(self, stores_df, max_date_data_available, active_stores_filter_type="both") -> Tuple[pd.DataFrame, pd.DataFrame, str, bool]:
"""
About function
--------------
This function validates the uploaded store information and calculate the number of stores that are active in other test
Parameters
----------
stores_df: dataframe of the stores information that have been uploaded (must have config[store_mstr_columns][partner_id])
max_date_data_available: string value of the maximum date data maintained in the sales table. Format 'yyyy-mm-dd'
active_stores_filter_type: variable to keep a check on if we want to remove both test and control stores or test store only
default value is 'both';
-------
dataframe of filtered stores, message, success_flag
"""
filtered_stores = self.filter_active_test_control_stores(
stores_master_df=stores_df,
remove_type=active_stores_filter_type,
max_week_data_available=max_date_data_available)
if filtered_stores.shape[0] == 0:
return filtered_stores, "All uploaded stores are participating in other tests", False
total_stores = stores_df[self._config["store_mstr_columns"]["partner_id"]].nunique()
active_stores = total_stores-filtered_stores[self._config["store_mstr_columns"]["partner_id"]].nunique()
percent_active = round(active_stores*100/total_stores, 2)
message = "Out of {total_stores} valid stores, {active_stores} are active which is around {percentage} % cannot be used as test/control stores".format(total_stores=total_stores, active_stores=active_stores, percentage=percent_active)
return filtered_stores, message, True | /rtm_fast_unification-0.0.1-py3-none-any.whl/rtm_fast_unification/DSCode/library/sql/stores_master.py | 0.834171 | 0.358016 | stores_master.py | pypi |
import sys
import traceback
from DSCode.common_utilities_registry import *
def get_sales_object(config, test_id):
    """Instantiate the market-specific Sales class named in the config.

    The market config must contain ``config['Constructors']['Sales']`` naming
    a Sales-master subclass resolvable in this module's namespace (populated
    by the star-import of the common utilities registry). Do not set the
    value to the Sales master class itself -- inherit it in the common
    utilities instead.

    Example
    -------
    US: {"Constructors": {
            "Sales": "FAST_US_Sales",
            "Stores": "FAST_US_Stores",
            "Tool": "Fast_US_Tool"},
         ....
        }

    Parameters
    ----------
    config : dict
        Config set for the market.
    test_id :
        Identifier of the current test.

    Returns
    -------
    The constructed sales object.

    Raises
    ------
    Exception
        If the 'Sales' constructor name is missing from the config.
    """
    constructors = config['Constructors']
    if 'Sales' in constructors:
        sales_cls = getattr(sys.modules[__name__], constructors['Sales'])
        return sales_cls(config=config, test_id=test_id)
    # The previous version raised with traceback.format_exc(), which is
    # meaningless outside an ``except`` block ("NoneType: None"); report
    # the actual cause instead.
    raise Exception("An Error has occured while creating sales object: "
                    "'Sales' key missing from config['Constructors']")
def get_store_object(config, test_id):
    """Instantiate the market-specific Stores class named in the config.

    The market config must contain ``config['Constructors']['Stores']``
    naming a Stores-master subclass resolvable in this module's namespace
    (populated by the star-import of the common utilities registry). Do not
    set the value to the Stores master class itself -- inherit it in the
    common utilities instead.

    Example
    -------
    US: {"Constructors": {
            "Sales": "FAST_US_Sales",
            "Stores": "FAST_US_Stores",
            "Tool": "Fast_US_Tool"},
         ....
        }

    Parameters
    ----------
    config : dict
        Config set for the market.
    test_id :
        Identifier of the current test.

    Returns
    -------
    The constructed stores object.

    Raises
    ------
    Exception
        If the 'Stores' constructor name is missing from the config.
    """
    constructors = config['Constructors']
    if 'Stores' in constructors:
        stores_cls = getattr(sys.modules[__name__], constructors['Stores'])
        return stores_cls(config=config, test_id=test_id)
    # The previous version raised with traceback.format_exc(), which is
    # meaningless outside an ``except`` block ("NoneType: None"); report
    # the actual cause instead.
    raise Exception("An Error has occured while creating stores object: "
                    "'Stores' key missing from config['Constructors']")
from datetime import datetime
from typing import Tuple
import pandas as pd
class TargetEstimate:
    """Target (sales/volume) estimation feature.

    Attributes
    ----------
    config : dict
        Configuration from config_data, either per-region or overall.
    region : str
        Key present in config (falls back to ``config`` itself when absent).
    sales_object :
        Object of the sales class.
    store_object :
        Object of the store class.

    Methods
    -------
    data_extract(): compute required sales/volume and fetch population stores
    calculate_rsv(): total the RSV/POS/sales/volume value and count stores
    get_breakeven_lift(): estimate the breakeven lift percentage
    get_cost(): estimate the activity cost when the breakeven lift is known
    """

    def __init__(self, config, region, sales_object, store_object) -> None:
        """Construct the target-estimate object.

        Parameters
        ----------
        config : dict
            Configuration, either per-region or overall.
        region : str
            Key in ``config``; when absent, ``config`` is used directly.
        sales_object :
            Object of the sales class.
        store_object :
            Object of the store class.
        """
        self._config = config[region] if region in config else config
        self._sales_object = sales_object
        self._store_object = store_object
        self._metadata = self._config["metadata"]["test_configuration"]
        self._tarvarmapping = self._config["weekly_target_variable"]
        self._storemstrmapping = self._config["store_mstr_columns"]
        self._rsvestimate = 0.0
        self._weekly_target_data = pd.DataFrame()
        self._breakevenliftpercentage = 0.0

    def data_extract(self, target_variable, timeframestart, timeframeend, storelist,
                     applicability_criteria,
                     uploaded_file_df=None) -> Tuple[pd.DataFrame, list, str, bool]:
        """Fetch population stores and compute their sales for the window.

        Calls ``filter_population`` (when no explicit store list is given)
        and ``get_sales_calculate_rsv``.

        Parameters
        ----------
        target_variable : str
            Weekly sales column to estimate (sales or volume).
        timeframestart, timeframeend : str
            'yyyy-mm-dd' bounds of the pre-period.
        storelist : list
            Stores to compute sales for; pass an empty list to derive the
            population from ``applicability_criteria``.
        applicability_criteria : dict
            Product/store attributes selected in the tool.
        uploaded_file_df : pd.DataFrame, optional
            In-memory substitute for uploaded population stores (offline use).

        Returns
        -------
        (weekly sales DataFrame, list of week numbers, message, success_flag)
        """
        # Guard clause replaces the original wrap-everything ``if``; also
        # uses short-circuit ``or`` instead of bitwise ``&`` on bools.
        if timeframestart is None or timeframeend is None or target_variable is None:
            return pd.DataFrame(), list(), \
                "One of these parameters is None timeframestart, timeframeend, target_variable", False
        timeframeend_date = datetime.strptime(timeframeend, '%Y-%m-%d').date()
        timeframestart_date = datetime.strptime(timeframestart, '%Y-%m-%d').date()
        timeframe_weeknumbers = self._sales_object.find_weeks(
            timeframestart_date, timeframeend_date)
        if len(storelist) == 0:
            stores_master_df = self._store_object.filter_population(
                applicability_criteria=applicability_criteria,
                storelist=storelist,
                uploaded_file_df=uploaded_file_df)
            if stores_master_df.shape[0] == 0:
                return pd.DataFrame(), list(), \
                    "No stores found matching population criteria", False
            storelist = list(
                stores_master_df[self._storemstrmapping["partner_id"]].unique())
        self._weekly_target_data = pd.DataFrame()
        self._weekly_target_data, consideryearweeks = \
            self._sales_object.get_sales_calculate_rsv(
                stores=storelist,
                target_variable=target_variable,
                applicability_criteria=applicability_criteria,
                consideryearweeks=timeframe_weeknumbers)
        if self._weekly_target_data.shape[0] == 0:
            return self._weekly_target_data, consideryearweeks, \
                "No sales found between the timeperiod selected!!", False
        return self._weekly_target_data, consideryearweeks, \
            "Successfully Calculated!!", True

    def calculate_rsv(self, target_variable) -> Tuple[float, int]:
        """Sum the target variable over the extracted weekly data.

        Parameters
        ----------
        target_variable : str
            Weekly sales column to aggregate (sales or volume).

        Returns
        -------
        (total sales/volume rounded to 2 decimals, number of distinct stores)
        """
        store_count = self._weekly_target_data[self._tarvarmapping["partner_id"]].nunique()
        self._rsvestimate = self._weekly_target_data[target_variable].sum().round(2)
        return self._rsvestimate, store_count

    def get_breakeven_lift(self, rsv_estimate, cost, num_of_teststores,
                           applicability_criteria, uploaded_file_df=None) -> Tuple[float, str]:
        """Estimate the breakeven lift percentage.

        Parameters
        ----------
        rsv_estimate : float
            Total sales/volume over the annual RSV period
            (from :meth:`calculate_rsv`).
        cost : float
            Estimated cost of the activity on the population stores.
        num_of_teststores : int
            Number of stores being considered for the test.
        applicability_criteria : dict
            Product/store attributes selected in the tool.
        uploaded_file_df : pd.DataFrame, optional
            In-memory substitute for uploaded population stores (offline use).

        Returns
        -------
        (breakeven lift percentage, message); (0, message) on bad input.
        """
        if rsv_estimate is None or cost is None:
            return 0, "Parameter missing! Either Cost or RSV value not passed to function"
        if rsv_estimate == 0:
            # Guard: a zero RSV base would otherwise raise ZeroDivisionError.
            return 0, "RSV estimate is zero; breakeven lift cannot be computed"
        stores_master_df = self._store_object.filter_population(
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)
        print("Breakeven Lift - Population size: ", stores_master_df.shape)
        # Get the proportion of stores to be sampled for each banner.
        # BUGFIX: short-circuit ``and`` -- the original bitwise ``&``
        # evaluated both operands and raised KeyError whenever
        # 'rawconvfactors' was absent from the metadata.
        if "rawconvfactors" in self._metadata and len(self._metadata['rawconvfactors']) > 0:
            banner_label = self._tarvarmapping["banner"]
            partner_label = self._tarvarmapping["partner_id"]
            count_df = stores_master_df\
                .groupby(banner_label)[partner_label]\
                .count()\
                .reset_index()\
                .rename(columns={partner_label: "Count"})
            count_df["prop"] = count_df["Count"]/count_df["Count"].sum()
            count_df["stores_proportioned"] = (count_df["prop"] * num_of_teststores).round(2)
            bannerwisestoresdict = dict(zip(count_df[banner_label],
                                            count_df["stores_proportioned"]))
            rawconvfactors = self._metadata["rawconvfactors"]
            # Weighted average of the banner conversion factors over the
            # proportioned store counts.
            numerator = sum(bannerwisestoresdict[k]*v
                            for k, v in rawconvfactors.items()
                            if k in bannerwisestoresdict)
            denominator = sum(bannerwisestoresdict.values())
            conversionfactor = numerator / denominator
        else:
            conversionfactor = 1
        cost = cost/conversionfactor
        self._breakevenliftpercentage = (cost/rsv_estimate)*100
        return self._breakevenliftpercentage, "Calculated breakeven lift successfully!!"

    def get_cost(self, rsv_estimate=None, breakevenliftpercentage=None) -> float:
        """Estimate the activity cost from a known breakeven lift.

        Use when the breakeven lift is known but the cost is not.

        Parameters
        ----------
        rsv_estimate : float, optional
            Total sales or volume over the annual RSV period.
        breakevenliftpercentage : float, optional
            Known breakeven lift percentage.

        Returns
        -------
        float
            Rounded cost, or 0 when either input is missing.
        """
        if rsv_estimate is None or breakevenliftpercentage is None:
            return 0
        self._rsvestimate = rsv_estimate
        return round(breakevenliftpercentage*rsv_estimate/100, 2)
from datetime import datetime
from typing import Tuple
import pandas as pd
class RSVEstimate:
    """RSV estimation feature.

    Attributes
    ----------
    config : dict
        Configuration from config_data, either per-region or overall.
    region : str
        Key present in config (falls back to ``config`` itself when absent).
    sales_implementation :
        Object of the sales class.
    store_implementation :
        Object of the store class.

    Methods
    -------
    data_extract(): compute required sales/volume and fetch population stores
    calculate_rsv(): total the RSV value and count stores in the population
    get_breakeven_lift(): estimate the breakeven lift percentage
    get_cost(): estimate the activity cost when the breakeven lift is known
    """

    def __init__(self, config, region, sales_implementation, store_implemenation) -> None:
        """Construct the RSV-estimate object.

        Parameters
        ----------
        config : dict
            Configuration, either per-region or overall.
        region : str
            Key in ``config``; when absent, ``config`` is used directly.
        sales_implementation :
            Object of the sales class.
        store_implemenation :
            Object of the store class (parameter name kept as-is for
            backward compatibility with keyword callers).
        """
        self._config = config[region] if region in config else config
        self._sales_implementation = sales_implementation
        self._store_implemenation = store_implemenation
        self._metadata = self._config["metadata"]["test_configuration"]
        self._tarvarmapping = self._config["weekly_target_variable"]
        self._storemstrmapping = self._config["store_mstr_columns"]
        self._rsvestimate = 0.0
        self._weekly_target_data = pd.DataFrame()
        self._breakevenliftpercentage = 0.0

    def data_extract(self, target_variable, timeframestart, timeframeend, storelist,
                     applicability_criteria,
                     uploaded_file_df=None) -> Tuple[pd.DataFrame, list, str, bool]:
        """Fetch population stores and compute their sales for the window.

        Calls ``filter_population`` (when no explicit store list is given)
        and ``get_sales_calculate_rsv``.

        Parameters
        ----------
        target_variable : str
            Weekly sales column to estimate (sales or volume).
        timeframestart, timeframeend : str
            'yyyy-mm-dd' bounds of the pre-period.
        storelist : list
            Stores to compute sales for; pass an empty list to derive the
            population from ``applicability_criteria``.
        applicability_criteria : dict
            Product/store attributes selected in the tool.
        uploaded_file_df : pd.DataFrame, optional
            In-memory substitute for uploaded population stores (offline use).

        Returns
        -------
        (weekly sales DataFrame, list of week numbers, message, success_flag)
        """
        # Guard clause replaces the original wrap-everything ``if``; also
        # uses short-circuit ``or`` instead of bitwise ``&`` on bools.
        if timeframestart is None or timeframeend is None or target_variable is None:
            return pd.DataFrame(), list(), \
                "One of these parameters is None timeframestart, timeframeend, target_variable", False
        timeframeend_date = datetime.strptime(timeframeend, '%Y-%m-%d').date()
        timeframestart_date = datetime.strptime(timeframestart, '%Y-%m-%d').date()
        timeframe_weeknumbers = self._sales_implementation.find_weeks(
            timeframestart_date, timeframeend_date)
        if len(storelist) == 0:
            stores_master_df = self._store_implemenation.filter_population(
                applicability_criteria=applicability_criteria,
                storelist=storelist,
                uploaded_file_df=uploaded_file_df)
            if stores_master_df.shape[0] == 0:
                return pd.DataFrame(), list(), \
                    "No stores found matching population criteria", False
            storelist = list(
                stores_master_df[self._storemstrmapping["partner_id"]].unique())
        self._weekly_target_data = pd.DataFrame()
        self._weekly_target_data, consideryearweeks = \
            self._sales_implementation.get_sales_calculate_rsv(
                stores=storelist,
                target_variable=target_variable,
                applicability_criteria=applicability_criteria,
                consideryearweeks=timeframe_weeknumbers)
        if self._weekly_target_data.shape[0] == 0:
            return self._weekly_target_data, consideryearweeks, \
                "No sales found between the timeperiod selected!!", False
        return self._weekly_target_data, consideryearweeks, \
            "Successfully Calculated!!", True

    def calculate_rsv(self, target_variable) -> Tuple[float, int]:
        """Sum the target variable over the extracted weekly data.

        Parameters
        ----------
        target_variable : str
            Weekly sales column to aggregate (sales or volume).

        Returns
        -------
        (total sales/volume rounded to 2 decimals, number of distinct stores)
        """
        store_count = self._weekly_target_data[self._tarvarmapping["partner_id"]].nunique()
        self._rsvestimate = self._weekly_target_data[target_variable].sum().round(2)
        return self._rsvestimate, store_count

    def get_breakeven_lift(self, rsv_estimate, cost, num_of_teststores,
                           applicability_criteria, uploaded_file_df=None) -> Tuple[float, str]:
        """Estimate the breakeven lift percentage.

        Parameters
        ----------
        rsv_estimate : float
            Total sales/volume over the annual RSV period
            (from :meth:`calculate_rsv`).
        cost : float
            Estimated cost of the activity on the population stores.
        num_of_teststores : int
            Number of stores being considered for the test.
        applicability_criteria : dict
            Product/store attributes selected in the tool.
        uploaded_file_df : pd.DataFrame, optional
            In-memory substitute for uploaded population stores (offline use).

        Returns
        -------
        (breakeven lift percentage, message); (0, message) on bad input.
        """
        if rsv_estimate is None or cost is None:
            return 0, "Parameter missing! Either Cost or RSV value not passed to function"
        if rsv_estimate == 0:
            # Guard: a zero RSV base would otherwise raise ZeroDivisionError.
            return 0, "RSV estimate is zero; breakeven lift cannot be computed"
        stores_master_df = self._store_implemenation.filter_population(
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df)
        print("Breakeven Lift - Population size: ", stores_master_df.shape)
        # Get the proportion of stores to be sampled for each banner.
        # BUGFIX: short-circuit ``and`` -- the original bitwise ``&``
        # evaluated both operands and raised KeyError whenever
        # 'rawconvfactors' was absent from the metadata.
        if "rawconvfactors" in self._metadata and len(self._metadata['rawconvfactors']) > 0:
            banner_label = self._tarvarmapping["banner"]
            partner_label = self._tarvarmapping["partner_id"]
            count_df = stores_master_df\
                .groupby(banner_label)[partner_label]\
                .count()\
                .reset_index()\
                .rename(columns={partner_label: "Count"})
            count_df["prop"] = count_df["Count"]/count_df["Count"].sum()
            count_df["stores_proportioned"] = (count_df["prop"] * num_of_teststores).round(2)
            bannerwisestoresdict = dict(zip(count_df[banner_label],
                                            count_df["stores_proportioned"]))
            rawconvfactors = self._metadata["rawconvfactors"]
            # Weighted average of the banner conversion factors over the
            # proportioned store counts.
            numerator = sum(bannerwisestoresdict[k]*v
                            for k, v in rawconvfactors.items()
                            if k in bannerwisestoresdict)
            denominator = sum(bannerwisestoresdict.values())
            conversionfactor = numerator / denominator
        else:
            conversionfactor = 1
        cost = cost/conversionfactor
        self._breakevenliftpercentage = (cost/rsv_estimate)*100
        return self._breakevenliftpercentage, "Calculated breakeven lift successfully!!"

    def get_cost(self, rsv_estimate=None, breakevenliftpercentage=None) -> float:
        """Estimate the activity cost from a known breakeven lift.

        Use when the breakeven lift is known but the cost is not.

        Parameters
        ----------
        rsv_estimate : float, optional
            Total sales or volume over the annual RSV period.
        breakevenliftpercentage : float, optional
            Known breakeven lift percentage.

        Returns
        -------
        float
            Rounded cost, or 0 when either input is missing.
        """
        if rsv_estimate is None or breakevenliftpercentage is None:
            return 0
        self._rsvestimate = rsv_estimate
        return round(breakevenliftpercentage*rsv_estimate/100, 2)
from typing import Tuple, final
import numpy as np
import pandas as pd
import statsmodels.api as sm
from DSCode.library.ds_common_functions import gower_matrix
from scipy import stats
from sklearn.preprocessing import StandardScaler
class CntrlStoreSelectionFeature:
def __init__(self, config, region,sales_object, store_object,test_id) -> None:
self._config = config[region] if region in config else config
self._sales_object = sales_object #Ceab sales object
self._store_object = store_object
self._metadata = self._config["metadata"]
self._tarvarmapping = self._config["weekly_target_variable"]
self._storemstrmapping = self._config["store_mstr_columns"]
self._test_id = test_id
self._control_pool = []
def data_extract(self, applicability_criteria, target_variable, test_type, store_list, uploaded_file_df=None)->Tuple[pd.DataFrame, pd.DataFrame,list,pd.DataFrame, pd.DataFrame,str, bool]:
test_master = self._store_object.read_test_master_table_by_test_ids(test_id=self._test_id)
test_master = test_master[test_master['test_id'] == self._test_id]
if test_master.shape[0] == 0:
return pd.DataFrame(), pd.DataFrame(),[],pd.DataFrame(),pd.DataFrame(),\
"""No records found for the current test in Test Master table!!""", False
stores_master_df = self._store_object.filter_population(applicability_criteria=applicability_criteria, storelist = store_list, uploaded_file_df = uploaded_file_df)
if stores_master_df.shape[0] == 0:
return pd.DataFrame(), pd.DataFrame(),[], pd.DataFrame(), pd.DataFrame(),"No stores found in the population", False
annualrsvlifts, valid_sales_stores, _, _, consideryearweeks,\
message, success_flag = self._sales_object.get_annual_rsv_lifts(
target_variable=target_variable,
test_master_df = test_master,
stores = list(stores_master_df[self._storemstrmapping["partner_id"]].unique()),
applicability_criteria=applicability_criteria,
test_type=test_type,
)
if success_flag is False:
return pd.DataFrame(), pd.DataFrame(),[],pd.DataFrame(), pd.DataFrame(),message, False
return annualrsvlifts, valid_sales_stores,consideryearweeks,test_master, stores_master_df, "Sales computed Successfully!", True
@final
def _get_feature_thresholds(self, teststores, controlstores, features)-> Tuple[dict]:
"""
# This function corresponds to the test vs population summary module in the tool - No uk specific code in it
# Note this function is currently not in use in the latest version of the FAST Tool
"""
threshold_dict = {}
metadata = self._config["metadata"]["test_planning"]
for feature in features:
std1 = teststores[feature].std()
std2 = controlstores[feature].std()
samples1 = teststores[feature].shape[0]
samples2 = controlstores[feature].shape[0]
numerator = np.power((std1*std1/samples1 + std2*std2/samples2), 2)
denominator = (np.power((std1*std1/samples1), 2)/(samples1-1) +
np.power((std2*std2/samples2), 2)/(samples2-1))
degfreedom = numerator/denominator
pval = metadata["test_vs_control_pvalue"]
criticalvalue = stats.t.ppf(1-pval/2, degfreedom)
difference_in_means = criticalvalue * \
np.sqrt((std1*std1/samples1 + std2*std2/samples2))
threshold_dict[feature] = difference_in_means
return threshold_dict
    @final
    def prepare_test_control_stores(self, dfA=None, dfB=None, teststoreid=None, gowerdistances=None, num_cntrl_rejected=None, calltype=None,
                                    reqcontrolstores=None, corrbased=None, rejected_with_control_left=None):
        """Rank the candidate control stores for one test store by Gower distance
        and keep the closest one(s).

        Parameters
        ----------
        dfA : pd.DataFrame
            Test-store frame; the row matching ``teststoreid`` supplies the
            ``Test_store_*`` columns copied onto the result.
        dfB : pd.DataFrame
            Candidate control stores. Note: the ``Gower_Distance`` column is
            written onto the caller's frame before the local reassignments.
        teststoreid :
            Identifier of the test store being mapped.
        gowerdistances :
            One Gower distance per row of ``dfB`` (same order).
        num_cntrl_rejected :
            ``None`` on first-time selection; non-``None`` signals the
            recompute scenario, where previously rejected controls are excluded.
        calltype : str
            "old" keeps only the single closest store; "new" keeps
            ``reqcontrolstores`` stores (or a similarity band, see ``corrbased``).
        corrbased : int
            When 1, keep every store whose similarity is within 0.05 of the
            best, provided at least ``reqcontrolstores`` of them qualify.
        rejected_with_control_left : dict
            Test store id -> iterable of already-rejected control ids.

        Returns
        -------
        pd.DataFrame of retained control rows with ``Gower_Distance``,
        ``Similarity_Measure`` and the test store's ``Test_store_*`` columns.
        """
        dfB["Gower_Distance"] = gowerdistances
        dfB = dfB.sort_values(by="Gower_Distance", ascending=True)
        # Similarity is the complement of the Gower distance.
        dfB["Similarity_Measure"] = 1 - dfB["Gower_Distance"]
        if num_cntrl_rejected is None:
            # First-time selection.
            if calltype == "old":
                dfB = dfB.head(1)
            if calltype == "new":
                if corrbased == 1:
                    # Stores whose similarity lies within 0.05 of the best.
                    top5_percent_stores = dfB[dfB['Similarity_Measure'] > (
                        dfB['Similarity_Measure'].max() - 0.05)]
                    if top5_percent_stores.shape[0] >= reqcontrolstores:
                        dfB = top5_percent_stores
                    else:
                        dfB = dfB.head(reqcontrolstores)
                else:
                    dfB = dfB.head(reqcontrolstores)
        else:
            # Recompute scenario: drop controls already rejected for this test store.
            dfB = dfB[~(dfB[self._storemstrmapping["partner_id"]].isin(
                rejected_with_control_left[teststoreid]))]
            if calltype == "old":
                dfB = dfB.head(1)
            if calltype == "new":
                reqcontrolstores = 1  # always for recompute scenario when corrbased=0
                if corrbased == 1:
                    # Here only the COUNT of stores in the 0.05 similarity band is used.
                    top5_percent_stores = dfB[dfB['Similarity_Measure'] > (
                        dfB['Similarity_Measure'].max() - 0.05)].shape[0]
                    if top5_percent_stores > reqcontrolstores:
                        reqcontrolstores = top5_percent_stores
                dfB = dfB.head(reqcontrolstores)
        # Copy the matched test store's attribute columns onto every retained row.
        filteredteststoredf = dfA[dfA[self._storemstrmapping["partner_id"]]
                                  == teststoreid]
        for col in self._metadata['test_planning']["teststores_columns"]:
            dfB["Test_store_" + col] = filteredteststoredf[col].values[0]
        return dfB
    @final
    def _prepare_test_control_stores_vecotrize(self, useA=None, useB=None, test_df=None, control_df=None, calltype=None,
                                               reqcontrolstores=None, corrbased=None):
        """Vectorised pairing of every test store with its closest control stores.

        Builds the full cross-join of ``test_df`` x ``control_df``, attaches the
        flattened Gower-distance matrix computed from the (scaled) feature
        frames ``useA``/``useB``, and prunes per ``calltype``/``corrbased``:

        - calltype "old": keep the single closest control per test store.
        - calltype "new", corrbased != 1: keep ``reqcontrolstores`` per test store.
        - calltype "new", corrbased == 1: keep every control within 0.05 of a
          test store's best distance when that band holds at least
          ``reqcontrolstores`` stores; otherwise fall back to the top
          ``reqcontrolstores``.

        Note: ``test_df`` is renamed/augmented in place (caller's frame mutated).
        Assumes ``gower_matrix`` returns rows ordered like ``useA`` and columns
        like ``useB`` so the 'A'-order flatten lines up with the cross-join.
        """
        gowermatrix = gower_matrix(useA, useB)
        # In-place rename so the pair frame distinguishes test vs control ids.
        test_df.rename(columns={
            self._storemstrmapping["partner_id"]: 'Test_store_'+self._storemstrmapping["partner_id"]}, inplace=True)
        # Cross join via a constant key column.
        test_df['key'] = 1
        test_control = test_df.merge(control_df, on='key')
        test_control.drop(columns=['key'], inplace=True)
        test_control['Gower_Distance'] = gowermatrix.flatten(order='A')
        if calltype == "old":
            test_control = test_control.sort_values(
                by="Gower_Distance", ascending=True)
            test_control = test_control.groupby(
                'Test_store_'+self._storemstrmapping["partner_id"]).head(1).reset_index(drop=True)
        if calltype == "new":
            test_control = test_control.sort_values(
                by="Gower_Distance", ascending=True)
            if corrbased == 1:
                # Per test store, the best (smallest) distance + 0.05 = band ceiling.
                min_gower_dist_pair = test_control.drop_duplicates(
                    subset=['Test_store_'+self._storemstrmapping["partner_id"]])
                min_gower_dist_pair['Gower_Distance'] = min_gower_dist_pair['Gower_Distance']+0.05
                min_gower_dist_pair = min_gower_dist_pair.drop(
                    columns=[self._storemstrmapping["partner_id"]]).rename(columns={'Gower_Distance': 'Min_Gower_dist'})
                test_control = test_control.merge(
                    min_gower_dist_pair, on=['Test_store_'+self._storemstrmapping["partner_id"]])
                # flag = pair falls inside its test store's similarity band.
                test_control['flag'] = test_control['Gower_Distance'] < test_control['Min_Gower_dist']
                # Test stores whose band holds at least reqcontrolstores pairs.
                top_5_percent_store = test_control.groupby(
                    ['Test_store_'+self._storemstrmapping["partner_id"]])['flag'].sum().reset_index()
                top_5_percent_store = top_5_percent_store[top_5_percent_store['flag'] >= reqcontrolstores][[
                    'Test_store_'+self._storemstrmapping["partner_id"]]]
                test_control = test_control.merge(
                    top_5_percent_store, how='left', on='Test_store_'+self._storemstrmapping["partner_id"], indicator=True)
                # df1: band-qualified test stores keep all in-band pairs;
                # df2: the rest keep their top reqcontrolstores pairs.
                df1 = test_control[(test_control['_merge'] == 'both') & (
                    test_control['flag'] == True)]
                df2 = test_control[test_control['_merge'] == 'left_only'].groupby(
                    'Test_store_'+self._storemstrmapping["partner_id"]).head(reqcontrolstores)
                test_control = pd.concat([df1, df2], sort=False, ignore_index=True)
                test_control.drop(columns=['_merge', 'flag', 'Min_Gower_dist'], inplace=True)
            else:
                test_control = test_control.groupby(
                    'Test_store_'+self._storemstrmapping["partner_id"]).head(reqcontrolstores).reset_index(drop=True)
        test_control["Similarity_Measure"] = 1-test_control["Gower_Distance"]
        test_control["Similarity_Measure"] = test_control["Similarity_Measure"].round(2)
        test_control["Gower_Distance"] = test_control["Gower_Distance"].round(2)
        return test_control
    @final
    def _get_test_control_stores_correlation(self, dfA=None, dfB=None, test_control_stores=None, weekcolumns=None, num_cntrl_rejected=None, corrbased=None, reqcontrolstores=None):
        """Score each candidate test/control pair by the Pearson correlation of
        their weekly sales series and keep the top pairs per test store.

        Parameters
        ----------
        dfA, dfB : pd.DataFrame
            Test-store and control-store frames carrying one column per week
            (``weekcolumns``).
        test_control_stores : pd.DataFrame
            Candidate pairs; must already contain ``Similarity_Measure`` and
            ``Gower_Distance``.
        num_cntrl_rejected :
            ``None`` keeps the top ``reqcontrolstores`` pairs per test store;
            otherwise (recompute scenario) only the single best pair is kept.
        corrbased : int
            When 1, rank by ``Correlation``; otherwise the ``Similarity_Measure``
            ordering stands.

        Returns
        -------
        pd.DataFrame with a rounded ``Correlation`` column added.
        """
        print(" in get_test_control_stores_correlation")
        # Restrict both frames to stores actually present in the pair list.
        dfA = dfA[dfA[self._storemstrmapping["partner_id"]].isin(
            test_control_stores["Test_store_"+self._storemstrmapping["partner_id"]].unique())]
        dfB = dfB[dfB[self._storemstrmapping["partner_id"]].isin(
            test_control_stores[self._storemstrmapping["partner_id"]].unique())]
        # Weekly series as columns: A is (weeks x test stores), B is (weeks x controls).
        A = dfA[weekcolumns].values.T
        B = dfB[weekcolumns].values.T
        N = B.shape[0]
        sA = A.sum(0)
        sB = B.sum(0)
        # Vectorised Pearson correlation for every (control, test) pair:
        # r = (N*sum(ab) - sum(a)sum(b)) / sqrt((N*sum(a^2)-sum(a)^2)(N*sum(b^2)-sum(b)^2))
        p1 = N*np.einsum('ij,ik->kj', A, B)
        p2 = sA*sB[:, None]
        p3 = N*((B**2).sum(0)) - (sB**2)
        p4 = N*((A**2).sum(0)) - (sA**2)
        pcorr = ((p1 - p2)/np.sqrt(p4*p3[:, None]))
        # Map store ids to their row/column positions in pcorr.
        test_store_dict = dict(zip(dfA[self._storemstrmapping["partner_id"]].values.tolist(), range(dfA[self._storemstrmapping["partner_id"]].nunique())))
        control_store_dict = dict(zip(dfB[self._storemstrmapping["partner_id"]].values.tolist(), range(dfB[self._storemstrmapping["partner_id"]].nunique())))
        test_control_stores['Correlation'] = test_control_stores[['Test_store_'+self._storemstrmapping["partner_id"], self._storemstrmapping["partner_id"]]]\
            .apply(lambda x: pcorr[control_store_dict[x[self._storemstrmapping["partner_id"]]]][test_store_dict[x['Test_store_'+self._storemstrmapping["partner_id"]]]], axis=1)
        test_control_stores = test_control_stores.sort_values(
            by=["Test_store_"+self._storemstrmapping["partner_id"], "Similarity_Measure"], ascending=False)
        if corrbased == 1:
            # Correlation ordering supersedes the similarity ordering.
            test_control_stores = test_control_stores.sort_values(
                by=["Test_store_"+self._storemstrmapping["partner_id"], "Correlation"], ascending=False)
        if num_cntrl_rejected is None:
            test_control_stores = test_control_stores.groupby(
                ["Test_store_"+self._storemstrmapping["partner_id"]], as_index=False, sort=False).head(reqcontrolstores)
        else:
            # Recompute: exactly one (best) control per test store.
            test_control_stores = test_control_stores.groupby(
                ["Test_store_"+self._storemstrmapping["partner_id"]]).apply(lambda x: x.head(1)).reset_index(drop=True)
        test_control_stores[['Gower_Distance', 'Similarity_Measure', 'Correlation']] = test_control_stores[[
            'Gower_Distance', 'Similarity_Measure', 'Correlation']].round(2)
        return test_control_stores
def _get_max_required_control_stores(self, reqcontrolstores, applicability_criteria)->int:
if ("advanced_control_mapping" in applicability_criteria)and \
len(applicability_criteria['advanced_control_mapping'].values()) > 0:
return max(reqcontrolstores, max(int(val) for val in applicability_criteria['advanced_control_mapping'].values()))
return reqcontrolstores
    def _handle_control_per_store_attribute(self, control_stores, one_to_one=False,
                                            control_per_store_attribute=None)->Tuple[pd.DataFrame, str, bool]:
        """Cap the number of controls retained per test store and set selection flags.

        When ``control_per_store_attribute`` is given (attribute value -> max
        controls), each test store keeps at most that many controls ranked by
        ``Similarity_Difference`` (NOTE(review): that column only exists when the
        CBU branch produced it — confirm callers guarantee this). ``df1`` holds
        each test store's first remaining row, ``df2`` the rest; ``one_to_one``
        decides whether the remainder stays checked/recommended.

        Returns (dataframe, message, success_flag).
        """
        if control_per_store_attribute is not None:
            if 'store_attribute' not in self._config["feature_parameter"]['advanced_control_mapping']:
                return pd.DataFrame(), "Error in config!! store_attribute key missing from config['feature_parameter']['advanced_control_mapping']", False
            req_store_attribute = 'Test_store_'+self._config["feature_parameter"]['advanced_control_mapping']['store_attribute']
            req_columns = ['Test_store_'+self._storemstrmapping['partner_id'], req_store_attribute]
            # Both the test-store id and the configured attribute column must exist.
            if len(set(req_columns).intersection(set(control_stores.columns))) != len(req_columns):
                return pd.DataFrame(), "control store passed to function doesnot have following attributes: {}".format(req_columns), False
            # Test store id -> its attribute value (e.g. its banner/format).
            test_store_store_attribute_dict = dict(
                zip(control_stores['Test_store_'+self._storemstrmapping['partner_id']],
                    control_stores[req_store_attribute]))
            cs_updated = pd.DataFrame()
            for store_identifier in list(test_store_store_attribute_dict.keys()):
                # j = allowed number of controls for this test store's attribute
                # value; defaults to 1 when the attribute value is unmapped.
                if test_store_store_attribute_dict[store_identifier] in (list(control_per_store_attribute.keys())):
                    j = control_per_store_attribute[test_store_store_attribute_dict[store_identifier]]
                else:
                    j = 1
                cs_updated = pd.concat([
                    control_stores.loc[
                        (control_stores['Test_store_'+self._storemstrmapping['partner_id']] == store_identifier)
                    ].sort_values(by=['Similarity_Difference'],
                                  ascending=False).head(j),
                    cs_updated],
                    ignore_index=True)
            # First row per test store vs. the remaining rows.
            df1 = cs_updated.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                     as_index=False,
                                     group_keys=False).apply(lambda x: x.iloc[0])
            df2 = cs_updated.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                     as_index=False,
                                     group_keys=False).apply(lambda x: x.iloc[1:])
        else:
            df1 = control_stores.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                         as_index=False,
                                         group_keys=False).apply(lambda x: x.iloc[0])
            df2 = control_stores.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                         as_index=False,
                                         group_keys=False).apply(lambda x: x.iloc[1:])
        if one_to_one == True:
            # 1:1 mapping: only the best control stays selected per test store.
            df1["Checked_Flag"] = 1
            df2["Checked_Flag"] = 0
            df1["is_recommended"] = 1
            df2["is_recommended"] = 0
        else:
            df1["Checked_Flag"] = 1
            df2["Checked_Flag"] = 1
            df1["is_recommended"] = 1
            df2["is_recommended"] = 1
        return pd.concat([df1, df2]), "Handled control stores per test stores", True
    def identify_control_stores_util(self, teststores, business_categories,stores_master_df, annualrsvliftdf, consideryearweeks, valid_sales_stores, summary_sales_weeks, sales_weeks, compare_variables,target_variable, max_date_data_available, control_store_pool, reqcontrolstores):
        """End-to-end control-store identification for the given test stores.

        Pipeline: filter out stores active in other tests -> pivot weekly sales
        -> attach annual lifts -> standard-scale the comparison features ->
        vectorised Gower pairing -> optional CBU similarity scoring ->
        correlation-based re-ranking.

        Mutates ``compare_variables`` in place (extends it with lift and
        category-specific columns). Returns ``(control_stores_df, message,
        success_flag)``.
        """
        if control_store_pool is not None and len(control_store_pool)>0:
            self._control_pool = control_store_pool
        # Drop stores already participating in another active test.
        stores_master_df = self._store_object.filter_active_test_control_stores(stores_master_df=stores_master_df.copy(deep=True),
                                                                                remove_type=self._config["feature_parameter"]["active_store_filter_type"],
                                                                                max_week_data_available=max_date_data_available)
        if stores_master_df.shape[0] ==0 :
            return pd.DataFrame(), "All stores are actively participating in other test", False
        # Keep only stores with valid sales records.
        stores_master_df = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(valid_sales_stores[self._tarvarmapping['partner_id']].unique())]
        # Pivot weekly sales: one column per week in the summary window.
        filtered = valid_sales_stores[valid_sales_stores[self._tarvarmapping['week']].isin(consideryearweeks[summary_sales_weeks:])]
        pivoteddf = pd.pivot_table(filtered, index=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]], columns=self._tarvarmapping['week'], values=target_variable).reset_index().rename_axis(None, axis=1)
        weekcolumns = [col for col in pivoteddf.columns.tolist() if col not in [self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]]]
        stores_master_df = stores_master_df.merge(pivoteddf, on=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]])
        filtercolumns = [self._tarvarmapping["partner_id"]] + [target_variable+' Year 1', target_variable+' Year 2', target_variable+' Lift']
        if self._config["feature_parameter"]["is_product_present"] == 1:
            # Product-level runs also compare CBU-category lifts.
            filtercolumns.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2', "CBU_Category_"+target_variable+" Lift"])
            compare_variables_cbu_category = compare_variables.copy()
            compare_variables_cbu_category.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
                                                   "CBU_Category_"+target_variable+" Lift"])
        if (len(business_categories)!=0) & (len(business_categories)<self._metadata['test_planning']["business_categories_count"]):
            # Add per-category variants of the configured compare features.
            common_category_specific = list(set(self._metadata['test_planning']["business_category_specific_compare"]) & set(compare_variables))
            if len(common_category_specific)>0:
                features_list = [[j+"_"+i for j in common_category_specific] for i in business_categories]
                category_specific_features = [item for elem in features_list for item in elem]
                compare_variables.extend(category_specific_features)
                # NOTE(review): compare_variables_cbu_category only exists when
                # is_product_present == 1 — this NameErrors otherwise; confirm intent.
                compare_variables_cbu_category.extend(category_specific_features)
        stores_master_df = stores_master_df.merge(annualrsvliftdf[filtercolumns], left_on=self._storemstrmapping["partner_id"], right_on=self._tarvarmapping["partner_id"])
        if stores_master_df.shape[0] == 0:
            return pd.DataFrame(), "Population stores do not have sales", False
        compare_variables.extend([target_variable+" Year 1", target_variable+" Year 2", target_variable+" Lift"])
        # Scaling Store Features Column values on the Entire Population set
        scaler = StandardScaler()
        # Object-dtype columns cannot be standard-scaled (the store id is kept out).
        nonscalingcolumns = [str_col for str_col in stores_master_df.columns if stores_master_df[str_col].dtypes == 'object']
        nonscalingcolumns = list(set(nonscalingcolumns) - set([self._storemstrmapping['partner_id']]))
        scale_cols = [item for item in compare_variables if item not in nonscalingcolumns]
        if stores_master_df.shape[0] == 0:
            return pd.DataFrame(), "All population stores are actively participating in other test", False
        if len(scale_cols) > 0:
            scaler = scaler.fit(stores_master_df[scale_cols])
        teststores = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(
            teststores[self._tarvarmapping['partner_id']].unique())]
        # ELIMINATING THE TESTSTORES FROM POPULATION
        stores_master_df = stores_master_df[~(stores_master_df[self._storemstrmapping["partner_id"]].isin(
            teststores[self._storemstrmapping["partner_id"]]))]
        # IF Control Store Pool Available then filter for only those stores
        if control_store_pool is not None:
            stores_master_df = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(control_store_pool)]
            if stores_master_df.shape[0] == 0:
                return pd.DataFrame(),"No valid control stores satisfying the criteria to proceed further.", False
        # Adding Additional Check for Req Control Stores
        if reqcontrolstores > stores_master_df[self._storemstrmapping["partner_id"]].nunique():
            reqcontrolstores = stores_master_df[self._storemstrmapping["partner_id"]].nunique()
        refA = teststores.copy(deep=True)
        refB = stores_master_df.copy(deep=True)
        useA = refA[compare_variables].copy(deep=True)
        useB = refB[compare_variables].copy(deep=True)
        if len(scale_cols) > 0:
            useA[scale_cols] = scaler.transform(useA[scale_cols])
            useB[scale_cols] = scaler.transform(useB[scale_cols])
        # NOTE(review): filter_columns is a set; it is used below as a column
        # indexer (refB[filter_columns]) — verify the installed pandas accepts that.
        filter_columns = set(self._metadata['test_planning']["teststores_columns"])
        del annualrsvliftdf, valid_sales_stores
        # Vectorize implementation
        control_df = pd.DataFrame(columns=[self._storemstrmapping["partner_id"]],
                                  data=refB[self._storemstrmapping["partner_id"]].values)
        control_df['key'] = 1
        control_stores = self._prepare_test_control_stores_vecotrize(useA=useA,
                                                                     useB=useB,
                                                                     test_df=refA[[self._storemstrmapping["partner_id"]]],
                                                                     control_df=control_df, calltype="new",
                                                                     reqcontrolstores=reqcontrolstores,
                                                                     corrbased=1)
        control_stores = control_stores.merge(refB[filter_columns], on=[self._storemstrmapping["partner_id"]])
        # Attach the test store's attributes with a Test_store_ prefix.
        teststores_column_rename = ["Test_store_" + col for col in self._metadata['test_planning']["teststores_columns"]]
        teststores_df = refA[self._metadata['test_planning']["teststores_columns"]]
        teststores_df.columns = teststores_column_rename
        control_stores = control_stores.merge(teststores_df,
                                              on=['Test_store_' + self._storemstrmapping["partner_id"]])
        # Add CBU_Category Similarity Scores
        test_store_dict = dict(zip(refA[self._storemstrmapping["partner_id"]].values.tolist(
        ), range(0, refA[self._storemstrmapping["partner_id"]].nunique(), 1)))
        control_store_dict = dict(zip(refB[self._storemstrmapping["partner_id"]].values.tolist(
        ), range(0, refB[self._storemstrmapping["partner_id"]].nunique(), 1)))
        control_stores['Gower_Distance'] = control_stores['Gower_Distance'].round(2)
        if self._config["feature_parameter"]["is_product_present"] == 1:
            # Second Gower pass on the CBU-augmented feature set.
            useA_cbu_category = refA[compare_variables_cbu_category].copy(
                deep=True)
            useB_cbu_category = refB[compare_variables_cbu_category].copy(
                deep=True)
            gowermatrix_cbu = gower_matrix(useA_cbu_category, useB_cbu_category)
            control_stores['Gower_Distance(CBU)'] = control_stores[['Test_store_'+self._storemstrmapping["partner_id"], self._storemstrmapping["partner_id"]]]\
                .apply(lambda x: gowermatrix_cbu[test_store_dict[x['Test_store_'+self._storemstrmapping["partner_id"]]]][control_store_dict[x[self._storemstrmapping["partner_id"]]]], axis=1)
            control_stores['Similarity_Measure(CBU)'] = 1 - \
                control_stores['Gower_Distance(CBU)']
            control_stores['Similarity_Difference'] = control_stores[
                "Similarity_Measure(CBU)"]-control_stores['Similarity_Measure']
            control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)', 'Similarity_Difference']] = control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)',
                                                                                                                          'Similarity_Difference']].round(2)
            control_stores.sort_values(
                by=['Similarity_Difference'], ascending=False, inplace=True)
        else:
            control_stores.sort_values(
                by=['Similarity_Measure'], ascending=False, inplace=True)
        # Final ranking by weekly-sales correlation.
        control_stores = self._get_test_control_stores_correlation(dfA=refA.copy(deep=True),
                                                                   dfB=refB.copy(
                                                                       deep=True),
                                                                   test_control_stores=control_stores.copy(
                                                                       deep=True),
                                                                   weekcolumns=weekcolumns,
                                                                   num_cntrl_rejected=None,
                                                                   corrbased=1, reqcontrolstores=reqcontrolstores)
        control_stores['Gower_Distance'] = control_stores['Gower_Distance'].round(2)
        control_stores['Similarity_Measure'] = control_stores['Similarity_Measure'].round(2)
        return control_stores, "Control stores are generated successfully!", True
def test_control_similarity_measurement(self, test_control_pairs, prewindow_target_data, target_variable, postwindow_target_data):
metrics_dict = {}
test_stores_pre = test_control_pairs.merge(prewindow_target_data,
left_on=["Test_store_" + self._storemstrmapping["partner_id"],
"Test_store_" + self._storemstrmapping["banner"]],
right_on=[
self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]],
how="left")
test_group_pre = test_stores_pre.groupby(self._tarvarmapping["week"])[
target_variable].mean().reset_index().rename(
columns={target_variable: 'Average_' + target_variable})
test_group_pre['Window'] = 'Pre'
test_group_pre['Group'] = 'Test'
# test group postperiod weekly target data
test_stores_post = test_control_pairs.merge(postwindow_target_data,
left_on=["Test_store_" + self._storemstrmapping["partner_id"],
"Test_store_" + self._storemstrmapping["banner"]],
right_on=[self._tarvarmapping["partner_id"],
self._tarvarmapping["banner"]],
how="left")
test_group_post = test_stores_post.groupby(self._tarvarmapping["week"])[
target_variable].mean().reset_index().rename(
columns={target_variable: 'Average_' + target_variable})
test_group_post['Window'] = 'Post'
test_group_post['Group'] = 'Test'
# control group preperiod weekly target data
control_stores_pre = test_control_pairs.merge(prewindow_target_data,
left_on=[self._storemstrmapping["partner_id"],
self._storemstrmapping["banner"]],
right_on=[self._tarvarmapping["partner_id"],
self._tarvarmapping["banner"]],
how="left")
control_group_pre = control_stores_pre.groupby(self._tarvarmapping["week"])[
target_variable].mean().reset_index().rename(
columns={target_variable: 'Average_' + target_variable})
control_group_pre['Window'] = 'Pre'
control_group_pre['Group'] = 'Control'
# control group postperiod weekly target data
control_stores_post = test_control_pairs.merge(postwindow_target_data,
left_on=[self._storemstrmapping["partner_id"],
self._storemstrmapping["banner"]],
right_on=[self._tarvarmapping["partner_id"],
self._tarvarmapping["banner"]],
how="left")
control_group_post = control_stores_post.groupby(self._tarvarmapping["week"])[
target_variable].mean().reset_index().rename(columns={target_variable: 'Average_' + target_variable})
control_group_post['Window'] = 'Post'
control_group_post['Group'] = 'Control'
# Pre and post period test and control group averages
combined_avg = pd.concat([test_group_pre, test_group_post, control_group_pre, control_group_post],
axis=0).reset_index(drop=True)
combined_avg['Average_' + target_variable] = round(
combined_avg['Average_' + target_variable], 2)
combined_avg["Week"] = combined_avg["Week"].astype(int)
combined_avg["Week"] = combined_avg["Week"].apply(
lambda x: str(x)[:4] + " Week " + str('%02d' % int(str(x)[-2:])))
# Average similarity & correlation
testcontrolstores = test_control_pairs.copy(deep=True)
avg_similarity = testcontrolstores['Similarity_Measure'].mean()
avg_correlation = testcontrolstores['Correlation'].mean()
metrics_dict["Avg_Similarity"] = str(round(avg_similarity, 2))
metrics_dict["Avg_Correlation"] = str(round(avg_correlation, 2))
return metrics_dict, combined_avg, "Calculated Successfully", True
def recompute_control_stores_util(self, target_variable, reqcontrolstores, test_control_stores, stores_master_df, max_date_data_available, annualrsvliftdf, valid_sales_stores,
consideryearweeks, compare_variables, include_cbu_features, business_categories):
accepted = test_control_stores.groupby(
"Test_store_"+self._storemstrmapping["partner_id"]).filter(lambda x: (x['Checked_Flag'] == 1).any())
rejected = test_control_stores[~test_control_stores["Test_store_"+self._storemstrmapping["partner_id"]].isin(
accepted["Test_store_"+self._storemstrmapping["partner_id"]])]
if rejected.shape[0] == 0:
return pd.DataFrame(), "Please unselect all the control stores for a test store to recompute.", False
rejected["is_recommended"]=0
num_cntrl_rejected = rejected.groupby(
"Test_store_"+self._storemstrmapping["partner_id"]).aggregate({self._storemstrmapping["partner_id"]:"nunique"}).reset_index()
stores_master_df = self._store_object.filter_active_test_control_stores(stores_master_df=stores_master_df.copy(deep=True),
remove_type=self._config["feature_parameter"]["active_store_filter_type"],
max_week_data_available=max_date_data_available)
stores_master_df = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(
valid_sales_stores[self._tarvarmapping["partner_id"]].unique())]
# -----------------------------------------------New code-------------------------------------------------------------
# check
filtered = valid_sales_stores[valid_sales_stores[self._tarvarmapping['week']].isin(consideryearweeks[:])]
pivoteddf = pd.pivot_table(filtered, index=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]], columns=self._tarvarmapping['week'], values=target_variable).reset_index().rename_axis(None, axis=1)
weekcolumns = [col for col in pivoteddf.columns.tolist() if col not in [self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]]]
stores_master_df = stores_master_df.merge(pivoteddf, on=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]])
# ------------------------------------------------New code-------------------------------------------------------------
filter_columns = [self._tarvarmapping["partner_id"]] + [target_variable+' Year 1', target_variable+' Year 2', target_variable+' Lift']
if self._config["feature_parameter"]["is_product_present"] == 1:
filter_columns.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
"CBU_Category_"+target_variable+" Lift"])
compare_variables_cbu_category = compare_variables.copy()
compare_variables_cbu_category.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
"CBU_Category_"+target_variable+" Lift"])
if (len(business_categories)!=0) & (len(business_categories)<self._metadata['test_planning']["business_categories_count"]):
common_category_specific = list(set(self._metadata['test_planning']["business_category_specific_compare"]) & set(compare_variables))
if len(common_category_specific)>0:
features_list = [[j+"_"+i for j in common_category_specific] for i in business_categories]
category_specific_features = [item for elem in features_list for item in elem]
compare_variables.extend(category_specific_features)
compare_variables_cbu_category.extend(category_specific_features)
stores_master_df = stores_master_df.merge(
annualrsvliftdf[filter_columns], left_on=self._storemstrmapping["partner_id"], right_on=self._tarvarmapping["partner_id"])
if include_cbu_features == 1:
compare_variables.extend([target_variable+' Year 1', target_variable+" Year 2", target_variable+" Lift",
"CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
"CBU_Category_"+target_variable+" Lift"])
else:
compare_variables.extend(
[target_variable+' Year 1', target_variable+" Year 2", target_variable+" Lift"])
# Scaling Store Features Column values on the Entire Population set
scaler = StandardScaler()
nonscalingcolumns = [str_col for str_col in stores_master_df.columns if stores_master_df[str_col].dtypes == 'object']
nonscalingcolumns = list(set(nonscalingcolumns) - set([self._storemstrmapping['partner_id']]))
scale_cols = [item for item in compare_variables if item not in nonscalingcolumns]
if len(scale_cols) > 0:
scaler = scaler.fit(stores_master_df[scale_cols])
teststores = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(
test_control_stores["Test_store_"+self._storemstrmapping["partner_id"]].unique())]
# ELIMINATING THE TESTSTORES FROM POPULATION
stores_master_df = stores_master_df[~(stores_master_df[self._storemstrmapping["partner_id"]].isin(teststores[self._storemstrmapping["partner_id"]]))]
refA = teststores.copy(deep=True)
refB = stores_master_df.copy(deep=True)
useA = refA[compare_variables].copy(deep=True)
useB = refB[compare_variables].copy(deep=True)
if len(scale_cols) > 0:
useA[scale_cols] = scaler.transform(useA[scale_cols])
useB[scale_cols] = scaler.transform(useB[scale_cols])
gowermatrix = gower_matrix(useA, useB)
rejected_with_control_left = {}
# Filtering out Test stores which have no more control store left to be mapped
teststores_with_exhausted_control = num_cntrl_rejected[num_cntrl_rejected[self._storemstrmapping["partner_id"]]+1 > refB.shape[0]].index.tolist()
rejected.loc[~rejected["Test_store_"+self._storemstrmapping["partner_id"]].isin(teststores_with_exhausted_control), "is_recommended"] = 0
num_cntrl_rejected = num_cntrl_rejected.to_dict()
rejected_with_control_left = rejected[~rejected["Test_store_" +
self._storemstrmapping["partner_id"]].isin(teststores_with_exhausted_control)]
if rejected_with_control_left.shape[0] == 0:
return pd.DataFrame(),"All Control Stores are exhausted for all the Test stores"
rejected_with_control_left = rejected_with_control_left.groupby(
"Test_store_"+self._storemstrmapping["partner_id"])[self._storemstrmapping["partner_id"]].unique()
rejected_with_control_left = rejected_with_control_left.to_dict()
# Identifying similar stores
filter_columns = self._metadata['test_planning']["teststores_columns"].copy()
df_list = []
for test_pid, row in zip(refA[self._storemstrmapping["partner_id"]], gowermatrix):
if test_pid in rejected_with_control_left.keys():
df_list.append(df_list.append(self.prepare_test_control_stores(dfA=refA[filter_columns].copy(deep=True),
dfB=refB[filter_columns].copy(deep=True),
teststoreid=test_pid, gowerdistances=row,
num_cntrl_rejected=num_cntrl_rejected, calltype="new",
rejected_with_control_left=rejected_with_control_left, corrbased=1,
reqcontrolstores=reqcontrolstores)))
control_stores = pd.concat(df_list)
control_stores = control_stores[~control_stores["Test_store_" +
self._storemstrmapping["partner_id"]].isin(teststores_with_exhausted_control)]
control_stores["Checked_Flag"] = 1
control_stores["is_recommended"] = 1
control_stores["test_id"] = self._test_id
# Add CBU_Category Similarity Scores
if self._config["feature_parameter"]["is_product_present"] == 1:
useA_cbu_category = refA[compare_variables_cbu_category].copy(deep=True)
useB_cbu_category = refB[compare_variables_cbu_category].copy(deep=True)
gowermatrix_cbu = gower_matrix(useA_cbu_category, useB_cbu_category)
test_store_dict = dict(zip(refA[self._storemstrmapping["partner_id"]].values.tolist(
), range(0, refA[self._storemstrmapping["partner_id"]].nunique(), 1)))
control_store_dict = dict(zip(refB[self._storemstrmapping["partner_id"]].values.tolist(
), range(0, refB[self._storemstrmapping["partner_id"]].nunique(), 1)))
control_stores['Gower_Distance(CBU)'] = control_stores[['Test_store_'+self._storemstrmapping["partner_id"], self._storemstrmapping["partner_id"]]]\
.apply(lambda x: gowermatrix_cbu[test_store_dict[x['Test_store_'+self._storemstrmapping["partner_id"]]]][control_store_dict[x[self._storemstrmapping["partner_id"]]]], axis=1)
control_stores['Similarity_Measure(CBU)'] = 1 - \
control_stores['Gower_Distance(CBU)']
control_stores['Similarity_Difference'] = control_stores[
"Similarity_Measure(CBU)"] - control_stores['Similarity_Measure']
control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)', 'Similarity_Difference']] = control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)',
'Similarity_Difference']].round(2)
control_stores.sort_values(
by=['Similarity_Difference'], ascending=False, inplace=True)
control_stores = self._get_test_control_stores_correlation(dfA=refA.copy(deep=True), dfB=refB.copy(deep=True),
test_control_stores=control_stores.copy(
deep=True),
weekcolumns=weekcolumns, num_cntrl_rejected=num_cntrl_rejected, corrbased=1)
control_stores = pd.concat([control_stores, rejected, accepted])
return control_stores, "Successfully recomputed!!", True
def control_summary_util(self, stores_master_df, test_control_mapping, summary_sales_weeks, consideryearweeks, weekly_target_sales, business_categories, compare_variables, target_variable) -> Tuple[dict, dict, dict, str, bool]:
    """Summarise test vs control stores on the comparison variables.

    Computes per-variable mean/std metrics for the matched test and control
    store groups, derives per-feature thresholds and the corresponding
    acceptable bounds around each test-group mean.

    Parameters
    ----------
    stores_master_df : store-master attributes for all stores
    test_control_mapping : mapping of test stores to their control stores
    summary_sales_weeks : offset into ``consideryearweeks`` for the summary window
    consideryearweeks : ordered list of year-week identifiers
    weekly_target_sales : weekly sales of the target variable per store
    business_categories : categories selected for the test (may be empty)
    compare_variables : store attributes to compare (NOTE: mutated in place,
        category-specific features and the target variable are appended)
    target_variable : sales metric being measured

    Returns
    -------
    (variables_metrics_dict, feature_thresholds_dict, feature_bounds_dict,
     message, success_flag)
    """
    variables_metrics_dict = {}
    feature_thresholds_dict = {}
    feature_bounds_dict = {}
    # Split the master list into the test group and the mapped control group.
    test_stores = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(
        test_control_mapping["Test_store_" + self._storemstrmapping["partner_id"]])]
    control_stores = stores_master_df.merge(
        test_control_mapping[[self._storemstrmapping["partner_id"], self._storemstrmapping["banner"]]],
        on=[self._storemstrmapping["partner_id"], self._storemstrmapping["banner"]])
    # Keep only the trailing summary window of weekly sales.
    weeks = consideryearweeks[summary_sales_weeks:]
    weeklyrsvdatayear = weekly_target_sales[weekly_target_sales[self._tarvarmapping["week"]].isin(weeks)]
    weeklyrsvdatayear["Year"] = "Year1"
    # To free space: the full weekly frame can be large.
    del weekly_target_sales
    aggdict = {k: sum for k in [self._tarvarmapping['rsv'], self._tarvarmapping['volume']]}
    groupbycolumns = [self._tarvarmapping["partner_id"],
                      self._tarvarmapping["banner"],
                      self._tarvarmapping['year']]
    annualrsvdatayear = weeklyrsvdatayear.groupby(groupbycolumns).agg(aggdict).reset_index()
    mergecolumns = [self._tarvarmapping["partner_id"],
                    self._tarvarmapping['rsv'], self._tarvarmapping['volume']]
    test_stores = test_stores.merge(annualrsvdatayear[mergecolumns],
                                    left_on=self._storemstrmapping["partner_id"],
                                    right_on=self._tarvarmapping["partner_id"])
    control_stores = control_stores.merge(annualrsvdatayear[mergecolumns],
                                          left_on=self._storemstrmapping["partner_id"],
                                          right_on=self._tarvarmapping["partner_id"])
    # When only a subset of business categories is in scope, add the
    # category-specific variants of the comparison features.
    if (len(business_categories) != 0) & (len(business_categories) < self._metadata['test_planning']["business_categories_count"]):
        common_category_specific = list(
            set(self._metadata['test_planning']["business_category_specific_compare"]) & set(compare_variables))
        if len(common_category_specific) > 0:
            features_list = [[j + "_" + i for j in common_category_specific] for i in business_categories]
            category_specific_features = [item for elem in features_list for item in elem]
            compare_variables.extend(category_specific_features)
    compare_variables.append(target_variable)
    # Drop constant features: they carry no comparative information.
    allstores = pd.concat([test_stores, control_stores])
    variable_features = allstores[compare_variables].nunique()[
        allstores[compare_variables].nunique() > 1].index.to_list()
    compare_variables = list(set(compare_variables).intersection(variable_features))
    for col in compare_variables:
        if test_stores[col].dtype == 'object':
            # BUG FIX: was `pass`, which fell through and ran the t-test /
            # mean / std on a non-numeric column; skip such columns instead.
            continue
        variables_metrics_dict[col] = {}
        # NOTE(review): tStat/pVal are computed but not stored in the metrics
        # dict -- kept for parity with the original behaviour.
        tStat, pVal = stats.ttest_ind(test_stores[col], control_stores[col], nan_policy='omit')
        variables_metrics_dict[col]["Test Mean"] = round(test_stores[col].mean(), 2)
        variables_metrics_dict[col]["Control Mean"] = round(control_stores[col].mean(), 2)
        variables_metrics_dict[col]["Test Std Dev"] = round(test_stores[col].std(), 2)
        variables_metrics_dict[col]["Control Std Dev"] = round(control_stores[col].std(), 2)
    # OLS of the target variable on the remaining features.
    # NOTE(review): pvalue_dict is currently unused downstream -- kept so the
    # fit (and any exception it raises on degenerate inputs) is preserved.
    xcols = [x for x in compare_variables if x != target_variable]
    X_train = allstores[xcols].values
    y_train = allstores[target_variable].values.ravel()
    X_train = sm.add_constant(X_train)
    model = sm.OLS(y_train, X_train)
    results = model.fit()
    summary_df = results.summary2().tables[1]
    summary_df.index = ['Constant'] + list(xcols)
    pvalue_dict = dict(zip(summary_df.index.values.tolist(), summary_df["P>|t|"].values.tolist()))
    # Calculate feature thresholds and the implied bounds around the test mean.
    feature_thresholds_dict = self._get_feature_thresholds(
        test_stores, control_stores, compare_variables)
    for key, value in feature_thresholds_dict.items():
        feature_bounds_dict[key] = [variables_metrics_dict[key]["Test Mean"] - value,
                                    variables_metrics_dict[key]["Test Mean"] + value]
    return variables_metrics_dict, feature_thresholds_dict, feature_bounds_dict, "Successfully calculated!!", True
def test_control_upload_util(self, filtered_rsv_stores_df, valid_sales_stores, stores_master_df, consideryearweeks, target_variable, applicability_criteria, store_features, test_control_stores):
    """Validate uploaded test/control pairs and score pairwise similarity.

    Checks that the uploaded test and control stores exist in the store
    master, aligns the surviving pairs, computes the weekly-sales
    correlation per pair, and attaches Gower-distance similarity measures
    (overall and CBU-category based) for each retained pair.

    Returns
    -------
    (control_test_pairs_df, message, success_flag)
    """
    # BUG FIX: build a new list instead of extending the caller's
    # `store_features` in place (the original mutated the argument).
    store_features = store_features + [target_variable + " Year 1",
                                       target_variable + " Year 2",
                                       target_variable + " Lift"]
    store_features_cbu_category = store_features.copy()
    store_features_cbu_category.extend(
        ["CBU_Category_" + target_variable + ' Year 1',
         "CBU_Category_" + target_variable + ' Year 2',
         "CBU_Category_" + target_variable + " Lift"])
    partner_id = self._storemstrmapping["partner_id"]
    # Valid test stores: uploaded test stores present in the store master.
    tv = test_control_stores[test_control_stores["Test_store_" + partner_id].isin(
        filtered_rsv_stores_df[partner_id])]
    if len(tv) == 0:
        return pd.DataFrame(), "Test stores uploaded are not present in Store Master database", False
    # Valid control stores.
    cv = test_control_stores[test_control_stores["Control_store_" + partner_id].isin(
        filtered_rsv_stores_df[partner_id])]
    if len(cv) == 0:
        return pd.DataFrame(), "Control stores uploaded are not present in Store Master database", False
    # Drop pairs whose control store did not pass validation.
    to_drop = tv[~tv["Control_store_" + partner_id].isin(cv["Control_store_" + partner_id])]
    # Final valid test-control pairs.
    filtered_testcontrol_stores = tv[~tv.isin(to_drop)].dropna()
    if filtered_testcontrol_stores.shape[0] == 0:
        message = "No test-control pairs satisfying the criteria to proceed further."
        return pd.DataFrame(), message, False
    # Stable pair ordering, used later to re-align control rows to test rows.
    filtered_testcontrol_stores['order'] = list(range(filtered_testcontrol_stores.shape[0]))
    filtered = valid_sales_stores[valid_sales_stores[self._tarvarmapping["week"]].isin(
        consideryearweeks[self._sales_object.get_summary_sales_weeks(applicability_criteria):])]
    pivoteddf = pd.pivot_table(filtered,
                               index=[partner_id, self._storemstrmapping["banner"]],
                               columns=self._tarvarmapping["week"],
                               values=target_variable).reset_index().rename_axis(None, axis=1)
    filtered_rsv_stores_df = filtered_rsv_stores_df.merge(
        pivoteddf, on=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]])
    # Fit a scaler on the numeric (non-object) features only.
    scaler = StandardScaler()
    nonscalingcolumns = [str_col for str_col in stores_master_df.columns
                         if stores_master_df[str_col].dtypes == 'object']
    nonscalingcolumns = list(set(nonscalingcolumns) - set([self._storemstrmapping['partner_id']]))
    scale_cols = [item for item in store_features if item not in nonscalingcolumns]
    if len(scale_cols) > 0:
        scaler = scaler.fit(filtered_rsv_stores_df[scale_cols])
    teststores = filtered_rsv_stores_df.merge(
        filtered_testcontrol_stores,
        left_on=partner_id,
        right_on='Test_store_' + partner_id,
        how='right')
    controlstores = filtered_rsv_stores_df.merge(
        filtered_testcontrol_stores,
        left_on=partner_id,
        right_on='Control_store_' + partner_id,
        how='right')
    # Re-align control rows to the test-row order so pairs match by position.
    controlstores = controlstores.set_index('order')
    controlstores = controlstores.reindex(index=teststores['order'])
    controlstores = controlstores.reset_index()
    # Weekly sales for all stores over the summary window.
    cols = ["order", self._storemstrmapping['partner_id'], self._storemstrmapping['banner'],
            'Test_store_' + partner_id, 'Control_store_' + partner_id]
    mergecols = [self._storemstrmapping['partner_id'], self._storemstrmapping['banner']]
    test_stores_wksales = teststores[cols].merge(pivoteddf, on=mergecols)
    control_stores_wksales = controlstores[cols].merge(pivoteddf, on=mergecols)
    control_stores_wksales = control_stores_wksales.set_index('order')
    control_stores_wksales = control_stores_wksales.reindex(index=test_stores_wksales['order'])
    control_stores_wksales = control_stores_wksales.reset_index()
    corrlist = []
    for j in range(controlstores.shape[0]):
        array1 = np.array(test_stores_wksales.loc[
            j, test_stores_wksales.columns[~test_stores_wksales.columns.isin(cols)]].astype(float))
        array2 = np.array(control_stores_wksales.loc[
            j, control_stores_wksales.columns[~control_stores_wksales.columns.isin(cols)]].astype(float))
        # BUG FIX: `pd.np` was removed in pandas 2.0 -- use numpy directly.
        corrlist.append(round(np.corrcoef(array1, array2)[0][1], 2))
    teststores["Correlation"] = corrlist
    # Population stores after excluding the test stores.
    pop_stores = filtered_rsv_stores_df[~filtered_rsv_stores_df[self._storemstrmapping['partner_id']].isin(
        teststores['Test_store_' + self._storemstrmapping['partner_id']])]
    # Similarity calculation (Gower distance on scaled features).
    refA = teststores.copy(deep=True)
    refB = pop_stores.copy(deep=True)
    useA = refA[store_features].copy(deep=True)
    useB = refB[store_features].copy(deep=True)
    if len(scale_cols) > 0:
        useA[scale_cols] = scaler.transform(useA[scale_cols])
        useB[scale_cols] = scaler.transform(useB[scale_cols])
    gowermatrix = gower_matrix(useA, useB)
    useA = refA[store_features_cbu_category].copy(deep=True)
    useB = refB[store_features_cbu_category].copy(deep=True)
    gowermatrix_cbu = gower_matrix(useA, useB)
    # For every test store, keep only the row of its uploaded control store,
    # annotated with the similarity measures.
    df_list = list()
    for i in range(refA.shape[0]):
        teststoreid = refA[self._storemstrmapping["partner_id"]][i]  # NOTE(review): unused
        gowerdistances = gowermatrix[i]
        gowerdistances_cbu = gowermatrix_cbu[i]
        dfA = refA.copy(deep=True)
        dfB = refB.copy(deep=True)
        dfB["Gower_Distance"] = list(gowerdistances)
        dfB["Gower_Distance(CBU)"] = list(gowerdistances_cbu)
        # Turn row i of the test frame into a one-row frame keyed by column name.
        filteredteststoredf = dfA.loc[i, :].reset_index().T.reset_index(drop=True)
        filteredteststoredf.columns = filteredteststoredf.iloc[0, :]
        filteredteststoredf = filteredteststoredf.drop(0)
        filteredteststoredf = filteredteststoredf.reset_index(drop=True)
        for col in self._metadata['test_planning']["teststores_columns"]:
            dfB["Test_store_" + col] = filteredteststoredf[col].values[0]
        dfB["Gower_Distance"] = dfB["Gower_Distance"].apply(lambda x: round(x, 2))
        dfB["Similarity_Measure"] = dfB["Gower_Distance"].apply(lambda x: 1 - x)
        dfB["Gower_Distance(CBU)"] = dfB["Gower_Distance(CBU)"].round(2)
        dfB["Similarity_Measure(CBU)"] = dfB["Gower_Distance(CBU)"].apply(lambda x: 1 - x)
        dfB["Similarity_Measure(CBU)"] = dfB["Similarity_Measure(CBU)"].round(2)
        dfB['Similarity_Difference'] = dfB['Similarity_Measure(CBU)'] - dfB['Similarity_Measure']
        dfB['Similarity_Difference'] = dfB['Similarity_Difference'].round(2)
        # BUG FIX: take an explicit copy before assigning 'Correlation' to
        # avoid pandas' SettingWithCopy on a filtered view.
        df_append = dfB[dfB[self._storemstrmapping['partner_id']].values ==
                        filteredteststoredf['Control_store_' + self._storemstrmapping['partner_id']].values].copy()
        df_append['Correlation'] = filteredteststoredf['Correlation'].values[0]
        df_list.append(df_append)
    control_test_pairs = pd.concat(df_list)
    control_test_pairs['Checked_Flag'] = 1
    return control_test_pairs, "Control stores computed Successfully", True
import sys
import traceback
from ds.ds_code_registry import *
from ds.config_data_registry import config
def get_tool_object(region, test_id):
    """
    About function
    --------------
    This function returns the appropriate FAST tool flow object for a market.
    In the market's config there must be a key "Constructors" holding a
    dictionary with a "Tool" entry naming the registered class.

    Example
    -------
    US:{"Constructors": {
            "Sales": "FAST_US_Sales",
            "Stores": "FAST_US_Stores",
            "Tool": "Fast_US_Tool"},
        ....
        }

    Parameters
    ----------
    region: region or market name by which DS developer has registered code to library
    test_id: test_id of the current test

    Returns
    -------
    tool flow object

    Raises
    ------
    Exception: when no 'Tool' constructor is registered for the region.
    """
    config_copy = config[region].copy() if region in config else config.copy()
    constructors = config_copy.get('Constructors', {})
    if 'Tool' in constructors:
        # Resolve the registered class name to a class imported into this
        # module (via the ds_code_registry star-import) and instantiate it.
        tool_class = getattr(sys.modules[__name__], constructors['Tool'])
        return tool_class(config=config_copy, region=region, test_id=test_id)
    # BUG FIX: traceback.format_exc() outside an except block yields
    # "NoneType: None"; report the actual problem instead.
    raise Exception("An error has occurred while creating the tool object: "
                    "no 'Tool' constructor is registered for region {!r}".format(region))
def get_tool_msrmt_object(region, test_id):
    """
    About function
    --------------
    This function returns the appropriate FAST tool measurement flow object
    for a market. In the market's config there must be a key "Constructors"
    holding a dictionary with a "ToolMeasurement" entry naming the
    registered class; the planning tool object for the same market is built
    and passed to it.

    Parameters
    ----------
    region: region or market name by which DS developer has registered code to library
    test_id: test_id of the current test

    Returns
    -------
    tool measurement flow object

    Raises
    ------
    Exception: when no 'ToolMeasurement' constructor is registered.
    """
    config_copy = config[region].copy() if region in config else config.copy()
    constructors = config_copy.get('Constructors', {})
    # BUG FIX: the original tested for the 'Tool' key but then read
    # 'ToolMeasurement', raising KeyError whenever only 'Tool' was present.
    if 'ToolMeasurement' in constructors:
        msrmt_class = getattr(sys.modules[__name__], constructors['ToolMeasurement'])
        return msrmt_class(fast_tool_plan=get_tool_object(region, test_id),
                           config=config_copy,
                           region=region,
                           test_id=test_id)
    # BUG FIX: traceback.format_exc() outside an except block yields
    # "NoneType: None"; report the actual problem instead.
    raise Exception("An error has occurred while creating the tool measurement object: "
                    "no 'ToolMeasurement' constructor is registered for region {!r}".format(region))
from typing import Tuple
import pandas as pd
from ds.library.ds_code_test_plan import FastTool
from ds.library.ds_code_test_measurement import FastToolMeasurement
class FastToolUS(FastTool):
"""
A class to represent features of FastToolUS.
Attributes
----------
config : configuration present in config_data either for a region or overall
region: key present in config
sales_object : Object of sales class
store_object : Object of store class
Methods
-------
data_extract(): to calculate required sales/volume and get the store details in population
calculate_rsv(): calculate the RSV/POS/Sales/Volume value required
and estimate number of stores in population
get_breakeven_lift(): estimates the breakeven lift% value
get_cost(): estimates the cost of implementing RTM activity if breakeven lift is known
"""
def __init__(self, config, region, test_id):
    """Initialise the US tool flow by delegating to the generic base class."""
    super().__init__(config=config, region=region, test_id=test_id)
def calculate_rsv_estimate(self, target_variable, timeframestart, timeframeend,
                           storelist, applicability_criteria, uploaded_file_df=None):
    """Estimate the RSV/POS value and store count for the US market.

    Resolves the channel-specific sales table plus the store and product
    tables into ``applicability_criteria`` and delegates to the base class.
    Returns ``(-1, -1, message, False)`` when no channel is supplied.
    """
    if "channel" not in applicability_criteria:
        return -1, -1, "Pass channel in applicability criteria", False
    tables = self._config["tables"]
    applicability_criteria['sales_table'] = tables["weekly_mstr"][applicability_criteria['channel']]
    applicability_criteria['store_table'] = tables["store_mstr"]
    applicability_criteria['product_table'] = tables["weekly_data_table"]
    return super().calculate_rsv_estimate(
        target_variable=target_variable,
        timeframestart=timeframestart,
        timeframeend=timeframeend,
        storelist=storelist,
        applicability_criteria=applicability_criteria,
        uploaded_file_df=uploaded_file_df)
def set_sales_weeks(self, applicability_criteria):
    """Return the channel-specific number of sales weeks for the US market."""
    weeks_by_channel = self._config['metadata']['test_configuration']['sales_weeks']
    return weeks_by_channel[applicability_criteria['channel']]
def set_lift_sales_weeks(self, applicability_criteria):
    """Return the channel-specific sales-week count used for lift calculation (US market)."""
    lift_weeks_by_channel = self._config['metadata']['test_configuration']['sales_lifts_sales_weeks']
    return lift_weeks_by_channel[applicability_criteria['channel']]
def set_summary_sales_weeks(self, applicability_criteria):
    """Return the channel-specific sales-week count used for summary calculation (US market)."""
    summary_weeks_by_channel = self._config['metadata']['test_planning']['summary_sales_weeks']
    return summary_weeks_by_channel[applicability_criteria['channel']]
def set_test_vs_pop_comp(self, applicability_criteria):
    """Return a copy of the team/channel-specific attributes used to compare
    test stores against population stores (US market)."""
    compare_cfg = self._config['metadata']["test_planning"]["test_vs_population_compare"]
    team_cfg = compare_cfg[applicability_criteria['team']]
    return team_cfg[applicability_criteria['channel']].copy()
def set_test_vs_pop_comp_sum(self, applicability_criteria):
    """Return a copy of the channel-specific attributes used to compare
    test stores against population stores on the summary page (US market)."""
    summary_cfg = self._config['metadata']['test_planning']['test_vs_population_compare_summary']
    return summary_cfg[applicability_criteria['channel']].copy()
def set_test_vs_cntrl_comp(self, applicability_criteria):
    """Return a copy of the team/channel-specific attributes used to compare
    test stores against control stores (US market)."""
    compare_cfg = self._config['metadata']["test_planning"]["test_vs_control_compare"]
    team_cfg = compare_cfg[applicability_criteria['team']]
    return team_cfg[applicability_criteria['channel']].copy()
def set_test_vs_cntrl_comp_sum(self, applicability_criteria):
    """Return a copy of the channel-specific attributes used to compare
    test stores against control stores on the summary page (US market)."""
    summary_cfg = self._config['metadata']["test_planning"]["test_vs_control_compare_summary"]
    return summary_cfg[applicability_criteria['channel']].copy()
def get_test_parameter(self, confidence_level, margin_of_error, num_of_teststores,
                       target_variable, test_type, applicability_criteria,
                       uploaded_file_df=None) -> Tuple[float, str, bool]:
    """Compute the test parameter for the US market.

    Resolves channel-specific tables and week settings into
    ``applicability_criteria`` before delegating to the base class.
    """
    if "channel" not in applicability_criteria:
        return 0, "Pass channel", False
    tables = self._config["tables"]
    applicability_criteria.update({
        'sales_table': tables["weekly_mstr"][applicability_criteria['channel']],
        'store_table': tables["store_mstr"],
        'product_table': tables["weekly_data_table"],
        'sales_weeks': self.set_sales_weeks(applicability_criteria),
        'sales_lifts_sales_weeks': self.set_lift_sales_weeks(applicability_criteria),
    })
    return super().get_test_parameter(
        confidence_level=confidence_level,
        margin_of_error=margin_of_error,
        num_of_teststores=num_of_teststores,
        target_variable=target_variable,
        test_type=test_type,
        applicability_criteria=applicability_criteria,
        uploaded_file_df=uploaded_file_df)
def power_marginoferror_calculation(self, num_of_teststores, target_variable,
                                    test_type, applicability_criteria,
                                    uploaded_file_df=None) -> Tuple[float, float, float, str, bool]:
    """Compute power and margin-of-error for a given test-store count (US market).

    Resolves channel-specific tables and week settings before delegating
    to the base class.
    """
    if "channel" not in applicability_criteria:
        return 0, 0, 0, "Pass channel", False
    tables = self._config["tables"]
    applicability_criteria.update({
        'sales_table': tables["weekly_mstr"][applicability_criteria['channel']],
        'store_table': tables["store_mstr"],
        'product_table': tables["weekly_data_table"],
        'sales_weeks': self.set_sales_weeks(applicability_criteria),
        'sales_lifts_sales_weeks': self.set_lift_sales_weeks(applicability_criteria),
    })
    return super().power_marginoferror_calculation(
        num_of_teststores=num_of_teststores,
        target_variable=target_variable,
        test_type=test_type,
        applicability_criteria=applicability_criteria,
        uploaded_file_df=uploaded_file_df)
def teststores_sample_size(self, margin_of_error, target_variable, test_type,
                           applicability_criteria, uploaded_file_df=None) -> Tuple[float, float, str, bool]:
    """Compute the required number of test stores for the US market.

    Resolves channel-specific tables and week settings into
    ``applicability_criteria`` before delegating to the base class.

    Returns
    -------
    (number_test_stores_req, power_stores_df, message, success_flag)
    """
    if "channel" not in applicability_criteria:
        # BUG FIX: the original returned a 5-tuple here while the success
        # path (and the return annotation) use 4 values.
        return 0, 0, "Pass channel", False
    applicability_criteria['sales_table'] = self._config["tables"]["weekly_mstr"][applicability_criteria['channel']]
    applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
    applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
    applicability_criteria['sales_weeks'] = self.set_sales_weeks(applicability_criteria)
    applicability_criteria['sales_lifts_sales_weeks'] = self.set_lift_sales_weeks(applicability_criteria)
    number_test_stores_req, power_stores_df, message, success_flag = super().teststores_sample_size(
        margin_of_error=margin_of_error,
        target_variable=target_variable,
        test_type=test_type,
        applicability_criteria=applicability_criteria,
        uploaded_file_df=uploaded_file_df)
    return number_test_stores_req, power_stores_df, message, success_flag
def identify_test_stores(self, num_of_teststores, target_variable, test_type,
                         applicability_criteria, stratification_variables, uploaded_file_df=None) \
        -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame,
                 pd.DataFrame, pd.DataFrame, list, str, bool]:
    """Select test stores for the US market.

    Validates that channel and team are supplied, resolves the
    channel/team-specific tables, week settings and comparison attributes
    into ``applicability_criteria``, then delegates to the base class.
    """
    empty_result = (pd.DataFrame(), pd.DataFrame(), pd.DataFrame(),
                    pd.DataFrame(), pd.DataFrame(), [])
    if "channel" not in applicability_criteria:
        return empty_result + ("Pass channel in applicability criteria", False)
    if "team" not in applicability_criteria:
        return empty_result + ("Pass team in applicability criteria", False)
    tables = self._config["tables"]
    applicability_criteria.update({
        'sales_table': tables["weekly_mstr"][applicability_criteria['channel']],
        'store_table': tables["store_mstr"],
        'product_table': tables["weekly_data_table"],
        'sales_weeks': self.set_sales_weeks(applicability_criteria),
        'sales_lifts_sales_weeks': self.set_lift_sales_weeks(applicability_criteria),
        'test_vs_population_compare': self.set_test_vs_pop_comp(applicability_criteria),
    })
    return super().identify_test_stores(
        num_of_teststores=num_of_teststores,
        target_variable=target_variable,
        test_type=test_type,
        applicability_criteria=applicability_criteria,
        stratification_variables=stratification_variables,
        uploaded_file_df=uploaded_file_df)
def test_population_mapping(self, teststores, target_variable, test_type,
                            applicability_criteria, uploaded_file_df=None) -> Tuple[pd.DataFrame, str, bool]:
    """Map the chosen test stores to the comparable population (US market).

    Resolves channel/team-specific tables, week settings and comparison
    attributes before delegating to the base class.
    """
    if "channel" not in applicability_criteria:
        return pd.DataFrame(), "Pass channel in applicability criteria", False
    if "team" not in applicability_criteria:
        return pd.DataFrame(), "Pass team in applicability criteria", False
    tables = self._config["tables"]
    applicability_criteria.update({
        'sales_table': tables["weekly_mstr"][applicability_criteria['channel']],
        'store_table': tables["store_mstr"],
        'product_table': tables["weekly_data_table"],
        'sales_weeks': self.set_sales_weeks(applicability_criteria),
        'sales_lifts_sales_weeks': self.set_lift_sales_weeks(applicability_criteria),
        'summary_sales_weeks': self.set_summary_sales_weeks(applicability_criteria),
        'test_vs_population_compare': self.set_test_vs_pop_comp(applicability_criteria),
    })
    return super().test_population_mapping(
        teststores=teststores,
        target_variable=target_variable,
        test_type=test_type,
        applicability_criteria=applicability_criteria,
        uploaded_file_df=uploaded_file_df)
def test_store_summary(self, teststores, target_variable, test_type,
                       applicability_criteria, uploaded_file_df=None) -> Tuple[dict, dict, dict, str, bool]:
    """Build the test-store summary for the US market.

    Resolves channel-specific tables, week settings and the summary
    comparison attributes before delegating to the base class.
    """
    if "channel" not in applicability_criteria:
        return {}, {}, {}, "Pass channel in applicability criteria", False
    if "team" not in applicability_criteria:
        return {}, {}, {}, "Pass team in applicability criteria", False
    tables = self._config["tables"]
    applicability_criteria.update({
        'sales_table': tables["weekly_mstr"][applicability_criteria['channel']],
        'store_table': tables["store_mstr"],
        'product_table': tables["weekly_data_table"],
        'sales_weeks': self.set_sales_weeks(applicability_criteria),
        'sales_lifts_sales_weeks': self.set_lift_sales_weeks(applicability_criteria),
        'summary_sales_weeks': self.set_summary_sales_weeks(applicability_criteria),
        'test_vs_population_compare_summary': self.set_test_vs_pop_comp_sum(applicability_criteria),
    })
    return super().test_store_summary(
        teststores=teststores,
        target_variable=target_variable,
        test_type=test_type,
        applicability_criteria=applicability_criteria,
        uploaded_file_df=uploaded_file_df)
def test_store_comparison_summary(self, test_stores, target_variable, test_type,
                                  applicability_criteria, uploaded_file_df=None) -> Tuple[pd.DataFrame, dict, str, bool]:
    """Build the test-store comparison summary for the US market.

    Resolves the channel-specific tables before delegating to the base class.
    """
    if "channel" not in applicability_criteria:
        return pd.DataFrame(), {}, "Pass channel in applicability criteria", False
    if "team" not in applicability_criteria:
        return pd.DataFrame(), {}, "Pass team in applicability criteria", False
    tables = self._config["tables"]
    applicability_criteria['sales_table'] = tables["weekly_mstr"][applicability_criteria['channel']]
    applicability_criteria['store_table'] = tables["store_mstr"]
    applicability_criteria['product_table'] = tables["weekly_data_table"]
    return super().test_store_comparison_summary(
        test_stores=test_stores,
        target_variable=target_variable,
        test_type=test_type,
        applicability_criteria=applicability_criteria,
        uploaded_file_df=uploaded_file_df)
def test_stores_format_check(self, target_variable, num_of_teststores, test_type, applicability_criteria, teststores_data, uploaded_file_df=None) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, int, bool]:
    """Validate the format of uploaded test stores for the US market.

    Resolves channel/team-specific tables, week settings and comparison
    attributes before delegating to the base class.

    Returns
    -------
    (teststores, annualrsvlifts, valid_sales_stores, consideryearweeks,
     message, num_of_teststores, success_flag)
    """
    # BUG FIX: the original error paths returned eight values (five
    # DataFrames + list + message + flag) while the success path and the
    # return annotation use seven; return a shape-consistent tuple.
    if "channel" not in applicability_criteria:
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), [], \
            "Pass channel in applicability criteria", num_of_teststores, False
    if "team" not in applicability_criteria:
        return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), [], \
            "Pass team in applicability criteria", num_of_teststores, False
    applicability_criteria['sales_table'] = self._config["tables"]["weekly_mstr"][applicability_criteria['channel']]
    applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
    applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
    applicability_criteria['sales_weeks'] = self.set_sales_weeks(applicability_criteria)
    applicability_criteria['sales_lifts_sales_weeks'] = self.set_lift_sales_weeks(applicability_criteria)
    applicability_criteria['test_vs_population_compare'] = self.set_test_vs_pop_comp(applicability_criteria)
    teststores, annualrsvlifts, valid_sales_stores, consideryearweeks, message, \
        num_of_teststores, success_flag = super().test_stores_format_check(
            target_variable, num_of_teststores, test_type, applicability_criteria,
            teststores_data, uploaded_file_df)
    return teststores, annualrsvlifts, valid_sales_stores, consideryearweeks, message, \
        num_of_teststores, success_flag
def manual_teststores_selection(self, test_type, target_variable, applicability_criteria, uploaded_file_df=None):
    """Handle a manually selected set of test stores for the US market.

    Resolves channel/team-specific tables, week settings and comparison
    attributes before delegating to the base class.

    Returns
    -------
    (teststores, valid_sales_stores, consideryearweeks, num_of_teststores,
     margin_of_error, confidence_interval, power_of_test, message,
     success_flag)
    """
    # BUG FIX: the original error paths returned eight values (five
    # DataFrames + list + message + flag) while the success path returns
    # nine; return a shape-consistent tuple instead.
    if "channel" not in applicability_criteria:
        return pd.DataFrame(), pd.DataFrame(), [], 0, 0, 0, 0, \
            "Pass channel in applicability criteria", False
    if "team" not in applicability_criteria:
        return pd.DataFrame(), pd.DataFrame(), [], 0, 0, 0, 0, \
            "Pass team in applicability criteria", False
    applicability_criteria['sales_table'] = self._config["tables"]["weekly_mstr"][applicability_criteria['channel']]
    applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
    applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
    applicability_criteria['sales_weeks'] = self.set_sales_weeks(applicability_criteria)
    applicability_criteria['sales_lifts_sales_weeks'] = self.set_lift_sales_weeks(applicability_criteria)
    applicability_criteria['test_vs_population_compare'] = self.set_test_vs_pop_comp(applicability_criteria)
    teststores, valid_sales_stores, consideryearweeks, num_of_teststores, margin_of_error, \
        confidence_interval, power_of_test, message, success_flag \
        = super().manual_teststores_selection(test_type, target_variable, applicability_criteria, uploaded_file_df)
    return teststores, valid_sales_stores, consideryearweeks, num_of_teststores, margin_of_error, \
        confidence_interval, power_of_test, message, success_flag
"""CONTROL STORES"""
def identify_control_stores(self, teststores, target_variable, applicability_criteria,
                            test_type, one_to_one=True, business_categories=[],
                            reqcontrolstores=1, control_store_pool=None,
                            len_control_pool=None, uploaded_file_df=None) \
        -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, str, bool]:
    """Identify matched control stores for the US market.

    Resolves channel/team-specific tables, week settings and the
    test-vs-control comparison attributes, then delegates to the base
    class with a copy of the criteria.
    """
    empty_result = (pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame())
    if "channel" not in applicability_criteria:
        return empty_result + ("Pass channel in applicability criteria", False)
    if "team" not in applicability_criteria:
        return empty_result + ("Pass team in applicability criteria", False)
    tables = self._config["tables"]
    applicability_criteria.update({
        'sales_table': tables["weekly_mstr"][applicability_criteria['channel']],
        'store_table': tables["store_mstr"],
        'product_table': tables["weekly_data_table"],
        'sales_weeks': self.set_sales_weeks(applicability_criteria),
        'sales_lifts_sales_weeks': self.set_lift_sales_weeks(applicability_criteria),
        'test_vs_control_compare': self.set_test_vs_cntrl_comp(applicability_criteria),
    })
    return super().identify_control_stores(
        teststores=teststores,
        target_variable=target_variable,
        applicability_criteria=applicability_criteria.copy(),
        test_type=test_type, one_to_one=one_to_one,
        business_categories=business_categories,
        reqcontrolstores=reqcontrolstores,
        control_store_pool=control_store_pool,
        len_control_pool=len_control_pool,
        uploaded_file_df=uploaded_file_df)
def average_weekly_target_similarity_correlation(self, test_control_data,
                                                 target_variable,
                                                 applicability_criteria,
                                                 business_categories=[]) \
        -> Tuple[dict, pd.DataFrame, str, bool]:
    """Compute average weekly target, similarity and correlation summaries
    for test/control pairs (US market).

    Resolves the channel-specific tables before delegating to the base class.
    """
    if "channel" not in applicability_criteria:
        return {}, pd.DataFrame(), "Pass channel in applicability criteria", False
    if "team" not in applicability_criteria:
        return {}, pd.DataFrame(), "Pass team in applicability criteria", False
    tables = self._config["tables"]
    applicability_criteria['sales_table'] = tables["weekly_mstr"][applicability_criteria['channel']]
    applicability_criteria['store_table'] = tables["store_mstr"]
    applicability_criteria['product_table'] = tables["weekly_data_table"]
    return super().average_weekly_target_similarity_correlation(
        test_control_data=test_control_data,
        target_variable=target_variable,
        applicability_criteria=applicability_criteria,
        business_categories=business_categories)
def control_store_summary(self, test_type, test_control_mapping_stores, business_categories,
                          target_variable, applicability_criteria, uploaded_file_df=None) \
        -> Tuple[dict, dict, dict, str, bool]:
    """Build the control-store summary for the US market.

    Resolves channel-specific tables, week settings and the summary
    comparison attributes, then delegates to the base class with a copy
    of the criteria.
    """
    if "channel" not in applicability_criteria:
        return {}, {}, {}, "Pass channel in applicability criteria", False
    if "team" not in applicability_criteria:
        return {}, {}, {}, "Pass team in applicability criteria", False
    tables = self._config["tables"]
    applicability_criteria.update({
        'sales_table': tables["weekly_mstr"][applicability_criteria['channel']],
        'store_table': tables["store_mstr"],
        'product_table': tables["weekly_data_table"],
        'sales_weeks': self.set_sales_weeks(applicability_criteria),
        'summary_sales_weeks': self.set_summary_sales_weeks(applicability_criteria),
        'test_vs_control_compare_summary': self.set_test_vs_cntrl_comp_sum(applicability_criteria),
    })
    return super().control_store_summary(
        test_type=test_type,
        test_control_mapping_stores=test_control_mapping_stores,
        business_categories=business_categories,
        target_variable=target_variable,
        applicability_criteria=applicability_criteria.copy(),
        uploaded_file_df=uploaded_file_df)
def manual_upload_control_store_pool(self,control_store_pool_data,
teststores, target_variable,
applicability_criteria, test_type,
business_categories=[],
reqcontrolstores=1,
one_to_one=True, uploaded_file_df=None)\
-> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, str, bool]:
if "channel" not in applicability_criteria:
return pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),\
"Pass channel in applicability criteria", False
if "team" not in applicability_criteria:
return pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),\
"Pass team in applicability criteria", False
applicability_criteria['sales_table'] = self._config["tables"]\
["weekly_mstr"]\
[applicability_criteria['channel']]
applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
applicability_criteria['sales_weeks'] = self\
.set_sales_weeks(applicability_criteria)
applicability_criteria['test_vs_control_compare'] = self\
.set_test_vs_cntrl_comp(applicability_criteria)
return super().manual_upload_control_store_pool(control_store_pool_data = control_store_pool_data,
teststores = teststores,
test_type= test_type,
target_variable = target_variable,
applicability_criteria = applicability_criteria,
business_categories = business_categories,
reqcontrolstores = reqcontrolstores,
one_to_one = one_to_one,
uploaded_file_df =uploaded_file_df)
def recompute_control_stores(self, test_control_stores, target_variable, business_categories,\
include_cbu_features,reqcontrolstores, applicability_criteria,\
test_type,uploaded_file_df=None)\
-> Tuple[pd.DataFrame,pd.DataFrame,pd.DataFrame,pd.DataFrame,str,bool]:
if "channel" not in applicability_criteria:
return pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),\
"Pass channel in applicability criteria", False
if "team" not in applicability_criteria:
return pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),\
"Pass team in applicability criteria", False
applicability_criteria['sales_table'] = self._config["tables"]\
["weekly_mstr"]\
[applicability_criteria['channel']]
applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
applicability_criteria['sales_weeks'] = self\
.set_sales_weeks(applicability_criteria)
applicability_criteria['sales_lifts_sales_weeks'] = self\
.set_lift_sales_weeks(applicability_criteria)
applicability_criteria['test_vs_control_compare'] = self\
.set_test_vs_cntrl_comp(applicability_criteria)
return super().recompute_control_stores(test_control_stores = test_control_stores,
target_variable = target_variable,
business_categories = business_categories,
include_cbu_features = include_cbu_features,
reqcontrolstores = reqcontrolstores,
test_type = test_type,
applicability_criteria = applicability_criteria,
uploaded_file_df = uploaded_file_df)
    def manual_teststore_controlstore_upload(self, target_variable, test_control_store_data, test_type,
                applicability_criteria, uploaded_file_df=None, business_categories = None,)-> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, str, bool]:
        """Validate user-uploaded test/control store pairs for the US region.

        Enriches ``applicability_criteria`` with the US table names and week
        windows, delegates to the base implementation, and on success attaches
        TDLinx_No / StoreNumber identifiers to both sides of each pair.

        Returns
        -------
        (control_test_pairs, stores_master_df, annualrsvlifts, message, success_flag)
        """
        # Both keys are required to resolve the US tables below.
        if "channel" not in applicability_criteria:
            return pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),\
                "Pass channel in applicability criteria", False
        if "team" not in applicability_criteria:
            return pd.DataFrame(),pd.DataFrame(),pd.DataFrame(),\
                "Pass team in applicability criteria", False
        # Channel-specific weekly sales table plus the shared master tables.
        applicability_criteria['sales_table'] = self._config["tables"]\
                                                        ["weekly_mstr"]\
                                                        [applicability_criteria['channel']]
        applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
        applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
        # Week windows / comparison columns derived from the criteria itself.
        applicability_criteria['sales_weeks'] = self\
            .set_sales_weeks(applicability_criteria)
        applicability_criteria['sales_lifts_sales_weeks'] = self\
            .set_lift_sales_weeks(applicability_criteria)
        applicability_criteria['summary_sales_weeks'] = self.set_summary_sales_weeks(applicability_criteria)
        applicability_criteria['test_vs_control_compare'] = self\
            .set_test_vs_cntrl_comp(applicability_criteria)
        control_test_pairs, stores_master_df, annualrsvlifts, message, success_flag = super().manual_teststore_controlstore_upload(target_variable = target_variable,
            test_control_store_data = test_control_store_data,
            test_type = test_type,
            applicability_criteria = applicability_criteria,
            uploaded_file_df=uploaded_file_df, business_categories = business_categories)
        if success_flag is True:
            # Attach test-store identifiers: master columns are renamed with a
            # "Test_store_" prefix and joined on the prefixed partner-id column.
            control_test_pairs = control_test_pairs.merge(stores_master_df[[self._storemstrmapping['partner_id'], 'TDLinx_No', 'StoreNumber']].rename(columns={self._storemstrmapping['partner_id']:'Test_store_'+self._storemstrmapping['partner_id'], 'StoreNumber':'Test_store_StoreNumber', 'TDLinx_No':'Test_store_TDLinx_No'}), on='Test_store_'+self._storemstrmapping['partner_id'])
            # Attach control-store identifiers.
            # NOTE(review): here the partner-id column is NOT renamed, so the join
            # key is the bare partner_id column — confirm control_test_pairs carries
            # the control store under that unprefixed column name.
            control_test_pairs = control_test_pairs.merge(stores_master_df[[self._storemstrmapping['partner_id'], 'TDLinx_No', 'StoreNumber']].rename(columns={'StoreNumber':'Control_store_StoreNumber', 'TDLinx_No':'Control_store_TDLinx_No'}), on=self._storemstrmapping['partner_id'])
        return control_test_pairs, stores_master_df, annualrsvlifts, message, success_flag
class FastToolMsrmtUS(FastToolMeasurement):
    def __init__(self, fast_tool_plan, config, region, test_id):
        """Initialize the US measurement flavor; all arguments are forwarded unchanged to the base class."""
        super().__init__(fast_tool_plan = fast_tool_plan, config = config, region = region, test_id = test_id)
def get_test_vs_control_linegraph(self, teststores, target_variable, test_type, applicability_criteria, weeks_before=None, weeks_after=None,control_stores_sales_method='Approach1', business_categories=None) -> Tuple[dict, str, bool]:
if "channel" not in applicability_criteria:
return dict(), "Pass channel in applicability criteria", False
if "businessType" not in applicability_criteria:
return dict(), "Pass channel in applicability criteria", False
if "team" not in applicability_criteria:
return dict(), "Pass team in applicability criteria", False
applicability_criteria['sales_table'] = self._config["tables"]\
["weekly_mstr"]\
[applicability_criteria['channel']]
applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
return super().get_test_vs_control_linegraph(teststores = teststores,
target_variable = target_variable,
test_type = test_type,
applicability_criteria = applicability_criteria,
control_stores_sales_method = control_stores_sales_method,
business_categories = business_categories,
weeks_after=weeks_after,
weeks_before=weeks_before)
def _get_cost(self, test_master_table, population_store_weekly_sales=None, target_variable=None)->float:
rsv_estimate = population_store_weekly_sales[target_variable].sum()
break_even_lift = float(test_master_table['break_even_lift'].values[0])
return self._fast_tool_plan.get_cost(rsv_estimate=rsv_estimate, breakevenliftpercentage=break_even_lift)
def get_target_variable_analysis_results(self, teststores, target_variable, test_type, applicability_criteria,control_stores_sales_method='Approach1',
outlier_column=None, business_categories=None, uploaded_file_df=None)-> Tuple[float, str, pd.DataFrame, pd.DataFrame, dict, dict, str, bool]:
if "channel" not in applicability_criteria:
return 0.0, "", pd.DataFrame(), pd.DataFrame(), dict(), dict(), "Pass channel in applicability criteria", False
if "businessType" not in applicability_criteria:
return 0.0, "", pd.DataFrame(), pd.DataFrame(), dict(), dict(), "Pass channel in applicability criteria", False
if "team" not in applicability_criteria:
return 0.0, "", pd.DataFrame(), pd.DataFrame(), dict(), dict(), "Pass team in applicability criteria", False
applicability_criteria['sales_table'] = self._config["tables"]\
["weekly_mstr"]\
[applicability_criteria['channel']]
applicability_criteria['store_table'] = self._config["tables"]["store_mstr"]
applicability_criteria['product_table'] = self._config["tables"]["weekly_data_table"]
return super().get_target_variable_analysis_results(teststores = teststores,
target_variable = target_variable,
test_type = test_type,
applicability_criteria = applicability_criteria,
control_stores_sales_method=control_stores_sales_method,
outlier_column=outlier_column,
business_categories=business_categories,
uploaded_file_df=uploaded_file_df) | /rtm_fast-0.0.2-py3-none-any.whl/rtm_fast/ds/regions/US/ds_code.py | 0.847337 | 0.374733 | ds_code.py | pypi |
from typing import Tuple
import pandas as pd
from ds.library.sql.sales_master import Sales
from ds.library.sql.stores_master import Stores
class FastSalesUS(Sales):
    """US-specific weekly sales access layer.

    Overrides the generic :class:`Sales` queries with the US schema
    (POS/Volume measures, StoreClassification-based product join) and applies
    the Mars-only filter for the "MW" business type.

    Attributes
    ----------
    _cbu_sales : cache of the last product-filtered sales query result
    _overall : cache of the last overall (unfiltered) sales query result
    """
    def __init__(self, config, test_id) -> None:
        super().__init__(config=config, test_id=test_id)
        # Result caches for the two query helpers below.
        self._cbu_sales = pd.DataFrame()
        self._overall = pd.DataFrame()
    def get_cbu_sales(self, stores, applicability_criteria, weeks):
        """
        About function
        --------------
        This function interacts with the weekly sales table and calculates the
        total sales of the selected products at each store in the given weeks.
        Parameters
        ----------
        stores: list of store identifier values for which sales need to be calculated
        applicability_criteria: key-value pairs of the filters (product and store)
        selected in the tool
        weeks: list of week values in which sales need to be calculated
        Return values
        -------
        dataframe with weekly sales of the stores
        """
        if "businessType" not in applicability_criteria:
            raise Exception("businessType not passed to applicability criteria")
        # Empty-string sentinels keep every SQL "IN (...)" tuple non-empty.
        # NOTE(review): these appends mutate the caller's lists in place.
        applicability_criteria['consumption_value'].append("")
        applicability_criteria['seasonal_value'].append("")
        applicability_criteria['category_value'].append("")
        applicability_criteria['brands_value'].append("")
        applicability_criteria['pack_value'].append("")
        applicability_criteria['store_value'] = tuple(stores)
        applicability_criteria['week_value'] = tuple(weeks)
        applicability_criteria['category_value'] = tuple(applicability_criteria['category_value'])
        applicability_criteria['brands_value'] = tuple(applicability_criteria['brands_value'])
        applicability_criteria['pack_value'] = tuple(applicability_criteria['pack_value'])
        applicability_criteria['consumption_value'] = tuple(
            applicability_criteria['consumption_value']
        )
        applicability_criteria['seasonal_value'] = tuple(applicability_criteria['seasonal_value'])
        # The MW business type additionally restricts the join to Mars products.
        if applicability_criteria['businessType'] == "MW":
            query = """SELECT Week, StoreId,ROUND(SUM(POS),2) as POS,ROUND(SUM(Volume),2) as Volume,
                    '{channel}' as StoreClassification
                    FROM {sales_table} as sales JOIN {product_table} as products ON products.UPC=sales.UPC
                    AND products.StoreClassification='{channel}'
                    AND Category IN {category_value}
                    AND brands IN {brands_value}
                    AND PackType IN {pack_value}
                    AND Consumption IN {consumption_value}
                    AND seasonalPackaging IN {seasonal_value}
                    AND IsMars = 1 AND Week IN {week_value}
                    AND StoreId IN {store_value}
                    GROUP BY Week, StoreId"""
        else:
            query = """SELECT Week, StoreId,ROUND(SUM(POS),2) as POS,ROUND(SUM(Volume),2) as Volume,
                    '{channel}' as StoreClassification
                    FROM {sales_table} as sales JOIN {product_table} as products ON products.UPC=sales.UPC
                    AND products.StoreClassification='{channel}'
                    AND Category IN {category_value}
                    AND brands IN {brands_value}
                    AND PackType IN {pack_value}
                    AND Consumption IN {consumption_value}
                    AND seasonalPackaging IN {seasonal_value}
                    AND Week IN {week_value}
                    AND StoreId IN {store_value}
                    GROUP BY Week, StoreId"""
        self._cbu_sales = self.execute_sql_query(query, applicability_criteria)
        return self._cbu_sales
    def get_overall_sales(self, stores, weeks, applicability_criteria=None):
        """
        About function
        --------------
        This function interacts with the weekly sales table and calculates the
        overall sales (ignores product attributes) at each store in the given weeks.
        Parameters
        ----------
        stores: list of store identifier values for which sales need to be calculated
        applicability_criteria: key-value pairs of the filters (product and store)
        selected in the tool
        weeks: list of week values in which sales need to be calculated
        Return values
        -------
        dataframe with weekly sales of the stores
        """
        if "businessType" not in applicability_criteria:
            raise Exception("businessType not passed to applicability criteria")
        applicability_criteria['store_value'] = tuple(stores)
        applicability_criteria['week_value'] = tuple(weeks)
        # MW joins the product table to enforce the Mars-only filter; other
        # business types read the sales table directly.
        if applicability_criteria['businessType'] == "MW":
            query = """SELECT Week, StoreId, ROUND(SUM(POS),2) as POS,
            ROUND(SUM(Volume),2) as Volume,'{channel}' as StoreClassification
            FROM {sales_table} as sales JOIN {product_table} as products ON products.UPC=sales.UPC
            AND products.StoreClassification='{channel}'
            AND IsMars = 1 AND Week IN {week_value}
            AND StoreId IN {store_value}
            GROUP BY Week, StoreId"""
        else:
            query = """SELECT Week, StoreId, ROUND(SUM(POS), 2) as POS,
            ROUND(SUM(Volume), 2) as Volume, '{channel}' as StoreClassification
            FROM {sales_table} as sales
            WHERE Week IN {week_value} AND StoreId IN {store_value}
            GROUP BY Week, StoreId"""
        self._overall = self.execute_sql_query(query, applicability_criteria)
        return self._overall
    def get_total_weekly_target_data(self, test_master_df, stores_list, sales_week,target_variable,\
            applicability_criteria, test_type, consideryearweeks=None) \
            -> Tuple[pd.DataFrame, list, str, bool]:
        """
        About function
        --------------
        This function gets the overall sales in the "sales week" time period or
        weeks to be considered.
        Parameters
        ----------
        test_master_df: master record of the test,
        stores_list: list of stores for which lift is calculated,
        sales_week: number of weeks for which the sales are calculated — see
        NOTE below, the passed value is ignored,
        applicability_criteria: the product and store attributes selected in
        the tool, in dictionary format,
        test_type: type of test from the tool selection (Activity, RTM impact, ...),
        consideryearweeks: optional list of weeks, to skip the week calculation
        Return values
        -------
        overall sales values dataframe,
        list of weeks on which sales is calculated,
        message,
        success flag
        """
        # NOTE(review): the incoming ``sales_week`` argument is unconditionally
        # overridden by the per-channel configured value.
        sales_week=self._config['metadata']['test_configuration']\
                                            ['sales_weeks']\
                                            [applicability_criteria['channel']]
        weekly_target_variables_file, consideryearweeks,\
            message, success_flag = super().get_total_weekly_target_data(
                target_variable=target_variable,
                test_master_df = test_master_df,
                stores_list = stores_list,
                sales_week = sales_week,
                applicability_criteria = applicability_criteria,
                test_type = test_type,
                consideryearweeks = consideryearweeks)
        return weekly_target_variables_file, consideryearweeks, message, success_flag
    def get_max_week_config_master(self, applicability_criteria=None):
        """
        About function
        --------------
        This function reads the config master table and returns the value of
        the 'max_date' row's ``week`` column.
        Parameters
        ----------
        applicability_criteria: key-value pairs of the filters selected in the
        tool; only the "channel" key is read here
        Return values
        -------
        max date maintained in the table
        """
        config_master = self.execute_sql_query(query="SELECT * FROM {table_name}",
                            data={"table_name": self._config['tables']\
                                                            ['config_mstr'],
                                    "channel":applicability_criteria["channel"]})
        # Debug output left in place; consider switching to logging.
        print(config_master)
        print(config_master.columns)
        return config_master[config_master['key'] == 'max_date']['week'].values[0]
class FastStoresUS(Stores):
    """US-specific store-master access layer.

    Overrides the generic :class:`Stores` queries with the US schema
    (StoreClassification / RegionName / TerritoryName / MasterChain columns)
    and implements the upload-validation rules for US store files.
    """
    def __init__(self, config, test_id) -> None:
        super().__init__(config=config, test_id=test_id)
    def get_filtered_stores(self, applicability_criteria):
        """
        About function
        --------------
        Returns store information from the store master table based on the
        filters selected in the applicability criteria. RTM-team requests are
        additionally restricted to covered stores (IsCovered = 1).
        Parameters
        ----------
        applicability_criteria: key-value pairs of the filters (product and store)
        selected in the tool
        Return values
        -------
        store attributes dataframe
        """
        if "team" not in applicability_criteria:
            raise Exception("team is not passed to applicability criteria")
        # Empty-string sentinels keep the SQL "IN (...)" tuples non-empty.
        # NOTE(review): these appends mutate the caller's lists in place.
        applicability_criteria['regions_value'].append("")
        applicability_criteria['territory_value'].append("")
        applicability_criteria['store_name_value'].append("")
        applicability_criteria['segments_value'].append("")
        applicability_criteria['store_name_value'] = \
            tuple(applicability_criteria['store_name_value'])
        applicability_criteria['segments_value'] = \
            tuple(applicability_criteria['segments_value'])
        applicability_criteria['regions_value'] = \
            tuple(applicability_criteria['regions_value'])
        applicability_criteria['territory_value'] = \
            tuple(applicability_criteria['territory_value'])
        if applicability_criteria['team'] == 'RTM':
            query_store_filter = """SELECT *
                        FROM {store_table}
                        WHERE StoreClassification = '{channel}'
                        AND IsCovered = 1
                        AND RegionName IN {regions_value}
                        AND TerritoryName IN {territory_value}
                        AND StoreName IN {store_name_value}
                        AND MasterChain IN {segments_value}"""
        else:
            query_store_filter = """SELECT * FROM {store_table}
                        WHERE StoreClassification = '{channel}'
                        AND RegionName IN {regions_value}
                        AND TerritoryName IN {territory_value}
                        AND StoreName IN {store_name_value}
                        AND MasterChain IN {segments_value}"""
        return self.execute_sql_query(query_store_filter, applicability_criteria)
    def get_uploaded_stores_info(self, stores_list, applicability_criteria):
        """
        About function
        --------------
        Returns store information from the store master table for the store
        identifiers present in ``stores_list``, restricted to the selected channel.
        Parameters
        ----------
        stores_list: list of store identifier values
        applicability_criteria: key-value pairs of the filters (product and store)
        selected in the tool
        Return values
        -------
        store attributes dataframe
        """
        applicability_criteria['store_value'] = tuple(stores_list)
        query_uploaded_population = """SELECT *
                                FROM {store_table}
                                WHERE StoreId IN {store_value}
                                AND StoreClassification = '{channel}'"""
        return self.execute_sql_query(query_uploaded_population, applicability_criteria)
    def check_details_uploaded_stores(self, stores_list, applicability_criteria):
        """
        This function, based on the selection (StoreId, StoreNumber, TDLinxNo),
        checks the uploaded values against the appropriate column.
        """
        if stores_list is None:
            stores_list = []
        # -1 sentinel keeps the SQL "IN" tuple non-empty.
        # NOTE(review): this mutates the caller's list in place.
        stores_list.append(-1)
        applicability_criteria['store_value'] = tuple(stores_list)
        query = "SELECT * FROM {store_table} WHERE {store_identifier_attribute} IN {store_value}"
        return self.execute_sql_query(query, stores=stores_list,data= applicability_criteria)
    def validate_uploaded_presence_store_master(self, uploaded_stores, \
            store_identifier, applicability_criteria)->Tuple[pd.DataFrame, str, bool]:
        """Validate an uploaded store list against the store master.

        Checks, in order: presence in the store master, membership in the
        selected channel, RTM coverage (when team == "RTM"), and — for
        StoreNumber uploads — that no StoreNumber maps to multiple StoreIds.
        Returns (validated stores dataframe, message, success flag).
        """
        if 'store_identifier_attribute' not in applicability_criteria:
            return pd.DataFrame(),\
                "Please pass the Store identifier attribute to the function", False
        store_idn_att = applicability_criteria['store_identifier_attribute']
        # StoreNumber uploads need the MasterChain (segments) selection to disambiguate.
        if (applicability_criteria['store_identifier_attribute'] == 'StoreNumber') \
                & (('segments_value' not in applicability_criteria) \
                or (len(applicability_criteria['segments_value']) == 0)):
            return pd.DataFrame(), "Please pass the MasterChain info to the upload function", False
        stores_list = list(uploaded_stores[store_identifier].unique())
        # A copy is passed because check_details_uploaded_stores appends a -1 sentinel.
        upld_str_dtls = self.check_details_uploaded_stores(stores_list=stores_list[:],
                            applicability_criteria=applicability_criteria)
        str_iden_clmn = self._storemstrmapping['partner_id']
        banner_clmn = self._config["store_mstr_columns"]["banner"]
        covered_clmn = self._storemstrmapping["is_covered"]
        if upld_str_dtls.shape[0] == 0:
            return upld_str_dtls, "All uploaded stores are not present in Store Master!!", False
        stores_list = list(set(stores_list) - set([-1]))
        message = "Out of {uploaded_stores} uploaded stores, {stores_present} in store master"\
            .format(uploaded_stores=len(stores_list),
                    stores_present=len(set(upld_str_dtls[store_idn_att].unique())- set([-1])))
        # """If different channel stores are uploaded"""
        if applicability_criteria["channel"] not in upld_str_dtls[banner_clmn].unique():
            diff_banner_str = upld_str_dtls[upld_str_dtls[banner_clmn]!=applicability_criteria["channel"]]
            return pd.DataFrame(), "Please remove!! Stores belonging to other channels {} \n"\
                .format(diff_banner_str[store_idn_att].values.tolist()), False
        check_details = upld_str_dtls[upld_str_dtls[banner_clmn] == applicability_criteria["channel"]]
        message = "Out of {uploaded_stores} valid stores, {stores_present} in selected channel"\
            .format(uploaded_stores = len(set(upld_str_dtls[store_idn_att].unique())- set([-1])),
                    stores_present=check_details[store_idn_att].nunique()) + '\n' +message
        upld_str_dtls = check_details
        # """Fetch details of the uploaded stores"""
        if (applicability_criteria["team"] == "RTM") & (0 in upld_str_dtls[covered_clmn].unique()):
            return pd.DataFrame(), "Please remove!! NON-RTM stores found. List of {} that belongs to NON-RTM: {}\n"\
                .format(store_idn_att,
                        upld_str_dtls[upld_str_dtls[covered_clmn]==0]\
                        [store_idn_att].unique()) + message, False
        if applicability_criteria["team"] == "RTM":
            check_details = upld_str_dtls[upld_str_dtls[covered_clmn] == 1]
            upld_str_dtls = check_details
        # """if store number are uploaded then no storenumber is mapped to multiple storeIds"""
        if store_identifier == 'StoreNumber':
            map_store_id = upld_str_dtls.groupby('StoreNumber').aggregate({str_iden_clmn:'nunique'}).reset_index()
            if map_store_id[map_store_id[str_iden_clmn]>1].shape[0]>0:
                return pd.DataFrame(), \
                    "Please remove!!Some Store Numbers({}) are mapped to multiple StoreIds\n"\
                    .format(list(map_store_id[map_store_id[str_iden_clmn]>1]\
                        ['StoreNumber'].unique())) + message, False
        return upld_str_dtls, message, True | /rtm_fast-0.0.2-py3-none-any.whl/rtm_fast/ds/regions/US/common_utility.py | 0.884539 | 0.525308 | common_utility.py | pypi |
config_uk = {
"Constructors": {
"Sales": "FastSalesUK",
"Stores": "FastStoresUK",
"Tool": "FastToolUK",
},
"feature_parameter": {
"is_product_present": 1,
"active_store_filter_type": "test",
"test_variable_dates": 0,
"control_store_buffer": 1.2,
"data_continuity_check": 1
},
"store_mstr_columns": {
"banner": "Customer_Group",
"segment": "Customer_Chain",
"territory": "Territory",
"storegrid": "Sub_Channel",
"partner_id": "Customer_Number",
"baycount": "",
"partner_id_backup": "Customer_Number",
"FSR": "Sales_Representative",
},
"Test_store_Partner_ID_backup": "Test_store_Customer_Number",
"heading_names": {
"banner": "Banner",
"segment": "Segment",
"territory": "Territory",
"storegrid": "Overall Segment",
"partner_id": "Customer Name",
"store_segment": "Store Segment",
"currency": "£",
},
"weekly_target_variable": {
"banner": "Customer_Group",
"banner_code": "Store_Number",
"partner_id": "Customer_Number",
"rsv": "RSV",
"volume": "Volume",
"week_no": "Week Number",
"year": "Year",
"segment": "Customer_Chain",
"week": "Week",
"overall_segment": "Sub_Channel",
"territory": "Territory",
"RSV": "RSV",
"cbulvl1": "CBU_Lvl1",
"packformat": "Pack_Format",
},
"tables": {
"control_store_mstr": "[FAST_UK].[Tl_Controlstore_Mstr]",
"measurement": "[FAST_UK].[Tl_Measurement_Tbl]",
"record_mstr": "[FAST_UK].[Tl_RecordMstr]",
"store_mstr": "[FAST_UK].[Tl_StoreMstr]",
"test_mstr": "[FAST_UK].[Tl_TestMstr]",
"test_store_map": "[FAST_UK].[Tl_Teststore_map]",
"weekly_mstr": "[FAST_UK].[Tl_Weekly_target_mst]",
"upload_stores": "[FAST_UK].[Tl_Upload_store_population]",
"config_mstr": "[FAST_UK].[Tl_ConfigMstr]",
"visit_mstr": "[FAST_UK].[Tl_Visit_Data]",
'pack_mstr':"[FAST_UK].[Tl_Pack_Format]",
'cbu_mstr':"[FAST_UK].[Tl_CBU_Lvl1]"
},
"metadata": {
"test_configuration": {
"sales_weeks": 104,
"sales_lifts_sales_weeks": 52,
"sales_diff_percentage": 10,
"power_of_test": 0.7,
"min_teststores": 30,
"rawconvfactors": {
"CO-OP": 0.18,
"ASDA": 0.25,
"TESCO": 0.21,
"POUNDLAND": 0.15,
"SAINSBURY": 0.18,
"MORRISONS": 0.21,
},
},
"test_planning": {
"default_stratification": ["Customer_Group"],
"test_vs_population_compare": [
"total_checkout_locations",
"Store_Size_Sq_Ft",
"Manned_Checkouts",
],
"test_vs_population_compare_summary": [
"Store_Size_Sq_Ft",
"Manned_Checkouts",
],
"sampling_iterations": 10,
"test_vs_population_pvalue": 0.8,
"test_vs_control_compare": ["Customer_Group", "Customer_Chain"],
"test_vs_control_compare_summary": [],
"business_category_specific_compare": [],
"business_categories_count": 0,
"test_vs_control_pvalue": 0.8,
"test_vs_control_similaritymeasure_difference_threshold": 0.05,
"summary_sales_weeks": 52,
"validate_datapoints_multiplier": 2,
"teststores_columns": [
"Customer_Number",
"Sales_Representative",
"Customer_Group",
"Territory",
"Store_Size_Sq_Ft",
"Customer_Chain",
],
"upload_stores_identifier":"Customer_Number",
"upload_teststores_identifier":'Test_store_Customer_Number',
"upload_controlstores_identifier":'Control_store_Customer_Number',
"user_populationstores_columns": {
"Customer_Number": "int64"
},
"user_teststores_columns": {
"Test_store_Customer_Number": "int64"
},
"control_storespool_columns": {
"Control_store_Customer_Number": "int64"
},
"confidence_level": 0.85,
"similarity_measure": 0.7,
"correlation": 0.4,
"margin_of_error": 0.04,
"power_of_test": 0.7,
"power_values": [60, 65, 70, 75, 80, 85, 90, 95],
"user_testcontrolstores_columns": {
"Test_store_Customer_Number": "int64",
"Control_store_Customer_Number": "int64",
},
"control_storespool_columns": {
"Control_store_Customer_Number": "int64"
},
},
"test_measurement": {
"probability_thresholds": [0.60, 0.85, 1],
"testmeasurement_columns": [
"Customer_Number",
"Sales_Representative",
"Customer_Group",
"Territory",
"Sub_Channel",
"Store_Size_Sq_Ft",
"Customer_Chain",
],
"user_customgroup_columns": {
"Test_store_Customer_Number": "int64",
"Group": "object",
},
},
},
"filePath": {
"TestStore": {
"file_name": "/ds/regions/UK/upload_templates/Upload_Teststores_Template_UK.xlsx"
},
"controlStore": {
"file_name": "/ds/regions/UK/upload_templates/Test_Control_Pairs_Upload_Template_UK.xlsx"
},
"controlStore_Pool": {
"file_name": "/ds/regions/UK/upload_templates/Control_Pairs_Pool_Upload_Template_UK.xlsx"
},
"RSV_STORES": {
"file_name": "/ds/regions/UK/upload_templates/Upload_Population_Template_UK.xlsx"
},
},
"excel_header": {
"test_store": "Test_store_Customer_Number",
"control_store": "Control_store_Customer_Number",
},
"report_generate": {
"common": {"region_name": "UNITED KINGDOM", "flag_name": "flag_UK.png"},
"control_compare_variable": ["Touchability_Score", "Store_Size_Sq_Ft"],
"test_compare_variable": ["Touchability_Score", "Store_Size_Sq_Ft"],
"store_feature": ["Customer Group"],
"row_span": 2,
"matching_criteria": [
"Customer_Group",
"Territory",
"Touchability_Score",
"Store_Size_Sq_Ft",
],
},
"result_grid_excel": {
"header_data": ["Week", "Category", "Metric", "Variable", "Value"],
"category_format": {
"Confectionary_Combination_1": "category1",
"Confectionary_Combination_2": "category2",
"Confectionary_Combination_3": "category3",
"Confectionary_Combination_4": "category4",
},
},
} | /rtm_fast-0.0.2-py3-none-any.whl/rtm_fast/ds/regions/UK/config_data.py | 0.408159 | 0.396535 | config_data.py | pypi |
from datetime import datetime
from typing import Tuple
import pandas as pd
from ds.library.sql.sales_master import Sales
from ds.library.sql.stores_master import Stores
class FastStoresUK(Stores):
    """UK-specific store-master access layer.

    Overrides the generic :class:`Stores` queries with the UK schema
    (Customer_Group / Customer_Chain / Territory / Customer_Status columns)
    and adds a Customer_Status population filter for non-RTM-impact tests.
    """
    def __init__(self, config, test_id) -> None:
        self._config = config
        super().__init__(
            config=self._config, test_id=test_id
        )
    def filter_population(self, applicability_criteria, \
            storelist=None, uploaded_file_df=None) -> pd.DataFrame:
        """Return the population stores from the base implementation; for
        every test type except "RTM Impact Test", additionally restrict the
        population to the selected Customer_Status values."""
        if storelist is None:
            storelist = []
        stores_master_df = super().filter_population(
            storelist=storelist,
            applicability_criteria=applicability_criteria,
            uploaded_file_df=uploaded_file_df
        )
        if applicability_criteria["test_type"] != "RTM Impact Test":
            stores_master_df = stores_master_df[
                stores_master_df['Customer_Status'].isin(applicability_criteria['Customer_Status'])
            ]
        return stores_master_df
    def get_filtered_stores(self, applicability_criteria) -> pd.DataFrame:
        """Fetch store-master rows matching the UI filter selections.

        Empty-string sentinels keep the SQL "IN (...)" tuples non-empty.
        NOTE(review): the appends mutate the caller's lists in place.
        """
        applicability_criteria["banners"].append("")
        applicability_criteria["segments"].append("")
        applicability_criteria["store_segments"].append("")
        applicability_criteria["territories"].append("")
        applicability_criteria["Customer_Status"].append("")
        filter_store_query = """Select * from {table}
                            where Customer_Group IN {banners}
                            and Sub_Channel IN {segments}
                            and Customer_Chain IN {store_segments}
                            and Territory IN {territories}
                            and Customer_Status IN {Customer_Status}"""
        return self.execute_sql_query(filter_store_query, data={
            "table": self._config['tables']['store_mstr'],
            "banners": tuple(applicability_criteria["banners"]),
            "segments": tuple(applicability_criteria["segments"]),
            "store_segments": tuple(applicability_criteria["store_segments"]),
            "territories": tuple(applicability_criteria["territories"]),
            "Customer_Status": tuple(applicability_criteria["Customer_Status"])
        })
    def get_uploaded_stores_info(self, stores_list, applicability_criteria) -> pd.DataFrame:
        """Returns the store information of uploaded population"""
        # applicability_criteria["store_value"] = tuple(stores_list)
        query_uploaded_population = "SELECT * FROM {table} WHERE Customer_Number IN {store_value}"
        # table=applicability_criteria["store_table"]
        return self.execute_sql_query(query_uploaded_population, data={
            "table": self._config['tables']['store_mstr'],
            "store_value": tuple(stores_list)})
class FastSalesUK(Sales):
    """
    UK-specific implementation of :class:`Sales` for TargetEstimate.

    Attributes
    ----------
    config : configuration present in config_data either for a region or overall

    Methods
    -------
    get_cbu_sales(): CBU/pack-format filtered weekly sales for a store list
    get_overall_sales(): unfiltered weekly sales for a store list
    get_valid_weekly_target_data(): validated weekly sales for the test windows
    get_annual_rsv_lifts(): annual RSV/volume lift estimation
    """

    def __init__(self, config, test_id) -> None:
        """Keep a reference to the UK config and initialise the Sales base."""
        self._config = config
        super().__init__(
            config=config, test_id=test_id
        )

    def get_cbu_sales(self, stores, applicability_criteria, weeks) -> pd.DataFrame:
        """Weekly RSV/Volume per store, restricted to the selected CBU level-1
        categories and pack formats.

        Parameters
        ----------
        stores : list of customer numbers
        applicability_criteria : dict carrying 'cbu_lvl1_categories' and
            'pack_lvl_categories' selections
        weeks : list of year-week integers to aggregate over

        Returns
        -------
        pd.DataFrame with Customer_Group, Customer_Number, Week, RSV, Volume.
        """
        # Resolve the selected CBU level-1 categories to their mapping ids.
        cbu_lvl_query = (
            "Select CBU_Lvl1_Mapping from {table} where CBU_Lvl1 IN {cat_list}"
        )
        data = {
            "cat_list": tuple(applicability_criteria["cbu_lvl1_categories"]),
            "table": self._config["tables"]["cbu_mstr"],
        }
        cbu_lvl1_mapping_list = pd.DataFrame(self.execute_sql_query(cbu_lvl_query, data=data))
        cbu_lvl1_mapping_list = cbu_lvl1_mapping_list["CBU_Lvl1_Mapping"].to_list()
        # Resolve the selected pack formats to their mapping ids.
        pack_format_query = (
            "Select Pack_Format_Mapping from {table} where Pack_Format IN {cat_list}"
        )
        data = {
            "cat_list": tuple(applicability_criteria["pack_lvl_categories"]),
            "table": self._config["tables"]["pack_mstr"],
        }
        pack_format_mapping_list = pd.DataFrame(
            self.execute_sql_query(pack_format_query, data=data))
        pack_format_mapping_list = pack_format_mapping_list["Pack_Format_Mapping"].to_list()
        # -1 sentinel: presumably keeps the SQL IN (...) clause valid when a
        # mapping list is empty -- TODO confirm against execute_sql_query.
        pack_format_mapping_list.append(-1)
        cbu_lvl1_mapping_list.append(-1)
        customer_list = stores
        sqlquery = """SELECT Customer_Group,Customer_Number, Week, SUM(RSV) as RSV, SUM(Volume) as Volume
        FROM {table}
        WHERE Week IN {weeks_val} AND CBU_Lvl1_Mapping IN {cbu_lvl_val}
        AND Pack_Format_Mapping IN {pack_format_val} AND Customer_Number IN {stores_val}
        GROUP By Customer_Group, Customer_Number, Week """
        data = {
            "table": self._config["tables"]["weekly_mstr"],
            "weeks_val": tuple(weeks),
            "cbu_lvl_val": tuple(cbu_lvl1_mapping_list),
            "pack_format_val": tuple(pack_format_mapping_list),
            "stores_val": tuple(customer_list),
        }
        return pd.DataFrame(self.execute_sql_query(sqlquery, data=data))

    def get_overall_sales(self, stores, applicability_criteria, weeks):
        """Weekly RSV/Volume per store with no product filtering.

        NOTE(review): the base class declares this method as
        ``(stores, weeks, applicability_criteria=None)``; all call sites use
        keyword arguments so the differing positional order is harmless, but
        the two signatures should be aligned.
        """
        sqlquery = """SELECT Customer_Group,Customer_Number, Week, SUM(RSV) as RSV,
        SUM(Volume) as Volume,imputed
        FROM {weekly_mstr_table}
        WHERE Week IN {weeks_val} AND Customer_Number IN {stores_val}
        GROUP By Customer_Group, Customer_Number, Week,imputed"""
        return pd.DataFrame(self.execute_sql_query(
            sqlquery,
            data={"weekly_mstr_table": self._config['tables']['weekly_mstr'],
                  "weeks_val": tuple(weeks), "stores_val": tuple(stores)}))

    def get_valid_weekly_target_data(self, stores, applicability_criteria,
                                     target_variable, test_master_df, test_type, sales_week,
                                     consideryearweeks=None) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
        """UK wrapper that derives the week window (with UK-specific
        exclusions) before delegating to the base implementation.

        For an "RTM Impact Test" the window is the union of the pre and post
        test windows (post clipped to the latest week with data); otherwise
        the most recent `sales_week` weeks before the baseline end are used.

        Returns (merged sales, overall sales, cbu sales, weeks, message, flag).
        """
        prewindow_start = datetime.strptime(
            test_master_df["pre_start"].values[0], '%Y-%m-%d').date()
        prewindow_end = datetime.strptime(
            test_master_df["pre_end"].values[0], '%Y-%m-%d').date()
        postwindow_start = datetime.strptime(
            test_master_df["testwin_start"].values[0], '%Y-%m-%d').date()
        postwindow_end = datetime.strptime(
            test_master_df["testwin_end"].values[0], '%Y-%m-%d').date()
        if test_type == "RTM Impact Test":
            pre_window_yearweeks = self.find_weeks(prewindow_start,
                                                   prewindow_end)
            post_window_yearweeks = self.find_weeks(postwindow_start,
                                                    postwindow_end)
            # Clip the post window to the latest week maintained in the
            # config master table.
            max_week_data_available = self.get_max_week_config_master(
                applicability_criteria)
            post_window_yearweeks = [
                i for i in post_window_yearweeks if i <= int(max_week_data_available)]
            # Week 201739 is dropped for Poundland-only selections --
            # presumably a known data issue for that banner; TODO confirm.
            if len(applicability_criteria['banners']) == 1 and \
                    'POUNDLAND'.upper() in map(str.upper,
                                               applicability_criteria['banners']):
                if 201739 in pre_window_yearweeks:
                    pre_window_yearweeks.remove(201739)
            all_weeks = set(pre_window_yearweeks).union(post_window_yearweeks)
            weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, consideryearweeks, \
                message, success_flag = super().get_valid_weekly_target_data(
                    stores=stores,
                    applicability_criteria=applicability_criteria,
                    target_variable=target_variable,
                    test_master_df=test_master_df,
                    test_type=test_type,
                    sales_week=sales_week,
                    consideryearweeks=all_weeks)
            return weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, consideryearweeks, \
                message, success_flag
        # Non-RTM tests: most recent `sales_week` weeks before the baseline end.
        yearweeks = self.find_last104_weeks_from_baseline_end(prewindow_end)
        if len(applicability_criteria['banners']) == 1 \
                and 'POUNDLAND'.upper() in map(str.upper, applicability_criteria['banners']):
            if 201739 in yearweeks:
                yearweeks.remove(201739)
        yearweeks.sort(reverse=True)
        consideryearweeks = yearweeks[:sales_week]
        consideryearweeks.sort(reverse=False)
        weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, _, \
            message, success_flag = super().get_valid_weekly_target_data(
                stores=stores,
                applicability_criteria=applicability_criteria,
                target_variable=target_variable,
                test_master_df=test_master_df,
                test_type=test_type,
                sales_week=sales_week,
                consideryearweeks=consideryearweeks)
        return weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales, consideryearweeks, \
            message, success_flag

    def get_annual_rsv_lifts(self, target_variable, test_master_df, stores,
                             applicability_criteria, test_type) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
        """Calculate annual RSV/volume lifts for the given stores.

        Returns (lift df, merged sales, overall sales, cbu sales, weeks,
        message, success flag); on failure the first four frames are empty.
        """
        sales_week = self.get_sales_weeks(applicability_criteria)
        sales_lifts_sales_weeks = self.get_lift_sales_weeks(applicability_criteria)
        weekly_ovrl_cbu_sales, weekly_overall_sales, weekly_cbu_sales, consideryearweeks, \
            message, success_flag = self.get_valid_weekly_target_data(
                stores=stores,
                applicability_criteria=applicability_criteria,
                target_variable=target_variable,
                test_master_df=test_master_df,
                test_type=test_type,
                sales_week=sales_week)
        if success_flag is False:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), consideryearweeks, message, False
        if test_type == "RTM Impact Test":
            # NOTE(review): this indexes element 0/1 of `consideryearweeks`,
            # which for RTM is the merged week collection returned above --
            # it looks like a [pre_weeks, post_weeks] pair is expected here;
            # confirm the upstream contract.
            weeks1 = consideryearweeks[0]
            weeks2 = consideryearweeks[1]
        else:
            weeks1 = consideryearweeks[:sales_lifts_sales_weeks]
            weeks2 = consideryearweeks[sales_lifts_sales_weeks:]
        annualrsvdatamerged, _, success_flag = self \
            ._lift_calculation_util(weekly_sales=weekly_ovrl_cbu_sales,
                                    first_half_weeks=weeks1,
                                    second_half_weeks=weeks2,
                                    target_variable=target_variable)
        return annualrsvdatamerged, weekly_ovrl_cbu_sales, weekly_overall_sales, weekly_cbu_sales, \
            consideryearweeks, "Annual Lift calculated Successfully!", True
import numpy as np
from datetime import datetime
from scipy.sparse import issparse
def gower_matrix(data_x, data_y=None, weight=None, cat_features=None):
    """Compute the pairwise Gower distance matrix between data_x and data_y.

    Parameters
    ----------
    data_x : 2-D ndarray or DataFrame of mixed categorical/numeric columns.
    data_y : optional second data set; defaults to data_x (symmetric case).
    weight : optional per-feature weights; defaults to all ones.
    cat_features : optional boolean mask flagging categorical columns; when
        omitted it is inferred from dtypes (DataFrame) or cell types (ndarray).

    Returns
    -------
    np.ndarray of shape (len(data_x), len(data_y)), dtype float32, with
    Gower distances in [0, 1].
    """
    X = data_x
    if data_y is None: Y = data_x
    else: Y = data_y
    # DataFrames must share columns; plain arrays must share the column count.
    if not isinstance(X, np.ndarray):
        if not np.array_equal(X.columns, Y.columns): raise TypeError("X and Y must have same columns!")
    else:
        if not X.shape[1] == Y.shape[1]: raise TypeError("X and Y must have same y-dim!")
    if issparse(X) or issparse(Y): raise TypeError("Sparse matrices are not supported!")
    x_n_rows, x_n_cols = X.shape
    y_n_rows, y_n_cols = Y.shape
    if cat_features is None:
        # Infer categorical columns: anything whose dtype is not numeric.
        # (Renamed from `is_number`: the predicate is true for NON-numbers.)
        if not isinstance(X, np.ndarray):
            is_not_number = np.vectorize(lambda x: not np.issubdtype(x, np.number))
            cat_features = is_not_number(X.dtypes)
        else:
            cat_features = np.zeros(x_n_cols, dtype=bool)
            for col in range(x_n_cols):
                if not np.issubdtype(type(X[0, col]), np.number):
                    cat_features[col] = True
    else:
        cat_features = np.array(cat_features)
    if not isinstance(X, np.ndarray): X = np.asarray(X)
    if not isinstance(Y, np.ndarray): Y = np.asarray(Y)
    # Stack both data sets so ranges/maxima are computed over their union.
    Z = np.concatenate((X, Y))
    x_index = range(0, x_n_rows)
    y_index = range(x_n_rows, x_n_rows + y_n_rows)
    Z_num = Z[:, np.logical_not(cat_features)]
    num_cols = Z_num.shape[1]
    num_ranges = np.zeros(num_cols)
    num_max = np.zeros(num_cols)
    for col in range(num_cols):
        col_array = Z_num[:, col].astype(np.float32)
        # Renamed from `max`/`min` to avoid shadowing the builtins.
        col_max = np.nanmax(col_array)
        col_min = np.nanmin(col_array)
        if np.isnan(col_max):
            col_max = 0.0
        if np.isnan(col_min):
            col_min = 0.0
        num_max[col] = col_max
        # Relative range of the column after division by its max.
        num_ranges[col] = np.absolute((1 - col_min / col_max)) if (col_max != 0) else 0.0
    # Normalise numeric values to [0, 1]; columns whose max is 0 become 0.
    Z_num = np.divide(Z_num, num_max, out=np.zeros_like(Z_num), where=num_max != 0)
    Z_cat = Z[:, cat_features]
    if weight is None:
        weight = np.ones(Z.shape[1])
    weight_cat = weight[cat_features]
    weight_num = weight[np.logical_not(cat_features)]
    out = np.zeros((x_n_rows, y_n_rows), dtype=np.float32)
    weight_sum = weight.sum()
    X_cat = Z_cat[x_index,]
    X_num = Z_num[x_index,]
    Y_cat = Z_cat[y_index,]
    Y_num = Z_num[y_index,]
    for i in range(x_n_rows):
        j_start = i
        # Asymmetric case computes every pair; symmetric case computes the
        # upper triangle only and mirrors it below.
        if x_n_rows != y_n_rows:
            j_start = 0
        res = gower_get(X_cat[i, :],
                        X_num[i, :],
                        Y_cat[j_start:y_n_rows, :],
                        Y_num[j_start:y_n_rows, :],
                        weight_cat,
                        weight_num,
                        weight_sum,
                        cat_features,
                        num_ranges,
                        num_max)
        out[i, j_start:] = res
        if x_n_rows == y_n_rows: out[i:, j_start] = res
    return out
def str_to_date(date_str):
    """Parse a 'YYYY-MM-DD' value (string or string-convertible) to a date."""
    parsed = datetime.strptime(str(date_str), '%Y-%m-%d')
    return parsed.date()
def gower_get(xi_cat, xi_num, xj_cat, xj_num, feature_weight_cat,
              feature_weight_num, feature_weight_sum, categorical_features,
              ranges_of_numeric, max_of_numeric):
    """Gower distance between one row (xi_*) and a batch of rows (xj_*).

    Categorical features contribute 1 per mismatch and 0 per match; numeric
    features contribute the range-normalised absolute difference. Both parts
    are feature-weighted and the total is divided by the weight sum.
    """
    # Categorical term: 0 where equal, 1 where different.
    cat_mismatch = np.where(xi_cat == xj_cat,
                            np.zeros_like(xi_cat),
                            np.ones_like(xi_cat))
    cat_term = np.multiply(feature_weight_cat, cat_mismatch).sum(axis=1)
    # Numeric term: |xi - xj| scaled by each feature's range (0-range -> 0).
    delta = np.absolute(xi_num - xj_num)
    num_scaled = np.divide(delta, ranges_of_numeric,
                           out=np.zeros_like(delta),
                           where=ranges_of_numeric != 0)
    num_term = np.multiply(feature_weight_num, num_scaled).sum(axis=1)
    return np.divide(np.add(cat_term, num_term), feature_weight_sum)
def smallest_indices(ary, n):
    """Return the flat indices and values of the n SMALLEST entries of *ary*.

    (Docstring fix: the old text said "largest", but argpartition on the
    negated array selects the smallest values.) NaNs are replaced with 999
    so they sort after real values and are effectively never selected.

    Returns
    -------
    dict with 'index' (flat indices, ascending by value) and 'values'.
    """
    flat = np.nan_to_num(ary.flatten(), nan=999)
    # Partition so the n smallest values occupy the last n positions.
    indices = np.argpartition(-flat, -n)[-n:]
    # Order those n indices by ascending value.
    indices = indices[np.argsort(flat[indices])]
    values = flat[indices]
    return {'index': indices, 'values': values}
def gower_topn(data_x, data_y=None, weight=None, cat_features=None, n=5):
    """Return the indices/values of the n rows of data_y nearest to data_x[0]
    by Gower distance.

    Raises
    ------
    TypeError when data_x has more than one row. (BUG FIX: the original
    constructed the TypeError but never raised it, silently continuing.)
    """
    if data_x.shape[0] >= 2:
        raise TypeError("Only support `data_x` of 1 row. ")
    dm = gower_matrix(data_x, data_y, weight, cat_features)
    # NaN distances are mapped to 1 (the maximum) before ranking.
    return smallest_indices(np.nan_to_num(dm[0], nan=1), n)
from datetime import datetime, timedelta
from typing import Tuple
import pandas as pd
from ds.library.ds_common_functions import str_to_date
from .utility.sql_utility import SqlUtility
class Sales (SqlUtility):
"""
A class to represent features of Sales.
...
Attributes
----------
Methods
-------
"""
def __init__(self, config, test_id=None):
    """Initialise the sales helper.

    Parameters
    ----------
    config : region (or overall) configuration mapping from config_data
    test_id : identifier of the current test, if any
    """
    # SqlUtility.__init__ is expected to store the configuration:
    # self._config is read below without being assigned here.
    super().__init__(config)
    self._test_id = test_id
    cfg = self._config
    self._tarvarmapping = cfg["weekly_target_variable"]
    self._metadata = cfg["metadata"]["test_configuration"]
def get_sales_weeks(self, applicability_criteria) -> int:
"""
About function
--------------
This function returns the sales weeks set from config of the region
This checks applicability criteria first if there is "sales_weeks" key in it
Parameters
----------
applicability_criteria: key-value pairs of the filters (product and store)
selection made in the tool
example
-------
if want to calculate yearly lift then sales_week = 104 (52*2 number of weeks);
sales_lifts_sales_weeks = 52 (number of weeks in a year)
Return values
-------
integer value of weeks to be considered in sales calculation
"""
if 'sales_weeks' in applicability_criteria:
return applicability_criteria['sales_weeks']
return self._metadata["sales_weeks"]
def get_lift_sales_weeks(self, applicability_criteria) -> int:
"""
About function
--------------
This function returns the number of weeks to be considered for lift calculation
This checks applicability criteria first if there is "sales_lifts_sales_weeks" key in it
Parameters
----------
applicability_criteria: key-value pairs of the filters (product and store)
selection made in the tool
example
-------
if want to calculate yearly lift then sales_week = 104 (52*2 number of weeks);
sales_lifts_sales_weeks = 52 (number of weeks in a year)
Return values
-------
integer value of weeks to be considered for lift calculation
"""
if 'sales_lifts_sales_weeks' in applicability_criteria:
return applicability_criteria['sales_lifts_sales_weeks']
return self._metadata['sales_lifts_sales_weeks']
def get_summary_sales_weeks(self, applicability_criteria):
"""
About function
--------------
It returns the number of weeks for which sales need to be calculated
to calculate the summary.
It checks both applicability criteria and config.
First priority is given to applicability criteria
Parameters
----------
applicability_criteria: key-value pairs of the filters (product and store)
selection made in the tool
Return values
-------
integer value of weeks to be considered for summary calculations
"""
if 'summary_sales_weeks' in applicability_criteria:
return applicability_criteria['summary_sales_weeks']
return self._config['metadata']['test_planning']\
['summary_sales_weeks']
def get_cbu_sales(self, stores, applicability_criteria, weeks) -> pd.DataFrame:
"""
About function
--------------
This function interacts with weekly sales table and calculates the
sales and volume of selected products(total sales of products)
at store in the given weeks
Parameters
----------
stores: list of store identifier values for which sales need to be calculated
applicability_criteria: key-value pairs of the filters (product and store)
selection made in the tool
week: list of week values in which sales needs to be calculated
Return values
-------
dataframe with weekly sales of the stores
"""
print("{} {} {} {}".format(self._test_id,stores, applicability_criteria, weeks))
return pd.DataFrame()
def get_overall_sales(self, stores, weeks, applicability_criteria=None) -> pd.DataFrame:
"""
About function
--------------
This function interacts with weekly sales table and calculates the overall
sales and volume (doesnt consider product attributes) at store in the given weeks
Parameters
----------
stores: list of store identifier values for which sales need to be calculated
applicability_criteria: key-value pairs of the filters (product and store)
selection made in the tool
week: list of week values in which sales needs to be calculated
Return values
-------
dataframe with weekly sales of the stores
"""
print("{} {} {} {} ".format(self._test_id,stores, applicability_criteria, weeks))
return pd.DataFrame()
def get_max_week_config_master(self, applicability_criteria=None) -> str:
"""
About function
--------------
This function interacts with config master table in the database and returns
max date maintained in the config table
Parameters
----------
stores: list of store identifier values for which sales need to be calculated
applicability_criteria: key-value pairs of the filters (product and store)
selection made in the tool
Return values
-------
max date maintained in the table
"""
print(len(applicability_criteria.keys()))
config_master = self.execute_sql_query(query="SELECT * FROM {table_name}",
data={
"table_name": self._config['tables']['config_mstr']
})
return config_master[config_master['key_name'] == 'max_date']['week'].values[0]
def get_valid_weekly_target_data(self, stores, applicability_criteria,\
        target_variable, test_master_df,test_type,\
        sales_week, consideryearweeks=None) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
    """
    Calculate product (CBU) and overall weekly sales, merge them, and
    validate the result for data continuity.

    Set 'is_product_present' in config to 0 when the region has no product
    attributes, and 'data_continuity_check' to 0 to skip continuity checks.

    Parameters
    ----------
    stores: store identifiers to calculate sales for (NOTE: mutated in
        place -- a -1 sentinel is appended below)
    applicability_criteria: product/store filter selections from the tool
    target_variable: sales or volume column name
    test_master_df: current test's row(s) from the test measurement table
    test_type: tool test type (only printed here)
    sales_week: number of weeks the sales are computed/validated over
    consideryearweeks: optional explicit week list, skipping derivation

    Return values
    -------
    (merged sales, overall sales, cbu sales, weeks used, message, success
    flag). Implicitly returns None when target_variable is None --
    NOTE(review): callers appear to always pass a target variable; confirm.
    """
    if target_variable is not None:
        # Derive the week window only when the caller did not supply one:
        # the most recent `sales_week` weeks before the baseline end.
        if consideryearweeks is None:
            consideryearweeks = []
        if not consideryearweeks:
            yearweeks = self.find_last104_weeks_from_baseline_end(
                str_to_date(test_master_df['pre_end'].values[0])
            )
            yearweeks.sort(reverse=True)
            consideryearweeks = yearweeks[:sales_week]
            consideryearweeks.sort(reverse=False)
        # -1 sentinel appended to the caller's list -- presumably keeps the
        # SQL IN (...) clause valid for empty selections; TODO confirm.
        stores.append(-1)
        print(test_type)
        # Overall (non-product-filtered) sales query.
        weekly_overal_level_sales = self.get_overall_sales(
            stores=stores,
            applicability_criteria=applicability_criteria,
            weeks=consideryearweeks)
        if weekly_overal_level_sales.shape[0] == 0:
            return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), consideryearweeks, "Overall sales for stores not found", False
        store_identifier = self._tarvarmapping['partner_id']
        weekly_cbu_level_sales = pd.DataFrame()
        if self._config["feature_parameter"]["is_product_present"] == 1:
            # Region supports product attributes: run the CBU sales query.
            weekly_cbu_level_sales = self.get_cbu_sales(
                stores=stores,
                applicability_criteria=applicability_criteria,
                weeks=consideryearweeks
            )
            if weekly_cbu_level_sales.shape[0] == 0:
                return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), consideryearweeks,"CBU sales for stores not found", False
            rsv_lbl = self._tarvarmapping["rsv"]
            vol_lbl = self._tarvarmapping['volume']
            # Rename the CBU target columns so they can coexist with the
            # overall columns after the merge.
            weekly_cbu_level_sales.rename(columns={rsv_lbl: "CBU_Category_"+rsv_lbl,
                                                   vol_lbl: 'CBU_Category_'+vol_lbl},
                                          inplace=True)
            # Join CBU and overall results on store, banner and week.
            weekly_merged_level_sales = weekly_overal_level_sales\
                .merge(weekly_cbu_level_sales,
                       on=[store_identifier,
                           self._tarvarmapping["banner"],
                           self._tarvarmapping['week']])
            # Remove stores with zero CBU sales for the target variable.
            # NOTE(review): both branches below are identical -- the else
            # branch was presumably meant to differ (e.g. filter on volume);
            # confirm the intended behaviour.
            if target_variable == rsv_lbl:
                eliminatestores1 = weekly_merged_level_sales[weekly_merged_level_sales[
                    "CBU_Category_"+target_variable] == 0][store_identifier].unique()
            else:
                eliminatestores1 = weekly_merged_level_sales[weekly_merged_level_sales[
                    "CBU_Category_"+target_variable] == 0][store_identifier].unique()
            weekly_merged_level_sales = weekly_merged_level_sales[~(
                weekly_merged_level_sales[store_identifier]\
                .isin(eliminatestores1))]
        else:
            weekly_merged_level_sales = weekly_overal_level_sales
        # Data continuity check: drop stores that do not cover all weeks.
        if self._config['feature_parameter']['data_continuity_check'] == 1:
            weekcountsdf = weekly_merged_level_sales\
                .groupby(store_identifier)[self._tarvarmapping["week"]]\
                .nunique()\
                .reset_index()\
                .rename(columns={self._tarvarmapping["week"]: "Week_Count"})
            eliminatestores2 = weekcountsdf[weekcountsdf["Week_Count"]
                                            < sales_week][store_identifier].unique()
            if len(eliminatestores2) > 0:
                # Eliminate stores that may not have continuous data.
                weekly_merged_level_sales = weekly_merged_level_sales[~(
                    weekly_merged_level_sales[store_identifier].isin(eliminatestores2))]
                if weekly_merged_level_sales.shape[0] == 0:
                    return weekly_merged_level_sales, weekly_overal_level_sales,\
                        weekly_cbu_level_sales, consideryearweeks,\
                        "No store match with continuity criteria! Modify parameter selected", \
                        False
        if weekly_merged_level_sales.shape[0] == 0:
            return weekly_merged_level_sales, weekly_overal_level_sales,\
                weekly_cbu_level_sales, consideryearweeks,\
                "No common week-store pair found in overall and cbu sales",\
                False
        print("Unique weeks",
              weekly_merged_level_sales[self._tarvarmapping['week']].nunique())
        return weekly_merged_level_sales, weekly_overal_level_sales, weekly_cbu_level_sales,consideryearweeks, \
            "Valid Sales calculated Successfully!!", True
def get_sales_calculate_rsv(self, stores, target_variable, \
applicability_criteria, consideryearweeks) -> Tuple[pd.DataFrame, list]:
'''
get sales calculate rsv calls the cbu sales/overall sales and
calculate the total sales in the selected time period and population stores.
Set 'is_product_present' in config to 0 in case region doesnt have product attributes.
Parameters:
-----------
stores: list of store identifier values for which sales need to be calculated
applicability_criteria: key-value pairs of the filters (product and store)
selection made in the tool
consideryearweeks: to calculate sales in that period
Returns:
--------
product and overall sales value merged at store
week level,
message,
success flag
'''
if target_variable is not None:
applicability_criteria['week_value'] = tuple(consideryearweeks)
stores.append(-1)
start_time = datetime.now()
if self._config["feature_parameter"]["is_product_present"] == 1:
weekly_overal_level_sales = self.get_cbu_sales(
stores=stores,
applicability_criteria=applicability_criteria,
weeks=consideryearweeks)
else:
weekly_overal_level_sales = self.get_overall_sales(
stores=stores,
applicability_criteria=applicability_criteria,
weeks=consideryearweeks)
print("Time taken (get_sales_calculate_rsv) sales: {} seconds".format(
(datetime.now()-start_time).total_seconds()))
return weekly_overal_level_sales, consideryearweeks
return pd.DataFrame(), []
def _lift_calculation_util(self, weekly_sales, first_half_weeks,
second_half_weeks, target_variable) -> Tuple[pd.DataFrame, str, bool]:
"""
About function
--------------
utility function that calculates the lift(growth in cbu and overall sales) for stores
Parameters
----------
weekly_sales: dataframe that has weekly store sales
first_half_weeks: list of week values present in first half of time frame
second_half_weeks: list of week values present in second half of time frame
target_variable: key-value pairs of the filters (product and store)
selection made in the tool
Return values
-------
dataframe with lift CBU and overall lift values, message and success flag
"""
weekly_rsv_year1 = weekly_sales[weekly_sales[self._tarvarmapping["week"]]\
.isin(first_half_weeks)]
weekly_rsv_year2 = weekly_sales[weekly_sales[self._tarvarmapping["week"]]\
.isin(second_half_weeks)]
weekly_rsv_year1[self._tarvarmapping["year"]] = "Year1"
weekly_rsv_year2[self._tarvarmapping["year"]] = "Year2"
aggdict = {k: sum for k in [self._tarvarmapping["rsv"],
self._tarvarmapping["volume"]]}
groupbycolumns = [self._tarvarmapping["partner_id"],
self._tarvarmapping["banner"],
self._tarvarmapping["year"]]
rsv_lbl =self._tarvarmapping["rsv"]
vol_lbl = self._tarvarmapping["volume"]
if self._config['feature_parameter']["is_product_present"] == 1:
aggdict.update({k: sum for k in [
"CBU_Category_"+rsv_lbl,
"CBU_Category_"+vol_lbl]})
annualrsvdatayear1 = weekly_rsv_year1\
.groupby(groupbycolumns)\
.agg(aggdict)\
.reset_index()
annualrsvdatayear2 = weekly_rsv_year2\
.groupby(groupbycolumns)\
.agg(aggdict)\
.reset_index()
annualrsvdatayear1[rsv_lbl] = annualrsvdatayear1[rsv_lbl]\
.round(2)
annualrsvdatayear2[rsv_lbl] = annualrsvdatayear2[rsv_lbl]\
.round(2)
annualrsvdatayear1[vol_lbl] = annualrsvdatayear1[vol_lbl]\
.round(2)
annualrsvdatayear2[vol_lbl] = annualrsvdatayear2[vol_lbl]\
.round(2)
annualrsvdatayear1colsdict = {rsv_lbl: rsv_lbl +' Year 1',
vol_lbl: vol_lbl + ' Year 1'}
annualrsvdatayear2colsdict = {rsv_lbl: rsv_lbl +' Year 2',
vol_lbl: vol_lbl+' Year 2'}
if self._config['feature_parameter']["is_product_present"] == 1:
cbu_rsv_lbl = "CBU_Category_"+self._tarvarmapping["rsv"]
cbu_vol_lbl = "CBU_Category_"+self._tarvarmapping["volume"]
annualrsvdatayear1[cbu_rsv_lbl] = annualrsvdatayear1[cbu_rsv_lbl].round(2)
annualrsvdatayear2[cbu_rsv_lbl] = annualrsvdatayear2[cbu_rsv_lbl].round(2)
annualrsvdatayear1[cbu_vol_lbl] = annualrsvdatayear1[cbu_vol_lbl].round(2)
annualrsvdatayear2[cbu_vol_lbl] = annualrsvdatayear2[cbu_vol_lbl].round(2)
annualrsvdatayear1colsdict.update({cbu_rsv_lbl: cbu_rsv_lbl + ' Year 1',
cbu_vol_lbl: cbu_vol_lbl + " Year 1"})
annualrsvdatayear2colsdict.update({cbu_rsv_lbl: cbu_rsv_lbl+' Year 2',
cbu_vol_lbl: cbu_vol_lbl+" Year 2"})
annualrsvdatayear1.rename(
columns=annualrsvdatayear1colsdict, inplace=True)
annualrsvdatayear2.rename(
columns=annualrsvdatayear2colsdict, inplace=True)
mergecols = [self._tarvarmapping["partner_id"],
self._tarvarmapping["banner"]]
annualrsvdatamerged = annualrsvdatayear1.merge(
annualrsvdatayear2,
on=mergecols)
annualrsvdatamerged.drop(labels=[self._tarvarmapping["year"]+"_x",
self._tarvarmapping["year"]+"_y"],
axis=1,
inplace=True)
salesfilter = ((annualrsvdatamerged[target_variable+" Year 1"] > 0)
& (annualrsvdatamerged[target_variable+" Year 2"] > 0))
annualrsvdatamerged = annualrsvdatamerged[salesfilter]
trg_var_yr1_lbl =target_variable+" Year 1"
trg_var_yr2_lbl =target_variable+" Year 2"
trg_lift_lbl = target_variable+" Lift"
annualrsvdatamerged[trg_lift_lbl] = (annualrsvdatamerged[trg_var_yr2_lbl] -
annualrsvdatamerged[trg_var_yr1_lbl])\
/annualrsvdatamerged[trg_var_yr1_lbl]
annualrsvdatamerged[target_variable +
" Lift"] = annualrsvdatamerged[target_variable+" Lift"].round(2)
if self._config['feature_parameter']["is_product_present"] == 1:
cbu_year2_sales_lbl = "CBU_Category_"+target_variable+" Year 2"
cbu_year1_sales_lbl = "CBU_Category_"+target_variable+" Year 1"
cbu_lift_lbl = "CBU_Category_"+target_variable+" Lift"
annualrsvdatamerged[cbu_lift_lbl] = (annualrsvdatamerged[cbu_year2_sales_lbl] -
annualrsvdatamerged[cbu_year1_sales_lbl])\
/annualrsvdatamerged[cbu_year1_sales_lbl]
annualrsvdatamerged[cbu_lift_lbl] = \
annualrsvdatamerged[cbu_lift_lbl].round(2)
return annualrsvdatamerged, "Successfully calculated lift values", True
def get_annual_rsv_lifts(self, target_variable, test_master_df, stores, \
applicability_criteria, test_type)\
-> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
"""
About function
--------------
get annual rsv lifts calculate the lift of the stores passed
calls the get_valid_weekly_target_data and divides the sales of stores into two
time periods (division of weeks done based on the value of 'sales_lifts_sales_weeks')
Parameters
----------
target_variable: weekly sales column which is needs to be estimates (sales or volume),
prewindow_start: date from which preperiod starts,
prewindow_end: date on which preperiod ends,
postwindow_start:date from which postperiod starts,
postwindow_end: date on which postperiod ends,
stores: list of stores for which lift is calculated,
applicability_criteria: the product and stores attributes selected at tool in
dictionary format,
test_type: type of test from the tool selection (Activity, RTM impact, others...),
example
-------
if want to calculate yearly lift then sales_week = 104 (52*2 number of weeks);
sales_lifts_sales_weeks = 52 (number of weeks in a year)
Return values
-------
dataframe with lift CBU and overall lift values, list of weeks on which
sales is calculated,message and success flag
"""
# Getting the the target varaibles file
sales_week = self.get_sales_weeks(applicability_criteria)
sales_lifts_sales_weeks = self.get_lift_sales_weeks(applicability_criteria)
weekly_ovrl_cbu_sales,weekly_overall_sales, weekly_cbu_sales, consideryearweeks,\
message, success_flag = self.get_valid_weekly_target_data(
stores=stores,
applicability_criteria=applicability_criteria,
target_variable=target_variable,
test_master_df = test_master_df,
test_type=test_type,
sales_week=sales_week)
weeks1 = consideryearweeks[:sales_lifts_sales_weeks]
weeks2 = consideryearweeks[sales_lifts_sales_weeks:]
if success_flag is False:
return pd.DataFrame(), weekly_ovrl_cbu_sales, consideryearweeks, message, success_flag
annualrsvdatamerged, _, success_flag = self\
._lift_calculation_util(weekly_sales=weekly_ovrl_cbu_sales,
first_half_weeks=weeks1,
second_half_weeks=weeks2,
target_variable=target_variable)
return annualrsvdatamerged, weekly_ovrl_cbu_sales, weekly_overall_sales, weekly_cbu_sales, consideryearweeks,\
"Annual Lift calculated Successfully!", True
def get_total_weekly_target_data(self, test_master_df, stores_list,sales_week, target_variable,
applicability_criteria,test_type,
consideryearweeks = None)\
-> Tuple[pd.DataFrame, list, str, bool]:
"""
About function
--------------
This function gets the overall sales in the "sales week" time
period or weeks to be considered
Parameters
----------
prewindow_end: date on which preperiod ends,
stores_list: list of stores for which lift is calculated,
applicability_criteria: the product and stores attributes
selected at tool in dictionary format,
test_type: type of test from the tool selection
(Activity, RTM impact, others...),
sales_week: optional parameter is the number of weeks for which the sales
to be calculated and validated,
consideryearweeks: optional parameter a list of weeks, if want to skip the
calculation of weeks
example
-------
if want to calculate yearly lift then sales_week = 104 (52*2 number of weeks);
sales_lifts_sales_weeks = 52 (number of weeks in a year)
Return values
-------
overall sales values dataframe,
list of weeks on which sales is calculated,
message
success flag
"""
print(test_type, target_variable)
if consideryearweeks is None:
consideryearweeks = []
if not consideryearweeks:
yearweeks = self.find_last104_weeks_from_baseline_end(
datetime.strptime(test_master_df['pre_end'].values[0], '%Y-%m-%d').date())
yearweeks.sort(reverse=True)
consideryearweeks = yearweeks[:sales_week]
consideryearweeks.sort(reverse=False)
stores_list.append(-1)
weekly_overal_level_sales = self.get_overall_sales(
stores=stores_list,
applicability_criteria=applicability_criteria,
weeks=consideryearweeks)
if weekly_overal_level_sales.shape[0] == 0:
return weekly_overal_level_sales, consideryearweeks, "No Sales found", False
return weekly_overal_level_sales, consideryearweeks, "Sales calculated successfully!", True
def get_weekly_targetvariables_data(self, target_variable, test_master_df,
                                    stores, applicability_criteria) \
        -> Tuple[pd.DataFrame, pd.DataFrame, list, list, str, bool]:
    """Fetch weekly sales split into the pre- and post-test windows.

    The window boundaries come from `test_master_df` ('pre_start'/'pre_end'
    and 'testwin_start'/'testwin_end'); product-filtered sales are used when
    the region supports product attributes, overall sales otherwise.

    Return values
    -------
    (pre-window sales, post-window sales, pre weeks, post weeks, message,
    success flag).
    """
    pre_window_weeknumbers = self.find_weeks(
        str_to_date(test_master_df["pre_start"].values[0]),
        str_to_date(test_master_df["pre_end"].values[0])
    )
    pre_window_weeknumbers = list(map(int, pre_window_weeknumbers))
    post_window_weeknumbers = self.find_weeks(
        str_to_date(test_master_df["testwin_start"].values[0]),
        str_to_date(test_master_df["testwin_end"].values[0])
    )
    post_window_weeknumbers = list(map(int, post_window_weeknumbers))
    # One combined query covers both windows; copy so neither list is mutated.
    weeks_req = post_window_weeknumbers[:]
    weeks_req.extend(pre_window_weeknumbers)
    if self._config['feature_parameter']["is_product_present"] == 1:
        weekly_target_data = self.get_cbu_sales(
            stores=stores[:],
            applicability_criteria=applicability_criteria,
            weeks=weeks_req)
    else:
        weekly_target_data = self.get_overall_sales(
            stores=stores[:],
            applicability_criteria=applicability_criteria,
            weeks=weeks_req)
    if target_variable not in weekly_target_data.columns.tolist():
        # BUG FIX: this failure path previously returned only 4 values
        # although the annotation and the success path use a 6-tuple.
        return pd.DataFrame(), pd.DataFrame(), [], [], \
            "Target variable not found in weekly sales data", False
    # Split the combined result back into the two windows.
    prewindow_filter = (
        (weekly_target_data[self._tarvarmapping["week"]].isin(pre_window_weeknumbers)))
    postwindow_filter = (
        (weekly_target_data[self._tarvarmapping["week"]].isin(post_window_weeknumbers)))
    out_columns = [self._tarvarmapping["partner_id"], self._tarvarmapping["banner"],
                   self._tarvarmapping["week"], self._tarvarmapping['rsv'],
                   self._tarvarmapping['volume']]
    prewindow_target_data = weekly_target_data[prewindow_filter][out_columns]
    postwindow_target_data = weekly_target_data[postwindow_filter][out_columns]
    return prewindow_target_data, postwindow_target_data, pre_window_weeknumbers, \
        post_window_weeknumbers, "Sales Calculated successfully!!", True
def get_pre_post_sales_test_measurement(self, target_variable, test_control_stores_with_time_period,
applicability_criteria, stores_list, weeks_req=None, weeks_before=None, weeks_after = None)->Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list, str, bool]:
if weeks_req is None:
weeks_req = []
if weeks_before is None:
weeks_before = 0
if weeks_after is None:
weeks_after = 0
columns_req = ['pre_start', 'pre_end', 'testwin_start','testwin_end','Test_store_'+self._tarvarmapping['partner_id'], 'Test_store_'+self._tarvarmapping['banner'],
self._tarvarmapping['partner_id'], self._tarvarmapping['banner']]
if len(set(columns_req).intersection(set(test_control_stores_with_time_period.columns)))<len(columns_req):
return pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), list(), "Either of these columns not passed {}".format(columns_req), False
stores_date_info = test_control_stores_with_time_period.to_dict(orient='records')
for record in stores_date_info:
pre_start = datetime.strptime(record['pre_start'], '%Y-%m-%d').date() \
-timedelta(weeks=weeks_before)
pre_end = datetime.strptime(record['pre_end'], '%Y-%m-%d').date() \
-timedelta(weeks=weeks_before)
testwin_start = datetime.strptime(record['testwin_start'], '%Y-%m-%d').date() \
+timedelta(weeks=weeks_after)
testwin_end = datetime.strptime(record['testwin_end'], '%Y-%m-%d').date() \
+timedelta(weeks=weeks_after)
pre_window_weeknumbers = self.find_weeks(pre_start,
pre_end)
pre_window_weeknumbers = list(map(int, pre_window_weeknumbers))
post_window_weeknumbers = self.find_weeks(testwin_start,
testwin_end)
post_window_weeknumbers = list(map(int, post_window_weeknumbers))
weeks_req.extend(pre_window_weeknumbers)
weeks_req.extend(post_window_weeknumbers)
record['pre_period_weeks_required'] = pre_window_weeknumbers
record['post_period_weeks_required'] = post_window_weeknumbers
weeks_req = list(set(weeks_req))
stores_list = list(set(stores_list))
if self._config["feature_parameter"]["is_product_present"] is 1:
weekly_sales = self.get_cbu_sales(
stores=stores_list[:],
applicability_criteria=applicability_criteria,
weeks=weeks_req[:])
else:
weekly_sales = self.get_overall_sales(
stores=stores_list[:],
applicability_criteria=applicability_criteria,
weeks=weeks_req[:])
if weekly_sales.shape[0] == 0:
return pd.DataFrame(), pd.DataFrame(), list()
req_pre_sales, req_post_sales = self._convert_sales_pre_post(weekly_sales = weekly_sales.drop(self._tarvarmapping['banner'], axis=1),
stores_date_info_dict_list = stores_date_info,
target_variable = target_variable,
test_control_map_table = test_control_stores_with_time_period)
req_post_sales = req_post_sales.merge(test_control_stores_with_time_period[['Test_store_'+self._tarvarmapping['partner_id'], 'Test_store_'+self._tarvarmapping['banner'],
self._tarvarmapping['partner_id'], self._tarvarmapping['banner']]],
on=['Test_store_'+self._tarvarmapping['partner_id'],self._tarvarmapping['partner_id']])
req_pre_sales = req_pre_sales.merge(test_control_stores_with_time_period[['Test_store_'+self._tarvarmapping['partner_id'], 'Test_store_'+self._tarvarmapping['banner'],
self._tarvarmapping['partner_id'], self._tarvarmapping['banner']]],
on=['Test_store_'+self._tarvarmapping['partner_id'],self._tarvarmapping['partner_id']])
return req_pre_sales, req_post_sales, weekly_sales, stores_date_info, "sales computed successfully!!", True | /rtm_fast-0.0.2-py3-none-any.whl/rtm_fast/ds/library/sql/sales_master.py | 0.847021 | 0.443781 | sales_master.py | pypi |
from datetime import datetime
from typing import Tuple, final
import pandas as pd
from .utility.sql_utility import SqlUtility
class Stores(SqlUtility):
    """Store-level data access for a test: population filtering, lookups of
    stores participating in other active tests, and validation of uploaded
    store files.
    """

    def __init__(self, config, test_id):
        super().__init__(config)
        self._test_id = test_id
        self._metadata = self._config["metadata"]
        self._storemstrmapping = self._config["store_mstr_columns"]

    def set_test_id(self, test_id):
        """Sets the current test_id"""
        self._test_id = test_id

    def get_filtered_stores(self, applicability_criteria) -> pd.DataFrame:
        """
        About function
        --------------
        This function needs to be overriden, developer needs to write the query to get store information from the storemaster table
        based on the filter selected in the applicability criteria

        Parameters
        ----------
        applicability_criteria: key-value pairs of the filters (product and store) selection made in the tool

        Return values
        -------
        store attributes dataframe
        """
        pass

    def get_uploaded_stores_info(self, stores_list, applicability_criteria) -> pd.DataFrame:
        """
        About function
        --------------
        This function needs to be overriden, developer needs to write the query to get store information from the storemaster table
        based on the list of the store identifier (config[store_mstr_columns][partner_id]) value present in stores_list

        Parameters
        ----------
        stores_list: list of store identifier values to fetch
        applicability_criteria: key-value pairs of the filters (product and store) selection made in the tool

        Return values
        -------
        store attributes dataframe
        """
        pass

    def filter_population_uploaded_stores(self, uploaded_file_df=None) -> list:
        """
        About function
        --------------
        Once the uploaded population stores are stored in the database table 'config['tables']['upload_stores']' (storing of uploaded stores will be done by UI team)
        now these stores will be considered as the population stores everytime (store filters in the applicability criteria will be considered)
        Note: config['tables']['upload_stores'] this table will be maintaining store identifier refer config[store_mstr_columns][partner_id]

        Parameters
        ----------
        uploaded_file_df: optional dataframe for DS person to work on upload functionality; it will have same attributes like config['tables']['upload_stores']

        Return values
        -------
        list of store identifiers
        """
        if (self._test_id is None) or ("upload_stores" not in self._config['tables']):
            storelist = []
        else:
            if uploaded_file_df is None:
                storelistquery = """SELECT store_id as {column_name} FROM {db}
                                    WHERE test_id_id = {testid}"""
                storelistqueryDf = self.execute_sql_query(
                    query=storelistquery,
                    data={"column_name": self._config["store_mstr_columns"]['partner_id'],
                          "db": self._config['tables']['upload_stores'],
                          "testid": self._test_id})
            else:
                storelistqueryDf = uploaded_file_df
                storelistqueryDf = storelistqueryDf[storelistqueryDf['test_id_id'] == self._test_id]
            storelist = [] if storelistqueryDf.shape[0] == 0 else list(
                storelistqueryDf[self._config["store_mstr_columns"]['partner_id']].unique())
        return storelist

    def filter_population(self, applicability_criteria, storelist=None, uploaded_file_df=None) -> pd.DataFrame:
        """
        About function
        --------------
        All the tool features depends on the population of stores, this function fetches the store records that meet either of the following:
        1) Store that were uploaded by user
        2) if stores were not uploaded then this function will use the store filter selected in applicability criteria to get stores

        Parameters
        ----------
        applicability_criteria: key-value pairs of the filters (product and store) selection made in the tool
        storelist: optional parameter in case the developer needs to fetch the attributes of known store identifier
        uploaded_file_df: optional dataframe for DS person to work on upload functionality; it will have same attributes like config['tables']['upload_stores']

        Return values
        -------
        dataframe of store attributes
        """
        # No mutable default argument, and never mutate the caller's list.
        storelist = list(storelist) if storelist else []
        if not storelist:
            storelist = self.filter_population_uploaded_stores(uploaded_file_df=uploaded_file_df)
        if not storelist:
            stores_master_df = self.get_filtered_stores(applicability_criteria=applicability_criteria)
        else:
            # Pad a copy with the -1 sentinel so a single-element list still
            # forms a valid SQL IN tuple downstream.
            stores_master_df = self.get_uploaded_stores_info(
                stores_list=storelist + [-1],
                applicability_criteria=applicability_criteria)
        return stores_master_df

    def read_test_master_table_by_test_ids(self, test_id) -> pd.DataFrame:
        """
        About function
        --------------
        This function reads all the details regarding the test from test master table refer config[tables][test_mstr]

        Parameters
        ----------
        test_id: could be a list of test ids or a single test id

        Return values
        -------
        dataframe of test master filtered for specified test_id/s
        """
        if not isinstance(test_id, list):
            test_id = [test_id]
        # Pad a copy with the -1 sentinel (valid SQL IN tuple for a single id)
        # without mutating the caller's list.
        test_id = test_id + [-1]
        return self.execute_sql_query(
            query="SELECT * FROM {test_master_table} WHERE is_active=1 and is_deleted = 0 AND test_id IN {test_ids}",
            data={'test_master_table': self._config['tables']['test_mstr'], "test_ids": tuple(test_id)})

    def read_test_measurement_table_by_test_ids(self, test_id):
        """
        About function
        --------------
        This function reads all the details from the test measurement table refer config[tables][measurement]

        Parameters
        ----------
        test_id: could be a list of test ids or a single test id

        Return values
        -------
        dataframe of test measurement filtered for specified test_id/s
        """
        if not isinstance(test_id, list):
            test_id = [test_id]
        # Copy before padding with the -1 sentinel so the caller's list is not mutated.
        test_id = test_id + [-1]
        temp = self.execute_sql_query(
            query="SELECT * FROM {test_measurement_table} WHERE is_active=1 and is_deleted = 0 AND test_id IN {test_ids}",
            data={'test_measurement_table': self._config['tables']['measurement'], "test_ids": tuple(test_id)})
        if temp.shape[0] == 0:
            # Empty frame with the key column so callers can still filter on it.
            temp = pd.DataFrame(columns=['test_id_id'])
        return temp

    def read_test_master_table_active_tests(self, date):
        """
        About function
        --------------
        This function returns all tests records that have postwindow end date greater than input date. Table refer config[tables][test_mstr]

        Parameters
        ----------
        date: string value of format 'yyyy-mm-dd'

        Return values
        -------
        dataframe of active test master records
        """
        temp = self.execute_sql_query(
            query="SELECT * FROM {test_master_table} WHERE is_active=1 and is_deleted = 0 AND testwin_end> '{date}'",
            data={'test_master_table': self._config['tables']['test_mstr'], "date": date})
        if temp.shape[0] == 0:
            temp = pd.DataFrame(columns=['test_id'])
        return temp

    def read_test_map_table_by_test_ids(self, test_id) -> pd.DataFrame:
        """
        About function
        --------------
        This function reads all the details from the test store map table refer config[tables][test_store_map];
        This table stores the list of stores uploaded for the test

        Parameters
        ----------
        test_id: could be a list of test ids or a single test id

        Return values
        -------
        dataframe of test store map filtered for specified test_id/s
        """
        if not isinstance(test_id, list):
            test_id = [test_id]
        # Copy before padding with the -1 sentinel so the caller's list is not mutated.
        test_id = test_id + [-1]
        temp = self.execute_sql_query(
            query="SELECT * FROM {test_map_table} WHERE is_active=1 and is_deleted = 0 AND test_id_id IN {test_id_ids}",
            data={"test_map_table": self._config['tables']['test_store_map'], "test_id_ids": tuple(test_id)})
        if temp.shape[0] == 0:
            temp = pd.DataFrame(columns=['created_on','modified_on','is_active','deleted_at','is_deleted','storemap_id','teststore_id','created_by_id','test_id_id','updated_by_id'])
        return temp

    def read_test_map_table_active_test_variable_dates(self, date) -> pd.DataFrame:
        """
        About function
        --------------
        This function reads all the details from the test store map table where postwindow end is greater than the date;
        Table name refer config[tables][test_store_map];
        This table stores the list of stores uploaded for the test

        Parameters
        ----------
        date: string value of format 'yyyy-mm-dd'

        Return values
        -------
        dataframe of test store map records whose test window ends after `date`
        """
        temp = self.execute_sql_query(
            query="SELECT * FROM {test_map_table} WHERE is_active=1 and is_deleted = 0 AND testwin_end > '{date}'",
            data={"test_map_table": self._config['tables']['test_store_map'], "date": date})
        if temp.shape[0] == 0:
            temp = pd.DataFrame(columns=['created_on','modified_on','is_active','deleted_at','is_deleted','storemap_id','teststore_id','created_by_id','test_id_id','updated_by_id'])
        return temp

    def read_control_store_by_test_ids(self, test_id) -> pd.DataFrame:
        """
        About function
        --------------
        This function reads all the details from the control store master table where test id is in the list/value passed;
        Table name refer config[tables][control_store_mstr]

        Parameters
        ----------
        test_id: could be a list of test ids or a single test id

        Return values
        -------
        dataframe of control store master filtered for specified test_id/s
        """
        if not isinstance(test_id, list):
            test_id = [test_id]
        # Copy before padding with the -1 sentinel so the caller's list is not mutated.
        test_id = test_id + [-1]
        temp = self.execute_sql_query(
            query="SELECT * FROM {control_store_master} WHERE is_active = 1 and is_deleted = 0 AND test_id_id IN {test_id_ids}",
            data={"control_store_master": self._config['tables']['control_store_mstr'], "test_id_ids": tuple(test_id)})
        if temp.shape[0] == 0:
            temp = pd.DataFrame(columns=['is_active','is_deleted','constore_id','created_by_id','test_id_id','updated_by_id'])
        return temp

    @final
    def filter_active_test_control_stores(self, stores_master_df=None, remove_type=None, max_week_data_available=None):
        """
        About function
        --------------
        This function removes stores participating in other active tests (and
        optionally their control stores) from the stores_master_df passed

        Parameters
        ----------
        stores_master_df: population stores or dataframe of the stores
        remove_type: 'both' removes both active test and control stores;
            any other value removes active test stores only
        max_week_data_available: string 'yyyy-mm-dd' maximum date for which data is available in the database

        Return value
        -------
        dataframe of stores with active test (and optionally control) stores removed
        """
        # Need to make Change here as per interaction with Database (starting here)
        storemstrmapping = self._config["store_mstr_columns"]
        if self._config["feature_parameter"]["test_variable_dates"] == 1:
            test_map_df = self.read_test_map_table_active_test_variable_dates(max_week_data_available)
            active_test = list(test_map_df['test_id_id'].unique())
            temp_test = list(self.read_test_master_table_by_test_ids(active_test)['test_id'].unique())
            # Keep only map entries whose test is still present in the master table.
            active_test = set(temp_test).intersection(set(active_test))
        else:
            print("Fetching active test from test master table")
            active_test = list(self.read_test_master_table_active_tests(max_week_data_available)['test_id'].unique())
        # The current test's own stores must not be treated as "active elsewhere".
        active_test = list(set(active_test) - set([self._test_id]))
        if len(active_test) > 0:
            test_map_df = self.read_test_map_table_by_test_ids(active_test)
        else:
            return stores_master_df
        if len(active_test) != 0:
            # Drop stores that are test stores of other active tests.
            filtered_stores_df = stores_master_df[~stores_master_df[self._storemstrmapping['partner_id']].isin(
                test_map_df[test_map_df['test_id_id'].isin(active_test)]['teststore_id'].unique())]
            test_control_pair_df = self.read_control_store_by_test_ids(active_test)
            if test_control_pair_df.shape[0] > 0 and remove_type == 'both':
                # Get the control stores info from the active tests details.
                active_control_stores = test_control_pair_df[test_control_pair_df["test_id_id"].isin(
                    active_test)]
                # Filter the control stores as well.
                if active_control_stores.shape[0] != 0:
                    filtered_stores_df = filtered_stores_df[~filtered_stores_df[storemstrmapping["partner_id"]].isin(
                        active_control_stores[storemstrmapping['partner_id']].values.tolist())]
            stores_master_df = filtered_stores_df
        return stores_master_df

    @final
    def validate_uploaded_stores_format(self, reference_file, uploaded_file, columns) -> Tuple[str, bool]:
        """
        About function
        --------------
        This function validates the uploaded store file against the reference
        template and the expected column datatypes

        Parameters
        ----------
        reference_file: template file dataframe
        uploaded_file: the actual file user has uploaded
        columns: expected column dtypes; by default it is extracted from the config file

        Return values
        -------
        message, success_flag
        """
        if reference_file.shape[1] == 0:
            return "Reference file doesnt have any columns!!", False
        if uploaded_file.shape[1] == 0:
            return "Uploaded file doesnt have any columns!!", False
        if uploaded_file.shape[0] == 0:
            return "Uploaded file is empty!!", False
        # Plain comparisons instead of identity checks against the False singleton.
        if uploaded_file.shape[1] != reference_file.shape[1]:
            return "Please refer template. Number of columns in uploaded file does not match with template", False
        if sorted(uploaded_file.columns) != sorted(reference_file.columns):
            return "Please refer template. Name of columns in uploaded file does not match with template", False
        actual_file_column_format = dict(
            uploaded_file.loc[:, sorted(uploaded_file.columns)].dtypes)
        # NOTE(review): assumes ``columns`` is an iterable of (name, dtype) pairs
        # so ``dict(sorted(columns))`` is valid — confirm with callers.
        reference_file_column_format = dict(sorted(columns))
        if actual_file_column_format != reference_file_column_format:
            return "Please ensure that uploaded matches the following datatypes: {}".format(reference_file_column_format), False
        # All checks passed; the original trailing re-check was unreachable.
        return "Uploaded file follows the template!!", True

    def validate_uploaded_presence_store_master(self, uploaded_stores, store_identifier, applicability_criteria) -> Tuple[pd.DataFrame, str, bool]:
        """
        About function
        --------------
        This function validates the uploaded stores against the store master table

        Parameters
        ----------
        uploaded_stores: dataframe of the stores the user has uploaded
        store_identifier: column of uploaded_stores holding the store identifier
        applicability_criteria: key-value pairs of the filters (product and store) selection made in the tool

        Return values
        -------
        dataframe of valid stores, message, success_flag
        """
        uploaded_stores_list = list(uploaded_stores[store_identifier].unique())
        total_stores = len(uploaded_stores_list)
        # Pad a copy with the -1 sentinel so a single store still forms a valid SQL IN tuple.
        uploaded_stores_mapped = self.get_uploaded_stores_info(
            stores_list=uploaded_stores_list + [-1],
            applicability_criteria=applicability_criteria)
        if uploaded_stores_mapped.shape[0] == 0:
            return uploaded_stores_mapped, "All uploaded stores are not present in Store Master!!", False
        stores_mapped = uploaded_stores_mapped.shape[0]
        percentage_mapped = round((uploaded_stores_mapped.shape[0])*100/total_stores, 2)
        message = "Out of {total_stores}, {store_mapped} got mapped which is around {percentage_mapped}".format(
            total_stores=total_stores, store_mapped=stores_mapped, percentage_mapped=percentage_mapped)
        return uploaded_stores_mapped, message, True

    @final
    def validate_uploaded_stores_active_stores(self, stores_df, max_date_data_available, active_stores_filter_type="both") -> Tuple[pd.DataFrame, pd.DataFrame, str, bool]:
        """
        About function
        --------------
        This function validates the uploaded store information and calculates the number of stores that are active in other tests

        Parameters
        ----------
        stores_df: dataframe of the stores information that have been uploaded (must have config[store_mstr_columns][partner_id])
        max_date_data_available: string value of the maximum date data maintained in the sales table. Format 'yyyy-mm-dd'
        active_stores_filter_type: variable to keep a check on if we want to remove both test and control stores or test store only
                                   default value is 'both'

        Return values
        -------
        dataframe of filtered stores, message, success_flag
        """
        filtered_stores = self.filter_active_test_control_stores(
            stores_master_df=stores_df,
            remove_type=active_stores_filter_type,
            max_week_data_available=max_date_data_available)
        if filtered_stores.shape[0] == 0:
            return filtered_stores, "All uploaded stores are participating in other tests", False
        total_stores = stores_df[self._config["store_mstr_columns"]["partner_id"]].nunique()
        active_stores = total_stores-filtered_stores[self._config["store_mstr_columns"]["partner_id"]].nunique()
        percent_active = round(active_stores*100/total_stores, 2)
        message = "Out of {total_stores} valid stores, {active_stores} are active which is around {percentage} % cannot be used as test/control stores".format(total_stores=total_stores, active_stores=active_stores, percentage=percent_active)
        return filtered_stores, message, True
import sys
import traceback
from ds.common_utilities_registry import *
def get_sales_object(config, test_id):
    """
    About function
    --------------
    This function returns the appropriate sales object for a market.
    In the market's config there must be a key "Constructors" holding a
    dictionary with a "Sales" entry naming the class to instantiate.
    Note: dont set value of "Sales" key as Sales master class,
    developer needs to inherit Sales master class in common utility

    Example
    -------
    US:{"Constructors": {
            "Sales": "FAST_US_Sales",
            "Stores": "FAST_US_Stores",
            "Tool": "Fast_US_Tool"},
        ....
        }

    Parameters
    ----------
    config: config set for the market,
    test_id: test_id of the current test

    Return values
    -------
    sales object

    Raises
    ------
    Exception: when config has no 'Constructors'/'Sales' entry.
    """
    # .get keeps a missing 'Constructors' key from raising a bare KeyError.
    constructors = config.get('Constructors', {})
    if 'Sales' in constructors:
        # The named class is resolved from this module's namespace
        # (populated by the star import above).
        sales_class = getattr(sys.modules[__name__], constructors['Sales'])
        return sales_class(config=config, test_id=test_id)
    # NOTE: the original raised with traceback.format_exc(), which yields
    # "NoneType: None" outside an except block; raise a meaningful message.
    raise Exception("An error occurred while creating the sales object: "
                    "'Sales' constructor not found in config['Constructors']")
def get_store_object(config, test_id):
    """
    About function
    --------------
    This function returns the appropriate store object for a market.
    In the market's config there must be a key "Constructors" holding a
    dictionary with a "Stores" entry naming the class to instantiate.
    Note: dont set value of "Stores" key as Store master class,
    developer needs to inherit Store master class in common utility

    Example
    -------
    US:{"Constructors": {
            "Sales": "FAST_US_Sales",
            "Stores": "FAST_US_Stores",
            "Tool": "Fast_US_Tool"},
        ....
        }

    Parameters
    ----------
    config: config set for the market,
    test_id: test_id of the current test

    Return values
    -------
    Store object

    Raises
    ------
    Exception: when config has no 'Constructors'/'Stores' entry.
    """
    # .get keeps a missing 'Constructors' key from raising a bare KeyError.
    constructors = config.get('Constructors', {})
    if 'Stores' in constructors:
        # The named class is resolved from this module's namespace
        # (populated by the star import above).
        stores_class = getattr(sys.modules[__name__], constructors['Stores'])
        return stores_class(config=config, test_id=test_id)
    # NOTE: the original raised with traceback.format_exc(), which yields
    # "NoneType: None" outside an except block; raise a meaningful message.
    raise Exception("An error occurred while creating the stores object: "
                    "'Stores' constructor not found in config['Constructors']")
from datetime import datetime
from typing import Tuple
import pandas as pd
class TargetEstimate:
    """
    A class to represent features of TargetEstimate.

    ...

    Attributes
    ----------
    config : configuration present in config_data either for a region or overall
    region: key present in config
    sales_object : Object of sales class
    store_object : Object of store class

    Methods
    -------
    data_extract(): to calculate required sales/volume and get the store details in population
    calculate_rsv(): calculate the RSV/POS/Sales/Volume value required
                    and estimate number of stores in population
    get_breakeven_lift(): estimates the breakeven lift% value
    get_cost(): estimates the cost of implementing RTM activity if breakeven lift is known
    """

    def __init__(self, config, region, sales_object, store_object) -> None:
        """
        Constructs all the necessary attributes for the target estimate object.

        Parameters
        ----------
        config : configuration present in config_data either for a region or overall
        region: key present in config
        sales_object : Object of sales class
        store_object : Object of store class
        """
        # Fall back to the whole config when it is not keyed by region.
        self._config = config[region] if region in config else config
        self._sales_object = sales_object
        self._store_object = store_object
        self._metadata = self._config["metadata"]["test_configuration"]
        self._tarvarmapping = self._config["weekly_target_variable"]
        self._storemstrmapping = self._config["store_mstr_columns"]
        self._rsvestimate = 0.0
        self._weekly_target_data = pd.DataFrame()
        self._breakevenliftpercentage = 0.0

    def data_extract(self, target_variable, timeframestart, timeframeend, storelist,
                     applicability_criteria,
                     uploaded_file_df=None) -> Tuple[pd.DataFrame, list, str, bool]:
        """
        About function
        --------------
        This function fetches the population stores details (filter selected/uploaded)
        along with calculating the required sales for the stores
        calls
            1) filter_population
            2) get_sales_calculate_rsv

        Parameters
        ----------
        target_variable: weekly sales column which needs to be estimated (sales or volume),
        timeframestart: date from which preperiod starts,
        timeframeend: date on which preperiod ends,
        storelist: list of stores for which sales to be calculated; by default pass empty list
        applicability_criteria: the product and stores attributes selected
                                at tool in dictionary format,
        uploaded_file_df: optional parameter; its for DS people to use
                          uploaded store as population in case not connected to DB

        Return values
        -------
        dataframe with CBU/overall sales,
        list of weeks on which sales is calculated,
        message,
        success flag
        """
        # `and` (not bitwise `&`) for readable, short-circuiting None checks.
        if (timeframestart is not None) and (timeframeend is not None) \
                and (target_variable is not None):
            timeframeend_date = datetime.strptime(
                timeframeend, '%Y-%m-%d').date()
            timeframestart_date = datetime.strptime(
                timeframestart, '%Y-%m-%d').date()
            timeframe_weeknumbers = self._sales_object\
                .find_weeks(timeframestart_date, timeframeend_date)
            if len(storelist) == 0:
                # No explicit stores: derive the population from the filters/upload.
                stores_master_df = self._store_object\
                    .filter_population(applicability_criteria=applicability_criteria,
                                       storelist=storelist,
                                       uploaded_file_df=uploaded_file_df)
                if stores_master_df.shape[0] == 0:
                    return pd.DataFrame(), list(),\
                        "No stores found matching population criteria", False
                storelist = list(
                    stores_master_df[self._storemstrmapping["partner_id"]].unique())
            self._weekly_target_data, consideryearweeks \
                = self._sales_object.get_sales_calculate_rsv(
                    stores=storelist,
                    target_variable=target_variable,
                    applicability_criteria=applicability_criteria,
                    consideryearweeks=timeframe_weeknumbers)
            if self._weekly_target_data.shape[0] == 0:
                return self._weekly_target_data, consideryearweeks,\
                    "No sales found between the timeperiod selected!!", False
            return self._weekly_target_data, consideryearweeks,\
                "Successfully Calculated!!", True
        return pd.DataFrame(), list(),\
            "One of these parameters is None timeframestart, timeframeend, target_variable", False

    def calculate_rsv(self, target_variable) -> Tuple[float, int]:
        """
        About function
        --------------
        This function calculates sum of sales on the target variable passed

        Parameters
        ----------
        target_variable: weekly sales column which needs to be estimated (sales or volume),

        Return values
        -------
        total sales or volume,
        number of stores in population
        """
        store_count = self._weekly_target_data[self._tarvarmapping["partner_id"]].nunique()
        self._rsvestimate = self._weekly_target_data[target_variable].sum().round(2)
        return self._rsvestimate, store_count

    def get_breakeven_lift(self, rsv_estimate, cost, num_of_teststores,
                           applicability_criteria, uploaded_file_df=None) -> Tuple[float, str]:
        """
        About function
        --------------
        This function estimates the break even lift

        Parameters
        ----------
        rsv_estimate: total sales/volume sold in annual RSV period;
                      got from calculate_rsv function
        cost: estimated cost of activity on population stores,
        num_of_teststores: number of stores considering in the test
        applicability_criteria: the product and stores attributes selected
                                at tool in dictionary format,
        uploaded_file_df: optional parameter; its for DS people to use
                          uploaded store as population in case not connected to DB

        Return values
        -------
        floating estimated breakeven lift (0 on missing parameters),
        message
        """
        if (rsv_estimate is not None) and (cost is not None):
            stores_master_df = self._store_object.filter_population(
                applicability_criteria=applicability_criteria,
                uploaded_file_df=uploaded_file_df)
            print("Breakeven Lift - Population size: ", stores_master_df.shape)
            # Get the proportion of stores to be sampled for each banner.
            # `and` short-circuits, so the len() lookup can no longer raise
            # KeyError when 'rawconvfactors' is absent (bitwise `&` evaluated both sides).
            if ("rawconvfactors" in self._metadata) and (len(self._metadata['rawconvfactors']) > 0):
                banner_label = self._tarvarmapping["banner"]
                partner_label = self._tarvarmapping["partner_id"]
                count_df = stores_master_df\
                    .groupby(banner_label)[partner_label]\
                    .count()\
                    .reset_index()\
                    .rename(columns={partner_label: "Count"})
                count_df["prop"] = count_df["Count"]/count_df["Count"].sum()
                count_df["stores_proportioned"] = count_df["prop"] * num_of_teststores
                count_df["stores_proportioned"] = count_df["stores_proportioned"].round(2)
                bannerwisestoresdict = dict(zip(count_df[banner_label],
                                                count_df["stores_proportioned"]))
                rawconvfactors = self._metadata["rawconvfactors"]
                # Weighted average of the banner conversion factors over the sampled stores.
                numerator = sum([bannerwisestoresdict[k]*v for k,
                                 v in rawconvfactors.items() if k in bannerwisestoresdict.keys()])
                denominator = sum(list(bannerwisestoresdict.values()))
                conversionfactor = numerator / denominator
            else:
                conversionfactor = 1
            cost = cost/conversionfactor
            self._breakevenliftpercentage = (cost/rsv_estimate)*100
            return self._breakevenliftpercentage, "Calculated breakeven lift successfully!!"
        return 0, "Parameter missing! Either Cost or RSV value not passed to function"

    def get_cost(self, rsv_estimate=None, breakevenliftpercentage=None) -> float:
        """
        About function
        --------------
        This function estimates the cost of implementing the RTM activity on population stores.
        This function to be used when breakevenlift is known but cost is unknown

        Parameters
        ----------
        rsv_estimate: total sales or volume in annual RSV period
        breakevenliftpercentage: known break even lift

        Return values
        -------
        floating value of cost (0 when either parameter is missing)
        """
        if (rsv_estimate is not None) and (breakevenliftpercentage is not None):
            self._rsvestimate = rsv_estimate
            cost = (breakevenliftpercentage*rsv_estimate)/100
            return round(cost, 2)
        return 0
from datetime import datetime
from typing import Tuple
import pandas as pd
class RSVEstimate:
"""
A class to represent features of RSVEstimate.
...
Attributes
----------
config : configuration present in config_data either for a region or overall
region: key present in config
sales_implementation : Object of sales class
store_implementation : Object of store class
Methods
-------
data_extract(): to calculate required sales/volume and get the store details in population
calculate_rsv(): calculate the RSV value required and estimate number of stores in population
get_breakeven_lift(): estimates the breakeven lift% value
get_cost(): estimates the cost of implementing RTM activity if breakeven lift is known
"""
def __init__(self, config, region, sales_implementation, store_implemenation) -> None:
"""
Constructs all the necessary attributes for the rsv estimate object.
Parameters
----------
config : configuration present in config_data either for a region or overall
region: key present in config
sales_implementation : Object of sales class
store_implementation : Object of store class
"""
self._config = config[region] if region in config else config
self._sales_implementation = sales_implementation
self._store_implemenation = store_implemenation
self._metadata = self._config["metadata"]["test_configuration"]
self._tarvarmapping = self._config["weekly_target_variable"]
self._storemstrmapping = self._config["store_mstr_columns"]
self._rsvestimate = 0.0
self._weekly_target_data = pd.DataFrame()
self._breakevenliftpercentage = 0.0
def data_extract(self, target_variable, timeframestart, timeframeend, storelist,
applicability_criteria,
uploaded_file_df=None) -> Tuple[pd.DataFrame, list, str, bool]:
"""
About function
--------------
This function fetches the population stores details (filter selected/uploaded)
along with calculating the required sales for the stores
calls
1) filter_population
2) get_sales_calculate_rsv
Parameters
----------
target_variable: weekly sales column which is needs to be estimates (sales or volume),
timeframestart: date from which preperiod starts,
timeframeend: date on which preperiod ends,
stores: list of stores for which sales to be calculated; by default pass empty list
applicability_criteria: the product and stores attributes selected
at tool in dictionary format,
uploaded_file_df: optional parameter; its for DS people to use
uploaded store as population in case not connected to DB
Return values
-------
dataframe with CBU/overall sales,
list of weeks on which sales is calculated,
message
success flag
"""
if (timeframestart is not None) & (timeframeend is not None) \
& (target_variable is not None):
timeframeend_date = datetime.strptime(
timeframeend, '%Y-%m-%d').date()
timeframestart_date = datetime.strptime(
timeframestart, '%Y-%m-%d').date()
timeframe_weeknumbers = self._sales_implementation\
.find_weeks(timeframestart_date, timeframeend_date)
if len(storelist) == 0:
stores_master_df = self._store_implemenation\
.filter_population(applicability_criteria=applicability_criteria,
storelist=storelist,
uploaded_file_df=uploaded_file_df)
if stores_master_df.shape[0] == 0:
return pd.DataFrame(), list(),\
"No stores found matching population criteria", False
storelist = list(
stores_master_df[self._storemstrmapping["partner_id"]].unique())
self._weekly_target_data = pd.DataFrame()
consideryearweeks = list()
self._weekly_target_data, consideryearweeks \
= self._sales_implementation.get_sales_calculate_rsv(
stores=storelist,
target_variable=target_variable,
applicability_criteria=applicability_criteria,
consideryearweeks=timeframe_weeknumbers)
if self._weekly_target_data.shape[0] == 0:
return self._weekly_target_data, consideryearweeks,\
"No sales found between the timeperiod selected!!", False
return self._weekly_target_data, consideryearweeks,\
"Successfully Calculated!!", True
return pd.DataFrame(), list(),\
"One of these parameters is None timeframestart, timeframeend, target_variable", False
def calculate_rsv(self, target_variable) -> Tuple[float, int]:
"""
About function
--------------
This function calculates sum of sales on the target variable passed
Parameters
----------
target_variable: weekly sales column which is needs to be estimates (sales or volume),
Return values
-------
total sales or volume,
number of stores in population
"""
store_count = self._weekly_target_data[self._tarvarmapping["partner_id"]].nunique(
)
self._rsvestimate = self._weekly_target_data[target_variable].sum().round(
2)
return self._rsvestimate, store_count
def get_breakeven_lift(self, rsv_estimate, cost, num_of_teststores,
applicability_criteria, uploaded_file_df=None) -> Tuple[float, str]:
"""
About function
--------------
This function estimates the break even lift
Parameters
----------
rsv_estimate: total sales/volume sold in annual RSV period;
got from calculate_rsv function
cost: esimated cost of activity on population stores,
num_of_teststores: number of stores considering in the test
applicability_criteria: the product and stores attributes selected
at tool in dictionary format,
uploaded_file_df: optional parameter; its for DS people to use
uploaded store as population in case not connected to DB
Return values
-------
floating estimated breakeven lift,
message,
booelan success flag
"""
if (rsv_estimate is not None) & (cost is not None):
stores_master_df = self._store_implemenation.filter_population(
applicability_criteria=applicability_criteria,
uploaded_file_df=uploaded_file_df)
print("Breakeven Lift - Population size: ", stores_master_df.shape)
# Get the proportion of stores to be sampled for each banner
if ("rawconvfactors" in self._metadata) & (len(self._metadata['rawconvfactors']) > 0):
banner_label = self._tarvarmapping["banner"]
partner_label = self._tarvarmapping["partner_id"]
count_df = stores_master_df\
.groupby(banner_label)[partner_label]\
.count()\
.reset_index()\
.rename(columns={partner_label: "Count"})
count_df["prop"] = count_df["Count"]/count_df["Count"].sum()
count_df["stores_proportioned"] = count_df["prop"] * num_of_teststores
count_df["stores_proportioned"] = count_df["stores_proportioned"].round(2)
bannerwisestoresdict = dict(zip(count_df[banner_label],
count_df["stores_proportioned"]))
rawconvfactors = self._metadata["rawconvfactors"]
numerator = sum([bannerwisestoresdict[k]*v for k,
v in rawconvfactors.items() if k in bannerwisestoresdict.keys()])
denominator = sum(list(bannerwisestoresdict.values()))
conversionfactor = numerator / denominator
else:
conversionfactor = 1
cost = cost/conversionfactor
self._breakevenliftpercentage = (cost/rsv_estimate)*100
return self._breakevenliftpercentage, "Calculated breakeven lift successfully!!"
return 0, "Parameter missing! Either Cost or RSV value not passed to function"
def get_cost(self, rsv_estimate=None, breakevenliftpercentage=None) -> float:
"""
About function
--------------
This function estimates the cost of implementing the RTM activity on population stores.
This function to be used when breakevenlift is known but cost is unknow
Parameters
----------
rsv_estimate: total sales or volume in annual RSV period
breakevenliftpercentage: known break even lift
Return values
-------
floating value of cost
"""
if (rsv_estimate is not None) & (breakevenliftpercentage is not None):
self._rsvestimate = rsv_estimate
cost = (breakevenliftpercentage*rsv_estimate)/100
return round(cost, 2)
return 0 | /rtm_fast-0.0.2-py3-none-any.whl/rtm_fast/ds/library/ds/feature/rsv_estimation/rsv_estimate_master.py | 0.931742 | 0.459682 | rsv_estimate_master.py | pypi |
from typing import Tuple, final
import numpy as np
import pandas as pd
import statsmodels.api as sm
from ds.library.ds_common_functions import gower_matrix
from scipy import stats
from sklearn.preprocessing import StandardScaler
class CntrlStoreSelectionFeature:
    """Control-store selection feature: pairs test stores with similar
    control stores using Gower distance and weekly-sales correlation."""

    def __init__(self, config, region, sales_object, store_object, test_id) -> None:
        """Capture the (region-specific) config, data-access objects and test id.

        config: full configuration dict; when *region* is one of its keys the
            regional sub-config is used, otherwise *config* itself.
        sales_object / store_object: data-access helpers for sales / store data.
        test_id: identifier of the test whose control stores are selected.
        """
        if region in config:
            active_config = config[region]
        else:
            active_config = config
        self._config = active_config
        self._metadata = active_config["metadata"]
        self._tarvarmapping = active_config["weekly_target_variable"]
        self._storemstrmapping = active_config["store_mstr_columns"]
        self._sales_object = sales_object  # sales data-access object
        self._store_object = store_object  # store data-access object
        self._test_id = test_id
        self._control_pool = []  # optional pre-selected pool of control stores
    def data_extract(self, applicability_criteria, target_variable, test_type, store_list, uploaded_file_df=None) -> Tuple[pd.DataFrame, pd.DataFrame, list, pd.DataFrame, pd.DataFrame, str, bool]:
        """Pull everything needed for control-store selection for this test.

        Looks up the test in the test-master table, filters the store
        population by *applicability_criteria* / *store_list*, then computes
        annual RSV lifts for the surviving stores via the sales object.

        Returns (annualrsvlifts, valid_sales_stores, consideryearweeks,
        test_master, stores_master_df, message, success_flag); on any failure
        the frames/lists are empty and success_flag is False.
        """
        test_master = self._store_object.read_test_master_table_by_test_ids(test_id=self._test_id)
        # Defensive re-filter: keep only rows for this test id.
        test_master = test_master[test_master['test_id'] == self._test_id]
        if test_master.shape[0] == 0:
            return pd.DataFrame(), pd.DataFrame(), [], pd.DataFrame(), pd.DataFrame(),\
                """No records found for the current test in Test Master table!!""", False
        stores_master_df = self._store_object.filter_population(applicability_criteria=applicability_criteria, storelist=store_list, uploaded_file_df=uploaded_file_df)
        if stores_master_df.shape[0] == 0:
            return pd.DataFrame(), pd.DataFrame(), [], pd.DataFrame(), pd.DataFrame(), "No stores found in the population", False
        # Annual RSV lifts for every store remaining in the population.
        annualrsvlifts, valid_sales_stores, _, _, consideryearweeks,\
            message, success_flag = self._sales_object.get_annual_rsv_lifts(
                target_variable=target_variable,
                test_master_df=test_master,
                stores=list(stores_master_df[self._storemstrmapping["partner_id"]].unique()),
                applicability_criteria=applicability_criteria,
                test_type=test_type,
            )
        if success_flag is False:
            return pd.DataFrame(), pd.DataFrame(), [], pd.DataFrame(), pd.DataFrame(), message, False
        return annualrsvlifts, valid_sales_stores, consideryearweeks, test_master, stores_master_df, "Sales computed Successfully!", True
@final
def _get_feature_thresholds(self, teststores, controlstores, features)-> Tuple[dict]:
"""
# This function corresponds to the test vs population summary module in the tool - No uk specific code in it
# Note this function is currently not in use in the latest version of the FAST Tool
"""
threshold_dict = {}
metadata = self._config["metadata"]["test_planning"]
for feature in features:
std1 = teststores[feature].std()
std2 = controlstores[feature].std()
samples1 = teststores[feature].shape[0]
samples2 = controlstores[feature].shape[0]
numerator = np.power((std1*std1/samples1 + std2*std2/samples2), 2)
denominator = (np.power((std1*std1/samples1), 2)/(samples1-1) +
np.power((std2*std2/samples2), 2)/(samples2-1))
degfreedom = numerator/denominator
pval = metadata["test_vs_control_pvalue"]
criticalvalue = stats.t.ppf(1-pval/2, degfreedom)
difference_in_means = criticalvalue * \
np.sqrt((std1*std1/samples1 + std2*std2/samples2))
threshold_dict[feature] = difference_in_means
return threshold_dict
    @final
    def prepare_test_control_stores(self, dfA=None, dfB=None, teststoreid=None, gowerdistances=None, num_cntrl_rejected=None, calltype=None,
                                    reqcontrolstores=None, corrbased=None, rejected_with_control_left=None):
        """Rank candidate control stores for a single test store.

        *gowerdistances* is the row of Gower distances from *teststoreid* to
        every row of *dfB* (the candidate controls). Candidates are sorted by
        ascending distance; ``Similarity_Measure = 1 - Gower_Distance``.

        Selection rules:
        - ``num_cntrl_rejected is None`` (first pass):
          "old" keeps the single closest candidate; "new" with ``corrbased == 1``
          keeps every candidate within 0.05 of the best similarity (falling
          back to the top *reqcontrolstores* when fewer qualify), otherwise
          just the top *reqcontrolstores*.
        - otherwise (recompute pass): candidates previously rejected for this
          test store (``rejected_with_control_left[teststoreid]``) are excluded
          before the same rules apply; for ``corrbased == 1`` the kept count
          is widened to the size of the 0.05 similarity band when larger.

        Finally the configured test-store attribute columns are copied onto
        the result as ``Test_store_<col>`` and the candidate frame is returned.
        """
        dfB["Gower_Distance"] = gowerdistances
        dfB = dfB.sort_values(by="Gower_Distance", ascending=True)
        dfB["Similarity_Measure"] = 1 - dfB["Gower_Distance"]
        if num_cntrl_rejected is None:
            if calltype == "old":
                dfB = dfB.head(1)
            if calltype == "new":
                if corrbased == 1:
                    # "Top band": everything within 0.05 of the best similarity.
                    top5_percent_stores = dfB[dfB['Similarity_Measure'] > (
                        dfB['Similarity_Measure'].max() - 0.05)]
                    if top5_percent_stores.shape[0] >= reqcontrolstores:
                        dfB = top5_percent_stores
                    else:
                        dfB = dfB.head(reqcontrolstores)
                else:
                    dfB = dfB.head(reqcontrolstores)
        else:
            # Recompute: drop candidates already rejected for this test store.
            dfB = dfB[~(dfB[self._storemstrmapping["partner_id"]].isin(
                rejected_with_control_left[teststoreid]))]
            if calltype == "old":
                dfB = dfB.head(1)
            if calltype == "new":
                reqcontrolstores = 1  # always for recompute scenario when corrbased=0
                if corrbased == 1:
                    top5_percent_stores = dfB[dfB['Similarity_Measure'] > (
                        dfB['Similarity_Measure'].max() - 0.05)].shape[0]
                    if top5_percent_stores > reqcontrolstores:
                        reqcontrolstores = top5_percent_stores
                dfB = dfB.head(reqcontrolstores)
        # Attach the test store's own attributes for downstream joins/display.
        filteredteststoredf = dfA[dfA[self._storemstrmapping["partner_id"]]
                                  == teststoreid]
        for col in self._metadata['test_planning']["teststores_columns"]:
            dfB["Test_store_" + col] = filteredteststoredf[col].values[0]
        return dfB
    @final
    def _prepare_test_control_stores_vecotrize(self, useA=None, useB=None, test_df=None, control_df=None, calltype=None,
                                               reqcontrolstores=None, corrbased=None):
        """Vectorised pairing of every test store with candidate controls.

        Computes the full Gower distance matrix between *useA* (test feature
        rows) and *useB* (control feature rows), cross-joins *test_df* with
        *control_df* via a constant ``key`` column, and assigns each pair its
        distance. For ``calltype == "new"`` with ``corrbased == 1`` every
        control within 0.05 of a test store's best distance is kept (falling
        back to the top *reqcontrolstores* when the band holds fewer), else
        just the top *reqcontrolstores* per test store.
        """
        # NOTE(review): assumed shape (n_test, n_control) for the matrix, and
        # flatten(order='A') relies on it being C-contiguous so the flattened
        # order matches the cross-join order (test id varying slowest) — TODO
        # confirm against the gower_matrix implementation.
        gowermatrix = gower_matrix(useA, useB)
        test_df.rename(columns={
            self._storemstrmapping["partner_id"]: 'Test_store_'+self._storemstrmapping["partner_id"]}, inplace=True)
        test_df['key'] = 1
        test_control = test_df.merge(control_df, on='key')
        test_control.drop(columns=['key'], inplace=True)
        test_control['Gower_Distance'] = gowermatrix.flatten(order='A')
        if calltype == "old":
            test_control = test_control.sort_values(
                by="Gower_Distance", ascending=True)
            test_control = test_control.groupby(
                'Test_store_'+self._storemstrmapping["partner_id"]).head(1).reset_index(drop=True)
        if calltype == "new":
            test_control = test_control.sort_values(
                by="Gower_Distance", ascending=True)
            if corrbased == 1:
                # Per test store: its closest control (frame is sorted), with
                # +0.05 defining the acceptance band.
                min_gower_dist_pair = test_control.drop_duplicates(
                    subset=['Test_store_'+self._storemstrmapping["partner_id"]])
                min_gower_dist_pair['Gower_Distance'] = min_gower_dist_pair['Gower_Distance']+0.05
                min_gower_dist_pair = min_gower_dist_pair.drop(
                    columns=[self._storemstrmapping["partner_id"]]).rename(columns={'Gower_Distance': 'Min_Gower_dist'})
                test_control = test_control.merge(
                    min_gower_dist_pair, on=['Test_store_'+self._storemstrmapping["partner_id"]])
                # flag: pair lies inside its test store's acceptance band.
                test_control['flag'] = test_control['Gower_Distance'] < test_control['Min_Gower_dist']
                top_5_percent_store = test_control.groupby(
                    ['Test_store_'+self._storemstrmapping["partner_id"]])['flag'].sum().reset_index()
                # Test stores whose band already holds enough controls.
                top_5_percent_store = top_5_percent_store[top_5_percent_store['flag'] >= reqcontrolstores][[
                    'Test_store_'+self._storemstrmapping["partner_id"]]]
                test_control = test_control.merge(
                    top_5_percent_store, how='left', on='Test_store_'+self._storemstrmapping["partner_id"], indicator=True)
                # df1: in-band pairs for band-sufficient test stores;
                # df2: top-N fallback for the remaining test stores.
                df1 = test_control[(test_control['_merge'] == 'both') & (
                    test_control['flag'] == True)]
                df2 = test_control[test_control['_merge'] == 'left_only'].groupby(
                    'Test_store_'+self._storemstrmapping["partner_id"]).head(reqcontrolstores)
                test_control = pd.concat([df1, df2], sort=False, ignore_index=True)
                test_control.drop(columns=['_merge', 'flag', 'Min_Gower_dist'], inplace=True)
            else:
                test_control = test_control.groupby(
                    'Test_store_'+self._storemstrmapping["partner_id"]).head(reqcontrolstores).reset_index(drop=True)
        test_control["Similarity_Measure"] = 1-test_control["Gower_Distance"]
        test_control["Similarity_Measure"] = test_control["Similarity_Measure"].round(2)
        test_control["Gower_Distance"] = test_control["Gower_Distance"].round(2)
        return test_control
    @final
    def _get_test_control_stores_correlation(self, dfA=None, dfB=None, test_control_stores=None, weekcolumns=None, num_cntrl_rejected=None, corrbased=None, reqcontrolstores=None):
        """Attach week-on-week sales correlations to test/control pairs and
        keep the best pair(s) per test store.

        *dfA*/*dfB* carry one row per test/control store with one column per
        week in *weekcolumns*. A fully vectorised Pearson correlation between
        every (test, control) store pair is computed, then each test store's
        pairs are ranked by Similarity_Measure (or Correlation when
        ``corrbased == 1``) and truncated to *reqcontrolstores* (or to one
        pair in the recompute path, ``num_cntrl_rejected is not None``).
        """
        print(" in get_test_control_stores_correlation")
        dfA = dfA[dfA[self._storemstrmapping["partner_id"]].isin(
            test_control_stores["Test_store_"+self._storemstrmapping["partner_id"]].unique())]
        dfB = dfB[dfB[self._storemstrmapping["partner_id"]].isin(
            test_control_stores[self._storemstrmapping["partner_id"]].unique())]
        # A, B: (num_weeks x num_stores) matrices — stores become columns.
        A = dfA[weekcolumns].values.T
        B = dfB[weekcolumns].values.T
        # Number of weekly observations per store.
        N = B.shape[0]
        sA = A.sum(0)
        sB = B.sum(0)
        # Vectorised Pearson correlation of every control column of B against
        # every test column of A: pcorr has shape (n_control, n_test) and is
        # indexed pcorr[control][test] below.
        p1 = N*np.einsum('ij,ik->kj', A, B)
        p2 = sA*sB[:, None]
        p3 = N*((B**2).sum(0)) - (sB**2)
        p4 = N*((A**2).sum(0)) - (sA**2)
        pcorr = ((p1 - p2)/np.sqrt(p4*p3[:, None]))
        # Store id -> column position, used to look rows/columns up in pcorr.
        test_store_dict = dict(zip(dfA[self._storemstrmapping["partner_id"]].values.tolist(), range(dfA[self._storemstrmapping["partner_id"]].nunique())))
        control_store_dict = dict(zip(dfB[self._storemstrmapping["partner_id"]].values.tolist(), range(dfB[self._storemstrmapping["partner_id"]].nunique())))
        test_control_stores['Correlation'] = test_control_stores[['Test_store_'+self._storemstrmapping["partner_id"], self._storemstrmapping["partner_id"]]]\
            .apply(lambda x: pcorr[control_store_dict[x[self._storemstrmapping["partner_id"]]]][test_store_dict[x['Test_store_'+self._storemstrmapping["partner_id"]]]], axis=1)
        test_control_stores = test_control_stores.sort_values(
            by=["Test_store_"+self._storemstrmapping["partner_id"], "Similarity_Measure"], ascending=False)
        if corrbased == 1:
            # Correlation-based ranking overrides the similarity ordering.
            test_control_stores = test_control_stores.sort_values(
                by=["Test_store_"+self._storemstrmapping["partner_id"], "Correlation"], ascending=False)
        if num_cntrl_rejected is None:
            test_control_stores = test_control_stores.groupby(
                ["Test_store_"+self._storemstrmapping["partner_id"]], as_index=False, sort=False).head(reqcontrolstores)
        else:
            # Recompute path: only the single best replacement per test store.
            test_control_stores = test_control_stores.groupby(
                ["Test_store_"+self._storemstrmapping["partner_id"]]).apply(lambda x: x.head(1)).reset_index(drop=True)
        test_control_stores[['Gower_Distance', 'Similarity_Measure', 'Correlation']] = test_control_stores[[
            'Gower_Distance', 'Similarity_Measure', 'Correlation']].round(2)
        return test_control_stores
def _get_max_required_control_stores(self, reqcontrolstores, applicability_criteria)->int:
if ("advanced_control_mapping" in applicability_criteria)and \
len(applicability_criteria['advanced_control_mapping'].values()) > 0:
return max(reqcontrolstores, max(int(val) for val in applicability_criteria['advanced_control_mapping'].values()))
return reqcontrolstores
    def _handle_control_per_store_attribute(self, control_stores, one_to_one=False,
                                            control_per_store_attribute=None) -> Tuple[pd.DataFrame, str, bool]:
        """Cap and flag the suggested controls per test store.

        When *control_per_store_attribute* maps a test-store attribute value
        to a count ``j``, only the top ``j`` controls per test store (by
        Similarity_Difference, descending) are kept — default 1 for attribute
        values not in the mapping. ``df1`` holds each test store's first
        control, ``df2`` the rest; with ``one_to_one`` only ``df1`` is marked
        checked/recommended, otherwise everything is.

        Returns (combined frame, message, success flag).
        """
        if control_per_store_attribute is not None:
            if 'store_attribute' not in self._config["feature_parameter"]['advanced_control_mapping']:
                return pd.DataFrame(), "Error in config!! store_attribute key missing from config['feature_parameter']['advanced_control_mapping']", False
            req_store_attribute = 'Test_store_'+self._config["feature_parameter"]['advanced_control_mapping']['store_attribute']
            req_columns = ['Test_store_'+self._storemstrmapping['partner_id'], req_store_attribute]
            if len(set(req_columns).intersection(set(control_stores.columns))) != len(req_columns):
                return pd.DataFrame(), "control store passed to function doesnot have following attributes: {}".format(req_columns), False
            # Test-store id -> its value of the configured store attribute.
            test_store_store_attribute_dict = dict(
                zip(control_stores['Test_store_'+self._storemstrmapping['partner_id']],
                    control_stores[req_store_attribute]))
            cs_updated = pd.DataFrame()
            for store_identifier in list(test_store_store_attribute_dict.keys()):
                # j = how many controls to keep for this test store.
                if test_store_store_attribute_dict[store_identifier] in (list(control_per_store_attribute.keys())):
                    j = control_per_store_attribute[test_store_store_attribute_dict[store_identifier]]
                else:
                    j = 1
                cs_updated = pd.concat([
                    control_stores.loc[
                        (control_stores['Test_store_'+self._storemstrmapping['partner_id']] == store_identifier)
                    ].sort_values(by=['Similarity_Difference'],
                                  ascending=False).head(j),
                    cs_updated],
                    ignore_index=True)
            # df1: first control per test store; df2: the remainder.
            df1 = cs_updated.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                     as_index=False,
                                     group_keys=False).apply(lambda x: x.iloc[0])
            df2 = cs_updated.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                     as_index=False,
                                     group_keys=False).apply(lambda x: x.iloc[1:])
        else:
            df1 = control_stores.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                         as_index=False,
                                         group_keys=False).apply(lambda x: x.iloc[0])
            df2 = control_stores.groupby("Test_store_" + self._storemstrmapping["partner_id"],
                                         as_index=False,
                                         group_keys=False).apply(lambda x: x.iloc[1:])
        if one_to_one == True:
            # One-to-one mapping: only the best control is pre-selected.
            df1["Checked_Flag"] = 1
            df2["Checked_Flag"] = 0
            df1["is_recommended"] = 1
            df2["is_recommended"] = 0
        else:
            df1["Checked_Flag"] = 1
            df2["Checked_Flag"] = 1
            df1["is_recommended"] = 1
            df2["is_recommended"] = 1
        return pd.concat([df1, df2]), "Handled control stores per test stores", True
    def identify_control_stores_util(self, teststores, business_categories, stores_master_df, annualrsvliftdf, consideryearweeks, valid_sales_stores, summary_sales_weeks, sales_weeks, compare_variables, target_variable, max_date_data_available, control_store_pool, reqcontrolstores):
        """End-to-end control-store identification for the given test stores.

        Filters the population (active-test stores removed, optional
        *control_store_pool* applied), joins weekly sales pivots and annual
        RSV lifts, scales the comparison features, computes Gower-distance
        pairings (plus CBU-category variants when products are configured)
        and finally ranks pairs by weekly-sales correlation.

        Returns (control_stores, message, success_flag).
        NOTE(review): the *sales_weeks* parameter is not referenced anywhere
        in this body — confirm whether it can be dropped at the call sites.
        """
        if control_store_pool is not None and len(control_store_pool) > 0:
            self._control_pool = control_store_pool
        # Remove stores actively participating in another test.
        stores_master_df = self._store_object.filter_active_test_control_stores(stores_master_df=stores_master_df.copy(deep=True),
                                                                                remove_type=self._config["feature_parameter"]["active_store_filter_type"],
                                                                                max_week_data_available=max_date_data_available)
        if stores_master_df.shape[0] == 0:
            return pd.DataFrame(), "All stores are actively participating in other test", False
        stores_master_df = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(valid_sales_stores[self._tarvarmapping['partner_id']].unique())]
        # Pivot weekly sales (one column per week) and join onto the stores.
        filtered = valid_sales_stores[valid_sales_stores[self._tarvarmapping['week']].isin(consideryearweeks[summary_sales_weeks:])]
        pivoteddf = pd.pivot_table(filtered, index=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]], columns=self._tarvarmapping['week'], values=target_variable).reset_index().rename_axis(None, axis=1)
        weekcolumns = [col for col in pivoteddf.columns.tolist() if col not in [self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]]]
        stores_master_df = stores_master_df.merge(pivoteddf, on=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]])
        filtercolumns = [self._tarvarmapping["partner_id"]] + [target_variable+' Year 1', target_variable+' Year 2', target_variable+' Lift']
        if self._config["feature_parameter"]["is_product_present"] == 1:
            # Product (CBU) configured: also compare CBU-category lift columns.
            filtercolumns.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2', "CBU_Category_"+target_variable+" Lift"])
            compare_variables_cbu_category = compare_variables.copy()
            compare_variables_cbu_category.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
                                                   "CBU_Category_"+target_variable+" Lift"])
            if (len(business_categories) != 0) & (len(business_categories) < self._metadata['test_planning']["business_categories_count"]):
                # Add category-specific variants of the shared compare features.
                common_category_specific = list(set(self._metadata['test_planning']["business_category_specific_compare"]) & set(compare_variables))
                if len(common_category_specific) > 0:
                    features_list = [[j+"_"+i for j in common_category_specific] for i in business_categories]
                    category_specific_features = [item for elem in features_list for item in elem]
                    compare_variables.extend(category_specific_features)
                    compare_variables_cbu_category.extend(category_specific_features)
        stores_master_df = stores_master_df.merge(annualrsvliftdf[filtercolumns], left_on=self._storemstrmapping["partner_id"], right_on=self._tarvarmapping["partner_id"])
        if stores_master_df.shape[0] == 0:
            return pd.DataFrame(), "Population stores do not have sales", False
        compare_variables.extend([target_variable+" Year 1", target_variable+" Year 2", target_variable+" Lift"])
        # Scaling Store Features Column values on the Entire Population set
        scaler = StandardScaler()
        nonscalingcolumns = [str_col for str_col in stores_master_df.columns if stores_master_df[str_col].dtypes == 'object']
        nonscalingcolumns = list(set(nonscalingcolumns) - set([self._storemstrmapping['partner_id']]))
        scale_cols = [item for item in compare_variables if item not in nonscalingcolumns]
        if stores_master_df.shape[0] == 0:
            return pd.DataFrame(), "All population stores are actively participating in other test", False
        if len(scale_cols) > 0:
            scaler = scaler.fit(stores_master_df[scale_cols])
        teststores = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(
            teststores[self._tarvarmapping['partner_id']].unique())]
        # ELIMINATING THE TESTSTORES FROM POPULATION
        stores_master_df = stores_master_df[~(stores_master_df[self._storemstrmapping["partner_id"]].isin(
            teststores[self._storemstrmapping["partner_id"]]))]
        # IF Control Store Pool Available then filter for only those stores
        if control_store_pool is not None:
            stores_master_df = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(control_store_pool)]
        if stores_master_df.shape[0] == 0:
            return pd.DataFrame(), "No valid control stores satisfying the criteria to proceed further.", False
        # Adding Additional Check for Req Control Stores
        if reqcontrolstores > stores_master_df[self._storemstrmapping["partner_id"]].nunique():
            reqcontrolstores = stores_master_df[self._storemstrmapping["partner_id"]].nunique()
        refA = teststores.copy(deep=True)
        refB = stores_master_df.copy(deep=True)
        useA = refA[compare_variables].copy(deep=True)
        useB = refB[compare_variables].copy(deep=True)
        if len(scale_cols) > 0:
            useA[scale_cols] = scaler.transform(useA[scale_cols])
            useB[scale_cols] = scaler.transform(useB[scale_cols])
        # NOTE(review): a set is used to index refB below (refB[filter_columns]);
        # recent pandas versions reject set indexers — confirm pandas pin.
        filter_columns = set(self._metadata['test_planning']["teststores_columns"])
        del annualrsvliftdf, valid_sales_stores
        # Vectorize implementation
        control_df = pd.DataFrame(columns=[self._storemstrmapping["partner_id"]],
                                  data=refB[self._storemstrmapping["partner_id"]].values)
        control_df['key'] = 1
        control_stores = self._prepare_test_control_stores_vecotrize(useA=useA,
                                                                     useB=useB,
                                                                     test_df=refA[[self._storemstrmapping["partner_id"]]],
                                                                     control_df=control_df, calltype="new",
                                                                     reqcontrolstores=reqcontrolstores,
                                                                     corrbased=1)
        control_stores = control_stores.merge(refB[filter_columns], on=[self._storemstrmapping["partner_id"]])
        # Attach the paired test store's attributes as Test_store_<col>.
        teststores_column_rename = ["Test_store_" + col for col in self._metadata['test_planning']["teststores_columns"]]
        teststores_df = refA[self._metadata['test_planning']["teststores_columns"]]
        teststores_df.columns = teststores_column_rename
        control_stores = control_stores.merge(teststores_df,
                                              on=['Test_store_' + self._storemstrmapping["partner_id"]])
        # Add CBU_Category Similarity Scores
        test_store_dict = dict(zip(refA[self._storemstrmapping["partner_id"]].values.tolist(
        ), range(0, refA[self._storemstrmapping["partner_id"]].nunique(), 1)))
        control_store_dict = dict(zip(refB[self._storemstrmapping["partner_id"]].values.tolist(
        ), range(0, refB[self._storemstrmapping["partner_id"]].nunique(), 1)))
        control_stores['Gower_Distance'] = control_stores['Gower_Distance'].round(2)
        if self._config["feature_parameter"]["is_product_present"] == 1:
            useA_cbu_category = refA[compare_variables_cbu_category].copy(
                deep=True)
            useB_cbu_category = refB[compare_variables_cbu_category].copy(
                deep=True)
            gowermatrix_cbu = gower_matrix(useA_cbu_category, useB_cbu_category)
            control_stores['Gower_Distance(CBU)'] = control_stores[['Test_store_'+self._storemstrmapping["partner_id"], self._storemstrmapping["partner_id"]]]\
                .apply(lambda x: gowermatrix_cbu[test_store_dict[x['Test_store_'+self._storemstrmapping["partner_id"]]]][control_store_dict[x[self._storemstrmapping["partner_id"]]]], axis=1)
            control_stores['Similarity_Measure(CBU)'] = 1 - \
                control_stores['Gower_Distance(CBU)']
            control_stores['Similarity_Difference'] = control_stores[
                "Similarity_Measure(CBU)"]-control_stores['Similarity_Measure']
            control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)', 'Similarity_Difference']] = control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)',
                                                                                                                         'Similarity_Difference']].round(2)
            control_stores.sort_values(
                by=['Similarity_Difference'], ascending=False, inplace=True)
        else:
            control_stores.sort_values(
                by=['Similarity_Measure'], ascending=False, inplace=True)
        # Rank pairs by weekly-sales correlation and truncate per test store.
        control_stores = self._get_test_control_stores_correlation(dfA=refA.copy(deep=True),
                                                                   dfB=refB.copy(
                                                                       deep=True),
                                                                   test_control_stores=control_stores.copy(
                                                                       deep=True),
                                                                   weekcolumns=weekcolumns,
                                                                   num_cntrl_rejected=None,
                                                                   corrbased=1, reqcontrolstores=reqcontrolstores)
        control_stores['Gower_Distance'] = control_stores['Gower_Distance'].round(2)
        control_stores['Similarity_Measure'] = control_stores['Similarity_Measure'].round(2)
        return control_stores, "Control stores are generated successfully!", True
    def test_control_similarity_measurement(self, test_control_pairs, prewindow_target_data, target_variable, postwindow_target_data):
        """Summarise how well the control group tracks the test group.

        Builds pre- and post-window weekly averages of *target_variable* for
        the test and control sides of *test_control_pairs* (four merge +
        groupby passes), stacks them into one frame labelled by Window/Group
        with "YYYY Week NN" week labels, and reports the average pairwise
        similarity and correlation.

        Returns (metrics_dict, combined_avg, message, True).
        """
        metrics_dict = {}
        # test group preperiod weekly target data
        test_stores_pre = test_control_pairs.merge(prewindow_target_data,
                                                   left_on=["Test_store_" + self._storemstrmapping["partner_id"],
                                                            "Test_store_" + self._storemstrmapping["banner"]],
                                                   right_on=[
                                                       self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]],
                                                   how="left")
        test_group_pre = test_stores_pre.groupby(self._tarvarmapping["week"])[
            target_variable].mean().reset_index().rename(
            columns={target_variable: 'Average_' + target_variable})
        test_group_pre['Window'] = 'Pre'
        test_group_pre['Group'] = 'Test'
        # test group postperiod weekly target data
        test_stores_post = test_control_pairs.merge(postwindow_target_data,
                                                    left_on=["Test_store_" + self._storemstrmapping["partner_id"],
                                                             "Test_store_" + self._storemstrmapping["banner"]],
                                                    right_on=[self._tarvarmapping["partner_id"],
                                                              self._tarvarmapping["banner"]],
                                                    how="left")
        test_group_post = test_stores_post.groupby(self._tarvarmapping["week"])[
            target_variable].mean().reset_index().rename(
            columns={target_variable: 'Average_' + target_variable})
        test_group_post['Window'] = 'Post'
        test_group_post['Group'] = 'Test'
        # control group preperiod weekly target data
        control_stores_pre = test_control_pairs.merge(prewindow_target_data,
                                                      left_on=[self._storemstrmapping["partner_id"],
                                                               self._storemstrmapping["banner"]],
                                                      right_on=[self._tarvarmapping["partner_id"],
                                                                self._tarvarmapping["banner"]],
                                                      how="left")
        control_group_pre = control_stores_pre.groupby(self._tarvarmapping["week"])[
            target_variable].mean().reset_index().rename(
            columns={target_variable: 'Average_' + target_variable})
        control_group_pre['Window'] = 'Pre'
        control_group_pre['Group'] = 'Control'
        # control group postperiod weekly target data
        control_stores_post = test_control_pairs.merge(postwindow_target_data,
                                                       left_on=[self._storemstrmapping["partner_id"],
                                                                self._storemstrmapping["banner"]],
                                                       right_on=[self._tarvarmapping["partner_id"],
                                                                 self._tarvarmapping["banner"]],
                                                       how="left")
        control_group_post = control_stores_post.groupby(self._tarvarmapping["week"])[
            target_variable].mean().reset_index().rename(columns={target_variable: 'Average_' + target_variable})
        control_group_post['Window'] = 'Post'
        control_group_post['Group'] = 'Control'
        # Pre and post period test and control group averages
        combined_avg = pd.concat([test_group_pre, test_group_post, control_group_pre, control_group_post],
                                 axis=0).reset_index(drop=True)
        combined_avg['Average_' + target_variable] = round(
            combined_avg['Average_' + target_variable], 2)
        # Render YYYYWW week keys as "YYYY Week NN" labels for display.
        combined_avg["Week"] = combined_avg["Week"].astype(int)
        combined_avg["Week"] = combined_avg["Week"].apply(
            lambda x: str(x)[:4] + " Week " + str('%02d' % int(str(x)[-2:])))
        # Average similarity & correlation
        testcontrolstores = test_control_pairs.copy(deep=True)
        avg_similarity = testcontrolstores['Similarity_Measure'].mean()
        avg_correlation = testcontrolstores['Correlation'].mean()
        metrics_dict["Avg_Similarity"] = str(round(avg_similarity, 2))
        metrics_dict["Avg_Correlation"] = str(round(avg_correlation, 2))
        return metrics_dict, combined_avg, "Calculated Successfully", True
def recompute_control_stores_util(self, target_variable, reqcontrolstores, test_control_stores, stores_master_df, max_date_data_available, annualrsvliftdf, valid_sales_stores,
consideryearweeks, compare_variables, include_cbu_features, business_categories):
accepted = test_control_stores.groupby(
"Test_store_"+self._storemstrmapping["partner_id"]).filter(lambda x: (x['Checked_Flag'] == 1).any())
rejected = test_control_stores[~test_control_stores["Test_store_"+self._storemstrmapping["partner_id"]].isin(
accepted["Test_store_"+self._storemstrmapping["partner_id"]])]
if rejected.shape[0] == 0:
return pd.DataFrame(), "Please unselect all the control stores for a test store to recompute.", False
rejected["is_recommended"]=0
num_cntrl_rejected = rejected.groupby(
"Test_store_"+self._storemstrmapping["partner_id"]).aggregate({self._storemstrmapping["partner_id"]:"nunique"}).reset_index()
stores_master_df = self._store_object.filter_active_test_control_stores(stores_master_df=stores_master_df.copy(deep=True),
remove_type=self._config["feature_parameter"]["active_store_filter_type"],
max_week_data_available=max_date_data_available)
stores_master_df = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(
valid_sales_stores[self._tarvarmapping["partner_id"]].unique())]
# -----------------------------------------------New code-------------------------------------------------------------
# check
filtered = valid_sales_stores[valid_sales_stores[self._tarvarmapping['week']].isin(consideryearweeks[:])]
pivoteddf = pd.pivot_table(filtered, index=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]], columns=self._tarvarmapping['week'], values=target_variable).reset_index().rename_axis(None, axis=1)
weekcolumns = [col for col in pivoteddf.columns.tolist() if col not in [self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]]]
stores_master_df = stores_master_df.merge(pivoteddf, on=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]])
# ------------------------------------------------New code-------------------------------------------------------------
filter_columns = [self._tarvarmapping["partner_id"]] + [target_variable+' Year 1', target_variable+' Year 2', target_variable+' Lift']
if self._config["feature_parameter"]["is_product_present"] == 1:
filter_columns.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
"CBU_Category_"+target_variable+" Lift"])
compare_variables_cbu_category = compare_variables.copy()
compare_variables_cbu_category.extend(["CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
"CBU_Category_"+target_variable+" Lift"])
if (len(business_categories)!=0) & (len(business_categories)<self._metadata['test_planning']["business_categories_count"]):
common_category_specific = list(set(self._metadata['test_planning']["business_category_specific_compare"]) & set(compare_variables))
if len(common_category_specific)>0:
features_list = [[j+"_"+i for j in common_category_specific] for i in business_categories]
category_specific_features = [item for elem in features_list for item in elem]
compare_variables.extend(category_specific_features)
compare_variables_cbu_category.extend(category_specific_features)
stores_master_df = stores_master_df.merge(
annualrsvliftdf[filter_columns], left_on=self._storemstrmapping["partner_id"], right_on=self._tarvarmapping["partner_id"])
if include_cbu_features == 1:
compare_variables.extend([target_variable+' Year 1', target_variable+" Year 2", target_variable+" Lift",
"CBU_Category_"+target_variable+' Year 1', "CBU_Category_"+target_variable+' Year 2',
"CBU_Category_"+target_variable+" Lift"])
else:
compare_variables.extend(
[target_variable+' Year 1', target_variable+" Year 2", target_variable+" Lift"])
# Scaling Store Features Column values on the Entire Population set
scaler = StandardScaler()
nonscalingcolumns = [str_col for str_col in stores_master_df.columns if stores_master_df[str_col].dtypes == 'object']
nonscalingcolumns = list(set(nonscalingcolumns) - set([self._storemstrmapping['partner_id']]))
scale_cols = [item for item in compare_variables if item not in nonscalingcolumns]
if len(scale_cols) > 0:
scaler = scaler.fit(stores_master_df[scale_cols])
teststores = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(
test_control_stores["Test_store_"+self._storemstrmapping["partner_id"]].unique())]
# ELIMINATING THE TESTSTORES FROM POPULATION
stores_master_df = stores_master_df[~(stores_master_df[self._storemstrmapping["partner_id"]].isin(teststores[self._storemstrmapping["partner_id"]]))]
refA = teststores.copy(deep=True)
refB = stores_master_df.copy(deep=True)
useA = refA[compare_variables].copy(deep=True)
useB = refB[compare_variables].copy(deep=True)
if len(scale_cols) > 0:
useA[scale_cols] = scaler.transform(useA[scale_cols])
useB[scale_cols] = scaler.transform(useB[scale_cols])
gowermatrix = gower_matrix(useA, useB)
rejected_with_control_left = {}
# Filtering out Test stores which have no more control store left to be mapped
teststores_with_exhausted_control = num_cntrl_rejected[num_cntrl_rejected[self._storemstrmapping["partner_id"]]+1 > refB.shape[0]].index.tolist()
rejected.loc[~rejected["Test_store_"+self._storemstrmapping["partner_id"]].isin(teststores_with_exhausted_control), "is_recommended"] = 0
num_cntrl_rejected = num_cntrl_rejected.to_dict()
rejected_with_control_left = rejected[~rejected["Test_store_" +
self._storemstrmapping["partner_id"]].isin(teststores_with_exhausted_control)]
if rejected_with_control_left.shape[0] == 0:
return pd.DataFrame(),"All Control Stores are exhausted for all the Test stores"
rejected_with_control_left = rejected_with_control_left.groupby(
"Test_store_"+self._storemstrmapping["partner_id"])[self._storemstrmapping["partner_id"]].unique()
rejected_with_control_left = rejected_with_control_left.to_dict()
# Identifying similar stores
filter_columns = self._metadata['test_planning']["teststores_columns"].copy()
df_list = []
for test_pid, row in zip(refA[self._storemstrmapping["partner_id"]], gowermatrix):
if test_pid in rejected_with_control_left.keys():
df_list.append(df_list.append(self.prepare_test_control_stores(dfA=refA[filter_columns].copy(deep=True),
dfB=refB[filter_columns].copy(deep=True),
teststoreid=test_pid, gowerdistances=row,
num_cntrl_rejected=num_cntrl_rejected, calltype="new",
rejected_with_control_left=rejected_with_control_left, corrbased=1,
reqcontrolstores=reqcontrolstores)))
control_stores = pd.concat(df_list)
control_stores = control_stores[~control_stores["Test_store_" +
self._storemstrmapping["partner_id"]].isin(teststores_with_exhausted_control)]
control_stores["Checked_Flag"] = 1
control_stores["is_recommended"] = 1
control_stores["test_id"] = self._test_id
# Add CBU_Category Similarity Scores
if self._config["feature_parameter"]["is_product_present"] == 1:
useA_cbu_category = refA[compare_variables_cbu_category].copy(deep=True)
useB_cbu_category = refB[compare_variables_cbu_category].copy(deep=True)
gowermatrix_cbu = gower_matrix(useA_cbu_category, useB_cbu_category)
test_store_dict = dict(zip(refA[self._storemstrmapping["partner_id"]].values.tolist(
), range(0, refA[self._storemstrmapping["partner_id"]].nunique(), 1)))
control_store_dict = dict(zip(refB[self._storemstrmapping["partner_id"]].values.tolist(
), range(0, refB[self._storemstrmapping["partner_id"]].nunique(), 1)))
control_stores['Gower_Distance(CBU)'] = control_stores[['Test_store_'+self._storemstrmapping["partner_id"], self._storemstrmapping["partner_id"]]]\
.apply(lambda x: gowermatrix_cbu[test_store_dict[x['Test_store_'+self._storemstrmapping["partner_id"]]]][control_store_dict[x[self._storemstrmapping["partner_id"]]]], axis=1)
control_stores['Similarity_Measure(CBU)'] = 1 - \
control_stores['Gower_Distance(CBU)']
control_stores['Similarity_Difference'] = control_stores[
"Similarity_Measure(CBU)"] - control_stores['Similarity_Measure']
control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)', 'Similarity_Difference']] = control_stores[['Gower_Distance(CBU)', 'Similarity_Measure(CBU)',
'Similarity_Difference']].round(2)
control_stores.sort_values(
by=['Similarity_Difference'], ascending=False, inplace=True)
control_stores = self._get_test_control_stores_correlation(dfA=refA.copy(deep=True), dfB=refB.copy(deep=True),
test_control_stores=control_stores.copy(
deep=True),
weekcolumns=weekcolumns, num_cntrl_rejected=num_cntrl_rejected, corrbased=1)
control_stores = pd.concat([control_stores, rejected, accepted])
return control_stores, "Successfully recomputed!!", True
def control_summary_util(self, stores_master_df, test_control_mapping, summary_sales_weeks, consideryearweeks, weekly_target_sales, business_categories, compare_variables, target_variable)->Tuple[dict,dict,dict, str, bool]:
    """Summarise how well control stores match their test stores.

    Computes per-feature mean/std and a two-sample t-test between test and
    control stores, fits an OLS model of the target on the compare features,
    and derives per-feature thresholds and symmetric bounds.

    Parameters
    ----------
    stores_master_df : pd.DataFrame
        Store-master attributes for all candidate stores.
    test_control_mapping : pd.DataFrame
        Rows linking each test store to its chosen control store.
    summary_sales_weeks : int
        Offset into ``consideryearweeks`` selecting the summary window.
    consideryearweeks : list
        Ordered year-week identifiers considered for the analysis.
    weekly_target_sales : pd.DataFrame
        Weekly sales records for the target variable.
    business_categories : list
        Business categories selected for this test.
    compare_variables : list
        Feature columns to compare; NOTE: extended in place with
        category-specific features and the target variable.
    target_variable : str
        Target KPI column name.

    Returns
    -------
    tuple
        ``(variables_metrics_dict, feature_thresholds_dict,
        feature_bounds_dict, message, success_flag)``
    """
    variables_metrics_dict = {}
    feature_thresholds_dict = {}
    feature_bounds_dict = {}
    test_stores = stores_master_df[stores_master_df[self._storemstrmapping["partner_id"]].isin(test_control_mapping["Test_store_"+self._storemstrmapping["partner_id"]])]
    control_stores = stores_master_df.merge(test_control_mapping[[self._storemstrmapping["partner_id"], self._storemstrmapping["banner"]]], on=[
        self._storemstrmapping["partner_id"], self._storemstrmapping["banner"]])
    weeks = consideryearweeks[summary_sales_weeks:]
    weeklyrsvdatayear = weekly_target_sales[weekly_target_sales[self._tarvarmapping["week"]].isin(
        weeks)]
    weeklyrsvdatayear["Year"] = "Year1"
    # Free the large weekly frame as soon as it has been filtered.
    del weekly_target_sales
    aggdict = {k: sum for k in [
        self._tarvarmapping['rsv'], self._tarvarmapping['volume']]}
    groupbycolumns = [self._tarvarmapping["partner_id"]] + \
        [self._tarvarmapping["banner"]]+[self._tarvarmapping['year']]
    annualrsvdatayear = weeklyrsvdatayear.groupby(
        groupbycolumns).agg(aggdict).reset_index()
    mergecolumns = [self._tarvarmapping["partner_id"]] + \
        [self._tarvarmapping['rsv'], self._tarvarmapping['volume']]
    test_stores = test_stores.merge(annualrsvdatayear[mergecolumns],
                                    left_on=self._storemstrmapping["partner_id"],
                                    right_on=self._tarvarmapping["partner_id"])
    control_stores = control_stores.merge(annualrsvdatayear[mergecolumns],
                                          left_on=self._storemstrmapping["partner_id"],
                                          right_on=self._tarvarmapping["partner_id"])
    # Add business-category specific variants of the compare features when
    # only a subset of categories is in scope.
    if (len(business_categories)!=0) & (len(business_categories)<self._metadata['test_planning']["business_categories_count"]):
        common_category_specific = list(set(self._metadata['test_planning']["business_category_specific_compare"]) & set(compare_variables))
        if len(common_category_specific)>0:
            features_list = [[j+"_"+i for j in common_category_specific] for i in business_categories]
            category_specific_features = [item for elem in features_list for item in elem]
            compare_variables.extend(category_specific_features)
    compare_variables.append(target_variable)
    allstores = pd.concat([test_stores, control_stores])
    # Drop features that are constant across all stores.
    variable_features = allstores[compare_variables].nunique(
    )[allstores[compare_variables].nunique() > 1].index.to_list()
    compare_variables = list(
        set(compare_variables).intersection(variable_features))
    for col in compare_variables:
        # Bug fix: non-numeric columns cannot be t-tested or averaged; the
        # previous ``pass`` fell through and still ran the statistics below.
        if test_stores[col].dtype == 'object':
            continue
        variables_metrics_dict[col] = {}
        tStat, pVal = stats.ttest_ind(
            test_stores[col], control_stores[col], nan_policy='omit')
        variables_metrics_dict[col]["Test Mean"] = round(
            test_stores[col].mean(), 2)
        variables_metrics_dict[col]["Control Mean"] = round(
            control_stores[col].mean(), 2)
        variables_metrics_dict[col]["Test Std Dev"] = round(
            test_stores[col].std(), 2)
        variables_metrics_dict[col]["Control Std Dev"] = round(
            control_stores[col].std(), 2)
    # OLS of target on compare features, to extract per-feature p-values.
    xcols = [x for x in compare_variables if x != target_variable]
    X_train = allstores[xcols].values
    y_train = allstores[target_variable].values.ravel()
    X_train = sm.add_constant(X_train)
    model = sm.OLS(y_train, X_train)
    results = model.fit()
    summary_df = results.summary2().tables[1]
    summary_df.index = ['Constant'] + list(xcols)
    pvalue_dict = dict(
        zip(summary_df.index.values.tolist(), summary_df["P>|t|"].values.tolist()))
    # Calculate feature thresholds and symmetric bounds around the test mean.
    feature_thresholds_dict = self._get_feature_thresholds(
        test_stores, control_stores, compare_variables)
    for key, value in feature_thresholds_dict.items():
        feature_bounds_dict[key] = [
            variables_metrics_dict[key]["Test Mean"]-value, variables_metrics_dict[key]["Test Mean"]+value]
    return variables_metrics_dict,feature_thresholds_dict,feature_bounds_dict, "Successfully calculated!!", True
def test_control_upload_util(self, filtered_rsv_stores_df, valid_sales_stores, stores_master_df, consideryearweeks, target_variable, applicability_criteria, store_features, test_control_stores):
    """Validate an uploaded set of test/control store pairs and enrich them.

    Filters the uploaded pairs down to stores present in the filtered store
    list, computes the weekly-sales correlation for each pair, and attaches
    Gower similarity measures (overall and CBU-category based) for the
    retained control stores.

    Parameters
    ----------
    filtered_rsv_stores_df : pd.DataFrame
        Stores passing the RSV filters, one row per store.
    valid_sales_stores : pd.DataFrame
        Weekly sales records for eligible stores.
    stores_master_df : pd.DataFrame
        Store-master attributes (used to detect non-numeric columns).
    consideryearweeks : list
        Ordered year-week identifiers.
    target_variable : str
        KPI column to pivot weekly sales on.
    applicability_criteria : dict
        Criteria forwarded to the sales object for week selection.
    store_features : list
        Feature columns used for similarity; NOTE: extended in place.
    test_control_stores : pd.DataFrame
        Uploaded test/control pairs.

    Returns
    -------
    tuple
        ``(pairs_df, message, success_flag)``
    """
    store_features.extend([target_variable + " Year 1", target_variable + " Year 2", target_variable + " Lift"])
    store_features_cbu_category = store_features.copy()
    store_features_cbu_category.extend(
        ["CBU_Category_" + target_variable + ' Year 1',
         "CBU_Category_" + target_variable + ' Year 2',
         "CBU_Category_" + target_variable + " Lift"])
    # valid test stores
    tv = test_control_stores[test_control_stores["Test_store_"+self._storemstrmapping["partner_id"]].isin(filtered_rsv_stores_df[self._storemstrmapping["partner_id"]])]
    if len(tv) == 0:
        return pd.DataFrame(), "Test stores uploaded are not present in Store Master database", False
    # valid controlstores
    cv = test_control_stores[test_control_stores["Control_store_"+self._storemstrmapping["partner_id"]].isin(filtered_rsv_stores_df[self._storemstrmapping["partner_id"]])]
    if len(cv) == 0:
        return pd.DataFrame(), "Control stores uploaded are not present in Store Master database", False
    to_drop = tv[~tv["Control_store_"+self._storemstrmapping["partner_id"]].isin(cv["Control_store_"+self._storemstrmapping["partner_id"]])]
    # Final valid test-control pairs
    filtered_testcontrol_stores = tv[~tv.isin(to_drop)].dropna()
    if filtered_testcontrol_stores.shape[0] == 0:
        message = "No test-control pairs satisfying the criteria to proceed further."
        return pd.DataFrame(), message, False
    # Remember upload order so test and control rows can be re-aligned later.
    filtered_testcontrol_stores['order'] = list(range(filtered_testcontrol_stores.shape[0]))
    filtered = valid_sales_stores[valid_sales_stores[self._tarvarmapping["week"]].isin(consideryearweeks[self._sales_object.get_summary_sales_weeks(applicability_criteria):])]
    pivoteddf = pd.pivot_table(filtered,
                               index=[self._storemstrmapping["partner_id"],
                                      self._storemstrmapping["banner"]],
                               columns=self._tarvarmapping["week"],
                               values=target_variable).reset_index().rename_axis(None, axis=1)
    filtered_rsv_stores_df = filtered_rsv_stores_df.merge(pivoteddf, on=[self._tarvarmapping["partner_id"], self._tarvarmapping["banner"]])
    # Scale only numeric features (object-typed master columns are excluded).
    scaler = StandardScaler()
    nonscalingcolumns = [str_col for str_col in stores_master_df.columns if stores_master_df[str_col].dtypes == 'object']
    nonscalingcolumns = list(set(nonscalingcolumns) - set([self._storemstrmapping['partner_id']]))
    scale_cols = [item for item in store_features if item not in nonscalingcolumns]
    if len(scale_cols) > 0:
        scaler = scaler.fit(filtered_rsv_stores_df[scale_cols])
    teststores = filtered_rsv_stores_df.merge(
        filtered_testcontrol_stores,
        left_on=self._storemstrmapping["partner_id"],
        right_on='Test_store_'+self._storemstrmapping["partner_id"],
        how='right')
    controlstores = filtered_rsv_stores_df.merge(
        filtered_testcontrol_stores,
        left_on=self._storemstrmapping["partner_id"],
        right_on='Control_store_'+self._storemstrmapping["partner_id"],
        how='right')
    # Re-align control rows to the test rows via the shared 'order' key so
    # that the order of test-control pairs in both frames matches.
    controlstores = controlstores.set_index('order')
    controlstores = controlstores.reindex(index=teststores['order'])
    controlstores = controlstores.reset_index()
    # Weekly sales for all stores for the past 1 year (52 weeks)
    cols = ["order", self._storemstrmapping['partner_id'], self._storemstrmapping['banner'], 'Test_store_' +
            self._storemstrmapping['partner_id'], 'Control_store_'+self._storemstrmapping['partner_id']]
    mergecols = [self._storemstrmapping['partner_id'], self._storemstrmapping['banner']]
    test_stores_wksales = teststores[cols].merge(pivoteddf, on=mergecols)
    control_stores_wksales = controlstores[cols].merge(pivoteddf, on=mergecols)
    control_stores_wksales = control_stores_wksales.set_index('order')
    control_stores_wksales = control_stores_wksales.reindex(index=test_stores_wksales['order'])
    control_stores_wksales = control_stores_wksales.reset_index()
    corrlist = []
    for j in range(controlstores.shape[0]):
        array1 = np.array(test_stores_wksales.loc[j, test_stores_wksales.columns[~test_stores_wksales.columns.isin(cols)]].astype(float))
        array2 = np.array(control_stores_wksales.loc[j, control_stores_wksales.columns[~control_stores_wksales.columns.isin(cols)]].astype(float))
        # Bug fix: the ``pd.np`` alias was deprecated in pandas 1.0 and
        # removed in pandas 2.0 -- use numpy directly.
        corrlist.append(round(np.corrcoef(array1, array2)[0][1], 2))
    teststores["Correlation"] = corrlist
    # population stores after excluding teststores
    pop_stores = filtered_rsv_stores_df[~filtered_rsv_stores_df[self._storemstrmapping['partner_id']].isin(
        teststores['Test_store_'+self._storemstrmapping['partner_id']])]
    # Similarity Calculation
    refA = teststores.copy(deep=True)
    refB = pop_stores.copy(deep=True)
    useA = refA[store_features].copy(deep=True)
    useB = refB[store_features].copy(deep=True)
    if len(scale_cols) > 0:
        useA[scale_cols] = scaler.transform(useA[scale_cols])
        useB[scale_cols] = scaler.transform(useB[scale_cols])
    gowermatrix = gower_matrix(useA, useB)
    useA = refA[store_features_cbu_category].copy(deep=True)
    useB = refB[store_features_cbu_category].copy(deep=True)
    gowermatrix_cbu = gower_matrix(useA, useB)
    # Identifying similar stores
    df_list = list()
    for i in range(refA.shape[0]):
        teststoreid = refA[self._storemstrmapping["partner_id"]][i]
        gowerdistances = gowermatrix[i]
        gowerdistances_cbu = gowermatrix_cbu[i]
        dfA = refA.copy(deep=True)
        dfB = refB.copy(deep=True)
        dfB["Gower_Distance"] = list(gowerdistances)
        dfB["Gower_Distance(CBU)"] = list(gowerdistances_cbu)
        # Transpose the i-th test-store row into a single-row frame.
        filteredteststoredf = dfA.loc[i,:].reset_index().T.reset_index(drop=True)
        filteredteststoredf.columns = filteredteststoredf.iloc[0, :]
        filteredteststoredf = filteredteststoredf.drop(0)
        filteredteststoredf = filteredteststoredf.reset_index(drop=True)
        for col in self._metadata['test_planning']["teststores_columns"]:
            dfB["Test_store_"+col] = filteredteststoredf[col].values[0]
        dfB["Gower_Distance"] = dfB["Gower_Distance"].apply(
            lambda x: round(x, 2))
        dfB["Similarity_Measure"] = dfB["Gower_Distance"].apply(lambda x: 1-x)
        dfB["Gower_Distance(CBU)"] = dfB["Gower_Distance(CBU)"].round(2)
        dfB["Similarity_Measure(CBU)"] = dfB["Gower_Distance(CBU)"].apply(
            lambda x: 1 - x)
        dfB["Similarity_Measure(CBU)"] = dfB["Similarity_Measure(CBU)"].round(2)
        dfB['Similarity_Difference'] = dfB['Similarity_Measure(CBU)'] - \
            dfB['Similarity_Measure']
        dfB['Similarity_Difference'] = dfB['Similarity_Difference'].round(2)
        # Keep only the row matching the uploaded control store; copy so the
        # column assignment below does not hit SettingWithCopyWarning.
        df_append = dfB[dfB[self._storemstrmapping['partner_id']].values ==
                        filteredteststoredf['Control_store_'+self._storemstrmapping['partner_id']].values].copy()
        df_append['Correlation'] = filteredteststoredf['Correlation'].values[0]
        df_list.append(df_append)
    control_test_pairs = pd.concat(df_list)
    control_test_pairs['Checked_Flag'] = 1
    return control_test_pairs, "Control stores computed Successfully", True
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import logging
import math
import multiprocessing
import sys
import numpy as np
from matrixprofile import cycore
logger = logging.getLogger(__name__)
def mp_pool():
    """Return a context-manager factory for a multiprocessing pool.

    On Python 3 this is ``multiprocessing.Pool`` itself; on Python 2 a small
    wrapper adds context-manager support (terminating the pool on exit).
    """
    if sys.version_info[0] != 2:
        return multiprocessing.Pool

    from contextlib import contextmanager

    @contextmanager
    def multiprocessing_context(*args, **kwargs):
        pool = multiprocessing.Pool(*args, **kwargs)
        yield pool
        pool.terminate()

    return multiprocessing_context
def is_array_like(a):
    """Return True when *a* is a list, tuple or np.ndarray."""
    array_types = (list, tuple, np.ndarray)
    return isinstance(a, array_types)


def is_similarity_join(ts_a, ts_b):
    """Return True when both series are array-like (an AB similarity join)."""
    return all(is_array_like(ts) for ts in (ts_a, ts_b))


def to_np_array(a):
    """Convert a list/tuple to np.ndarray; pass an ndarray through unchanged.

    Raises
    ------
    ValueError
        When *a* is not array-like.
    """
    if not is_array_like(a):
        raise ValueError('Unable to convert to np.ndarray!')
    return a if isinstance(a, np.ndarray) else np.array(a)


def is_one_dimensional(a):
    """Return True when array *a* has exactly one dimension."""
    return a.ndim == 1
def get_profile_length(ts_a, ts_b, m):
    """Return the matrix profile length for series *ts_a* and window *m*.

    Parameters
    ----------
    ts_a : array_like
        The primary time series.
    ts_b : array_like
        The secondary time series (unused in the length computation).
    m : int
        Subsequence window length.

    Returns
    -------
    int
        Number of subsequences of length *m* in *ts_a*.
    """
    profile_length = len(ts_a) - m + 1
    return profile_length
def find_skip_locations(ts, profile_length, window_size):
    """Flag profile indices whose window contains a NaN or infinite value.

    (The previous docstring was copy-pasted from another function and
    described the wrong parameters and return value.)

    Parameters
    ----------
    ts : array_like
        The time series to scan.
    profile_length : int
        Length of the matrix profile (number of windows to test).
    window_size : int
        Length of each subsequence window.

    Returns
    -------
    np.ndarray
        Boolean array of length ``profile_length``; True marks windows
        containing at least one NaN/inf that should be skipped.
    """
    # Hoist the per-element nan/inf test out of the loop; ~isfinite is
    # equivalent to (isnan | isinf) for real-valued data.
    bad = ~np.isfinite(np.asarray(ts))
    skip_loc = np.zeros(profile_length).astype(bool)
    for i in range(profile_length):
        if bad[i:i + window_size].any():
            skip_loc[i] = True

    return skip_loc
def clean_nan_inf(ts):
    """
    Replaces nan and inf values with zeros per matrix profile algorithms.

    Note: when *ts* is already an np.ndarray it is modified **in place**
    (``to_np_array`` returns the same object) and that same array is
    returned; lists/tuples are copied into a new array first.

    Parameters
    ----------
    ts: array_like
        Time series to clean.
    Returns
    -------
    np.ndarray - The cleaned time series.
    Raises
    ------
    ValueError
        When the ts is not array like.
    """
    ts = to_np_array(ts)
    # Boolean mask of every element that is +/-inf or NaN.
    search = (np.isinf(ts) | np.isnan(ts))
    ts[search] = 0

    return ts
def is_nan_inf(val):
    """Return True when scalar *val* is NaN or infinite."""
    return not np.isfinite(val)


def is_not_nan_inf(val):
    """Return True when scalar *val* is a finite value."""
    return bool(np.isfinite(val))


def nan_inf_indices(a):
    """Return a boolean mask marking the NaN/inf elements of *a*."""
    # ~isfinite == (isnan | isinf) for real-valued input
    return ~np.isfinite(a)


def not_nan_inf_indices(a):
    """Return a boolean mask marking the finite elements of *a*."""
    return np.isfinite(a)
def rolling_window(a, window):
    """Return a strided (no-copy) view of *a* with a trailing window axis.

    Parameters
    ----------
    a : np.ndarray
        The input array.
    window : int
        The window size.

    Returns
    -------
    np.ndarray
        View of shape ``a.shape[:-1] + (a.shape[-1] - window + 1, window)``.
    """
    out_shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    out_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)


def moving_average(a, window=3):
    """Return the moving average of *a* over windows of size *window*."""
    return np.mean(rolling_window(a, window), axis=-1)


def moving_std(a, window=3):
    """Return the moving standard deviation of *a* over size-*window* windows."""
    return np.std(rolling_window(a, window), axis=-1)


def moving_avg_std(a, window=3):
    """Return ``(avg, std)`` moving statistics computed by the cython core.

    Parameters
    ----------
    a : array_like
        The input array (converted to float64 for the cython routine).
    window : int
        The window size.
    """
    mu, sig = cycore.moving_avg_std(a.astype('d'), window)
    return (np.asarray(mu), np.asarray(sig))


def moving_min(a, window=3):
    """Return the moving minimum of *a* over windows of size *window*."""
    return np.min(rolling_window(a, window), axis=1)


def moving_max(a, window=3):
    """Return the moving maximum of *a* over windows of size *window*."""
    return np.max(rolling_window(a, window), axis=1)


def moving_median(a, window=3):
    """Return the moving median of *a* over windows of size *window*."""
    return np.median(rolling_window(a, window), axis=1)
def fft_convolve(ts, query):
    """Sliding dot product of *query* over *ts* via FFT convolution.

    Parameters
    ----------
    ts : array_like
        The time series.
    query : array_like
        The query.

    Returns
    -------
    array_like
        Dot product of *query* with every length-``len(query)`` window of
        *ts* (``len(ts) - len(query) + 1`` values).
    """
    n, m = len(ts), len(query)
    # Reverse the query and zero-pad it to length n so the circular
    # convolution realises a sliding dot product.
    padded = np.append(np.flipud(query), np.zeros([1, n - m]))
    spectrum = np.fft.fft(ts) * np.fft.fft(padded)
    return np.real(np.fft.ifft(spectrum)[m - 1:n])
def sliding_dot_product(ts, query):
    """Sliding dot product of *query* over *ts* using direct convolution.

    The full convolution is trimmed to the valid range ``[len(query)-1,
    len(ts))`` where every window fully overlaps the series.

    Parameters
    ----------
    ts : array_like
        The time series.
    query : array_like
        The query.

    Returns
    -------
    array_like
        The sliding dot product.
    """
    full = np.convolve(ts, np.flipud(query), mode='full')
    return np.real(full[len(query) - 1:len(ts)])
def distance_profile(prod, ws, data_mu, data_sig, query_mu, query_sig):
    """Compute the z-normalised Euclidean distance profile.

    Parameters
    ----------
    prod : array_like
        Sliding dot product between the time series and the query.
    ws : int
        Window size.
    data_mu : array_like
        Moving average of the time series.
    data_sig : array_like
        Moving standard deviation of the time series.
    query_mu : array_like
        Moving average of the query.
    query_sig : array_like
        Moving standard deviation of the query.

    Returns
    -------
    array_like
        The distance profile.
    """
    corr_term = (prod - ws * data_mu * query_mu) / (data_sig * query_sig)
    dp = 2 * (ws - corr_term)

    # sqrt of tiny negative round-off would warn; silence only here
    with np.errstate(divide='ignore', invalid='ignore'):
        dp = np.sqrt(np.real(dp))

    return dp
def precheck_series_and_query_1d(ts, query):
    """Validate and normalise a 1D time series and query.

    Parameters
    ----------
    ts : array_like
        The time series.
    query : array_like
        The query.

    Returns
    -------
    (np.array, np.array)
        The converted ts and query respectively.

    Raises
    ------
    ValueError
        When either input is not array-like or not one dimensional.
    """
    try:
        ts = to_np_array(ts)
    except ValueError:
        raise ValueError('Invalid ts value given. Must be array_like!')

    try:
        query = to_np_array(query)
    except ValueError:
        raise ValueError('Invalid query value given. Must be array_like!')

    for arr, label in ((ts, 'ts'), (query, 'query')):
        if not is_one_dimensional(arr):
            raise ValueError('{} must be one dimensional!'.format(label))

    return (ts, query)
def valid_n_jobs(n_jobs):
    """Clamp a requested worker count to the usable CPU range.

    Values below 1 or above the machine's CPU count both fall back to the
    full CPU count.

    Parameters
    ----------
    n_jobs : int
        Number of desired cpu cores.

    Returns
    -------
    int
        Valid number of cpu cores.
    """
    max_cpus = multiprocessing.cpu_count()
    if n_jobs < 1 or n_jobs > max_cpus:
        n_jobs = max_cpus
    return n_jobs
def generate_batch_jobs(profile_length, n_jobs):
    """Yield ``(start, end)`` index pairs partitioning a profile across jobs.

    Parameters
    ----------
    profile_length : int
        The length of the matrix profile to compute.
    n_jobs : int
        The number of jobs (cpu cores).

    Yields
    ------
    tuple
        ``(start, end)`` slice bounds for each job; the final pair is
        truncated at ``profile_length``.
    """
    batch_size = int(math.ceil(profile_length / n_jobs))

    if batch_size == profile_length:
        yield (0, profile_length)
        return

    for job in range(n_jobs):
        start = job * batch_size
        end = min(start + batch_size, profile_length)
        yield (start, end)
        if end == profile_length:
            break
def apply_exclusion_zone(exclusion_zone, is_join, window_size, data_length,
                         index, distance_profile):
    """Mask the exclusion zone around *index* with np.inf (self-joins only).

    Sets ``distance_profile[index - ez : index + ez + 1]`` (clipped to the
    valid profile range) to infinity so trivial matches near *index* are
    ignored. No-op for AB joins or a non-positive exclusion zone. The input
    array is modified in place and also returned.
    """
    if exclusion_zone > 0 and not is_join:
        ez_start = np.max([0, index - exclusion_zone])
        ez_end = np.min([data_length - window_size + 1, index + exclusion_zone + 1])
        distance_profile[ez_start:ez_end] = np.inf
    return distance_profile
def pearson_to_euclidean(a, windows):
    """Convert Pearson correlation metrics to Euclidean distances.

    The array and windows are row-wise aligned: a 1D array uses a single
    window, a 2D array uses ``windows[idx]`` per row. Infinite entries in
    the input remain infinite in the output.

    Parameters
    ----------
    a : array_like
        The array of Pearson metrics.
    windows : int or array_like
        The window(s) used to compute the Pearson metrics.

    Returns
    -------
    np.ndarray
        Array of the same shape holding Euclidean distances.
    """
    euc_a = np.full(a.shape, np.inf, dtype='d')

    if a.ndim == 1:
        window = windows[0] if isinstance(windows, (list, tuple, np.ndarray)) else windows
        is_inf = np.isinf(a)
        euc_a = np.sqrt(2 * window * (1 - a))
        # Bug fix: the inf mask was computed but never applied in the 1D
        # branch (the 2D branch below always restored infs).
        euc_a[is_inf] = np.inf
    else:
        for window, idx in zip(windows, range(a.shape[0])):
            is_inf = np.isinf(a[idx])
            euc_a[idx] = np.sqrt(2 * window * (1 - a[idx]))
            euc_a[idx][is_inf] = np.inf

    return euc_a
def is_pearson_array(a):
    """Heuristically test whether *a* holds Pearson metrics.

    An array is assumed to be Pearson when all of its finite values lie in
    ``[0, 1]``.

    Parameters
    ----------
    a : array_like
        The array to test.

    Returns
    -------
    True or false respectively.
    """
    finite_vals = a[~(np.isnan(a) | np.isinf(a))]
    return finite_vals.min() >= 0 and finite_vals.max() <= 1
def _obj_has_class(obj, class_name):
    """Return True when *obj* is a dict whose 'class' entry equals *class_name*."""
    return isinstance(obj, dict) and obj.get('class') == class_name


def is_stats_obj(obj):
    """Return True when *obj* matches the library's Statistics structure."""
    return _obj_has_class(obj, 'Statistics')


def is_mp_obj(obj):
    """Return True when *obj* matches the library's MatrixProfile structure."""
    return _obj_has_class(obj, 'MatrixProfile')


def is_pmp_obj(obj):
    """Return True when *obj* matches the library's Pan-MatrixProfile structure."""
    return _obj_has_class(obj, 'PMP')


def is_mp_or_pmp_obj(obj):
    """Return True when *obj* is either a MatrixProfile or a Pan-MatrixProfile."""
    return is_mp_obj(obj) or is_pmp_obj(obj)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
def apply_av(profile, av="default", custom_av=None):
    """Attach an annotation vector to a MatrixProfile and correct it.

    Builds the requested annotation vector (or validates a custom one),
    stores it on the profile, and writes the corrected matrix profile to
    ``profile['cmp']``.

    Parameters
    ----------
    profile : dict
        A MatrixProfile structure.
    av : str, Default = "default"
        One of 'default', 'complexity', 'meanstd', 'clipping' or 'custom'.
    custom_av : array_like, Default = None
        Custom annotation vector (used only when ``av == 'custom'``).

    Returns
    -------
    dict : profile
        The profile with 'cmp', 'av' and 'av_type' populated.

    Raises
    ------
    ValueError
        When profile is not an MP structure, the av name is unknown, a
        custom av is not array-like, lengths differ, or av values fall
        outside [0.0, 1.0].
    """
    if not core.is_mp_obj(profile):
        raise ValueError('apply_av expects profile as an MP data structure')

    # map of built-in annotation-vector builders
    av_makers = {
        'default': make_default_av,
        'complexity': make_complexity_av,
        'meanstd': make_meanstd_av,
        'clipping': make_clipping_av,
    }

    if av in av_makers:
        temp_av = av_makers[av](profile['data']['ts'], profile['w'])
    elif av == 'custom':
        try:
            temp_av = core.to_np_array(custom_av)
        except ValueError:
            raise ValueError('apply_av expects custom_av to be array-like')
    else:
        raise ValueError("av parameter is invalid")
    av_type = av

    if len(temp_av) != len(profile['mp']):
        raise ValueError("Lengths of annotation vector and mp are different")
    if (temp_av < 0.0).any() or (temp_av > 1.0).any():
        raise ValueError("Annotation vector values must be between 0 and 1")

    # corrected profile: low-annotation regions are pushed towards max(mp)
    max_val = np.max(profile['mp'])
    profile['cmp'] = profile['mp'] + (np.ones(len(temp_av)) - temp_av) * max_val
    profile['av'] = temp_av
    profile['av_type'] = av_type

    return profile
def make_default_av(ts, window):
    """Return an all-ones annotation vector (leaves the profile unchanged).

    Parameters
    ----------
    ts : array_like
        The time series.
    window : int
        The window size used to compute the MatrixProfile.

    Returns
    -------
    np.array : av
        An annotation vector of length ``len(ts) - window + 1``.

    Raises
    ------
    ValueError
        When ts is not array-like, not one-dimensional, or window is not
        an integer.
    """
    try:
        ts = core.to_np_array(ts)
    except ValueError:
        raise ValueError('make_default_av expects ts to be array-like')

    if not core.is_one_dimensional(ts):
        raise ValueError('make_default_av expects ts to be one-dimensional')

    if not isinstance(window, int):
        raise ValueError('make_default_av expects window to be an integer')

    return np.ones(len(ts) - window + 1)
def make_complexity_av(ts, window):
    """
    Utility function that returns an annotation vector where values are based
    on the complexity estimation of the signal: windows with more variation
    (larger summed squared differences) receive larger annotation values.

    Parameters
    ----------
    ts : array_like
        The time series.
    window : int
        The specific window size used to compute the MatrixProfile.

    Returns
    -------
    np.array : av
        An annotation vector of length ``len(ts) - window + 1``.

    Raises
    ------
    ValueError
        If ts is not a list or np.array.
        If ts is not one-dimensional.
        If window is not an integer.
    """
    try:
        ts = core.to_np_array(ts)
    except ValueError:
        raise ValueError('make_complexity_av expects ts to be array-like')

    if not core.is_one_dimensional(ts):
        raise ValueError('make_complexity_av expects ts to be one-dimensional')

    if not isinstance(window, int):
        raise ValueError('make_complexity_av expects window to be an integer')

    av = np.zeros(len(ts) - window + 1)
    for i in range(len(av)):
        # complexity estimate: sum of squared consecutive differences
        ce = np.sum(np.diff(ts[i: i + window]) ** 2)
        av[i] = np.sqrt(ce)

    max_val, min_val = np.max(av), np.min(av)
    if max_val == 0:
        av = np.zeros(len(av))
    else:
        # NOTE(review): this shifts by min_val but divides by max_val rather
        # than (max_val - min_val), so values only reach 1.0 when min_val is
        # 0 -- looks unconventional for min-max scaling; confirm intended.
        av = (av - min_val) / max_val

    return av
def make_meanstd_av(ts, window):
    """Return a binary annotation vector based on rolling volatility.

    Entries are 1 where the moving standard deviation falls below its own
    mean, and 0 elsewhere.

    Parameters
    ----------
    ts : array_like
        The time series.
    window : int
        The window size used to compute the MatrixProfile.

    Returns
    -------
    np.array : av
        An annotation vector of 0s and 1s.

    Raises
    ------
    ValueError
        When ts is not array-like, not one-dimensional, or window is not
        an integer.
    """
    try:
        ts = core.to_np_array(ts)
    except ValueError:
        raise ValueError('make_meanstd_av expects ts to be array-like')

    if not core.is_one_dimensional(ts):
        raise ValueError('make_meanstd_av expects ts to be one-dimensional')

    if not isinstance(window, int):
        raise ValueError('make_meanstd_av expects window to be an integer')

    std = core.moving_std(ts, window)
    # 1.0 where volatility is below average, 0.0 otherwise
    return (std < np.mean(std)).astype(np.float64)
def make_clipping_av(ts, window):
    """Return an annotation vector that down-weights clipped subsequences.

    Each window is scored by how many of its samples sit at the global
    min/max of the series (clipping); windows with more clipping get lower
    annotation values.

    Parameters
    ----------
    ts : array_like
        The time series.
    window : int
        The window size used to compute the MatrixProfile.

    Returns
    -------
    np.array : av
        An annotation vector.

    Raises
    ------
    ValueError
        When ts is not array-like, not one-dimensional, or window is not
        an integer.
    """
    try:
        ts = core.to_np_array(ts)
    except ValueError:
        raise ValueError('make_clipping_av expects ts to be array-like')

    if not core.is_one_dimensional(ts):
        raise ValueError('make_clipping_av expects ts to be one-dimensional')

    if not isinstance(window, int):
        raise ValueError('make_clipping_av expects window to be an integer')

    hi, lo = np.max(ts), np.min(ts)
    length = len(ts) - window + 1
    av = np.zeros(length)
    for i in range(length):
        segment = ts[i:i + window]
        # count samples pinned at the global extremes
        av[i] = np.count_nonzero((segment == hi) | (segment == lo))

    av = av - np.min(av)
    peak = np.max(av)
    if peak == 0:
        return np.zeros(length)
    # invert: heavy clipping -> low annotation value
    return 1 - av / peak
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
def empty_mp():
    """Return a fresh, unpopulated MatrixProfile data structure.

    Returns
    -------
    dict : profile
        All profile fields set to None except 'class' ('MatrixProfile')
        and the nested 'data' dict with 'ts'/'query' placeholders.
    """
    profile = {key: None for key in (
        'mp', 'pi', 'rmp', 'rpi', 'lmp', 'lpi', 'metric', 'w', 'ez', 'join')}
    profile['data'] = {'ts': None, 'query': None}
    profile['class'] = 'MatrixProfile'
    profile['algorithm'] = None
    return profile
def pick_mp(profile, window):
    """
    Extract the MatrixProfile for one window size from a Pan-MatrixProfile.

    Parameters
    ----------
    profile : dict
        A Pan-MatrixProfile data structure.
    window : int
        The specific window size used to compute the desired MatrixProfile.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure.

    Raises
    ------
    ValueError
        If profile is not a Pan-MatrixProfile data structure.
        If window is not an integer.
    RuntimeError
        If the desired window is not present in the PMP.
        (Previous docstring incorrectly listed this case under ValueError.)
    """
    # Bug fix: error messages referred to a nonexistent 'pluck_mp' function.
    if not core.is_pmp_obj(profile):
        raise ValueError('pick_mp expects profile as a PMP data structure!')

    if not isinstance(window, int):
        raise ValueError('pick_mp expects window to be an int!')

    mp_profile = empty_mp()

    # locate the PMP row computed with the requested window
    windows = profile.get('windows')
    window_index = np.argwhere(windows == window)

    if len(window_index) < 1:
        raise RuntimeError('Unable to find window {} in the provided PMP!'.format(window))

    window_index = window_index.flatten()[0]
    window = windows[window_index]

    # trim the row to the valid profile length for this window
    mp = profile['pmp'][window_index]
    n = len(mp)
    mp_profile['mp'] = mp[0:n-window+1]
    mp_profile['pi'] = profile['pmpi'][window_index][0:n-window+1]
    mp_profile['metric'] = profile['metric']
    mp_profile['data']['ts'] = profile['data']['ts']
    mp_profile['join'] = False
    mp_profile['w'] = int(window)
    # standard exclusion zone: a quarter of the window size
    mp_profile['ez'] = int(np.floor(windows[window_index] / 4))
    mp_profile['algorithm'] = 'mpx'

    return mp_profile
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
# Third-party imports
import numpy as np
# Project imports
from matrixprofile import core
def validate_preprocess_kwargs(preprocessing_kwargs):
    """
    Validates the keyword arguments destined for the preprocess function.

    Parameters
    ----------
    preprocessing_kwargs : dict-like or None or False
        A dictionary object to store keyword arguments for the preprocess
        function. It can also be None/False/{}/"".

    Returns
    -------
    valid_kwargs : dict-like or None
        The valid keyword arguments for the preprocess function.
        Returns None if the input preprocessing_kwargs is None/False/{}/"".

    Raises
    ------
    ValueError
        If preprocessing_kwargs is not dict-like or None.
        If it contains unrecognized keys, or invalid values for 'window',
        'impute_method', 'impute_direction' or 'add_noise'.
    """
    # falsy input (None, {}, "", False) means "no preprocessing options"
    if not preprocessing_kwargs:
        return None

    valid_preprocessing_kwargs_keys = {'window', 'impute_method',
                                       'impute_direction', 'add_noise'}

    if not isinstance(preprocessing_kwargs, dict):
        raise ValueError("The parameter 'preprocessing_kwargs' is not dict like!")

    if not set(preprocessing_kwargs.keys()).issubset(valid_preprocessing_kwargs_keys):
        raise ValueError('invalid key(s) for preprocessing_kwargs! '
                         'valid key(s) should include ' + str(valid_preprocessing_kwargs_keys))

    methods = ['mean', 'median', 'min', 'max']
    directions = ['forward', 'fwd', 'f', 'backward', 'bwd', 'b']

    # start from the documented defaults and overwrite with validated values
    valid_kwargs = {
        'window': 4,
        'impute_method': 'mean',
        'impute_direction': 'forward',
        'add_noise': True,
    }

    if 'window' in preprocessing_kwargs:
        if not isinstance(preprocessing_kwargs['window'], int):
            raise ValueError("The value for preprocessing_kwargs['window'] is not an integer!")
        valid_kwargs['window'] = preprocessing_kwargs['window']

    if 'impute_method' in preprocessing_kwargs:
        if preprocessing_kwargs['impute_method'] not in methods:
            raise ValueError('invalid imputation method! valid include options: ' + ', '.join(methods))
        valid_kwargs['impute_method'] = preprocessing_kwargs['impute_method']

    if 'impute_direction' in preprocessing_kwargs:
        if preprocessing_kwargs['impute_direction'] not in directions:
            raise ValueError('invalid imputation direction! valid include options: ' + ', '.join(directions))
        valid_kwargs['impute_direction'] = preprocessing_kwargs['impute_direction']

    if 'add_noise' in preprocessing_kwargs:
        if not isinstance(preprocessing_kwargs['add_noise'], bool):
            raise ValueError("The value for preprocessing_kwargs['add_noise'] is not a boolean value!")
        valid_kwargs['add_noise'] = preprocessing_kwargs['add_noise']

    return valid_kwargs
def is_subsequence_constant(subsequence):
    """
    Determines whether the given time series subsequence is an array of
    constants.

    Parameters
    ----------
    subsequence : array_like
        The time series subsequence to analyze.

    Returns
    -------
    is_constant : bool
        A boolean value indicating whether the given subsequence is an array
        of constants.
    """
    if not core.is_array_like(subsequence):
        raise ValueError('subsequence is not array like!')

    arr = core.to_np_array(subsequence)

    # constant iff every element matches the first one
    return np.all(arr == arr[0])
def add_noise_to_series(series):
    """
    Adds a small amount of uniform noise to the given time series.

    Parameters
    ----------
    series : array_like
        The time series subsequence to be added noise.

    Returns
    -------
    temp : array_like
        The time series subsequence after being added noise.
    """
    if not core.is_array_like(series):
        raise ValueError('series is not array like!')

    values = np.copy(core.to_np_array(series))
    # tiny magnitude: enough to break exact constancy without changing shape
    noise = np.random.uniform(0, 0.0000009, size=len(values))

    return values + noise
def impute_missing(ts, window, method='mean', direction='forward'):
    """
    Imputes missing data in time series.

    Parameters
    ----------
    ts : array_like
        The time series to be handled.
    window : int
        The window size to compute the mean/median/minimum value/maximum
        value.
    method : string, Default = 'mean'
        A string indicating the data imputation method, which should be
        'mean', 'median', 'min' or 'max'.
    direction : string, Default = 'forward'
        A string indicating the data imputation direction, which should be
        'forward', 'fwd', 'f', 'backward', 'bwd', 'b'. If the direction is
        forward, we use previous data for imputation; if the direction is
        backward, we use subsequent data for imputation.

    Returns
    -------
    temp : array_like
        The time series after being imputed missing data.

    Raises
    ------
    ValueError
        If ts is not array_like, method or direction is invalid, or window
        is not an integer.
    """
    method_map = {
        'mean': np.mean,
        'median': np.median,
        'min': np.min,
        'max': np.max
    }
    directions = ['forward', 'fwd', 'f', 'backward', 'bwd', 'b']

    if not core.is_array_like(ts):
        raise ValueError('ts is not array like!')

    if method not in method_map:
        raise ValueError('invalid imputation method! valid include options: {}'.format(', '.join(method_map.keys())))

    if direction not in directions:
        raise ValueError('invalid imputation direction! valid include options: ' + ', '.join(directions))

    if not isinstance(window, int):
        raise ValueError("window is not an integer!")

    temp = np.copy(core.to_np_array(ts))
    nan_infs = core.nan_inf_indices(temp)
    func = method_map[method]

    # Anchor the endpoints first: replace a missing first/last value with the
    # nearest finite value so every window has at least one finite entry.
    if np.isnan(temp[0]) or np.isinf(temp[0]):
        temp[0] = temp[~nan_infs][0]
        nan_infs = core.nan_inf_indices(temp)

    if np.isnan(temp[-1]) or np.isinf(temp[-1]):
        temp[-1] = temp[~nan_infs][-1]
        nan_infs = core.nan_inf_indices(temp)

    index_order = None
    if direction.startswith('f'):
        # fill forward: earlier (already imputed) data feeds later windows
        index_order = range(len(temp) - window + 1)
    elif direction.startswith('b'):
        # fill backward: later data feeds earlier windows.
        # BUG FIX: previously range(len(temp) - window + 1, 0, -1), which
        # started one past the last valid window start and stopped before
        # index 0, so the first window was never imputed; corrected to cover
        # every valid window start down to and including 0.
        index_order = range(len(temp) - window, -1, -1)

    for index in index_order:
        start = index
        end = index + window

        if np.any(nan_infs[start:end]):
            nan_infs_subseq = nan_infs[start:end]
            # replace the missing entries with the statistic of the finite
            # entries within this window
            stat = func(temp[start:end][~nan_infs_subseq])
            temp[start:end][nan_infs_subseq] = stat

            # refresh the mask so newly filled values count as finite
            nan_infs = core.nan_inf_indices(temp)

    return temp
def preprocess(ts, window, impute_method='mean', impute_direction='forward', add_noise=True):
    """
    Preprocesses the given time series by imputing missing data and adding
    noise to constant subsequences.

    Parameters
    ----------
    ts : array_like
        The time series to be preprocessed.
    window : int
        The window size to compute the mean/median/minimum value/maximum
        value.
    impute_method : string, Default = 'mean'
        A string indicating the data imputation method, which should be
        'mean', 'median', 'min' or 'max'.
    impute_direction : string, Default = 'forward'
        A string indicating the data imputation direction, which should be
        'forward', 'fwd', 'f', 'backward', 'bwd', 'b'. If the direction is
        forward, we use previous data for imputation; if the direction is
        backward, we use subsequent data for imputation.
    add_noise : bool, Default = True
        A boolean value indicating whether noise needs to be added into the
        time series.

    Returns
    -------
    temp : array_like
        The time series after being preprocessed.
    """
    if not core.is_array_like(ts):
        raise ValueError('ts is not array like!')

    temp = np.copy(core.to_np_array(ts))

    # fill in missing values first
    temp = impute_missing(temp, window, method=impute_method,
                          direction=impute_direction)

    # constant windows receive a tiny amount of noise so they are not
    # degenerate for downstream distance computations
    if add_noise:
        for start in range(len(temp) - window + 1):
            end = start + window
            window_vals = temp[start:end]
            if is_subsequence_constant(window_vals):
                temp[start:end] = add_noise_to_series(window_vals)

    return temp
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
def idealized_arc_curve(width, index):
    """
    Returns the value at x for the parabola of width n and height n / 2.

    Formula taken from https://www.desmos.com/calculator/awtnrxh6rk.

    Parameters
    ----------
    width : int
        Length of the time series to calculate the parabola for.
    index : int
        Location to compute the parabola value at.

    Returns
    -------
    float : y
        The value at index for the parabola.
    """
    # downward parabola with vertex at (width / 2, width / 2) and zeros at
    # both ends of the series
    peak = width / 2
    vertex_x = width / 2
    coeff = peak / (width / 2) ** 2
    return -(coeff * (index - vertex_x) ** 2) + peak
def fluss(profile):
    """
    Computes the corrected arc curve (CAC) for the MatrixProfile index. This
    algorithm provides Fast Low-cost Unipotent Semantic Segmentation.

    Parameters
    ----------
    profile : dict
        Data structure from a MatrixProfile algorithm.

    Returns
    -------
    array_like : corrected_arc_curve
        The corrected arc curve for the profile. Values lie in [0, 1]; low
        values indicate likely segment boundaries.
    """
    if not core.is_mp_obj(profile):
        raise ValueError('profile must be a MatrixProfile structure')

    mpi = profile.get('pi')
    w = profile.get('w')
    n = len(mpi)

    # nnmark holds the net change in arc count at each index: each (i, mpi[i])
    # pair defines an arc; +1 is recorded just after its left endpoint and -1
    # at its right endpoint, so a cumulative sum counts arcs crossing strictly
    # between the endpoints.
    nnmark = np.zeros(n)

    # find the number of additional arcs starting to cross over each index
    for i in range(n):
        mpi_val = mpi[i]
        small = int(min(i, mpi_val))
        large = int(max(i, mpi_val))
        nnmark[small + 1] = nnmark[small + 1] + 1
        nnmark[large] = nnmark[large] - 1

    # cumulatively sum all crossing arcs at each index
    cross_count = np.cumsum(nnmark)

    # compute ideal arc curve for all indices and normalize the observed
    # crossing counts by it
    idealized = np.apply_along_axis(lambda i: idealized_arc_curve(n, i), 0, np.arange(0, n))
    idealized = cross_count / idealized

    # correct the arc curve so that it is between 0 and 1
    idealized[idealized > 1] = 1
    corrected_arc_curve = idealized

    # correct the head and tail with the window size: edge regions have too
    # few possible arcs to be meaningful, so they are pinned to 1
    corrected_arc_curve[:w] = 1
    corrected_arc_curve[-w:] = 1

    return corrected_arc_curve
def extract_regimes(profile, num_regimes=3):
    """
    Given a MatrixProfile, computes the corrected arc curve and extracts the
    desired number of regimes. Regimes are computed with an exclusion zone of
    5 * window size per the authors.

    The author states:

        This exclusion zone is based on an assumption that regimes will have
        multiple repetitions; FLUSS is not able to segment single gesture
        patterns.

    Parameters
    ----------
    profile : dict
        Data structure from a MatrixProfile algorithm.
    num_regimes : int
        The desired number of regimes to find.

    Returns
    -------
    dict : profile
        The original MatrixProfile object with additional keys containing.

        >>> {
        >>>     'cac': The corrected arc curve
        >>>     'cac_ez': The exclusion zone used
        >>>     'regimes': Array of starting indices indicating a regime.
        >>> }
    """
    if not core.is_mp_obj(profile):
        raise ValueError('profile must be a MatrixProfile structure')

    cac = profile.get('cac')
    window_size = profile.get('w')
    ez = window_size * 5

    # lazily compute and cache the corrected arc curve when missing
    if cac is None:
        cac = fluss(profile)
        profile['cac'] = cac

    working = np.copy(cac)
    n = len(working)
    regimes = []

    for _ in range(num_regimes):
        # each regime boundary is the current global minimum of the CAC
        lowest = np.argmin(working)
        regimes.append(lowest)

        # mask out the surrounding exclusion zone before the next search
        ez_start = np.max([0, lowest - ez])
        ez_end = np.min([n, lowest + ez])
        working[ez_start:ez_end] = np.inf

    profile['regimes'] = np.array(regimes, dtype=int)
    profile['cac_ez'] = ez

    return profile
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mpdist import mpdist_vector
def snippets(ts, snippet_size, num_snippets=2, window_size=None):
    """
    The snippets algorithm is used to summarize your time series by
    identifying N number of representative subsequences. If you want to
    identify typical patterns in your time series, then this is the algorithm
    to use.

    Parameters
    ----------
    ts : array_like
        The time series.
    snippet_size : int
        The size of snippet desired.
    num_snippets : int, Default 2
        The number of snippets you would like to find.
    window_size : int, Default (snippet_size / 2)
        The window size.

    Returns
    -------
    list : snippets
        A list of snippets as dictionary objects with the following structure.

        >>> {
        >>>     index: the index of the snippet,
        >>>     snippet: the snippet values,
        >>>     neighbors: the starting indices of all subsequences similar to the current snippet
        >>>     fraction: fraction of the snippet
        >>> }

    Raises
    ------
    ValueError
        If snippet_size is invalid, the series is too short, or window_size
        is not smaller than snippet_size.
    """
    ts = core.to_np_array(ts).astype('d')
    time_series_len = len(ts)
    n = len(ts)

    if not isinstance(snippet_size, int) or snippet_size < 4:
        raise ValueError('snippet_size must be an integer >= 4')

    if n < (2 * snippet_size):
        raise ValueError('Time series is too short relative to snippet length')

    if not window_size:
        window_size = int(np.floor(snippet_size / 2))

    if window_size >= snippet_size:
        raise ValueError('window_size must be smaller than snippet_size')

    # pad end of time series with zeros so it divides evenly by snippet_size
    num_zeros = int(snippet_size * np.ceil(n / snippet_size) - n)
    ts = np.append(ts, np.zeros(num_zeros))

    # compute an MPdist profile for each candidate snippet position.
    # NOTE(review): the query slice has length snippet_size - 1; this looks
    # like an off-by-one carried over from a MATLAB port -- confirm against
    # the reference implementation before changing.
    indices = np.arange(0, len(ts) - snippet_size, snippet_size)
    distances = []

    for i in indices:
        distance = mpdist_vector(ts, ts[i:(i + snippet_size - 1)], int(window_size))
        distances.append(distance)

    distances = np.array(distances)

    # greedily select the num_snippets candidates that most reduce the total
    # pointwise-minimum distance coverage
    snippets = []
    minis = np.inf
    total_min = None
    for _ in range(num_snippets):
        minims = np.inf

        for i in range(len(indices)):
            s = np.sum(np.minimum(distances[i, :], minis))
            if minims > s:
                minims = s
                index = i

        minis = np.minimum(distances[index, :], minis)
        actual_index = indices[index]
        snippet = ts[actual_index:actual_index + snippet_size]
        snippet_distance = distances[index]
        snippets.append({
            'index': actual_index,
            'snippet': snippet,
            'distance': snippet_distance
        })

        if total_min is None:
            total_min = snippet_distance
        else:
            total_min = np.minimum(total_min, snippet_distance)

    # compute the fraction of each snippet
    for snippet in snippets:
        mask = (snippet['distance'] <= total_min)

        # create a key "neighbors" for the snippet dict,
        # and store all the time series indices for the data represented by a snippet (arr[mask])
        arr = np.arange(len(mask))

        # max_index indicates the length of a profile, which is (n-m) in the Snippets paper
        max_index = time_series_len - snippet_size

        # since 'ts' is padded with 0 before calculating the MPdist profile,
        # all parts of the profile that are out of range [0, n-m] cannot be
        # used as neighboring snippet indices
        snippet['neighbors'] = list(filter(lambda x: x <= max_index, arr[mask]))

        # Add the last m time series indices into the neighboring snippet indices
        if max_index in snippet['neighbors']:
            last_m_indices = list(range(max_index + 1, time_series_len))
            snippet['neighbors'].extend(last_m_indices)

        snippet['fraction'] = mask.sum() / (len(ts) - snippet_size)
        # NOTE(review): subtracting a boolean mask from the running distance
        # minimum looks suspicious (mixes counts with distances) -- verify
        # against the reference implementation before relying on total_min
        # beyond this loop.
        total_min = total_min - mask
        del snippet['distance']

    return snippets
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mpdist import mpdist
def compute_dist(args):
    """
    Helper function to parallelize pairwise distance calculation.

    Parameters
    ----------
    args : tuple
        The arguments to pass to the mpdist calculation:
        (k, ts_a, ts_b, window_size, threshold).

    Returns
    -------
    values : tuple
        The kth index and distance.
    """
    # unpack the packed arguments produced by pairwise_dist
    k, ts_a, ts_b, window_size, threshold = args
    return (k, mpdist(ts_a, ts_b, window_size, threshold=threshold))
def pairwise_dist(X, window_size, threshold=0.05, n_jobs=1):
    """
    Utility function to compute all pairwise distances between the timeseries
    using MPDist.

    Note
    ----
    scipy.spatial.distance.pdist cannot be used because it does not allow for
    jagged arrays; however, its code was used as a reference in creating this
    function.
    https://github.com/scipy/scipy/blob/master/scipy/spatial/distance.py#L2039

    Parameters
    ----------
    X : array_like
        An array_like object containing time series to compute distances for.
    window_size : int
        The window size to use in computing the MPDist.
    threshold : float
        The threshold used to compute MPDist.
    n_jobs : int
        Number of CPU cores to use during computation.

    Returns
    -------
    Y : np.ndarray
        Returns a condensed distance matrix Y. For
        each :math:`i` and :math:`j` (where :math:`i<j<m`), where m is the
        number of original observations. The metric ``dist(u=X[i], v=X[j])``
        is computed and stored in entry ``ij``.
    """
    if not core.is_array_like(X):
        raise ValueError('X must be array_like!')

    # identify shape based on iterable or np.ndarray.shape
    m = 0
    if isinstance(X, np.ndarray) and len(X.shape) == 2:
        m = X.shape[0]
    else:
        m = len(X)

    # condensed distance matrix: one entry per unordered pair (i, j), i < j
    dm = np.empty((m * (m - 1)) // 2, dtype=np.double)
    k = 0

    if n_jobs == 1:
        # serial path: fill entries in condensed-index order directly
        for i in range(0, m - 1):
            for j in range(i + 1, m):
                dm[k] = mpdist(X[i], X[j], window_size, threshold=threshold,
                               n_jobs=n_jobs)
                k = k + 1
    else:
        # parallel path: tag each pair with its condensed index k so results
        # can be written back regardless of completion order
        args = []
        for i in range(0, m - 1):
            for j in range(i + 1, m):
                args.append((k, X[i], X[j], window_size, threshold))
                k = k + 1

        # core.mp_pool() presumably returns the multiprocessing Pool class;
        # calling the result with n_jobs constructs the pool -- TODO confirm
        with core.mp_pool()(n_jobs) as pool:
            results = pool.map(compute_dist, args)

        # put results in the matrix
        for result in results:
            dm[result[0]] = result[1]

    return dm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import math
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.cympx import mpx_ab_parallel as cympx_ab_parallel
from matrixprofile.algorithms.mass2 import mass2
def mpdist(ts, ts_b, w, threshold=0.05, n_jobs=1):
    """
    Computes the MPDist between the two series ts and ts_b. For more details
    refer to the paper:

    Matrix Profile XII: MPdist: A Novel Time Series Distance Measure to Allow
    Data Mining in More Challenging Scenarios. Shaghayegh Gharghabi,
    Shima Imani, Anthony Bagnall, Amirali Darvishzadeh, Eamonn Keogh. ICDM 2018

    Parameters
    ----------
    ts : array_like
        The time series to compute the matrix profile for.
    ts_b : array_like
        The time series to compare against.
    w : int
        The window size.
    threshold : float, Default 0.05
        The percentile in which the distance is taken from. By default it is
        set to 0.05 based on empirical research results from the paper.
        Generally, you should not change this unless you know what you are
        doing! This value must be a float greater than 0 and less than 1.
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    float : mpdist
        The MPDist.
    """
    ts = core.to_np_array(ts).astype('d')
    ts_b = core.to_np_array(ts_b).astype('d')
    n_jobs = core.valid_n_jobs(n_jobs)

    if not core.is_one_dimensional(ts):
        raise ValueError('ts must be one dimensional!')

    if not core.is_one_dimensional(ts_b):
        raise ValueError('ts_b must be one dimensional!')

    if not isinstance(threshold, float) or threshold <= 0 or threshold >= 1:
        raise ValueError('threshold must be a float greater than 0 and less than 1')

    # AB-join in both directions, then pool the two profiles together
    mp, mpi, mpb, mpib = cympx_ab_parallel(ts, ts_b, w, 0, n_jobs)
    pooled = np.append(mp, mpb)
    total_len = len(ts) + len(ts_b)

    # only finite profile values participate in the distance
    finite_sorted = np.sort(pooled[~core.nan_inf_indices(pooled)])

    if len(finite_sorted) == 0:
        return np.inf

    # MPDist is the k-th smallest finite value, k = ceil(threshold * total),
    # clamped to the last available entry
    k = int(np.ceil(threshold * total_len)) - 1
    return finite_sorted[min(len(finite_sorted) - 1, k)]
def mass_distance_matrix(ts, query, w):
    """
    Computes a distance matrix using MASS that is used in the mpdist_vector
    algorithm.

    Parameters
    ----------
    ts : array_like
        The time series to compute the matrix for.
    query : array_like
        The time series to compare against.
    w : int
        The window size.

    Returns
    -------
    array_like : dist_matrix
        The MASS distance matrix.
    """
    # one row per query subsequence of length w
    window_count = len(query) - w + 1

    return np.array([np.real(mass2(ts, query[j:j + w]))
                     for j in range(window_count)])
def calculate_mpdist(profile, threshold, data_length):
    """
    Computes the MPDist given a profile, threshold and data length. This is
    primarily used for the MPDist Vector algorithm.

    Parameters
    ----------
    profile : array_like
        The profile to calculate the mpdist for.
    threshold : float
        The threshold to use in computing the distance.
    data_length : int
        The length of the original data.

    Returns
    -------
    float : mpdist
        The MPDist.
    """
    dist_loc = int(np.ceil(threshold * data_length))

    # only finite values participate in the distance
    profile_sorted = np.sort(profile)
    mask = core.not_nan_inf_indices(profile_sorted)
    profile_clean = profile_sorted[mask]

    if len(profile_clean) < 1:
        # no finite values at all
        distance = np.inf
    elif len(profile_clean) > dist_loc:
        # BUG FIX: the condition was previously `>=`, which indexed one past
        # the end (IndexError) when len(profile_clean) == dist_loc; the
        # fallback below now handles that boundary.
        distance = profile_clean[dist_loc]
    else:
        distance = np.max(profile_clean)

    return distance
def mpdist_vector(ts, ts_b, w):
    """
    Computes a vector of MPDist measures: one per alignment of ts_b against
    ts.

    Parameters
    ----------
    ts : array_like
        The time series to compute the matrix for.
    ts_b : array_like
        The time series to compare against.
    w : int
        The window size.

    Returns
    -------
    array_like : mpdist_vector
        The MPDist vector.
    """
    # distance between every window of ts_b and every window of ts
    matrix = mass_distance_matrix(ts, ts_b, w)
    rows = matrix.shape[0]

    # column minimums supply the "right" histogram; moving minimums along
    # each row (window = subsequence count) supply the "left" histogram
    all_right_hist = matrix.min(axis=0)
    mass_minimums = np.apply_along_axis(core.moving_min, 1, matrix, window=rows)

    # recreate the matrix profile for each alignment and compute its MPDist.
    # Dead pre-allocations of mpdist_array/left_hist (immediately overwritten
    # in the original) have been removed.
    mpdist_length = len(ts) - len(ts_b) + 1
    right_hist_length = len(ts_b) - w + 1

    mpdist_array = []
    for i in range(mpdist_length):
        right_hist = all_right_hist[i:right_hist_length + i]
        left_hist = mass_minimums[:, i]
        profile = np.append(left_hist, right_hist)
        mpdist_array.append(calculate_mpdist(profile, 0.05, 2 * len(ts_b)))

    return np.array(mpdist_array)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
from scipy.cluster.hierarchy import linkage, inconsistent, fcluster
from scipy.cluster.hierarchy import cophenet
from matrixprofile import core
from matrixprofile.algorithms.pairwise_dist import pairwise_dist
def hierarchical_clusters(X, window_size, t, threshold=0.05, method='single',
                          depth=2, criterion='distance', n_jobs=1):
    """
    Cluster M time series into hierarchical clusters using an agglomerative
    approach. This function is more or less a convenience wrapper around
    SciPy's scipy.cluster.hierarchy functions, but uses the MPDist algorithm
    to compute distances between each pair of time series.

    Note
    ----
    Memory usage could potentially be high depending on the length of your
    time series and how many distances are computed!

    Parameters
    ----------
    X : array_like
        An M x N matrix where M is the time series and N is the observations
        at a given time.
    window_size : int
        The window size used to compute the MPDist.
    t : scalar
        For criteria 'inconsistent', 'distance' or 'monocrit', this is the
        threshold to apply when forming flat clusters.
        For 'maxclust' criteria, this would be max number of clusters
        requested.
    threshold : float, Default 0.05
        The percentile in which the MPDist is taken from. By default it is
        set to 0.05 based on empirical research results from the paper.
        Generally, you should not change this unless you know what you are
        doing! This value must be a float greater than 0 and less than 1.
    method : str, Default single
        The linkage algorithm to use.
        Options: {single, complete, average, weighted}
    depth : int, Default 2
        A non-negative value more than 0 to specify the number of levels below
        a non-singleton cluster to allow.
    criterion : str, Default distance
        Options: {inconsistent, distance, maxclust, monocrit}
        The criterion to use in forming flat clusters.

        ``inconsistent`` : if a cluster node and all its descendants have an
            inconsistent value less than or equal to `t`, all its leaf
            descendants belong to the same flat cluster.
        ``distance`` : forms flat clusters so that original observations in
            each flat cluster have no greater a cophenetic distance than `t`.
        ``maxclust`` : finds a minimum threshold so that no more than `t`
            flat clusters are formed.
        ``monocrit`` : forms a flat cluster from node c with index i when
            ``monocrit[j] <= t``.
    n_jobs : int, Default 1
        The number of cpu cores used to compute the MPDist.

    Returns
    -------
    clusters : dict
        Clustering statistics, distances and labels.

        >>> {
        >>>     pairwise_distances: MPDist between pairs of time series as
        >>>         np.ndarray,
        >>>     linkage_matrix: clustering linkage matrix as np.ndarray,
        >>>     inconsistency_statistics: inconsistency stats as np.ndarray,
        >>>     assignments: cluster label associated with input X location as
        >>>         np.ndarray,
        >>>     cophenet: float the cophenet statistic,
        >>>     cophenet_distances: cophenet distances between pairs of time
        >>>         series as np.ndarray
        >>>     class: hclusters
        >>> }
    """
    # valid SciPy clustering options that work with a custom distance metric
    # (set literals replace the former set([...]) constructor calls)
    valid_methods = {'single', 'complete', 'average', 'weighted'}
    valid_criterions = {'inconsistent', 'distance', 'monocrit', 'maxclust'}
    method = method.lower()
    criterion = criterion.lower()

    # error handling
    if not core.is_array_like(X):
        raise ValueError('X must be array like!')

    if not isinstance(t, (float, int)):
        raise ValueError('t must be a scalar (int or float)')

    if not isinstance(threshold, float) or threshold <= 0 or threshold >= 1:
        raise ValueError('threshold must be a float greater than 0 and less'\
            ' than 1')

    if not isinstance(depth, int) or depth < 1:
        raise ValueError('depth must be an integer greater than 0')

    if method not in valid_methods:
        opts_str = ', '.join(valid_methods)
        raise ValueError('method may only be one of: ' + opts_str)

    if criterion not in valid_criterions:
        opts_str = ', '.join(valid_criterions)
        raise ValueError('criterion may only be one of: ' + opts_str)

    # pairwise MPDist (condensed) -> linkage -> stats -> flat clusters
    Y = pairwise_dist(X, window_size, threshold=threshold, n_jobs=n_jobs)
    Z = linkage(Y, method=method)
    R = inconsistent(Z, d=depth)
    c, coph_dists = cophenet(Z, Y)
    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)

    return {
        'pairwise_distances': Y,
        'linkage_matrix': Z,
        'inconsistency_statistics': R,
        'assignments': T,
        'cophenet': c,
        'cophenet_distances': coph_dists,
        'class': 'hclusters'
    }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import math
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.cympx import mpx_parallel as cympx_parallel
from matrixprofile.algorithms.cympx import mpx_ab_parallel as cympx_ab_parallel
def mpx(ts, w, query=None, cross_correlation=False, n_jobs=1):
    """
    The MPX algorithm computes the matrix profile without using the FFT.

    Parameters
    ----------
    ts : array_like
        The time series to compute the matrix profile for.
    w : int
        The window size.
    query : array_like
        Optionally a query series.
    cross_correlation : bool, Default=False
        Determine if cross_correlation distance should be returned. It
        defaults to Euclidean Distance.
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure.

        >>> {
        >>>     'mp': The matrix profile,
        >>>     'pi': The matrix profile 1NN indices,
        >>>     'rmp': The right matrix profile,
        >>>     'rpi': The right matrix profile 1NN indices,
        >>>     'lmp': The left matrix profile,
        >>>     'lpi': The left matrix profile 1NN indices,
        >>>     'metric': The distance metric computed for the mp,
        >>>     'w': The window size used to compute the matrix profile,
        >>>     'ez': The exclusion zone used,
        >>>     'join': Flag indicating if a similarity join was computed,
        >>>     'sample_pct': Percentage of samples used in computing the MP,
        >>>     'data': {
        >>>         'ts': Time series data,
        >>>         'query': Query data if supplied
        >>>     }
        >>>     'class': "MatrixProfile"
        >>>     'algorithm': "mpx"
        >>> }
    """
    ts = core.to_np_array(ts).astype('d')
    n_jobs = core.valid_n_jobs(n_jobs)

    # a supplied query switches the computation to an AB similarity join
    is_join = False
    if core.is_array_like(query):
        is_join = True
        query = core.to_np_array(query).astype('d')
        mp, mpi, mpb, mpib = cympx_ab_parallel(ts, query, w,
                                               int(cross_correlation), n_jobs)
    else:
        mp, mpi = cympx_parallel(ts, w, int(cross_correlation), n_jobs)

    metric = 'cross_correlation' if cross_correlation else 'euclidean'

    return {
        'mp': np.asarray(mp),
        'pi': np.asarray(mpi),
        'rmp': None,
        'rpi': None,
        'lmp': None,
        'lpi': None,
        'metric': metric,
        'w': w,
        # joins use a quarter-window exclusion zone; self-joins use none here
        'ez': int(np.ceil(w / 4.0)) if is_join else 0,
        'join': is_join,
        'sample_pct': 1,
        'data': {
            'ts': ts,
            'query': query
        },
        'class': 'MatrixProfile',
        'algorithm': 'mpx'
    }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
# handle Python 2/3 Iterable import
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import math
import warnings
from matplotlib import pyplot as plt
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mpx import mpx
from matrixprofile.exceptions import NoSolutionPossible
def split(lower_bound, upper_bound, middle):
    """
    Helper function to split the indices for BFS.

    Returns a (left, right) pair of inclusive interval lists around middle;
    a side is None when middle sits on that bound.
    """
    if lower_bound == middle:
        return (None, [middle + 1, upper_bound])

    if upper_bound == middle:
        return ([lower_bound, middle - 1], None)

    return ([lower_bound, middle - 1], [middle + 1, upper_bound])
def binary_split(n):
    """
    Create a breadth first search for indices 0..n.

    Parameters
    ----------
    n : int
        The length of indices.

    Returns
    -------
    array_like :
        The indices to iterate to perform BFS.
    """
    # a single index needs no search order; a length of 1 would also cause
    # an infinite loop below, so return immediately
    if n < 2:
        return [0,]

    # always explore index 0 first, then BFS the interval [1, n - 1]
    order = [0]
    queue = [[1, n - 1]]

    while queue:
        low, high = queue.pop(0)
        mid = (low + high) // 2
        order.append(mid)

        if low == high:
            continue

        # enqueue the non-empty sub-intervals on either side of mid
        # (equivalent to split(low, high, mid))
        if low != mid:
            queue.append([low, mid - 1])
        if high != mid:
            queue.append([mid + 1, high])

    return order
def skimp(ts, windows=None, show_progress=False, cross_correlation=False,
          pmp_obj=None, sample_pct=0.1, n_jobs=1):
    """
    Computes the Pan Matrix Profile (PMP) for the given time series. When the
    time series is only passed, windows start from 8 and increase by increments
    of 2 up to length(ts) / 2. Also, the PMP is only computed using 10% of the
    windows unless sample_pct is set to a different value.

    Note
    ----
    When windows is explicitly provided, sample_pct no longer takes affect. The
    MP for all windows provided will be computed.

    Parameters
    ----------
    ts : array_like
        The time series.
    windows : array_like, default None
        The window sizes to compute MPs for. Must support indexing
        (e.g. list, range or np.ndarray).
    show_progress: bool, default = False
        Show the progress in percent complete in increments of 5% by printing
        it out to the console.
    cross_correlation : bool, default = False
        Return the MP values as Pearson Correlation instead of Euclidean
        distance.
    pmp_obj : dict, default = None
        Repurpose already computed window sizes with this provided PMP. It
        should be the output of a PMP algorithm such as skimp or maximum
        subsequence.
    sample_pct : float, default = 0.1 (10%)
        Number of window sizes to compute MPs for. Decimal percent between
        0 and 1.
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    dict : profile
        A Pan-MatrixProfile data structure.

        >>> {
        >>>     'pmp': the pan matrix profile as a 2D array,
        >>>     'pmpi': the pmp indices,
        >>>     'data': {
        >>>         'ts': time series used,
        >>>     },
        >>>     'windows': the windows used to compute the pmp,
        >>>     'sample_pct': the sample percent used,
        >>>     'metric':The distance metric computed for the pmp,
        >>>     'algorithm': the algorithm used,
        >>>     'class': PMP
        >>> }

    Raises
    ------
    ValueError :
        1. ts is not array_like.
        2. windows is not an iterable
        3. show_progress is not a boolean.
        4. cross_correlation is not a boolean.
        5. sample_pct is not between 0 and 1.
    """
    ts = core.to_np_array(ts)
    n = len(ts)

    # Argument validation
    if isinstance(windows, type(None)):
        start = 8
        end = int(math.floor(len(ts) / 2))
        windows = range(start, end + 1)

    # enforce the documented contract; Iterable was imported at the top of
    # this module but the check was never performed
    if not isinstance(windows, Iterable):
        raise ValueError('windows must be an iterable of window sizes!')

    if not isinstance(show_progress, bool):
        raise ValueError('show_progress must be a boolean!')

    if not isinstance(cross_correlation, bool):
        raise ValueError('cross_correlation must be a boolean!')

    if not isinstance(sample_pct, (int, float)) or sample_pct > 1 or sample_pct < 0:
        raise ValueError('sample_pct must be a decimal between 0 and 1')

    # create a breadth first search index list of our window sizes
    split_index = binary_split(len(windows))
    pmp = np.full((len(split_index), n), np.inf)

    # -1 marks "index not computed". The previous np.nan fill of an int
    # array is rejected by modern NumPy (NaN cannot be cast to integer) and
    # produced garbage sentinel values before that.
    pmpi = np.full((len(split_index), n), -1, dtype='int')
    idx = np.full(len(split_index), -1)

    # compute the sample pct index
    last_index = len(split_index)
    if sample_pct < 1:
        last_index = int(np.floor(len(split_index) * sample_pct))
        last_index = np.minimum(len(split_index), last_index)

    pct_shown = {}

    # compute all matrix profiles for each window size
    for i in range(last_index):
        window_size = windows[split_index[i]]

        # check if we already computed this MP given a passed in PMP
        if isinstance(pmp_obj, dict):
            cw = pmp_obj.get('windows', None)
            w_idx = np.argwhere(cw == window_size)

            # having the window provided, we simply copy over the data instead
            # of recomputing it
            if len(w_idx) == 1:
                w_idx = w_idx[0][0]
                pmp[split_index[i], :] = pmp_obj['pmp'][w_idx, :]
                pmpi[split_index[i], :] = pmp_obj['pmpi'][w_idx, :]
                continue

        profile = mpx(ts, window_size, cross_correlation=cross_correlation,
                      n_jobs=n_jobs)
        mp = profile.get('mp')
        pi = profile.get('pi')
        pmp[split_index[i], 0:len(mp)] = mp
        pmpi[split_index[i], 0:len(pi)] = pi

        j = split_index[i]
        while j < last_index and idx[j] != j:
            idx[j] = split_index[i]
            j = j + 1

        # output the progress
        if show_progress:
            # guard against ZeroDivisionError when only a single window is
            # computed (last_index == 1)
            denom = max(last_index - 1, 1)
            pct_complete = round((i / denom) * 100, 2)
            int_pct = math.floor(pct_complete)
            if int_pct % 5 == 0 and int_pct not in pct_shown:
                print('{}% complete'.format(int_pct))
                pct_shown[int_pct] = 1

    metric = 'euclidean'
    if cross_correlation:
        metric = 'pearson'

    return {
        'pmp': pmp,
        'pmpi': pmpi,
        'data': {
            'ts': ts,
        },
        'windows': np.array(windows),
        'sample_pct': sample_pct,
        'metric': metric,
        'algorithm': 'skimp',
        'class': 'PMP'
    }
def maximum_subsequence(ts, threshold=0.95, refine_stepsize=0.05, n_jobs=1,
                        include_pmp=False, lower_window=8):
    """
    Finds the maximum subsequence length based on the threshold provided. Note
    that this threshold is domain specific requiring some knowledge about the
    underyling time series in question.

    The subsequence length starts at 8 and iteratively doubles until the
    maximum correlation coefficient is no longer met. When no solution is
    possible given the threshold, a matrixprofile.exceptions.NoSolutionPossible
    exception is raised.

    Parameters
    ----------
    ts : array_like
        The time series to analyze.
    threshold : float, Default 0.95
        The correlation coefficient used as the threshold. It should be between
        0 and 1.
    refine_stepsize : float, Default 0.05
        Used in the refinement step to find a more precise upper window. It
        should be a percentage between 0.01 and 0.99.
    n_jobs : int, Default = 1
        Number of cpu cores to use.
    include_pmp : bool, default False
        Include the PanMatrixProfile for the computed windows.
    lower_window : int, default 8
        Lower bound of subsequence length that can be altered if required.

    Returns
    -------
    obj :
        With include_pmp=False (default)
        int : The maximum subsequence length based on the threshold provided.

        With include_pmp=True
        dict : A dict containing the upper window, windows and pmp.

        >>> {
        >>>     'upper_window': The upper window,
        >>>     'windows': array_like windows used to compute the pmp,
        >>>     'pmp': the pan matrix profile as a 2D array,
        >>>     'pmpi': the pmp indices,
        >>> }

    Raises
    ------
    NoSolutionPossible :
        When no window satisfies the threshold.
    """
    windows = np.array([], dtype='int')
    pearson = np.array([], dtype='d')
    pmp = []
    pmpi = []
    ts = core.to_np_array(ts)
    n = len(ts)
    correlation_max = np.inf
    window_size = lower_window
    max_window = int(np.floor(len(ts) / 2))

    def resize(mp, pi, n):
        """Pad a single MP/PI pair with inf/nan to length n so it aligns
        with the PMP, converting the MP from pearson to euclidean."""
        # operate on the arguments; the previous implementation ignored
        # them and reached into the enclosing profile dict instead
        mp = core.pearson_to_euclidean(mp, window_size)
        infs = np.full(n - mp.shape[0], np.inf)
        nans = np.full(n - pi.shape[0], np.nan)
        mp = np.append(mp, infs)
        pi = np.append(pi, nans)

        return (mp, pi)

    # first perform a wide search by doubling the window in each iteration
    while window_size <= max_window:
        profile = mpx(ts, window_size, cross_correlation=True)
        mask = ~np.isinf(profile['mp'])
        correlation_max = np.max(profile['mp'][mask])

        windows = np.append(windows, window_size)
        pearson = np.append(pearson, correlation_max)

        if include_pmp:
            mp, pi = resize(profile['mp'], profile['pi'], n)
            pmp.append(mp)
            pmpi.append(pi)

        if correlation_max < threshold:
            break

        window_size = window_size * 2

    # find last window within threshold and throw away
    # computations outside of the threshold
    mask = pearson > threshold
    pearson = pearson[mask]
    windows = windows[mask]

    if len(windows) < 1:
        # the format placeholder was previously never filled in
        raise NoSolutionPossible('Given the threshold {:.2f}, no window was '
                                 'found. Please try increasing your '
                                 'threshold.'.format(threshold))

    window_size = windows[-1]

    if include_pmp:
        pmp = np.vstack(pmp)[mask]
        pmpi = np.vstack(pmpi)[mask]

    # refine the upper window by increasing in + X% increments up to 2x
    test_windows = np.arange(refine_stepsize, 1, step=refine_stepsize) + 1
    test_windows = np.append(test_windows, 2)
    test_windows = np.floor(test_windows * window_size).astype('int')

    # keep windows divisible by 2
    mask = test_windows % 2 == 1
    test_windows[mask] = test_windows[mask] + 1

    for window_size in test_windows:
        profile = mpx(ts, window_size, cross_correlation=True)
        mask = ~np.isinf(profile['mp'])
        correlation_max = np.max(profile['mp'][mask])

        windows = np.append(windows, window_size)
        pearson = np.append(pearson, correlation_max)

        if include_pmp:
            mp, pi = resize(profile['mp'], profile['pi'], n)
            pmp = np.append(pmp, [mp,], axis=0)
            pmpi = np.append(pmpi, [pi,], axis=0)

        if correlation_max < threshold:
            break

    if include_pmp:
        return {
            'upper_window': window_size,
            'windows': windows,
            'pmp': pmp,
            'pmpi': pmpi
        }

    return window_size
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mass2 import mass2
def pmp_top_k_motifs(profile, exclusion_zone=None, k=3, max_neighbors=10, radius=3):
    """
    Find the top K number of motifs (patterns) given a pan matrix profile. By
    default the algorithm will find up to 3 motifs (k) and up to 10 of their
    neighbors with a radius of 3 * min_dist.

    Parameters
    ----------
    profile : dict
        The output from one of the pan matrix profile algorithms.
    exclusion_zone : int, Default to algorithm ez
        Desired number of values to exclude on both sides of the motif. This
        avoids trivial matches. It defaults to half of the computed window
        size. Setting the exclusion zone to 0 makes it not apply.
    k : int, Default = 3
        Desired number of motifs to find.
    max_neighbors : int, Default = 10
        The maximum number of neighbors to include for a given motif.
    radius : int, Default = 3
        The radius is used to associate a neighbor by checking if the
        neighbor's distance is less than or equal to dist * radius

    Returns
    -------
    profile : dict
        The original input obj with the addition of the "motifs" key. The
        motifs key consists of the following structure.

        A list of dicts containing motif indices and their corresponding
        neighbor indices. Note that each index is a (row, col) index
        corresponding to the pan matrix profile.

        >>> [
        >>>     {
        >>>         'motifs': [first_index, second_index],
        >>>         'neighbors': [index, index, index ...max_neighbors]
        >>>     }
        >>> ]
    """
    if not core.is_pmp_obj(profile):
        raise ValueError('Expecting PMP data structure!')

    data = profile.get('data', None)
    ts = data.get('ts', None)
    data_len = len(ts)

    pmp = profile.get('pmp', None)
    profile_len = pmp.shape[1]
    pmpi = profile.get('pmpi', None)
    windows = profile.get('windows', None)

    # make sure we are working with Euclidean distances
    tmp = None
    if core.is_pearson_array(pmp):
        tmp = core.pearson_to_euclidean(pmp, windows)
    else:
        tmp = np.copy(pmp).astype('d')

    # replace nan and infs with infinity
    tmp[core.nan_inf_indices(tmp)] = np.inf

    motifs = []
    for _ in range(k):
        min_idx = np.unravel_index(np.argmin(tmp), tmp.shape)
        min_dist = tmp[min_idx]

        # nothing else to find...
        if core.is_nan_inf(min_dist):
            break

        # create the motif pair
        min_row_idx = min_idx[0]
        min_col_idx = min_idx[1]

        # motif pairs are respective to the column of the matching row
        first_idx = np.min([min_col_idx, pmpi[min_row_idx][min_col_idx]])
        second_idx = np.max([min_col_idx, pmpi[min_row_idx][min_col_idx]])

        # compute distance profile for first appearance
        window_size = windows[min_row_idx]
        query = ts[first_idx:first_idx + window_size]
        distance_profile = mass2(ts, query)

        # extend the distance profile to be as long as the original
        infs = np.full(profile_len - len(distance_profile), np.inf)
        distance_profile = np.append(distance_profile, infs)

        # exclude already picked motifs and neighbors
        mask = core.nan_inf_indices(pmp[min_row_idx])
        distance_profile[mask] = np.inf

        # determine the exclusion zone if not set. Test against None so that
        # an explicit 0 disables the zone as documented (and to match
        # mp_top_k_motifs); "if not exclusion_zone" silently replaced 0
        # with window_size / 2.
        if exclusion_zone is None:
            exclusion_zone = int(np.floor(window_size / 2))

        # apply exclusion zone for motif pair
        for j in (first_idx, second_idx):
            distance_profile = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                j,
                distance_profile
            )
            tmp2 = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                j,
                tmp[min_row_idx]
            )
            tmp[min_row_idx] = tmp2

        # find up to max_neighbors
        neighbors = []
        for j in range(max_neighbors):
            neighbor_idx = np.argmin(distance_profile)
            neighbor_dist = np.real(distance_profile[neighbor_idx])
            not_in_radius = not ((radius * min_dist) >= neighbor_dist)

            # no more neighbors exist based on radius
            if core.is_nan_inf(neighbor_dist) or not_in_radius:
                break

            # add neighbor and apply exclusion zone
            neighbors.append((min_row_idx, neighbor_idx))
            distance_profile = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                neighbor_idx,
                distance_profile
            )
            tmp2 = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                neighbor_idx,
                tmp[min_row_idx]
            )
            tmp[min_row_idx] = tmp2

        # add the motifs and neighbors
        # note that they are (row, col) indices
        motifs.append({
            'motifs': [(min_row_idx, first_idx), (min_row_idx, second_idx)],
            'neighbors': neighbors
        })

    profile['motifs'] = motifs

    return profile
def mp_top_k_motifs(profile, exclusion_zone=None, k=3, max_neighbors=10, radius=3, use_cmp=False):
    """
    Find the top K number of motifs (patterns) given a matrix profile. By
    default the algorithm will find up to 3 motifs (k) and up to 10 of their
    neighbors with a radius of 3 * min_dist using the regular matrix profile.

    Parameters
    ----------
    profile : dict
        The output from one of the matrix profile algorithms.
    exclusion_zone : int, Default to algorithm ez
        Desired number of values to exclude on both sides of the motif. This
        avoids trivial matches. It defaults to half of the computed window
        size. Setting the exclusion zone to 0 makes it not apply.
    k : int, Default = 3
        Desired number of motifs to find.
    max_neighbors : int, Default = 10
        The maximum number of neighbors to include for a given motif.
    radius : int, Default = 3
        The radius is used to associate a neighbor by checking if the
        neighbor's distance is less than or equal to dist * radius
    use_cmp : bool, Default = False
        Use the Corrected Matrix Profile to compute the motifs.

    Returns
    -------
    dict : profile
        The original input obj with the addition of the "motifs" key. The
        motifs key consists of the following structure.

        A list of dicts containing motif indices and their corresponding
        neighbor indices.

        >>> [
        >>>     {
        >>>         'motifs': [first_index, second_index],
        >>>         'neighbors': [index, index, index ...max_neighbors]
        >>>     }
        >>> ]

    Raises
    ------
    ValueError :
        When profile is not an MP data structure or has no time series data.
    """
    if not core.is_mp_obj(profile):
        raise ValueError('Expecting MP data structure!')

    window_size = profile['w']
    data = profile.get('data', None)

    # fail fast with a clear message; previously a missing 'data' key left
    # ts/data_len unbound and surfaced later as a NameError
    if not data:
        raise ValueError('Expecting a data key with the time series in profile!')

    ts = data.get('ts', None)
    data_len = len(ts)

    motifs = []
    mp = np.copy(profile['mp'])
    if use_cmp:
        mp = np.copy(profile['cmp'])
    mpi = profile['pi']

    # TODO: this is based on STOMP standards when this motif finding algorithm
    # originally came out. Should we default this to 4.0 instead? That seems
    # to be the common value now per new research.
    if exclusion_zone is None:
        exclusion_zone = profile.get('ez', None)

    for i in range(k):
        min_idx = np.argmin(mp)
        min_dist = mp[min_idx]

        # we no longer have any motifs to find as all values are nan/inf
        if core.is_nan_inf(min_dist):
            break

        # create a motif pair corresponding to the first appearance and
        # second appearance
        first_idx = np.min([min_idx, mpi[min_idx]])
        second_idx = np.max([min_idx, mpi[min_idx]])

        # compute distance profile using mass2 for first appearance
        query = ts[first_idx:first_idx + window_size]
        distance_profile = mass2(ts, query)

        # exclude already picked motifs and neighbors
        mask = core.nan_inf_indices(mp)
        distance_profile[mask] = np.inf

        # apply exclusion zone for motif pair
        for j in (first_idx, second_idx):
            distance_profile = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                j,
                distance_profile
            )
            mp = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                j,
                mp
            )

        # find up to max_neighbors
        neighbors = []
        for j in range(max_neighbors):
            neighbor_idx = np.argmin(distance_profile)
            neighbor_dist = distance_profile[neighbor_idx]
            not_in_radius = not ((radius * min_dist) >= neighbor_dist)

            # no more neighbors exist based on radius
            if core.is_nan_inf(neighbor_dist) or not_in_radius:
                break

            # add neighbor and apply exclusion zone
            neighbors.append(neighbor_idx)
            distance_profile = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                neighbor_idx,
                distance_profile
            )
            mp = core.apply_exclusion_zone(
                exclusion_zone,
                False,
                window_size,
                data_len,
                neighbor_idx,
                mp
            )

        # add motifs and neighbors to results
        motifs.append({
            'motifs': [first_idx, second_idx],
            'neighbors': neighbors
        })

    profile['motifs'] = motifs

    return profile
def top_k_motifs(profile, exclusion_zone=None, k=3, max_neighbors=10, radius=3, use_cmp=False):
    """
    Find the top K number of motifs (patterns) given a matrix profile or a pan
    matrix profile. By default the algorithm will find up to 3 motifs (k) and
    up to 10 of their neighbors with a radius of 3 * min_dist using the
    regular matrix profile. If the profile is a Matrix Profile data structure,
    you can also use a Corrected Matrix Profile to compute the motifs.

    Parameters
    ----------
    profile : dict
        The output from one of the matrix profile algorithms.
    exclusion_zone : int, Default to algorithm ez
        Desired number of values to exclude on both sides of the motif. This
        avoids trivial matches. It defaults to half of the computed window
        size. Setting the exclusion zone to 0 makes it not apply.
    k : int, Default = 3
        Desired number of motifs to find.
    max_neighbors : int, Default = 10
        The maximum number of neighbors to include for a given motif.
    radius : int, Default = 3
        The radius is used to associate a neighbor by checking if the
        neighbor's distance is less than or equal to dist * radius
    use_cmp : bool, Default = False
        Use the Corrected Matrix Profile to compute the motifs (only for
        a Matrix Profile data structure).

    Returns
    -------
    dict : profile
        The original input profile with the addition of the "motifs" key. The
        motifs key consists of the following structure.

        A list of dicts containing motif indices and their corresponding
        neighbor indices.

        >>> [
        >>>     {
        >>>         'motifs': [first_index, second_index],
        >>>         'neighbors': [index, index, index ...max_neighbors]
        >>>     }
        >>> ]

        The index is a single value when a MatrixProfile is passed in otherwise
        the index contains a row and column index for Pan-MatrixProfile.
    """
    if not core.is_mp_or_pmp_obj(profile):
        raise ValueError('Expecting MP or PMP data structure!')

    # shared arguments for both dispatch targets
    common = {
        'exclusion_zone': exclusion_zone,
        'k': k,
        'max_neighbors': max_neighbors,
        'radius': radius,
    }

    cls = profile.get('class', None)

    # dispatch on the profile class; use_cmp only applies to the MP variant
    if cls == 'MatrixProfile':
        return mp_top_k_motifs(profile, use_cmp=use_cmp, **common)

    if cls == 'PMP':
        return pmp_top_k_motifs(profile, **common)

    raise ValueError('Unsupported data structure!')
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
def mass2(ts, query, extras=False, threshold=1e-10):
    """
    Compute the distance profile for the given query over the given time
    series using an FFT-based sliding dot product.

    Parameters
    ----------
    ts : array_like
        The time series to search.
    query : array_like
        The query.
    extras : boolean, default False
        Optionally return additional data used to compute the matrix profile.
    threshold : float, default 1e-10
        Moving standard deviation values below this threshold are treated as
        constant regions when correcting the distances below.

    Returns
    -------
    np.array, dict : distance_profile
        An array of distances np.array() or dict with extras.

        With extras:

        >>> {
        >>>     'distance_profile': The distance profile,
        >>>     'product': The FFT product between ts and query,
        >>>     'data_mean': The moving average of the ts over len(query),
        >>>     'query_mean': The mean of the query,
        >>>     'data_std': The moving std. of the ts over len(query),
        >>>     'query_std': The std. of the query
        >>> }

    Raises
    ------
    ValueError
        If ts is not a list or np.array.
        If query is not a list or np.array.
        If ts or query is not one dimensional.
    """
    ts, query = core.precheck_series_and_query_1d(ts, query)

    n = len(ts)
    m = len(query)
    x = ts
    y = query

    # query statistics and moving statistics of the series over windows
    # of length m
    meany = np.mean(y)
    sigmay = np.std(y)
    meanx, sigmax = core.moving_avg_std(x, m)

    # left-pad the moving statistics to length n so indices align with x;
    # only indices m-1..n-1 are read by the distance computation below
    meanx = np.append(np.ones([1, len(x) - len(meanx)]), meanx)
    sigmax = np.append(np.zeros([1, len(x) - len(sigmax)]), sigmax)

    # reverse the query and zero-pad it to length n so the element-wise FFT
    # product yields the sliding dot product of the query with every window
    y = np.append(np.flip(y), np.zeros([1, n - m]))

    X = np.fft.fft(x)
    Y = np.fft.fft(y)
    Y.resize(X.shape)
    Z = X * Y
    z = np.fft.ifft(Z)

    # do not allow divide by zero
    tmp = (sigmax[m - 1:n] * sigmay)
    tmp[tmp == 0] = 1e-12

    # squared z-normalized distances derived from the sliding dot product
    dist = 2 * (m - (z[m - 1:n] - m * meanx[m - 1:n] * meany) / tmp)

    # fix to handle constant values: a constant window is at distance sqrt(m)
    # from any query, and at distance 0 when the query is constant too
    dist[sigmax[m - 1:n] < threshold] = m
    dist[(sigmax[m - 1:n] < threshold) & (sigmay < threshold)] = 0
    dist = np.sqrt(dist)

    if extras:
        return {
            'distance_profile': dist,
            'product': z,
            'data_mean': meanx,
            'query_mean': meany,
            'data_std': sigmax,
            'query_std': sigmay
        }

    return dist
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
def statistics(ts, window_size):
    """
    Compute global and moving statistics for the provided 1D time
    series. The statistics computed include the min, max, mean, std. and median
    over the window specified and globally.

    Parameters
    ----------
    ts : array_like
        The time series.
    window_size: int
        The size of the window to compute moving statistics over.

    Returns
    -------
    dict : statistics
        The global and rolling window statistics.

        >>> {
        >>>     ts: the original time series,
        >>>     min: the global minimum,
        >>>     max: the global maximum,
        >>>     mean: the global mean,
        >>>     std: the global standard deviation,
        >>>     median: the global median,
        >>>     moving_min: the moving minimum,
        >>>     moving_max: the moving maximum,
        >>>     moving_mean: the moving mean,
        >>>     moving_std: the moving standard deviation,
        >>>     moving_median: the moving median,
        >>>     window_size: the window size provided,
        >>>     class: Statistics
        >>> }

    Raises
    ------
    ValueError
        If window_size is not an int.
        If window_size > len(ts)
        If ts is not a list or np.array.
        If ts is not 1D.
    """
    # validate inputs; check order matters for which error is raised first
    if not core.is_array_like(ts):
        raise ValueError('ts must be array like')

    if not core.is_one_dimensional(ts):
        raise ValueError('The time series must be 1D')

    if not isinstance(window_size, int):
        raise ValueError('Expecting int for window_size')

    if window_size > len(ts):
        raise ValueError('Window size cannot be greater than len(ts)')

    if window_size < 3:
        raise ValueError('Window size cannot be less than 3')

    # moving statistics come from the shared helpers so they match the rest
    # of the library
    moving_mean, moving_std = core.moving_avg_std(ts, window_size)
    windowed = core.rolling_window(ts, window_size)

    stats = {
        'ts': ts,
        'min': np.min(ts),
        'max': np.max(ts),
        'mean': np.mean(ts),
        'std': np.std(ts),
        'median': np.median(ts),
        'moving_min': np.min(windowed, axis=1),
        'moving_max': np.max(windowed, axis=1),
        'moving_mean': moving_mean,
        'moving_std': moving_std,
        'moving_median': np.median(windowed, axis=1),
        'window_size': window_size,
        'class': 'Statistics'
    }

    return stats
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
__all__ = [
'to_json',
'from_json',
'to_disk',
'from_disk',
]
import json.tool
import numpy as np
from matrixprofile import core
from matrixprofile.io.protobuf.protobuf_utils import (
to_mpf,
from_mpf
)
# Supported file extensions (set literal instead of the redundant
# set([...]) wrapper)
SUPPORTED_EXTS = {
    'json',
    'mpf',
}

# Supported file formats
SUPPORTED_FORMATS = {
    'json',
    'mpf',
}
def JSONSerializer(obj):
    """
    Default JSON serializer to write numpy arays and other non-supported
    data types.

    Arrays become plain lists; numpy scalars become native Python values.
    Anything that is not a numpy object raises TypeError, matching the
    json module's expectation for a ``default`` hook.

    Borrowed from:
    https://stackoverflow.com/a/52604722
    """
    if type(obj).__module__ == np.__name__:
        return obj.tolist() if isinstance(obj, np.ndarray) else obj.item()

    raise TypeError('Unknown type:', type(obj))
def from_json(profile):
    """
    Converts JSON formatted data into a profile data structure.

    Parameters
    ----------
    profile : str, bytes or file-like
        The profile as a JSON formatted string or an open file handle
        containing JSON.

    Returns
    -------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.

    Raises
    ------
    ValueError :
        When the JSON content is not a profile data structure.
    """
    # the docstring always promised string support, but json.load only
    # accepts file-like objects; accept both inputs
    if isinstance(profile, (str, bytes, bytearray)):
        dct = json.loads(profile)
    else:
        dct = json.load(profile)

    # handle pmp and convert to appropriate types
    if core.is_pmp_obj(dct):
        dct['pmp'] = np.array(dct['pmp'], dtype='float64')
        dct['pmpi'] = np.array(dct['pmpi'], dtype=int)
        dct['data']['ts'] = np.array(dct['data']['ts'], dtype='float64')
        dct['windows'] = np.array(dct['windows'], dtype=int)
    # handle mp
    elif core.is_mp_obj(dct):
        dct['mp'] = np.array(dct['mp'], dtype='float64')
        dct['pi'] = np.array(dct['pi'], dtype=int)

        # left profiles are optional
        has_l = isinstance(dct['lmp'], list)
        has_l = has_l and isinstance(dct['lpi'], list)

        if has_l:
            dct['lmp'] = np.array(dct['lmp'], dtype='float64')
            dct['lpi'] = np.array(dct['lpi'], dtype=int)

        # right profiles are optional
        has_r = isinstance(dct['rmp'], list)
        has_r = has_r and isinstance(dct['rpi'], list)

        if has_r:
            dct['rmp'] = np.array(dct['rmp'], dtype='float64')
            dct['rpi'] = np.array(dct['rpi'], dtype=int)

        dct['data']['ts'] = np.array(dct['data']['ts'], dtype='float64')

        # query is only present for join profiles
        if isinstance(dct['data']['query'], list):
            dct['data']['query'] = np.array(dct['data']['query'], dtype='float64')
    else:
        raise ValueError('File is not of type profile!')

    return dct
def to_json(profile):
    """
    Converts a given profile object into a JSON formatted string.

    Parameters
    ----------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.

    Returns
    -------
    str :
        The profile as a JSON formatted string.
    """
    # numpy arrays are not JSON serializable by default, hence the hook
    if core.is_mp_or_pmp_obj(profile):
        return json.dumps(profile, default=JSONSerializer)

    raise ValueError('profile is expected to be of type MatrixProfile or PMP')
def add_extension_to_path(file_path, extension):
    """
    Utility function to add the file extension when it is not provided by the
    user in the file path.

    Parameters
    ----------
    file_path : str
        The file path.
    extension : str
        The file extension to ensure, without the leading dot.

    Returns
    -------
    str :
        The file path ending with the extension. (The original docstring
        also claimed the extension was returned; only the path is.)
    """
    # only appends when the exact suffix is missing; a different existing
    # extension is kept and the new one is appended after it
    suffix = '.{}'.format(extension)
    if not file_path.endswith(suffix):
        file_path = '{}{}'.format(file_path, suffix)

    return file_path
def infer_file_format(file_path):
    """
    Attempts to determine the file type based on the extension. The extension
    is assumed to be the last dot suffix.

    Parameters
    ----------
    file_path : str
        The file path to infer the file format of.

    Returns
    -------
    str :
        A label described the file extension.

    Raises
    ------
    RuntimeError :
        When the extension is not one of the supported types.
    """
    # the extension is whatever follows the final dot, compared
    # case-insensitively
    extension = file_path.split('.')[-1].lower()

    if extension not in SUPPORTED_EXTS:
        raise RuntimeError('Unsupported file type with extension {}'.format(extension))

    return extension
def to_disk(profile, file_path, format='json'):
    """
    Writes a profile object of type MatrixProfile or PMP to disk as a JSON
    formatted file by default.

    Note
    ----
    The JSON format is human readable where as the mpf format is binary and
    cannot be read when opened in a text editor. When the file path does not
    include the extension, it is appended for you.

    Parameters
    ----------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.
    file_path : str
        The path to write the file to.
    format : str, default json
        The format of the file to be written. Options include json, mpf
    """
    if not core.is_mp_or_pmp_obj(profile):
        raise ValueError('profile is expected to be of type MatrixProfile or PMP')

    if format not in SUPPORTED_FORMATS:
        raise ValueError('Unsupported file format {} given.'.format(format))

    file_path = add_extension_to_path(file_path, format)

    # serialize first, then write: json is text, mpf is binary
    if format == 'json':
        payload = to_json(profile)
        mode = 'w'
    elif format == 'mpf':
        payload = to_mpf(profile)
        mode = 'wb'

    with open(file_path, mode) as out:
        out.write(payload)
def from_disk(file_path, format='infer'):
    """
    Reads a profile object of type MatrixProfile or PMP from disk into the
    respective object type. By default the type is inferred by the file
    extension.

    Parameters
    ----------
    file_path : str
        The path to read the file from.
    format : str, default infer
        The file format type to read from disk. Options include:
        infer, json, mpf

    Returns
    -------
    profile : dict_like, None
        A MatrixProfile or Pan-MatrixProfile data structure.
    """
    # resolve the effective format up front
    if format == 'infer':
        format = infer_file_format(file_path)
    elif format not in SUPPORTED_FORMATS:
        raise ValueError('format supplied {} is not supported'.format(format))

    profile = None

    if format == 'json':
        with open(file_path) as f:
            profile = from_json(f)
    elif format == 'mpf':
        with open(file_path, 'rb') as f:
            profile = from_mpf(f.read())

    return profile
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.io.protobuf.proto_messages_pb2 import (
Location, Motif, MPFOutput
)
def get_matrix_attributes(matrix):
    """
    Utility function to extract the rows, cols and flattened array from a
    numpy array so it can be stored in the MPFOutput protobuf message.

    Parameters
    ----------
    matrix : np.ndarray
        The numpy array to extract the attributes from.

    Returns
    -------
    tuple :
        A tuple containing the rows, cols and flattened array, or
        (None, None, None) when the input is not a non-empty array.
    """
    if not core.is_array_like(matrix) or len(matrix) < 1:
        return None, None, None

    shape = matrix.shape
    rows = shape[0]
    # 1D arrays report 0 columns
    cols = shape[1] if len(shape) > 1 else 0

    return rows, cols, matrix.flatten()
def get_windows(profile):
    """
    Utility function to format the windows from a profile structure ensuring
    that the windows are in an array.

    Parameters
    ----------
    profile : dict
        The MatrixProfile or PMP profile.

    Returns
    -------
    list :
        The window(s) in a list; empty for unrecognized profiles.
    """
    if core.is_mp_obj(profile):
        # a single MP has exactly one window
        return [profile.get('w')]

    if core.is_pmp_obj(profile):
        return profile.get('windows')

    return []
def get_proto_motif(motif):
    """
    Utility function to convert a motif from a MatrixProfile or PMP structure
    ensuring that it is compatible with the MPFOutput message.

    Note
    ----
    A single dimensional motif location will only have a row index and
    a column index of 0.

    Parameters
    ----------
    motif : dict
        The motif to convert.

    Returns
    -------
    Motif :
        The motif object for MPFOutput message.
    """
    def to_location(index):
        """Build a Location from either a (row, col) pair or a single int.

        Previously this conversion was duplicated verbatim in both loops
        below, with the "single integer" comment on the wrong branch.
        """
        loc = Location()
        loc.row = 0
        loc.col = 0

        if core.is_array_like(index):
            # (row, col) index into a pan matrix profile
            loc.row = index[0]
            loc.col = index[1]
        else:
            # single integer location from a regular matrix profile
            loc.row = index

        return loc

    out_motif = Motif()

    for indices in motif['motifs']:
        out_motif.motifs.append(to_location(indices))

    for neighbor in motif['neighbors']:
        out_motif.neighbors.append(to_location(neighbor))

    return out_motif
def get_proto_discord(discord):
    """
    Utility function to convert a discord into the MPFOutput message
    format.

    Note
    ----
    A single dimensional discord location will only have a row index and
    a column index of 0.

    Parameters
    ----------
    discord : int or tuple
        The discord with row, col index or single index.

    Returns
    -------
    Location :
        The Location message used in the MPFOutput protobuf message.
    """
    location = Location()
    location.row = 0
    location.col = 0

    if core.is_array_like(discord):
        # (row, col) index into a pan matrix profile
        location.row = discord[0]
        location.col = discord[1]
    else:
        # single integer index from a regular matrix profile
        location.row = discord

    return location
def profile_to_proto(profile):
    """
    Utility function that takes a MatrixProfile or PMP profile data structure
    and converts it to the MPFOutput protobuf message object.

    Parameters
    ----------
    profile : dict
        The profile to convert.

    Returns
    -------
    MPFOutput :
        The MPFOutput protobuf message object.

    Raises
    ------
    ValueError :
        When the profile is neither a MatrixProfile nor a PMP structure.
    """
    output = MPFOutput()

    def copy_matrix(target, value):
        # Flatten ``value`` into the protobuf matrix message ``target``.
        rows, cols, data = get_matrix_attributes(value)
        target.rows = rows
        target.cols = cols
        target.data.extend(data)

    def copy_matrix_optional(target, value):
        # Same as copy_matrix, but only writes when the matrix actually
        # holds data (used for fields that may be absent, e.g. query/lmp).
        rows, cols, data = get_matrix_attributes(value)
        if rows and cols and core.is_array_like(data):
            target.rows = rows
            target.cols = cols
            target.data.extend(data)

    # add higher level attributes that work for PMP and MP
    output.klass = profile.get('class')
    output.algorithm = profile.get('algorithm')
    output.metric = profile.get('metric')
    output.sample_pct = profile.get('sample_pct')

    # add time series data (always present)
    copy_matrix(output.ts, profile.get('data').get('ts'))

    # add query data (only present for join profiles)
    copy_matrix_optional(output.query, profile.get('data').get('query'))

    # add window(s)
    output.windows.extend(get_windows(profile))

    # add motifs
    motifs = profile.get('motifs')
    if motifs is not None:
        for motif in motifs:
            output.motifs.append(get_proto_motif(motif))

    # add discords
    discords = profile.get('discords')
    if discords is not None:
        for discord in discords:
            output.discords.append(get_proto_discord(discord))

    # add cmp
    cmp_matrix = profile.get('cmp')
    if cmp_matrix is not None:
        copy_matrix(output.cmp, cmp_matrix)

    # add av
    av = profile.get('av')
    if av is not None:
        copy_matrix(output.av, av)

    # add av_type
    av_type = profile.get('av_type')
    if av_type is not None and len(av_type) > 0:
        output.av_type = av_type

    # add the matrix profile specific attributes
    if core.is_mp_obj(profile):
        output.mp.ez = profile.get('ez')
        output.mp.join = profile.get('join')

        copy_matrix(output.mp.mp, profile.get('mp'))
        copy_matrix(output.mp.pi, profile.get('pi'))

        # left/right profiles are optional depending on the algorithm
        copy_matrix_optional(output.mp.lmp, profile.get('lmp'))
        copy_matrix_optional(output.mp.lpi, profile.get('lpi'))
        copy_matrix_optional(output.mp.rmp, profile.get('rmp'))
        copy_matrix_optional(output.mp.rpi, profile.get('rpi'))

    # add the pan matrix profile specific attributes
    elif core.is_pmp_obj(profile):
        copy_matrix(output.pmp.pmp, profile.get('pmp'))
        copy_matrix(output.pmp.pmpi, profile.get('pmpi'))

    else:
        raise ValueError('Expecting Pan-MatrixProfile or MatrixProfile!')

    return output
def to_mpf(profile):
    """
    Converts a given profile object into MPF binary file format.

    Parameters
    ----------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.

    Returns
    -------
    str :
        The profile as a binary formatted string.
    """
    return profile_to_proto(profile).SerializeToString()
def from_proto_to_array(value):
    """
    Utility function to convert a protobuf array back into the correct
    dimensions.

    Parameters
    ----------
    value : array_like
        The protobuf matrix message (with rows, cols and data attributes)
        to transform.

    Returns
    -------
    np.ndarray :
        The transformed array, or None when there is no data.
    """
    if value is None or len(value.data) < 1:
        return None

    out = np.array(value.data)

    # cols == 0 marks a flat (1D) array; anything else is reshaped back
    # into its original 2D form
    if value.cols > 0:
        out = out.reshape((value.rows, value.cols))

    return out
def discords_from_proto(discords, is_one_dimensional=False):
    """
    Utility function to transform discord locations back to single dimension
    or multi-dimension location.

    Parameters
    ----------
    discords : array_like
        The protobuf formatted array.
    is_one_dimensional : boolean
        A flag to indicate if the original locations should be 1D.

    Returns
    -------
    np.ndarray :
        The transformed discord locations.
    """
    # 1D profiles only ever used the row index; 2D keeps (row, col) pairs
    if is_one_dimensional:
        locations = [discord.row for discord in discords]
    else:
        locations = [(discord.row, discord.col) for discord in discords]

    return np.array(locations, dtype=int)
def motifs_from_proto(motifs, is_one_dimensional=False):
    """
    Utility function to transform motif locations back to single dimension
    or multi-dimension location.

    Parameters
    ----------
    motifs : array_like
        The protobuf formatted array.
    is_one_dimensional : boolean
        A flag to indicate if the original locations should be 1D.

    Returns
    -------
    list :
        The transformed motif locations.
    """
    def unpack(location):
        # 1D profiles only ever used the row index; 2D keeps (row, col)
        if is_one_dimensional:
            return location.row

        return (location.row, location.col)

    return [
        {
            'motifs': [unpack(location) for location in motif.motifs],
            'neighbors': [unpack(neighbor) for neighbor in motif.neighbors],
        }
        for motif in motifs
    ]
def from_mpf(profile):
    """
    Converts binary formatted MPFOutput message into a profile data structure.

    Parameters
    ----------
    profile : str
        The profile as a binary formatted MPFOutput message.

    Returns
    -------
    profile : dict_like
        A MatrixProfile or Pan-MatrixProfile data structure.
    """
    obj = MPFOutput()
    obj.ParseFromString(profile)

    out = {}
    is_one_dimensional = False

    # load in all higher level attributes
    out['class'] = obj.klass
    out['algorithm'] = obj.algorithm
    out['metric'] = obj.metric
    out['sample_pct'] = obj.sample_pct
    out['data'] = {
        'ts': from_proto_to_array(obj.ts),
        'query': from_proto_to_array(obj.query)
    }

    if obj.klass == 'MatrixProfile':
        out['mp'] = from_proto_to_array(obj.mp.mp)
        out['pi'] = from_proto_to_array(obj.mp.pi)
        out['lmp'] = from_proto_to_array(obj.mp.lmp)
        out['lpi'] = from_proto_to_array(obj.mp.lpi)
        out['rmp'] = from_proto_to_array(obj.mp.rmp)
        out['rpi'] = from_proto_to_array(obj.mp.rpi)
        out['ez'] = obj.mp.ez
        out['join'] = obj.mp.join
        out['w'] = obj.windows[0]

        # the dimensionality drives how motif/discord locations are decoded
        is_one_dimensional = len(out['mp'].shape) == 1
    elif obj.klass == 'PMP':
        out['pmp'] = from_proto_to_array(obj.pmp.pmp)
        out['pmpi'] = from_proto_to_array(obj.pmp.pmpi)
        out['windows'] = np.array(obj.windows)

    # optional attributes are only restored when they were serialized
    if obj.discords is not None and len(obj.discords) > 0:
        out['discords'] = discords_from_proto(
            obj.discords, is_one_dimensional=is_one_dimensional)

    if obj.motifs is not None and len(obj.motifs) > 0:
        out['motifs'] = motifs_from_proto(
            obj.motifs, is_one_dimensional=is_one_dimensional)

    if obj.cmp is not None and len(obj.cmp.data) > 0:
        out['cmp'] = from_proto_to_array(obj.cmp)

    if obj.av is not None and len(obj.av.data) > 0:
        out['av'] = from_proto_to_array(obj.av)

    if obj.av_type is not None and len(obj.av_type) > 0:
        out['av_type'] = obj.av_type

    return out
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import csv
import gzip
import json
import os
# load urlretrieve for python2 and python3
try:
from urllib.request import urlretrieve
except:
from urllib import urlretrieve
import numpy as np
DATA_LISTING_URL = 'https://raw.githubusercontent.com/matrix-profile-foundation/mpf-datasets/master/listings.json'
DATA_URL = 'https://raw.githubusercontent.com/matrix-profile-foundation/mpf-datasets/master/{}/{}'
DATA_DIR = os.path.expanduser(os.path.join('~', '.mpf-datasets'))
def create_dirs(path):
    """
    Python 2 and 3 compatible function to make directories. Python 3 has the
    exist_ok option in makedirs, but Python 2 does not.

    Parameters
    ----------
    path : str
        The path to create directories for.

    Raises
    ------
    OSError :
        When the path does not exist after attempting to create it.
    """
    try:
        os.makedirs(path)
    except OSError:
        # the path may already exist or creation may have raced with
        # another process; the existence check below is authoritative
        pass

    if not os.path.exists(path):
        raise OSError('Unable to create path: {}'.format(path))
def fetch_available(category=None):
    """
    Fetches the available datasets found in
    github.com/matrix-profile-foundation/mpf-datasets github repository.
    Providing a category filters the datasets.

    Parameters
    ----------
    category : str, Optional
        The desired category to retrieve datasets by.

    Returns
    -------
    list :
        A list of dictionaries containing details about each dataset.

    Raises
    ------
    ValueError:
        When a category is provided, but is not found in the listing.
    """
    # download the listing file and load it
    create_dirs(DATA_DIR)
    output_path = os.path.join(DATA_DIR, 'listings.json')
    urlretrieve(DATA_LISTING_URL, output_path)

    with open(output_path) as f:
        datasets = json.load(f)

    # filter with category
    if category:
        wanted = category.lower()
        filtered = [d for d in datasets if d['category'] == wanted]

        # an empty result means the category does not exist in the listing
        if not filtered:
            raise ValueError('category {} is not a valid option.'.format(category))

        datasets = filtered

    return datasets
def get_csv_indices(fp, is_gzip=False):
    """
    Utility function to provide indices of the datetime dimension and the
    real valued dimensions.

    Parameters
    ----------
    fp : str
        The filepath to load.
    is_gzip : boolean, Default False
        Flag to tell if the csv is gzipped.

    Returns
    -------
    (dt_index, real_indices) :
        The datetime index and real valued indices.
    """
    # only the header line is needed to classify the columns
    if is_gzip:
        with gzip.open(fp, 'rt') as f:
            header = f.readline()
    else:
        with open(fp) as f:
            header = f.readline()

    dt_index = None
    real_indices = []

    # a column mentioning "date" or "time" is treated as the datetime
    # dimension; every other column holds real valued data
    for position, label in enumerate(header.split(',')):
        lowered = label.lower()
        if 'date' in lowered or 'time' in lowered:
            dt_index = position
        else:
            real_indices.append(position)

    return dt_index, real_indices
def load(name):
    """
    Loads a MPF dataset by base file name or file name. The match is case
    insensitive.

    Note
    ----
    An internet connection is required to fetch the data.

    Parameters
    ----------
    name : str
        The dataset file name or base file name to load.

    Returns
    -------
    dict :
        The dataset and metadata.

        >>> {
        >>>     'name': The file name loaded,
        >>>     'category': The category the file came from,
        >>>     'description': A description,
        >>>     'data': The real valued data as an np.ndarray,
        >>>     'datetime': The datetime as an np.ndarray
        >>> }

    Raises
    ------
    ValueError :
        When no dataset matches the given name.
    """
    datasets = fetch_available()

    # find the filename in datasets matching either on filename provided or
    # the base name; lower BOTH sides so the match is truly case insensitive
    filename = None
    category = None
    description = None
    lowered = name.lower()
    for dataset in datasets:
        base_name = dataset['name'].split('.')[0].lower()
        if lowered == base_name or lowered == dataset['name'].lower():
            filename = dataset['name']
            category = dataset['category']
            description = dataset['description']

    if not filename:
        raise ValueError('Could not find dataset {}'.format(name))

    # download the file unless it is already cached locally
    output_dir = os.path.join(DATA_DIR, category)
    create_dirs(output_dir)

    output_path = os.path.join(output_dir, filename)
    if not os.path.exists(output_path):
        url = DATA_URL.format(category, filename)
        urlretrieve(url, output_path)

    # load the file based on type
    is_txt = filename.endswith('.txt')
    is_txt_gunzip = filename.endswith('.txt.gz')
    is_csv = filename.endswith('.csv')
    is_csv_gunzip = filename.endswith('.csv.gz')

    data = None
    dt_data = None

    if is_txt or is_txt_gunzip:
        data = np.loadtxt(output_path)
    elif is_csv or is_csv_gunzip:
        dt_index, real_indices = get_csv_indices(
            output_path, is_gzip=is_csv_gunzip)

        # only parse a datetime column when the header actually has one
        if isinstance(dt_index, int):
            dt_data = np.genfromtxt(
                output_path,
                dtype='datetime64',
                delimiter=',',
                skip_header=True,
                usecols=[dt_index,]
            )

        data = np.genfromtxt(
            output_path,
            delimiter=',',
            dtype='float64',
            skip_header=True,
            usecols=real_indices
        )

    return {
        'name': filename,
        'category': category,
        'description': description,
        'data': data,
        'datetime': dt_data
    }
<div align="center">
<img width="70%" src="./docs/images/tsfresh_logo.svg">
</div>
-----------------
# tsfresh
[](https://tsfresh.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/blue-yonder/tsfresh/actions)
[](https://codecov.io/gh/blue-yonder/tsfresh)
[](https://github.com/blue-yonder/tsfresh/blob/main/LICENSE.txt)
[](https://github.com/blue-yonder/tsfresh/issues/8)
[](https://mybinder.org/v2/gh/blue-yonder/tsfresh/main?filepath=notebooks)
[](https://pepy.tech/project/tsfresh)
This repository contains the *TSFRESH* python package. The abbreviation stands for
*"Time Series Feature extraction based on scalable hypothesis tests"*.
The package provides systematic time-series feature extraction by combining established algorithms from statistics, time-series analysis, signal processing, and nonlinear dynamics with a robust feature selection algorithm. In this context, the term *time-series* is interpreted in the broadest possible sense, such that any types of sampled data or even event sequences can be characterised.
## Spend less time on feature engineering
Data Scientists often spend most of their time either cleaning data or building features.
While we cannot change the first thing, the second can be automated.
*TSFRESH* frees your time spent on building features by extracting them automatically.
Hence, you have more time to study the newest deep learning paper, read hacker news or build better models.
## Automatic extraction of 100s of features
*TSFRESH* automatically extracts 100s of features from time series.
Those features describe basic characteristics of the time series such as the number of peaks, the average or maximal value or more complex features such as the time reversal symmetry statistic.

The set of features can then be used to construct statistical or machine learning models on the time series to be used for example in regression or
classification tasks.
## Forget irrelevant features
Time series often contain noise, redundancies or irrelevant information.
As a result most of the extracted features will not be useful for the machine learning task at hand.
To avoid extracting irrelevant features, the *TSFRESH* package has a built-in filtering procedure.
This filtering procedure evaluates the explaining power and importance of each characteristic for the regression or classification tasks at hand.
It is based on the well developed theory of hypothesis testing and uses a multiple test procedure.
As a result the filtering process mathematically controls the percentage of irrelevant extracted features.
The *TSFRESH* package is described in the following open access paper:
* Christ, M., Braun, N., Neuffer, J., and Kempa-Liehr A.W. (2018).
_Time Series FeatuRe Extraction on basis of Scalable Hypothesis tests (tsfresh -- A Python package)._
Neurocomputing 307 (2018) 72-77, [doi: 10.1016/j.neucom.2018.03.067](https://doi.org/10.1016/j.neucom.2018.03.067).
The FRESH algorithm is described in the following whitepaper:
* Christ, M., Kempa-Liehr, A.W., and Feindt, M. (2017).
_Distributed and parallel time series feature extraction for industrial big data applications._
ArXiv e-print 1610.07717, [https://arxiv.org/abs/1610.07717](https://arxiv.org/abs/1610.07717).
Due to the fact that tsfresh basically provides time-series feature extraction for free, you can now concentrate on engineering new time-series,
like e.g. differences of signals from synchronous measurements, which provide even better time-series features:
* Kempa-Liehr, A.W., Oram, J., Wong, A., Finch, M., Besier, T. (2020).
_Feature engineering workflow for activity recognition from synchronized inertial measurement units._
In: Pattern Recognition. ACPR 2019. Ed. by M. Cree et al. Vol. 1180. Communications in Computer and Information Science (CCIS).
Singapore: Springer 2020, 223–231. [doi: 10.1007/978-981-15-3651-9_20](https://doi.org/10.1007/978-981-15-3651-9_20).
Systematic time-series features engineering allows to work with time-series samples of different lengths, because every time-series is projected
into a well-defined feature space. This approach allows the design of robust machine learning algorithms in applications with missing data.
* Kennedy, A., Gemma, N., Rattenbury, N., Kempa-Liehr, A.W. (2021).
_Modelling the projected separation of microlensing events using systematic time-series feature engineering._
Astronomy and Computing 35.100460 (2021), 1–14, [doi: 10.1016/j.ascom.2021.100460](https://doi.org/10.1016/j.ascom.2021.100460)
Natural language processing of written texts is an example of applying systematic time-series feature engineering to event sequences,
which is described in the following open access paper:
* Tang, Y., Blincoe, K., Kempa-Liehr, A.W. (2020).
_Enriching Feature Engineering for Short Text Samples by Language Time Series Analysis._
EPJ Data Science 9.26 (2020), 1–59. [doi: 10.1140/epjds/s13688-020-00244-9](https://doi.org/10.1140/epjds/s13688-020-00244-9)
## Advantages of tsfresh
*TSFRESH* has several selling points, for example
1. it is field tested
2. it is unit tested
3. the filtering process is statistically/mathematically correct
4. it has a comprehensive documentation
5. it is compatible with sklearn, pandas and numpy
6. it allows anyone to easily add their favorite features
7. it both runs on your local machine or even on a cluster
## Next steps
If you are interested in the technical workings, go to see our comprehensive Read-The-Docs documentation at [http://tsfresh.readthedocs.io](http://tsfresh.readthedocs.io).
The algorithm, especially the filtering part are also described in the paper mentioned above.
If you have some questions or feedback you can find the developers in the [gitter chatroom.](https://gitter.im/tsfresh/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link)
We appreciate any contributions, if you are interested in helping us to make *TSFRESH* the biggest archive of feature extraction methods in python, just head over to our [How-To-Contribute](http://tsfresh.readthedocs.io/en/latest/text/how_to_contribute.html) instructions.
If you want to try out `tsfresh` quickly or if you want to integrate it into your workflow, we also have a docker image available:
docker pull nbraun/tsfresh
## Acknowledgements
The research and development of *TSFRESH* was funded in part by the German Federal Ministry of Education and Research under grant number 01IS14004 (project iPRODICT).
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/README.md | 0.77886 | 0.934753 | README.md | pypi |
.. _forecasting-label:
Rolling/Time series forecasting
===============================
Features extracted with *tsfresh* can be used for many different tasks, such as time series classification,
compression or forecasting.
This section explains how we can use the features for time series forecasting.
Let's say you have the price of a certain stock, e.g., Apple, for 100 time steps.
Now, you want to build a feature-based model to forecast future prices of the Apple stock.
You could remove the last price value (of today) and extract features from the time series until today to predict the price of today.
But this would only give you a single example to train.
Instead, you can repeat this process: for every day in your stock price time series, remove the current value, extract features for the time until this value and train to predict the value of the day (which you removed).
You can think of it as shifting a cut-out window over your sorted time series data: on each shift step you extract the data you see through your cut-out window to build a new, smaller time series and extract features only on this one.
Then you continue shifting.
In ``tsfresh``, the process of shifting a cut-out window over your data to create smaller time series cut-outs is called *rolling*.
Rolling is a way to turn a single time series into multiple time series, each of them ending one (or n) time step later than the one before.
The rolling utilities implemented in `tsfresh` help you in this process of reshaping (and rolling) your data into a format on which you can apply the usual :func:`tsfresh.extract_features` method.
This means that the step of extracting the time series windows and the feature extraction are separated.
Please note that "time" does not necessarily mean clock time here.
The "sort" column of a DataFrame in the supported :ref:`data-formats-label` gives a sequential state to the
individual measurements.
In the case of time series this can be the *time* dimension, while in other cases it can be a location, a frequency, etc.
The following image illustrates the process:
.. image:: ../images/rolling_mechanism_1.png
:scale: 100 %
:alt: The rolling mechanism
:align: center
Another example can be found in streaming data, e.g., in Industry 4.0 applications.
Here, you typically get one new data row at a time and use it to, for example, predict machine failures. To train your model,
you could act as if you would stream the data, by feeding your classifier the data after one time step,
the data after the first two time steps, etc.
In tsfresh, rolling is implemented via the helper function :func:`tsfresh.utilities.dataframe_functions.roll_time_series`.
Further, we provide the :func:`tsfresh.utilities.dataframe_functions.make_forecasting_frame` method as a convenient
wrapper to quickly construct the container and target vector for a given sequence.
Let's walk through an example to see how it works:
The rolling mechanism
---------------------
We look into the following flat DataFrame example, which is a tsfresh suitable format (see :ref:`data-formats-label`).
Note, that rolling also works for all other time series formats.
+----+------+----+----+
| id | time | x | y |
+====+======+====+====+
| 1 | 1 | 1 | 5 |
+----+------+----+----+
| 1 | 2 | 2 | 6 |
+----+------+----+----+
| 1 | 3 | 3 | 7 |
+----+------+----+----+
| 1 | 4 | 4 | 8 |
+----+------+----+----+
| 2 | 8 | 10 | 12 |
+----+------+----+----+
| 2 | 9 | 11 | 13 |
+----+------+----+----+
In the above flat DataFrame, we measured the values from two sensors x and y for two different entities (id 1 and 2) in 4 or 2 time
steps (1, 2, 3, 4, 8, 9).
If you want to follow along, here is the python code to generate this data:
.. code:: python
import pandas as pd
df = pd.DataFrame({
"id": [1, 1, 1, 1, 2, 2],
"time": [1, 2, 3, 4, 8, 9],
"x": [1, 2, 3, 4, 10, 11],
"y": [5, 6, 7, 8, 12, 13],
})
Now, we can use :func:`tsfresh.utilities.dataframe_functions.roll_time_series` to get consecutive sub-time series.
You could think of having a window sliding over your time series data and extracting out every data you can see through this window.
There are three parameters to tune for the window:
* `max_timeshift` defines, how large the window is at maximum. The extracted time series will have at maximum length of `max_timeshift + 1`.
(they can also be smaller, as time stamps in the beginning have less past values).
* `min_timeshift` defines the minimal size of each window. Shorter time series (usually at the beginning) will be omitted.
* Advanced: `rolling_direction`: if you want to slide in positive (increasing sort) or negative (decreasing sort) direction. You rarely need the negative direction, so you probably do not want to change the default. The absolute value of this parameter decides how much you want to shift per cut-out step.
The column parameters are the same as in the usual :ref:`data-formats-label`.
Let's see what will happen with our data sample:
.. code:: python
from tsfresh.utilities.dataframe_functions import roll_time_series
df_rolled = roll_time_series(df, column_id="id", column_sort="time")
The new data set consists only of values from the old data set, but with new ids.
Also the sort column values (in this case ``time``) is copied.
If you group by ``id``, you will end up with the following parts (or "windows"):
+-------+-------+---+----+
| id | time | x | y |
+=======+=======+===+====+
| (1,1) | 1 | 1 | 5 |
+-------+-------+---+----+
+-------+-------+---+----+
| id | time | x | y |
+=======+=======+===+====+
| (1,2) | 1 | 1 | 5 |
+-------+-------+---+----+
| (1,2) | 2 | 2 | 6 |
+-------+-------+---+----+
+-------+-------+---+----+
| id | time | x | y |
+=======+=======+===+====+
| (1,3) | 1 | 1 | 5 |
+-------+-------+---+----+
| (1,3) | 2 | 2 | 6 |
+-------+-------+---+----+
| (1,3) | 3 | 3 | 7 |
+-------+-------+---+----+
+-------+-------+---+----+
| id | time | x | y |
+=======+=======+===+====+
| (1,4) | 1 | 1 | 5 |
+-------+-------+---+----+
| (1,4) | 2 | 2 | 6 |
+-------+-------+---+----+
| (1,4) | 3 | 3 | 7 |
+-------+-------+---+----+
| (1,4) | 4 | 4 | 8 |
+-------+-------+---+----+
+-------+-------+---+----+
| id | time | x | y |
+=======+=======+===+====+
| (2,8) | 8 |10 | 12 |
+-------+-------+---+----+
+-------+-------+---+----+
| id | time | x | y |
+=======+=======+===+====+
| (2,9) | 8 |10 | 12 |
+-------+-------+---+----+
| (2,9) | 9 |11 | 13 |
+-------+-------+---+----+
Now, you can run the usual feature extraction procedure on the rolled data:
.. code:: python
from tsfresh import extract_features
df_features = extract_features(df_rolled, column_id="id", column_sort="time")
You will end up with features generated for each one of the parts above, which you can then use for training your forecasting model.
+----------+----------------+-----------------------------+-----+
| variable | x__abs_energy | x__absolute_sum_of_changes | ... |
+==========+================+=============================+=====+
| id | | | ... |
+----------+----------------+-----------------------------+-----+
| (1,1) | 1.0 | 0.0 | ... |
+----------+----------------+-----------------------------+-----+
| (1,2) | 5.0 | 1.0 | ... |
+----------+----------------+-----------------------------+-----+
| (1,3) | 14.0 | 2.0 | ... |
+----------+----------------+-----------------------------+-----+
| (1,4) | 30.0 | 3.0 | ... |
+----------+----------------+-----------------------------+-----+
| (2,8) | 100.0 | 0.0 | ... |
+----------+----------------+-----------------------------+-----+
| (2,9) | 221.0 | 1.0 | ... |
+----------+----------------+-----------------------------+-----+
The features for example for id ``(1,3)`` are extracted using the data of ``id=1`` up to and including ``t=3`` (so ``t=1``, ``t=2`` and ``t=3``).
If you want to train a model for a forecasting, `tsfresh` also offers the function :func:`tsfresh.utilities.dataframe_functions.make_forecasting_frame`, which will help you match the target vector properly.
This process is visualized in the following figure.
It shows how the purple, rolled sub-timeseries are used as base for the construction of the feature matrix *X*
(if *f* is the `extract_features` function).
The green data points need to be predicted by the model and are used as rows in the target vector *y*.
Be aware that this only works for a one-dimensional time series of a single `id` and `kind`.
.. image:: ../images/rolling_mechanism_2.png
:scale: 100 %
:alt: The rolling mechanism
:align: center
Parameters and Implementation Notes
-----------------------------------
The above example demonstrates the overall rolling mechanism, which creates new time series.
Now, we discuss the naming convention for the new time series.
For identifying every subsequence, `tsfresh` uses the time stamp of the point that will be predicted together with the old identifier as "id".
For positive rolling, this `timeshift` is the last time stamp in the subsequence.
For negative rolling, it is the first one, for example the above dataframe rolled in negative direction gives us:
+-------+------+----+----+
| id | time | x | y |
+=======+======+====+====+
| (1,1) | 1 | 1 | 5 |
+-------+------+----+----+
| (1,1) | 2 | 2 | 6 |
+-------+------+----+----+
| (1,1) | 3 | 3 | 7 |
+-------+------+----+----+
| (1,1) | 4 | 4 | 8 |
+-------+------+----+----+
| (1,2) | 2 | 2 | 6 |
+-------+------+----+----+
| (1,2) | 3 | 3 | 7 |
+-------+------+----+----+
| (1,2) | 4 | 4 | 8 |
+-------+------+----+----+
| (1,3) | 3 | 3 | 7 |
+-------+------+----+----+
| (1,3) | 4 | 4 | 8 |
+-------+------+----+----+
| (1,4) | 4 | 4 | 8 |
+-------+------+----+----+
| (2,8) | 8 | 10 | 12 |
+-------+------+----+----+
| (2,8) | 9 | 11 | 13 |
+-------+------+----+----+
| (2,9) | 9 | 11 | 13 |
+-------+------+----+----+
which you could use to predict the current value using the future time series values (if that makes sense in your case).
Choosing a non-default `max_timeshift` or `min_timeshift` would make the extracted sub-time-series smaller or even remove them completely (e.g. with `min_timeshift = 1` the ``(1,1)`` (i.e. ``id=1,timeshift=1``) of the positive rolling case would disappear).
Using a ``rolling_direction`` with a larger absolute value (e.g. -2 or 2) will skip some of the windows (in this case, every second).
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/docs/text/forecasting.rst | 0.949716 | 0.969785 | forecasting.rst | pypi |
Feature extraction settings
===========================
When starting a new data science project involving time series you probably want to start by extracting a
comprehensive set of features. Later you can identify which features are relevant for the task at hand.
In the final stages, you probably want to fine tune the parameter of the features to fine tune your models.
You can do all those things with tsfresh. So, you need to know how to control which features are calculated by tsfresh
and how one can adjust the parameters. In this section, we will clarify this.
For the lazy: Just let me calculate some features!
--------------------------------------------------
To calculate a comprehensive set of features, call the :func:`tsfresh.extract_features` method without
passing a ``default_fc_parameters`` or ``kind_to_fc_parameters`` object. This way you will be using the default options,
which will use all the feature calculators in this package that we consider suitable to return by default.
For the advanced: How do I set the parameters for all kind of time series?
----------------------------------------------------------------------------
After digging deeper into your data, you maybe want to calculate more of a certain type of feature and less of another
type. So, you need to use custom settings for the feature extractors. To do that with tsfresh you will have to use a
custom settings object:
>>> from tsfresh.feature_extraction import ComprehensiveFCParameters
>>> settings = ComprehensiveFCParameters()
>>> # Set here the options of the settings object as shown in the paragraphs below
>>> # ...
>>> from tsfresh.feature_extraction import extract_features
>>> extract_features(df, default_fc_parameters=settings)
The ``default_fc_parameters`` is expected to be a dictionary which maps feature calculator names
(the function names you can find in the :mod:`tsfresh.feature_extraction.feature_calculators` file) to a list
of dictionaries, which are the parameters with which the function will be called (as key value pairs). Each
function-parameter combination that is in this dict will be called during the extraction and will produce a feature.
If the function does not take any parameters, the value should be set to `None`.
For example:
.. code:: python
fc_parameters = {
"length": None,
"large_standard_deviation": [{"r": 0.05}, {"r": 0.1}]
}
will produce three features: one by calling the
:func:`tsfresh.feature_extraction.feature_calculators.length` function without any parameters and two by calling
:func:`tsfresh.feature_extraction.feature_calculators.large_standard_deviation` with `r = 0.05` and `r = 0.1`.
So you can control which features will be extracted, by adding or removing either keys or parameters from this dict.
It is as easy as that.
If you decide not to calculate the length feature here, you delete it from the dictionary:
.. code:: python
del fc_parameters["length"]
And now, only the two other features are calculated.
For convenience, three dictionaries are predefined and can be used right away:
* :class:`tsfresh.feature_extraction.settings.ComprehensiveFCParameters`: includes all features without parameters and
all features with parameters, each with different parameter combinations. This is the default for `extract_features`
if you do not hand in a `default_fc_parameters` at all.
* :class:`tsfresh.feature_extraction.settings.MinimalFCParameters`: includes only a handful of features
and can be used for quick tests. The features which have the "minimal" attribute are used here.
* :class:`tsfresh.feature_extraction.settings.EfficientFCParameters`: Mostly the same features as in the
:class:`tsfresh.feature_extraction.settings.ComprehensiveFCParameters`, but without features which are marked with the
"high_comp_cost" attribute. This can be used if runtime performance plays a major role.
Theoretically, you could calculate an unlimited number of features with tsfresh by adding entry after entry to the
dictionary.
For the ambitious: How do I set the parameters for different type of time series?
---------------------------------------------------------------------------------
It is also possible to control the features to be extracted for the different kinds of time series individually.
You can do so by passing another dictionary to the extract function as a
kind_to_fc_parameters = {"kind" : fc_parameters}
parameter. This dict must be a mapping from kind names (as string) to `fc_parameters` objects,
which you would normally pass as an argument to the `default_fc_parameters` parameter.
So, for example the following code snippet:
.. code:: python
kind_to_fc_parameters = {
"temperature": {"mean": None},
"pressure": {"maximum": None, "minimum": None}
}
will extract the `"mean"` feature of the `"temperature"` time series and the `"minimum"` and `"maximum"` of the
`"pressure"` time series.
The `kind_to_fc_parameters` argument will partly override the `default_fc_parameters`. So, if you include a kind
name in the `kind_to_fc_parameters` parameter, its value will be used for that kind.
Other kinds will still use the `default_fc_parameters`.
A handy trick: Do I really have to create the dictionary by hand?
-----------------------------------------------------------------
Not necessarily. Let's assume you have a DataFrame of tsfresh features.
By using feature selection algorithms you find out that only a subgroup of features is relevant.
Then, we provide the :func:`tsfresh.feature_extraction.settings.from_columns` method that constructs the `kind_to_fc_parameters`
dictionary from the column names of this filtered feature matrix to make sure that only relevant features are extracted.
This can save a huge amount of time because you prevent the calculation of unnecessary features.
Let's illustrate this with an example:
.. code:: python
# X_tsfresh contains the extracted tsfresh features
X_tsfresh = extract_features(...)
# which are now filtered to only contain relevant features
X_tsfresh_filtered = some_feature_selection(X_tsfresh, y, ....)
# we can easily construct the corresponding settings object
kind_to_fc_parameters = tsfresh.feature_extraction.settings.from_columns(X_tsfresh_filtered)
The above code will construct for you the `kind_to_fc_parameters` dictionary that corresponds to the features and parameters (!) from
the tsfresh features that were filtered by the `some_feature_selection` feature selection algorithm.
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/docs/text/feature_extraction_settings.rst | 0.948513 | 0.930332 | feature_extraction_settings.rst | pypi |
How to add a custom feature
===========================
If you want to extract custom made features from your time series, tsfresh allows you to do so in a few
simple steps:
Step 1. Decide which type of feature you want to implement
----------------------------------------------------------
tsfresh supports two types of feature calculation methods:
*1.* simple
*2.* combiner
The difference lays in the number of features calculated for a singular time series.
A feature calculator is *simple* if it returns exactly one feature (*1.*), and a *combiner* if it returns multiple features at once (*2.*).
So if you want to add a singular feature, you should select *1.*, the simple feature calculator class.
If it is, however, better to calculate multiple features at the same time (e.g., to perform auxiliary calculations only
once for all features), then you should choose type *2.*.
Step 2. Write the feature calculator
------------------------------------
Depending on which type of feature calculator you are implementing, you can use the following feature calculator skeletons:
1. simple features
~~~~~~~~~~~~~~~~~~
You can write a simple feature calculator that returns exactly one feature, without parameters as follows:
.. code:: python
from tsfresh.feature_extraction.feature_calculators import set_property
@set_property("fctype", "simple")
def your_feature_calculator(x):
"""
The description of your feature
:param x: the time series to calculate the feature of
:type x: pandas.Series
:return: the value of this feature
:return type: bool, int or float
"""
# Calculation of feature as float, int or bool
result = f(x)
return result
or with parameters:
.. code:: python
@set_property("fctype", "simple")
def your_feature_calculator(x, p1, p2, ...):
"""
Description of your feature
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param p1: description of your parameter p1
:type p1: type of your parameter p1
:param p2: description of your parameter p2
:type p2: type of your parameter p2
...
:return: the value of this feature
:return type: bool, int or float
"""
# Calculation of feature as float, int or bool
result = f(x)
return result
2. combiner features
~~~~~~~~~~~~~~~~~~~~
Alternatively, you can write a combiner feature calculator that returns multiple features as follows:
.. code:: python
from tsfresh.utilities.string_manipulation import convert_to_output_format
@set_property("fctype", "combiner")
def your_feature_calculator(x, param):
"""
Short description of your feature (should be a one liner as we parse the first line of the description)
Long detailed description, add some equations, add some references, what kind of statistics is the feature
capturing? When should you use it? When not?
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"p1": x, "p2": y, ...} with p1 float, p2 int ...
:type param: list
:return: list of tuples (s, f) where s are the parameters, serialized as a string,
and f the respective feature value as bool, int or float
:return type: list
"""
# Do some pre-processing if needed for all parameters
# f is a function that calculates the feature value for each single parameter combination
return [(convert_to_output_format(config), f(x, config)) for config in param]
Writing your own time-based feature calculators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Writing your own time-based feature calculators is no different than usual. Only two new properties must be set using the `@set_property` decorator:
* Adding ``@set_property("input", "pd.Series")`` tells the function that the input of the function is a ``pd.Series`` rather than a ``numpy`` array.
This allows the index to be used automatically.
* Adding ``@set_property("index_type", pd.DatetimeIndex)`` tells the function that the input is a `DatetimeIndex`,
allowing it to perform calculations based on time data types.
For example, if we want to write a function that calculates the time between the first and last measurement, it could look something like this:
.. code:: python
@set_property("input", "pd.Series")
@set_property("index_type", pd.DatetimeIndex)
def timespan(x, param):
ix = x.index
# Get differences between the last timestamp and the first timestamp in seconds,
# then convert to hours.
times_seconds = (ix[-1] - ix[0]).total_seconds()
return times_seconds / float(3600)
Step 3. Add custom settings for your feature
--------------------------------------------
Finally, you need to add your new custom feature to the extraction settings, otherwise it is not used
during extraction.
To do this, create a new settings object (by default, ``tsfresh`` uses the
:class:`tsfresh.feature_extraction.settings.ComprehensiveFCParameters`) and
add your function as a key to the dictionary.
As a value, either use ``None`` if your function does not need parameters or a list with the
parameters you want to use (as dictionaries).
.. code:: python
settings = ComprehensiveFCParameters()
settings[f] = [{"n": 1}, {"n": 2}]
After that, make sure you pass your newly created settings in the call to ``extract_features``.
Step 4. Make a pull request
---------------------------
We would be very happy if you contribute your custom features to tsfresh.
To do this, add your feature into the ``feature_calculators.py`` file and append your
feature (as a name) with safe default parameters to the ``name_to_param`` dictionary inside the
:class:`tsfresh.feature_extraction.settings.ComprehensiveFCParameters` constructor:
.. code:: python
name_to_param.update({
# here are the existing settings
...
# Now the settings of your feature calculator
"your_feature_calculator": [{"p1": x, "p2": y, ...} for x,y in ...],
})
Make sure, that the different feature extraction settings
(e.g. :class:`tsfresh.feature_extraction.settings.EfficientFCParameters`,
:class:`tsfresh.feature_extraction.settings.MinimalFCParameters` or
:class:`tsfresh.feature_extraction.settings.ComprehensiveFCParameters`) do include different sets of
feature calculators to use. You can control, which feature extraction settings object will include your new
feature calculator by giving your function attributes like "minimal" or "high_comp_cost". See the
classes in :mod:`tsfresh.feature_extraction.settings` for more information.
After that, add some tests and make a pull request to our `github repo <https://github.com/blue-yonder/tsfresh>`_.
We happily accept partly implemented feature calculators, which we can finalize together.
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/docs/text/how_to_add_custom_feature.rst | 0.909202 | 0.828419 | how_to_add_custom_feature.rst | pypi |
.. _tsfresh-on-a-cluster-label:
.. role:: python(code)
:language: python
Parallelization
===============
The feature extraction, the feature selection, as well as the rolling, offer the possibility of parallelization.
By default, all of those tasks are parallelized by tsfresh.
Here we discuss the different settings to control the parallelization.
To achieve the best results for your use-case you should experiment with the parameters.
.. NOTE::
This document describes parallelization to speed up processing time.
If you are working with large amounts of data (which might not fit into memory),
check :ref:`large-data-label`.
Please, let us know about your results tuning the below mentioned parameters! It will help improve the documentation as
well as the default settings.
Parallelization of Feature Selection
------------------------------------
We use a :class:`multiprocessing.Pool` to parallelize the calculation of the p-values for each feature. On
instantiation we set the Pool's number of worker processes to
`n_jobs`. This field defaults to
the number of processors on the current system. We recommend setting it to the maximum number of available (and
otherwise idle) processors.
The chunksize of the Pool's map function is another important parameter to consider. It can be set via the
`chunksize` field. By default it is up to the
:class:`multiprocessing.Pool` to decide. The chunksize is a parallelisation parameter. One data chunk is
defined as a singular time series for one id and one kind. The chunksize is the
number of chunks that are submitted as one task to one worker process. If you
set the chunksize to 10, then it means that one worker task corresponds to
calculate all features for 10 id/kind time series combinations. If it is set
to None, depending on distributor, heuristics are used to find the optimal
chunksize. The chunksize can have a crucial influence on the optimal cluster
performance and should be optimised in benchmarks for the problem at hand.
Parallelization of Feature Extraction
-------------------------------------
For the feature extraction tsfresh exposes the parameters
`n_jobs` and `chunksize`. Both behave similarly to the parameters
for the feature selection.
To do performance studies and profiling, it is sometimes useful to turn off parallelization. This can be
done by setting the parameter `n_jobs` to 0.
Parallelization beyond a single machine
---------------------------------------
The high volume of time series data can demand an analysis at scale.
So, time series need to be processed on a group of computational units instead of a singular machine.
Accordingly, it may be necessary to distribute the extraction of time series features to a cluster.
It is possible to extract features with *tsfresh* in a distributed fashion.
In the following paragraphs we discuss how to setup a distributed *tsfresh*.
To distribute the calculation of features, we use a certain object, the Distributor class (located in the
:mod:`tsfresh.utilities.distribution` module).
Essentially, a Distributor organizes the application of feature calculators to data chunks.
It maps the feature calculators to the data chunks and then reduces them, meaning that it combines the results of the
individual mappings into one object, the feature matrix.
So, Distributor will, in the following order,
1. calculate an optimal :python:`chunk_size`, based on the characteristics of the time series data
(by :func:`~tsfresh.utilities.distribution.DistributorBaseClass.calculate_best_chunk_size`)
2. split the time series data into chunks
(by :func:`~tsfresh.utilities.distribution.DistributorBaseClass.partition`)
3. distribute the application of the feature calculators to the data chunks
(by :func:`~tsfresh.utilities.distribution.DistributorBaseClass.distribute`)
4. combine the results into the feature matrix
(by :func:`~tsfresh.utilities.distribution.DistributorBaseClass.map_reduce`)
5. close all connections, shutdown all resources and clean everything
(by :func:`~tsfresh.utilities.distribution.DistributorBaseClass.close`)
So, how can you use the Distributor to extract features with *tsfresh*?
You will have to pass :python:`distributor` as an argument to the :func:`~tsfresh.feature_extraction.extract_features`
method.
The following example shows how to define the MultiprocessingDistributor, which will distribute the calculations to a
local pool of threads:
.. code:: python
from tsfresh.examples.robot_execution_failures import \
download_robot_execution_failures, \
load_robot_execution_failures
from tsfresh.feature_extraction import extract_features
from tsfresh.utilities.distribution import MultiprocessingDistributor
# download and load some time series data
download_robot_execution_failures()
df, y = load_robot_execution_failures()
# We construct a Distributor that will spawn the calculations
# over four threads on the local machine
Distributor = MultiprocessingDistributor(n_workers=4,
disable_progressbar=False,
progressbar_title="Feature Extraction")
# just to pass the Distributor object to
# the feature extraction, along with the other parameters
X = extract_features(timeseries_container=df,
column_id='id',
column_sort='time',
distributor=Distributor)
The following example corresponds to the existing multiprocessing *tsfresh* API, where you just specify the number of
jobs, without the need to construct the Distributor:
.. code:: python
from tsfresh.examples.robot_execution_failures import \
download_robot_execution_failures, \
load_robot_execution_failures
from tsfresh.feature_extraction import extract_features
download_robot_execution_failures()
df, y = load_robot_execution_failures()
X = extract_features(timeseries_container=df,
column_id='id',
column_sort='time',
n_jobs=4)
Using dask to distribute the calculations
'''''''''''''''''''''''''''''''''''''''''
We provide a Distributor for the `dask framework <https://dask.pydata.org/en/latest/>`_, where
*"Dask is a flexible parallel computing library for analytic computing."*
.. NOTE::
This part of the documentation only handles parallelizing the computation using
a dask cluster. The input and output are still pandas objects.
If you want to use dask's capabilities to scale to data beyond your local
memory, have a look at :ref:`large-data-label`.
Dask is a great framework to distribute analytic calculations into clusters.
It scales up and down, meaning that you can also use it on a singular machine.
The only thing that you will need to run *tsfresh* on a Dask cluster is the ip address and port number of the
`dask-scheduler <http://distributed.readthedocs.io/en/latest/setup.html>`_.
Let's say that your dask scheduler is running at ``192.168.0.1:8786``, then we can construct a
:class:`~tsfresh.utilities.distribution.ClusterDaskDistributor` that connects to the scheduler and distributes the
time series data and the calculation to a cluster:
.. code:: python
from tsfresh.examples.robot_execution_failures import \
download_robot_execution_failures, \
load_robot_execution_failures
from tsfresh.feature_extraction import extract_features
from tsfresh.utilities.distribution import ClusterDaskDistributor
download_robot_execution_failures()
df, y = load_robot_execution_failures()
Distributor = ClusterDaskDistributor(address="192.168.0.1:8786")
X = extract_features(timeseries_container=df,
column_id='id',
column_sort='time',
distributor=Distributor)
Compared to the :class:`~tsfresh.utilities.distribution.MultiprocessingDistributor` example from above, we only had to
change one line to switch from one machine to a whole cluster.
It is as easy as that.
By changing the Distributor you can easily deploy your application to run to a cluster instead of your workstation.
You can also use a local DaskCluster on your local machine to emulate a Dask network.
The following example shows how to setup a :class:`~tsfresh.utilities.distribution.LocalDaskDistributor` on a local cluster
of 3 workers:
.. code:: python
from tsfresh.examples.robot_execution_failures import \
download_robot_execution_failures, \
load_robot_execution_failures
from tsfresh.feature_extraction import extract_features
from tsfresh.utilities.distribution import LocalDaskDistributor
download_robot_execution_failures()
df, y = load_robot_execution_failures()
Distributor = LocalDaskDistributor(n_workers=3)
X = extract_features(timeseries_container=df,
column_id='id',
column_sort='time',
distributor=Distributor)
Writing your own distributor
''''''''''''''''''''''''''''
If you want to use another framework instead of Dask, you will have to write your own Distributor.
To construct your custom Distributor, you need to define an object that inherits from the abstract base class
:class:`tsfresh.utilities.distribution.DistributorBaseClass`.
The :mod:`tsfresh.utilities.distribution` module contains more information about what you need to implement.
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/docs/text/tsfresh_on_a_cluster.rst | 0.917291 | 0.837819 | tsfresh_on_a_cluster.rst | pypi |
.. _data-formats-label:
Data Formats
============
tsfresh offers three different options to specify the format of the time series data to use with the function
:func:`tsfresh.extract_features` (and all utility functions that expect a time series, for that
matter, like for example :func:`tsfresh.utilities.dataframe_functions.roll_time_series`).
Irrespective of the input format, tsfresh will always return the calculated features in the same output format
described below.
Typically, the input format options are :class:`pandas.DataFrame` objects, which we will discuss here, and also
Dask dataframes and PySpark computational graphs, which are discussed here :ref:`large-data-label`.
There are four important column types that
make up those DataFrames. Each will be described with an example from the robot failures dataset
(see :ref:`quick-start-label`).
:`column_id`: This column indicates which entities the time series belong to. Features will be extracted individually
for each entity (id). The resulting feature matrix will contain one row per id.
Each robot is a different entity, so each of it has a different id.
:`column_sort`: This column contains values which allow to sort the time series (e.g. time stamps).
In general, it is not required to have equidistant time steps or the same time scale for the different ids and/or kinds.
Some features might make however only sense for equidistant time stamps.
If you omit this column, the DataFrame is assumed to be already sorted in ascending order.
Each of the robot sensor measurements have a time stamp which is used as the `column_sort`.
Need only to be specified on some data formats (see below):
:`column_value`: This column contains the actual values of the time series.
This corresponds to the measured values of different sensors on the robots.
:`column_kind`: This column indicates the names of the different time series types (e.g. different sensors in an
industrial application as in the robot dataset).
For each kind of time series the features are calculated individually.
Important: None of these columns is allowed to contain ``NaN``, ``Inf`` or ``-Inf`` values.
In the following paragraphs, we describe the different input formats that are built from those columns:
* A flat DataFrame
* A stacked DataFrame
* A dictionary of flat DataFrames
The difference between a flat and a stacked DataFrame is indicated by specifying (or not) the parameters
``column_value`` and ``column_kind`` in the :func:`tsfresh.extract_features` function.
If you are unsure which one to choose, try either the flat or stacked DataFrame.
Input Option 1. Flat DataFrame or Wide DataFrame
------------------------------------------------
If both ``column_value`` and ``column_kind`` are set to ``None``, the time series data is assumed to be in a flat
DataFrame. This means that each different time series must be saved as its own column.
Example: Imagine you record the values of time series x and y for different objects A and B for three different
times t1, t2 and t3. Your resulting DataFrame may look like this:
+----+------+----------+----------+
| id | time | x | y |
+====+======+==========+==========+
| A | t1 | x(A, t1) | y(A, t1) |
+----+------+----------+----------+
| A | t2 | x(A, t2) | y(A, t2) |
+----+------+----------+----------+
| A | t3 | x(A, t3) | y(A, t3) |
+----+------+----------+----------+
| B | t1 | x(B, t1) | y(B, t1) |
+----+------+----------+----------+
| B | t2 | x(B, t2) | y(B, t2) |
+----+------+----------+----------+
| B | t3 | x(B, t3) | y(B, t3) |
+----+------+----------+----------+
Now, you want to calculate some features with tsfresh so you would pass:
.. code:: python
column_id="id", column_sort="time", column_kind=None, column_value=None
to the extraction function, to extract features separately for all ids and separately for the x and y values.
You can also omit the ``column_kind=None, column_value=None`` as this is the default.
Input Option 2. Stacked DataFrame or Long DataFrame
---------------------------------------------------
If both ``column_value`` and ``column_kind`` are set, the time series data is assumed to be a stacked DataFrame.
This means that there are no different columns for the different types of time series.
This representation has several advantages over the flat Data Frame.
For example, the time stamps of the different time series do not have to align.
It does not contain different columns for the different types of time series but only one
value column and a kind column. Following with our previous example, the dataframe would look like this:
+----+------+------+----------+
| id | time | kind | value |
+====+======+======+==========+
| A | t1 | x | x(A, t1) |
+----+------+------+----------+
| A | t2 | x | x(A, t2) |
+----+------+------+----------+
| A | t3 | x | x(A, t3) |
+----+------+------+----------+
| A | t1 | y | y(A, t1) |
+----+------+------+----------+
| A | t2 | y | y(A, t2) |
+----+------+------+----------+
| A | t3 | y | y(A, t3) |
+----+------+------+----------+
| B | t1 | x | x(B, t1) |
+----+------+------+----------+
| B | t2 | x | x(B, t2) |
+----+------+------+----------+
| B | t3 | x | x(B, t3) |
+----+------+------+----------+
| B | t1 | y | y(B, t1) |
+----+------+------+----------+
| B | t2 | y | y(B, t2) |
+----+------+------+----------+
| B | t3 | y | y(B, t3) |
+----+------+------+----------+
Then you would set:
.. code:: python
column_id="id", column_sort="time", column_kind="kind", column_value="value"
to end up with the same extracted features.
You can also omit the value column and let ``tsfresh`` deduce it automatically.
Input Option 3. Dictionary of flat DataFrames
---------------------------------------------
Instead of passing a DataFrame which must be split up by its different kinds by tsfresh, you can also give a
dictionary mapping from the kind as string to a DataFrame containing only the time series data of that kind.
So essentially you are using a singular DataFrame for each kind of time series.
The data from the example can be split into two DataFrames resulting in the following dictionary:
{ "x":
+----+------+----------+
| id | time | value |
+====+======+==========+
| A | t1 | x(A, t1) |
+----+------+----------+
| A | t2 | x(A, t2) |
+----+------+----------+
| A | t3 | x(A, t3) |
+----+------+----------+
| B | t1 | x(B, t1) |
+----+------+----------+
| B | t2 | x(B, t2) |
+----+------+----------+
| B | t3 | x(B, t3) |
+----+------+----------+
,
"y":
+----+------+----------+
| id | time | value |
+====+======+==========+
| A | t1 | y(A, t1) |
+----+------+----------+
| A | t2 | y(A, t2) |
+----+------+----------+
| A | t3 | y(A, t3) |
+----+------+----------+
| B | t1 | y(B, t1) |
+----+------+----------+
| B | t2 | y(B, t2) |
+----+------+----------+
| B | t3 | y(B, t3) |
+----+------+----------+
}
You would pass this dictionary to tsfresh together with the following arguments:
.. code:: python
column_id="id", column_sort="time", column_kind=None, column_value="value"
In this case we do not need to specify the kind column as the kind is the respective dictionary key.
Output Format
-------------
The resulting feature matrix, containing the extracted features, is the same for all three input options.
It will always be a :class:`pandas.DataFrame` with the following layout:
+----+-------------+-----+-------------+-------------+-----+-------------+
| id | x_feature_1 | ... | x_feature_N | y_feature_1 | ... | y_feature_N |
+====+=============+=====+=============+=============+=====+=============+
| A | ... | ... | ... | ... | ... | ... |
+----+-------------+-----+-------------+-------------+-----+-------------+
| B | ... | ... | ... | ... | ... | ... |
+----+-------------+-----+-------------+-------------+-----+-------------+
where the x features are calculated using all x values (independently for A and B), the y features using all y values
(independently for A and B), and so on.
This DataFrame is also the expected input format to the feature selection algorithms used by tsfresh (e.g. the
:func:`tsfresh.select_features` function).
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/docs/text/data_formats.rst | 0.948976 | 0.927626 | data_formats.rst | pypi |
Introduction
============
Why tsfresh?
------------
tsfresh is used for systematic feature engineering from time-series and other sequential data [1]_.
These data have in common that they are ordered by an independent variable.
The most common independent variable is time (time series).
Other examples for sequential data are reflectance and absorption spectra,
which have wavelength as their ordering dimension.
In order to keep things simple, we simply refer to all the different types of sequential data as time series.
.. image:: ../images/introduction_ts_exa.png
:scale: 70 %
:alt: the time series
:align: center
(and yes, it is pretty cold!)
Now you want to calculate different characteristics such as the maximum or minimum temperature, the average temperature
or the number of temporary temperature peaks:
.. image:: ../images/introduction_ts_exa_features.png
:scale: 70 %
:alt: some characteristics of the time series
:align: center
Without tsfresh, you would have to calculate all those characteristics manually; tsfresh automates this process,
calculating and returning all those features automatically.
In addition, tsfresh is compatible with the Python libraries :mod:`pandas` and :mod:`scikit-learn`, so you can easily
integrate the feature extraction with your current routines.
What can we do with these features?
-----------------------------------
The extracted features can be used to describe the time series, i.e., often these features give new insights into the
time series and their dynamics. They can also be used to cluster time series and to train machine learning models that
perform classification or regression tasks on time series.
The tsfresh package has been successfully used in the following projects:
* prediction of steel billets quality during a continuous casting process [2]_,
* activity recognition from synchronized sensors [3]_,
* volcanic eruption forecasting [4]_,
* authorship attribution from written text samples [5]_,
* characterisation of extrasolar planetary systems from time-series with missing data [6]_,
* sensor anomaly detection [7]_,
* and `many many more <https://scholar.google.de/scholar?cites=365611925060572663>`_.
What can't we do with tsfresh?
------------------------------
Currently, tsfresh is not suitable:
* for streaming data (by streaming data we mean data that is usually used for online operations, while time series data is usually used for offline operations)
* to train models on the extracted features (we do not want to reinvent the wheel, to train machine learning models check out the Python package
`scikit-learn <http://scikit-learn.org/stable/>`_)
* for usage with highly irregular time series; tsfresh uses timestamps only to order observations. While many features are interval-agnostic (e.g., number of peaks) and can be determined for any series, some other features (e.g., linear trend) assume equal spacing in time, and should be used with care when this assumption is not met.
However, some of these use cases could be implemented, if you have an application in mind, open
an issue at `<https://github.com/blue-yonder/tsfresh/issues>`_, or feel free to contact us.
What else is out there?
-----------------------
There is a matlab package called `hctsa <https://github.com/benfulcher/hctsa>`_ which can be used to automatically
extract features from time series.
It is also possible to use hctsa from within Python through the `pyopy <https://github.com/strawlab/pyopy>`_
package.
Other available packages are `featuretools <https://www.featuretools.com/>`_, `FATS <http://isadoranun.github.io/tsfeat/>`_ and `cesium <http://cesium-ml.org/>`_.
References
----------
.. [1] Christ, M., Braun, N., Neuffer, J. and Kempa-Liehr A.W. (2018).
*Time Series FeatuRe Extraction on basis of Scalable Hypothesis tests (tsfresh – A Python package)*.
Neurocomputing 307 (2018) 72-77,
`doi: 10.1016/j.neucom.2018.03.067 <https://doi.org/10.1016/j.neucom.2018.03.067>`_.
.. [2] Christ, M., Kempa-Liehr, A.W. and Feindt, M. (2016).
*Distributed and parallel time series feature extraction for industrial big data applications*.
Asian Conference on Machine Learning (ACML), Workshop on Learning on Big Data (WLBD).
`<https://arxiv.org/abs/1610.07717v1>`_.
.. [3] Kempa-Liehr, A.W., Oram, J., Wong, A., Finch, M. and Besier, T. (2020).
*Feature engineering workflow for activity recognition from synchronized inertial measurement units*.
In: Pattern Recognition. ACPR 2019. Ed. by M. Cree et al. Vol. 1180.
Communications in Computer and Information Science (CCIS).
Singapore: Springer 2020, 223–231.
`doi: 10.1007/978-981-15-3651-9_20 <https://doi.org/10.1007/978-981-15-3651-9_20>`_.
.. [4] D. E. Dempsey, S. J. Cronin, S. Mei, and A. W. Kempa-Liehr (2020).
*Automatic precursor recognition and real-time forecasting of sudden explosive volcanic eruptions at Whakaari, New Zealand*.
Nature Communications 11.3562, pp. 1–8.
`doi: 10.1038/s41467-020-17375-2 <https://dx.doi.org/10.1038/s41467-020-17375-2>`_.
.. [5] Tang, Y., Blincoe, K., Kempa-Liehr, A.W. (2020).
*Enriching Feature Engineering for Short Text Samples by Language Time Series Analysis*.
EPJ Data Science 9.26 (2020), 1–59.
`doi: 10.1140/epjds/s13688-020-00244-9 <https://doi.org/10.1140/epjds/s13688-020-00244-9>`_.
.. [6] Kennedy, A., Gemma, N., Rattenbury, N., Kempa-Liehr, A.W. (2021).
*Modelling the projected separation of microlensing events using systematic time-series feature engineering*.
Astronomy and Computing 35.100460 (2021), 1–14,
`doi: 10.1016/j.ascom.2021.100460 <https://doi.org/10.1016/j.ascom.2021.100460>`_.
.. [7] Hui Yie Teh, Kevin I-Kai Wang, and Andreas W. Kempa-Liehr (2021).
*Expect the Unexpected: Unsupervised feature selection for automated sensor anomaly detection*.
IEEE Sensors Journal 15.16, pp. 18033–18046.
`doi: 10.1109/JSEN.2021.3084970 <https://doi.org/10.1109/JSEN.2021.3084970>`_.
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/docs/text/introduction.rst | 0.961043 | 0.942401 | introduction.rst | pypi |
.. _quick-start-label:
Quick Start
===========
Install tsfresh
---------------
As the compiled tsfresh package is hosted on the Python Package Index (PyPI) you can easily install it with pip
.. code:: shell
pip install tsfresh
Dive in
-------
Before boring yourself by reading the docs in detail, you can dive right into tsfresh with the following example:
We are given a data set containing robot failures as discussed in [1]_.
Each robot records time series from six different sensors.
For each sample denoted by a different id we are going to classify if the robot reports a failure or not.
From a machine learning point of view, our goal is to classify each group of time series.
To start, we load the data into python
.. code:: python
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, \
load_robot_execution_failures
download_robot_execution_failures()
timeseries, y = load_robot_execution_failures()
and end up with a pandas.DataFrame `timeseries` having the following shape
.. code:: python
print(timeseries.head())
+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| | id | time| F_x | F_y | F_z | T_x | T_y | T_z |
+=====+=====+=====+=====+=====+=====+=====+=====+=====+
| 0 | 1 | 0 | -1 | -1 | 63 | -3 | -1 | 0 |
+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| 1 | 1 | 1 | 0 | 0 | 62 | -3 | -1 | 0 |
+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| 2 | 1 | 2 | -1 | -1 | 61 | -3 | 0 | 0 |
+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| 3 | 1 | 3 | -1 | -1 | 63 | -2 | -1 | 0 |
+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| 4 | 1 | 4 | -1 | -1 | 63 | -3 | -1 | 0 |
+-----+-----+-----+-----+-----+-----+-----+-----+-----+
| ... | ... | ... | ... | ... | ... | ... | ... | ... |
+-----+-----+-----+-----+-----+-----+-----+-----+-----+
The first column is the DataFrame index and has no meaning here.
There are six different time series (`F_x`, `F_y`, `F_z`, `T_x`, `T_y`, `T_z`) for the different sensors. The different robots are denoted by the ids column.
On the other hand, ``y`` contains the information which robot `id` reported a failure and which not:
+---+---+
| 1 | 0 |
+---+---+
| 2 | 0 |
+---+---+
| 3 | 0 |
+---+---+
| 4 | 0 |
+---+---+
| 5 | 0 |
+---+---+
|...|...|
+---+---+
Here, for the samples with ids 1 to 5 no failure was reported.
In the following we illustrate the time series of the sample id 3 reporting no failure:
.. code:: python
import matplotlib.pyplot as plt
timeseries[timeseries['id'] == 3].plot(subplots=True, sharex=True, figsize=(10,10))
plt.show()
.. image:: ../images/ts_example_robot_failures_nofail.png
:alt: the time series for id 3 (no failure)
:align: center
And for id 20 reporting a failure:
.. code:: python
timeseries[timeseries['id'] == 20].plot(subplots=True, sharex=True, figsize=(10,10))
plt.show()
.. image:: ../images/ts_example_robot_failures_fail.png
:alt: the time series for id 20 (failure)
:align: center
You can already see some differences by eye - but for successful machine learning we have to put these differences into
numbers.
For this, tsfresh comes into place.
It allows us to automatically extract over 1200 features from those six different time series for each robot.
For extracting all features, we do:
.. code:: python
from tsfresh import extract_features
extracted_features = extract_features(timeseries, column_id="id", column_sort="time")
You end up with the DataFrame `extracted_features` with more than 1200 different extracted features.
We will now first remove all ``NaN`` values (which were created by feature calculators that can not be used on the
given data, e.g., because the statistics are too low), and then select only the relevant features:
.. code-block:: python
from tsfresh import select_features
from tsfresh.utilities.dataframe_functions import impute
impute(extracted_features)
features_filtered = select_features(extracted_features, y)
Only around 300 features were classified as relevant enough.
Further, you can even perform the extraction, imputing and filtering at the same time with the
:func:`tsfresh.extract_relevant_features` function:
.. code-block:: python
from tsfresh import extract_relevant_features
features_filtered_direct = extract_relevant_features(timeseries, y,
column_id='id', column_sort='time')
You can now use the features in the DataFrame `features_filtered` (which is equal to
`features_filtered_direct`) in conjunction with `y` to train your classification model.
You can find an example in the Jupyter notebook
`01 Feature Extraction and Selection.ipynb <https://github.com/blue-yonder/tsfresh/blob/main/notebooks/examples/01%20Feature%20Extraction%20and%20Selection.ipynb>`_
where we train a RandomForestClassifier using the extracted features.
References
.. [1] http://archive.ics.uci.edu/ml/datasets/Robot+Execution+Failures
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/docs/text/quick_start.rst | 0.94154 | 0.72841 | quick_start.rst | pypi |
# Feature Selection in a sklearn pipeline
This notebook is quite similar to [the first example](./01%20Feature%20Extraction%20and%20Selection.ipynb).
This time however, we use the `sklearn` pipeline API of `tsfresh`.
If you want to learn more, have a look at [the documentation](https://tsfresh.readthedocs.io/en/latest/text/sklearn_transformers.html).
```
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from tsfresh.examples import load_robot_execution_failures
from tsfresh.transformers import RelevantFeatureAugmenter
from tsfresh.utilities.dataframe_functions import impute
```
## Load and Prepare the Data
Check out the first example notebook to learn more about the data and format.
```
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures
download_robot_execution_failures()
df_ts, y = load_robot_execution_failures()
```
We want to use the extracted features to predict for each of the robot executions, if it was a failure or not.
Therefore our basic "entity" is a single robot execution given by a distinct `id`.
A dataframe with these identifiers as index needs to be prepared for the pipeline.
```
X = pd.DataFrame(index=y.index)
# Split data into train and test set
X_train, X_test, y_train, y_test = train_test_split(X, y)
```
## Build the pipeline
We build a sklearn pipeline that consists of a feature extraction step (`RelevantFeatureAugmenter`) with a subsequent `RandomForestClassifier`.
The `RelevantFeatureAugmenter` takes roughly the same arguments as `extract_features` and `select_features` do.
```
ppl = Pipeline([
('augmenter', RelevantFeatureAugmenter(column_id='id', column_sort='time')),
('classifier', RandomForestClassifier())
])
```
<div class="alert alert-warning">
Here comes the tricky part!
The input to the pipeline will be our dataframe `X`, which has one row per identifier.
It is currently empty.
But from which time series data should the `RelevantFeatureAugmenter` actually extract the features?
We need to pass the time series data (stored in `df_ts`) to the transformer.
</div>
In this case, df_ts contains the time series of both train and test set, if you have different dataframes for
train and test set, you have to call set_params two times
(see further below on how to deal with two independent data sets)
```
ppl.set_params(augmenter__timeseries_container=df_ts);
```
We are now ready to fit the pipeline
```
ppl.fit(X_train, y_train)
```
The augmenter has used the input time series data to extract time series features for each of the identifiers in the `X_train` and selected only the relevant ones using the passed `y_train` as target.
These features have been added to `X_train` as new columns.
The classifier can now use these features during trainings.
## Prediction
During inference, the augmenter only extracts the relevant features it identified during the training phase, and the classifier predicts the target using these features.
```
y_pred = ppl.predict(X_test)
```
So, finally we inspect the performance:
```
print(classification_report(y_test, y_pred))
```
You can also find out, which columns the augmenter has selected
```
ppl.named_steps["augmenter"].feature_selector.relevant_features
```
<div class="alert alert-info">
In this example we passed in an empty (except the index) `X_train` or `X_test` into the pipeline.
However, you can also fill the input with other features you have (e.g. features extracted from the metadata)
or even use other pipeline components before.
</div>
## Separating the time series data containers
In the example above we passed in a single `df_ts` into the `RelevantFeatureAugmenter`, which was used both for training and predicting.
During training, only the data with the `id`s from `X_train` were extracted, and during prediction the rest.
However, it is perfectly fine to call `set_params` twice: once before training and once before prediction.
This can be handy if you for example dump the trained pipeline to disk and re-use it only later for prediction.
You only need to make sure that the `id`s of the entities you use during training/prediction are actually present in the passed time series data.
```
df_ts_train = df_ts[df_ts["id"].isin(y_train.index)]
df_ts_test = df_ts[df_ts["id"].isin(y_test.index)]
ppl.set_params(augmenter__timeseries_container=df_ts_train);
ppl.fit(X_train, y_train);
import pickle
with open("pipeline.pkl", "wb") as f:
pickle.dump(ppl, f)
```
Later: load the fitted model and do predictions on new, unseen data
```
import pickle
with open("pipeline.pkl", "rb") as f:
    ppl = pickle.load(f)
ppl.set_params(augmenter__timeseries_container=df_ts_test);
y_pred = ppl.predict(X_test)
print(classification_report(y_test, y_pred))
```
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/examples/02 sklearn Pipeline.ipynb | 0.418459 | 0.980766 | 02 sklearn Pipeline.ipynb | pypi |
# Multiclass Example
This example shows how to use `tsfresh` to extract and select useful features from timeseries in a multiclass classification example.
The underlying control of the false discovery rate (FDR) has been introduced by [Tang et al. (2020, Sec. 3.2)](https://doi.org/10.1140/epjds/s13688-020-00244-9).
We use an example dataset of human activity recognition for this.
The dataset consists of timeseries for 7352 accelerometer readings.
Each reading represents an accelerometer reading for 2.56 sec at 50hz (for a total of 128 samples per reading). Furthermore, each reading corresponds to one of six activities (walking, walking upstairs, walking downstairs, sitting, standing and laying).
For more information go to https://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
This notebook follows the example in [the first notebook](./01%20Feature%20Extraction%20and%20Selection.ipynb), so we will go quickly over the extraction and focus on the more interesting feature selection in this case.
```
%matplotlib inline
import matplotlib.pylab as plt
from tsfresh import extract_features, extract_relevant_features, select_features
from tsfresh.utilities.dataframe_functions import impute
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import pandas as pd
import numpy as np
```
## Load and visualize data
```
from tsfresh.examples.har_dataset import download_har_dataset, load_har_dataset, load_har_classes
# fetch dataset from uci
download_har_dataset()
df = load_har_dataset()
df.head()
y = load_har_classes()
```
The data is not in a typical time series format so far:
the columns are the time steps whereas each row is a measurement of a different person.
Therefore we bring it to a format where the time series of different persons are identified by an `id` and are ordered by time vertically.
```
df["id"] = df.index
df = df.melt(id_vars="id", var_name="time").sort_values(["id", "time"]).reset_index(drop=True)
df.head()
plt.title('accelerometer reading')
plt.plot(df[df["id"] == 0].set_index("time").value)
plt.show()
```
## Extract Features
```
# only use the first 500 ids to speed up the processing
X = extract_features(df[df["id"] < 500], column_id="id", column_sort="time", impute_function=impute)
X.head()
```
## Train and evaluate classifier
For later comparison, we train a decision tree on all features (without selection):
```
X_train, X_test, y_train, y_test = train_test_split(X, y[:500], test_size=.2)
classifier_full = DecisionTreeClassifier()
classifier_full.fit(X_train, y_train)
print(classification_report(y_test, classifier_full.predict(X_test)))
```
# Multiclass feature selection
We will now select a subset of relevant features using the `tsfresh` select features method.
However it only works for binary classification or regression tasks.
For a six-label multiclass classification task, we therefore split the selection problem into six binary one-versus-all classification problems.
For each of them we can do a binary classification feature selection:
```
relevant_features = set()
for label in y.unique():
y_train_binary = y_train == label
X_train_filtered = select_features(X_train, y_train_binary)
print("Number of relevant features for class {}: {}/{}".format(label, X_train_filtered.shape[1], X_train.shape[1]))
relevant_features = relevant_features.union(set(X_train_filtered.columns))
len(relevant_features)
```
we keep only those features that we selected above, for both the train and test set
```
X_train_filtered = X_train[list(relevant_features)]
X_test_filtered = X_test[list(relevant_features)]
```
and train again:
```
classifier_selected = DecisionTreeClassifier()
classifier_selected.fit(X_train_filtered, y_train)
print(classification_report(y_test, classifier_selected.predict(X_test_filtered)))
```
It worked! The precision improved by removing irrelevant features.
## Improved Multiclass feature selection
We can instead specify the number of classes for which a feature should be a relevant predictor in order to pass through the filtering process. This is as simple as setting the `multiclass` parameter to `True` and setting `n_significant` to the required number of classes. We will try with a requirement of being relevant for 5 classes.
```
X_train_filtered_multi = select_features(X_train, y_train, multiclass=True, n_significant=5)
X_train_filtered_multi.shape
```
We can see that the number of relevant features is lower than the previous implementation.
```
classifier_selected_multi = DecisionTreeClassifier()
classifier_selected_multi.fit(X_train_filtered_multi, y_train)
X_test_filtered_multi = X_test[X_train_filtered_multi.columns]
print(classification_report(y_test, classifier_selected_multi.predict(X_test_filtered_multi)))
```
We now get slightly better classification performance, especially for classes where the previous classifier performed poorly. The parameter `n_significant` can be tuned for best results.
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/examples/04 Multiclass Selection Example.ipynb | 0.538498 | 0.983691 | 04 Multiclass Selection Example.ipynb | pypi |
# Timeseries Forecasting
This notebook explains how to use `tsfresh` in time series forecasting.
Make sure you also read through the [documentation](https://tsfresh.readthedocs.io/en/latest/text/forecasting.html) to learn more on this feature.
We will use the stock price of Apple for this.
In this notebook we will only showcase how to work with a single time series at a time (one stock).
There exist another notebook in the `advanced` folder, which treats several stocks at the same time.
Basically the same - but a bit more complex when it comes to pandas multi-indexing.
```
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from tsfresh import extract_features, select_features
from tsfresh.utilities.dataframe_functions import roll_time_series, make_forecasting_frame
from tsfresh.utilities.dataframe_functions import impute
try:
import pandas_datareader.data as web
except ImportError:
print("You need to install the pandas_datareader. Run pip install pandas_datareader.")
from sklearn.linear_model import LinearRegression
```
## Reading the data
We download the data from "stooq" and only store the High value.
Please note: this notebook is for showcasing `tsfresh`s feature extraction - not to predict stock market prices :-)
```
df = web.DataReader("AAPL", 'stooq')["High"]
df.head()
plt.figure(figsize=(15, 6))
df.plot(ax=plt.gca())
plt.show()
```
We want to make the time dependency a bit clearer and add an identifier to each of the stock values (in this notebook we only have Apple though).
```
df_melted = pd.DataFrame({"high": df.copy()})
df_melted["date"] = df_melted.index
df_melted["Symbols"] = "AAPL"
df_melted.head()
```
## Create training data sample
Forecasting typically involves the following steps:
* take all data up to today
* do feature extraction (e.g. by running `extract_features`)
* run a prediction model (e.g. a regressor, see below)
* use the result as the forecast for tomorrow
In training however, we need multiple examples to train.
If we would only use the time series until today (and wait for the value of tomorrow to have a target), we would only have a single training example.
Therefore we use a trick: we replay the history.
Imagine you have a cut-out window sliding over your data.
At each time step $t$, you treat the data as it would be today.
You extract the features with everything you know until today (which is all data until and including $t$).
The target for the features until time $t$ is the time value of time $t + 1$ (which you already know, because everything has already happened).
The process of window-sliding is implemented in the function `roll_time_series`.
Our window size will be 20 (we look at max 20 days in the past) and we disregard all windows which are shorter than 5 days.
```
df_rolled = roll_time_series(df_melted, column_id="Symbols", column_sort="date",
max_timeshift=20, min_timeshift=5)
df_rolled.head()
```
The resulting dataframe now consists of these "windows" stamped out of the original dataframe.
For example all data with the `id = (AAPL, 2020-07-14 00:00:00)` comes from the original data of stock `AAPL` including the last 20 days until `2020-07-14`:
```
df_rolled[df_rolled["id"] == ("AAPL", pd.to_datetime("2020-07-14"))]
df_melted[(df_melted["date"] <= pd.to_datetime("2020-07-14")) &
(df_melted["date"] >= pd.to_datetime("2020-06-15")) &
(df_melted["Symbols"] == "AAPL")]
```
If you now group by the new `id` column, each of the groups will be a certain stock symbol until and including the data until a certain day (and including the last 20 days in the past).
Whereas we started with 1259 data samples:
```
len(df_melted)
```
we now have 1254 unique windows (identified by stock symbol and ending date):
```
df_rolled["id"].nunique()
```
We "lost" 5 windows, as we required to have a minimum history of more than 5 days.
```
df_rolled.groupby("id").size().agg([np.min, np.max])
```
The process is also shown in this image (please note that the window size is smaller for better visibility):
<img src="./stocks.png"/>
## Extract Features
The rolled (windowed) data sample is now in the correct format to use it for `tsfresh`s feature extraction.
As normal, features will be extracted using all data for a given `id`, which is in our case all data of a given window and a given id (one colored box in the graph above).
If the feature extraction returns a row with the index `(AAPL, 2020-07-14 00:00:00)`, you know it has been calculated using the `AAPL` data up and including `2020-07-14` (and 20 days of history).
```
X = extract_features(df_rolled.drop("Symbols", axis=1),
column_id="id", column_sort="date", column_value="high",
impute_function=impute, show_warnings=False)
X.head()
```
We make the data a bit easier to work with by removing the tuple-index
```
X = X.set_index(X.index.map(lambda x: x[1]), drop=True)
X.index.name = "last_date"
X.head()
```
Our `(AAPL, 2020-07-14 00:00:00)` is also in the data again:
```
X.loc['2020-07-14']
```
Just to repeat: the features in this row were only calculated using the time series values of `AAPL` up to and including `2020-07-14` and the last 20 days.
## Prediction
We can now use the extracted features to train a regressor.
But what will be our targets?
The target for the row `2020-07-13` is the value on the next timestep (that would be `2020-07-14` in this case).
So all we need to do is go back to our original dataframe and take the stock value of tomorrow.
This is done with `shift`:
```
y = df_melted.set_index("date").sort_index().high.shift(-1)
```
Quick consistency test:
```
y["2020-07-13"], df["2020-07-14"].iloc[0]
```
However, we need to be a bit careful here: `X` is missing the first 5 dates (as our minimum window size was 5) and `y` is missing the last date (as there is nothing to predict on today).
So lets make sure we have a consistent view on the data.
```
y = y[y.index.isin(X.index)]
X = X[X.index.isin(y.index)]
```
We can now train a normal regression model (here a `LinearRegression`) to predict the next time step.
Let's split the data into a training and testing sample (but make sure to keep temporal consistency).
We take everything until 2019 as train data and the rest as test:
```
X[:"2018"]
X_train = X[:"2018"]
X_test = X["2019":]
y_train = y[:"2018"]
y_test = y["2019":]
```
and do feature selection before training
```
X_train_selected = select_features(X_train, y_train)
ada = LinearRegression()
ada.fit(X_train_selected, y_train)
```
Now lets check how good our prediction is:
```
X_test_selected = X_test[X_train_selected.columns]
y_pred = pd.Series(ada.predict(X_test_selected), index=X_test_selected.index)
```
The prediction is for the next day, so for drawing we need to shift 1 step back:
```
plt.figure(figsize=(15, 6))
y.plot(ax=plt.gca())
y_pred.plot(ax=plt.gca(), legend=None, marker=".")
```
Well, clearly not perfect ;-)
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/examples/05 Timeseries Forecasting.ipynb | 0.529993 | 0.989119 | 05 Timeseries Forecasting.ipynb | pypi |
# Feature Calculator Settings
By default, all feature calculators are used when you call `extract_features`.
There could be multiple reasons why you do not want that:
* you are only interested on a certain feature (or features)
* you want to save time during extraction
* you have ran the feature selection before and already know, which features are relevant
For more information on these settings, please have a look into [the documentation](http://tsfresh.readthedocs.io/en/latest/text/feature_extraction_settings.html).
```
from tsfresh.feature_extraction import extract_features
from tsfresh.feature_extraction import settings
import numpy as np
import pandas as pd
```
## Construct a time series container
For testing, we construct the time series container that includes two sensor time series, "temperature" and "pressure", for two devices "a" and "b".
```
df = pd.DataFrame({"id": ["a", "a", "b", "b"], "temperature": [1,2,3,1], "pressure": [-1, 2, -1, 7]})
df
```
## The `default_fc_parameters`
Which features are calculated by `tsfresh` is controlled by a dictionary that contains a mapping from feature calculator names to their parameters.
This dictionary is called `fc_parameters`.
It maps feature calculator names (= keys) to parameters (= values).
Every key in the dictionary will be looked up as a function in `tsfresh.feature_extraction.feature_calculators` and be used to extract features.
`tsfresh` comes with some predefined sets of `fc_parameters` dictionaries:
```
settings.ComprehensiveFCParameters, settings.EfficientFCParameters, settings.MinimalFCParameters
```
For example, to only calculate a very minimal set of features:
```
settings_minimal = settings.MinimalFCParameters()
settings_minimal
```
Each key stands for one of the feature calculators.
The values are the parameters. If a feature calculator has no parameters, `None` is used as a value (and as these feature calculators are very simple, they all have no parameters).
This dictionary can be passed to the extract method, resulting in a few basic time series features being calculated:
```
X_tsfresh = extract_features(df, column_id="id", default_fc_parameters=settings_minimal)
X_tsfresh.head()
```
By using the settings_minimal as value of the default_fc_parameters parameter, those settings are used for all type of time series.
In this case, the `settings_minimal` dictionary is used for both "temperature" and "pressure" time series.
Please note how the columns in the resulting dataframe depend both on the settings as well as the kinds of the data.
Now, let's say we want to remove the length feature and prevent it from being calculated. We just delete it from the dictionary.
```
del settings_minimal["length"]
settings_minimal
```
Now, if we extract features for this reduced dictionary, the length feature will not be calculated
```
X_tsfresh = extract_features(df, column_id="id", default_fc_parameters=settings_minimal)
X_tsfresh.head()
```
## The `kind_to_fc_parameters`
Now, lets say we do not want to calculate the same features for both type of time series. Instead there should be different sets of features for each kind.
To do that, we can use the `kind_to_fc_parameters` parameter, which lets us specify which `fc_parameters` we want to use for which kind of time series:
```
fc_parameters_pressure = {"length": None,
"sum_values": None}
fc_parameters_temperature = {"maximum": None,
"minimum": None}
kind_to_fc_parameters = {
"temperature": fc_parameters_temperature,
"pressure": fc_parameters_pressure
}
print(kind_to_fc_parameters)
```
So, in this case, for the "pressure" sensor the length and sum\_values features are calculated.
For the "temperature" signal, the maximum and minimum are extracted instead.
```
X_tsfresh = extract_features(df, column_id="id", kind_to_fc_parameters=kind_to_fc_parameters)
X_tsfresh.head()
```
### Extracting from data
After applying a feature selection algorithm to drop irrelevant feature columns you know which features are relevant and which are not.
You can also use this information to only extract these relevant features in the first place.
The provided `from_columns` method can be used to infer a settings dictionary from the dataframe containing the features.
This dictionary can then for example be stored and be used in the next feature extraction.
```
# Assuming `X_tsfresh` contains only our relevant features
relevant_settings = settings.from_columns(X_tsfresh)
relevant_settings
```
## More complex dictionaries
We provide `fc_parameters` dictionaries with larger sets of features.
The `EfficientFCParameters` contain features and parameters that should be calculated quite fast:
```
settings_efficient = settings.EfficientFCParameters()
settings_efficient
```
The `ComprehensiveFCParameters` are the biggest set of features. It will take the longest to calculate
```
settings_comprehensive = settings.ComprehensiveFCParameters()
settings_comprehensive
```
### Feature Calculator Parameters
More complex feature calculators have parameters that you can use to tune the extracted features.
The predefined settings (such as `ComprehensiveFCParameters`) already contain default values of these features.
However for your own projects, you might want/need to tune them.
In detail, the values in a `fc_parameters` dictionary contain a list of parameter dictionaries.
When calculating the feature, each entry in the list of parameters will be used to calculate one feature.
For example, lets have a look into the feature `large_standard_deviation`, which depends on a single parameter called `r` (it basically defines how large "large" is).
The `ComprehensiveFCParameters` contains several default values for `r`.
Each of them will be used to calculate a single feature:
```
settings_comprehensive['large_standard_deviation']
```
If you use these settings in feature extraction, that would trigger the calculation of 20 different `large_standard_deviation` features, one for `r=0.05` up to `r=0.95`.
```
settings_tmp = {'large_standard_deviation': settings_comprehensive['large_standard_deviation']}
X_tsfresh = extract_features(df, column_id="id", default_fc_parameters=settings_tmp)
X_tsfresh.columns
```
If you now want to change the parameters for a specific feature calculator, all you need to do is to change the dictionary values.
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/examples/03 Feature Extraction Settings.ipynb | 0.533154 | 0.964489 | 03 Feature Extraction Settings.ipynb | pypi |
# Timeseries Forecasting
This notebook explains how to use `tsfresh` in time series forecasting.
Make sure you also read through the [documentation](https://tsfresh.readthedocs.io/en/latest/text/forecasting.html) to learn more on this feature.
It is basically a copy of the other time series forecasting notebook, but this time using more than one
stock.
This is conceptionally not much different, but the pandas multi-index magic is a bit advanced :-)
We will use the Ford, Apple and Google stocks.
Please find all documentation in the other notebook.
```
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from tsfresh import extract_features, select_features
from tsfresh.utilities.dataframe_functions import roll_time_series, make_forecasting_frame
from tsfresh.utilities.dataframe_functions import impute
try:
import pandas_datareader.data as web
except ImportError:
print("You need to install the pandas_datareader. Run pip install pandas_datareader.")
from sklearn.ensemble import AdaBoostRegressor
```
## Reading the data
```
df = web.DataReader(['F', "AAPL", "GOOGL"], 'stooq')["High"]
df.head()
plt.figure(figsize=(15, 6))
df.plot(ax=plt.gca())
plt.show()
```
This time we need to make sure to preserve the stock symbol information while reordering:
```
df_melted = df.copy()
df_melted["date"] = df_melted.index
df_melted = df_melted.melt(id_vars="date", value_name="high").sort_values(["Symbols", "date"])
df_melted = df_melted[["Symbols", "date", "high"]]
df_melted.head()
```
## Create training data sample
```
df_rolled = roll_time_series(df_melted, column_id="Symbols", column_sort="date",
max_timeshift=20, min_timeshift=5)
df_rolled.head()
```
## Extract Features
```
X = extract_features(df_rolled.drop("Symbols", axis=1),
column_id="id", column_sort="date", column_value="high",
impute_function=impute, show_warnings=False)
X.head()
```
We make the data a bit easier to work with by giving them a multi-index instead of the tuple index:
```
# split up the two parts of the index and give them proper names
X = X.set_index([X.index.map(lambda x: x[0]), X.index.map(lambda x: x[1])], drop=True)
X.index.names = ["Symbols", "last_date"]
X.head()
```
Our `(AAPL, 2020-07-14 00:00:00)` is also in the data again:
```
X.loc["AAPL", pd.to_datetime('2020-07-14')]
```
Just to repeat: the features in this row were only calculated using the time series values of `AAPL` up to and including `2020-07-14` and the last 20 days.
## Prediction
The next line might look like magic if you are not used to pandas transformations, but what it does is:
for each stock symbol separately:
* sort by date
* take the high value
* shift 1 time step in the future
* bring into the same multi-index format as `X` above
```
y = df_melted.groupby("Symbols").apply(lambda x: x.set_index("date")["high"].shift(-1)).T.unstack()
```
Quick consistency test:
```
y["AAPL", pd.to_datetime("2020-07-13")], df.loc[pd.to_datetime("2020-07-14"), "AAPL"]
y = y[y.index.isin(X.index)]
X = X[X.index.isin(y.index)]
```
The splitting into train and test samples works in principle the same as with a single identifier, but this time we have a multi-index symbol-date, so the `loc` call looks a bit more complicated:
```
X_train = X.loc[(slice(None), slice(None, "2018")), :]
X_test = X.loc[(slice(None), slice("2019", "2020")), :]
y_train = y.loc[(slice(None), slice(None, "2018"))]
y_test = y.loc[(slice(None), slice("2019", "2020"))]
X_train_selected = select_features(X_train, y_train)
```
We are training a regressor for each of the stocks separately
```
adas = {stock: AdaBoostRegressor() for stock in ["AAPL", "F", "GOOGL"]}
for stock, ada in adas.items():
ada.fit(X_train_selected.loc[stock], y_train.loc[stock])
```
Now lets check again how good our prediction is:
```
X_test_selected = X_test[X_train_selected.columns]
y_pred = pd.concat({
stock: pd.Series(adas[stock].predict(X_test_selected.loc[stock]), index=X_test_selected.loc[stock].index)
for stock in adas.keys()
})
y_pred.index.names = ["Symbols", "last_date"]
plt.figure(figsize=(15, 6))
y.unstack("Symbols").plot(ax=plt.gca())
y_pred.unstack("Symbols").plot(ax=plt.gca(), legend=None, marker=".")
```
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/advanced/05 Timeseries Forecasting (multiple ids).ipynb | 0.557604 | 0.96862 | 05 Timeseries Forecasting (multiple ids).ipynb | pypi |
# Example of extracting features from dataframes with Datetime indices
Assuming that time-varying measurements are taken at regular intervals can be sufficient for many situations. However, for a large number of tasks it is important to take into account **when** a measurement is made. An example can be healthcare, where the interval between measurements of vital signs contains crucial information.
Tsfresh now supports calculator functions that use the index of the timeseries container in order to calculate the features. The only requirements for these function is that the index of the input dataframe is of type `pd.DatetimeIndex`. These functions are contained in the new class TimeBasedFCParameters.
Note that the behaviour of all other functions is unaffected. The settings parameter of `extract_features()` can contain both index-dependent functions and 'regular' functions.
```
import pandas as pd
from tsfresh.feature_extraction import extract_features
# TimeBasedFCParameters contains all functions that use the Datetime index of the timeseries container
from tsfresh.feature_extraction.settings import TimeBasedFCParameters
```
# Build a time series container with Datetime indices
Let's build a dataframe with a datetime index. The format must be with a `value` and a `kind` column, since each measurement has its own timestamp - i.e. measurements are not assumed to be simultaneous.
```
df = pd.DataFrame({"id": ["a", "a", "a", "a", "b", "b", "b", "b"],
"value": [1, 2, 3, 1, 3, 1, 0, 8],
"kind": ["temperature", "temperature", "pressure", "pressure",
"temperature", "temperature", "pressure", "pressure"]},
index=pd.DatetimeIndex(
['2019-03-01 10:04:00', '2019-03-01 10:50:00', '2019-03-02 00:00:00', '2019-03-02 09:04:59',
'2019-03-02 23:54:12', '2019-03-03 08:13:04', '2019-03-04 08:00:00', '2019-03-04 08:01:00']
))
df = df.sort_index()
df
```
Right now `TimeBasedFCParameters` only contains `linear_trend_timewise`, which performs a calculation of a linear trend, but using the time difference in hours between measurements in order to perform the linear regression. As always, you can add your own functions in `tsfresh/feature_extraction/feature_calculators.py`.
```
settings_time = TimeBasedFCParameters()
settings_time
```
We extract the features as usual, specifying the column value, kind, and id.
```
X_tsfresh = extract_features(df, column_id="id", column_value='value', column_kind='kind',
default_fc_parameters=settings_time)
X_tsfresh.head()
```
The output looks exactly, like usual. If we compare it with the 'regular' `linear_trend` feature calculator, we can see that the intercept, p and R values are the same, as we'd expect – only the slope is now different.
```
settings_regular = {'linear_trend': [
{'attr': 'pvalue'},
{'attr': 'rvalue'},
{'attr': 'intercept'},
{'attr': 'slope'},
{'attr': 'stderr'}
]}
X_tsfresh = extract_features(df, column_id="id", column_value='value', column_kind='kind',
default_fc_parameters=settings_regular)
X_tsfresh.head()
```
# Writing your own time-based feature calculators
Writing your own time-based feature calculators is no different from usual. Only two new properties must be set using the `@set_property` decorator:
1) `@set_property("input", "pd.Series")` tells the function that the input of the function is a `pd.Series` rather than a numpy array. This allows the index to be used.
2) `@set_property("index_type", pd.DatetimeIndex)` tells the function that the input is a DatetimeIndex, allowing it to perform calculations based on time datatypes.
For example, if we want to write a function that calculates the time between the first and last measurement, it could look something like this:
```python
@set_property("input", "pd.Series")
@set_property("index_type", pd.DatetimeIndex)
def timespan(x, param):
ix = x.index
# Get differences between the last timestamp and the first timestamp in seconds, then convert to hours.
times_seconds = (ix[-1] - ix[0]).total_seconds()
return times_seconds / float(3600)
```
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/advanced/feature_extraction_with_datetime_index.ipynb | 0.717012 | 0.991084 | feature_extraction_with_datetime_index.ipynb | pypi |
*tsfresh* returns a great number of features. Depending on the dynamics of the inspected time series, some of them may be highly correlated.
A common technique to deal with such highly correlated features are transformations such as a principal component analysis (PCA). This notebooks shows you how to perform a PCA on the extracted features.
```
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import pandas as pd
class PCAForPandas(PCA):
    """Small wrapper around sklearn's PCA estimator, including z-normalization,
    to make it compatible with pandas DataFrames.

    The column order is normalized by sorting, and the columns seen at fit time
    are remembered so later calls can be validated against them.

    NOTE(review): forwarding ``**kwargs`` to an sklearn estimator's ``__init__``
    breaks ``get_params``/``clone``; acceptable in a notebook, not in pipelines.
    """

    def __init__(self, **kwargs):
        self._z_scaler = StandardScaler()
        # Use zero-argument super() instead of super(self.__class__, self),
        # which recurses infinitely if this class is ever subclassed.
        super().__init__(**kwargs)
        self._X_columns = None

    def fit(self, X, y=None):
        """Normalize X and call the fit method of the base class with numpy arrays instead of pandas data frames."""
        X = self._prepare(X)
        self._z_scaler.fit(X.values, y)
        # Bug fix: StandardScaler.transform takes no target argument; the old
        # code passed ``y`` positionally (into the ``copy`` slot), which modern
        # scikit-learn rejects.
        z_data = self._z_scaler.transform(X.values)
        return super().fit(z_data, y)

    def fit_transform(self, X, y=None):
        """Call the fit and the transform method of this class."""
        X = self._prepare(X)
        self.fit(X, y)
        return self.transform(X, y)

    def transform(self, X, y=None):
        """Normalize X and call the transform method of the base class with numpy arrays instead of pandas data frames."""
        X = self._prepare(X)
        z_data = self._z_scaler.transform(X.values)
        transformed_ndarray = super().transform(z_data)

        pandas_df = pd.DataFrame(transformed_ndarray)
        pandas_df.columns = ["pca_{}".format(i) for i in range(len(pandas_df.columns))]

        return pandas_df

    def _prepare(self, X):
        """Check if the data is a pandas DataFrame and sorts the column names.

        :raise AttributeError: if pandas is not a DataFrame or the columns of the new X is not compatible with the
                               columns from the previous X data

        :return: the column-sorted DataFrame (a copy; the caller's frame is untouched)
        """
        if not isinstance(X, pd.DataFrame):
            raise AttributeError("X is not a pandas DataFrame")

        # Bug fix: sort a copy instead of mutating the caller's DataFrame in place.
        X = X.sort_index(axis=1)

        if self._X_columns is not None:
            if self._X_columns != list(X.columns):
                raise AttributeError("The columns of the new X is not compatible with the columns from the previous X data")
        else:
            self._X_columns = list(X.columns)

        return X
```
## Load robot failure example
Splits the data set in a train (1 <= id <= 87) and a test set (87 <= id <= 88). It is assumed that the selection process is done in the past (train) and features for future (test) data sets should be determined. The id 87 is overlapping so that the correctness of the procedure can be easily shown.
```
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, load_robot_execution_failures
from tsfresh.feature_extraction import extract_features
from tsfresh.feature_selection import select_features
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters, MinimalFCParameters, settings
download_robot_execution_failures()
df, y = load_robot_execution_failures()
df_train = df.iloc[(df.id <= 87).values]
y_train = y[0:-1]
df_test = df.iloc[(df.id >= 87).values]
y_test = y[-2:]
df.head()
```
# Train
## Extract train features
```
X_train = extract_features(df_train, column_id='id', column_sort='time', default_fc_parameters=MinimalFCParameters(),
impute_function=impute)
X_train.head()
```
## Select train features
```
X_train_filtered = select_features(X_train, y_train)
X_train_filtered.tail()
```
## Principal Component Analysis on train features
```
pca_train = PCAForPandas(n_components=4)
X_train_pca = pca_train.fit_transform(X_train_filtered)
# add index plus 1 to keep original index from robot example
X_train_pca.index += 1
X_train_pca.tail()
```
# Test
## Extract test features
Only the selected features from the train data are extracted.
```
X_test_filtered = extract_features(df_test, column_id='id', column_sort='time',
kind_to_fc_parameters=settings.from_columns(X_train_filtered.columns),
impute_function=impute)
X_test_filtered
```
## Principal Component Analysis on test features
The PCA components of the id 87 are the same as in the previous train PCA.
```
X_test_pca = pca_train.transform(X_test_filtered)
# reset index to keep original index from robot example
X_test_pca.index = [87, 88]
X_test_pca
```
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/advanced/perform-PCA-on-extracted-features.ipynb | 0.904362 | 0.973418 | perform-PCA-on-extracted-features.ipynb | pypi |
<h1><center> Estimating Friedrich's coefficients describing the deterministic dynamics of Langevin model</center></h1>
<center>Andreas W. Kempa-Liehr (Department of Engineering Science, University of Auckland)</center>
This notebook explains the friedrich_coefficients feature, which was inspired by the paper of Friedrich et al. (2000): *Extracting model equations from experimental data*. Physics Letters A 271, p. 217-222
The general idea is to assume a Langevin model for the dynamics of the time series $x(t)$
$$\dot{x}(t) = h(x(t)) + \mathcal{N}(0,R)$$
with $\dot{x}(t)$ denoting the temporal derivative, $h(x(t))$ the deterministic dynamics, and $\mathcal{N}(0,R)$ a Langevin force modelled as Gaussian white noise with standard deviation $R$.
Now, an estimate $\tilde{h}(x)$ of the deterministic dynamics can be computed by averaging $\dot{x}(t)$ for a specific interval $x(t)\in[x-\epsilon,x+\epsilon]$ with $|\epsilon|\ll 1$:
$$\left.\tilde{h}(x)\right|_{x\in[x-\epsilon,x+\epsilon]} \approx \frac{\sum\limits_{x(t)\in[x-\epsilon,x+\epsilon]} x(t+\Delta_t)-x(t)}{\Delta_t \sum\limits_{x(t)\in[x-\epsilon,x+\epsilon]} 1}.$$
Having a set of estimations $\{\tilde{h}(x_1),\tilde{h}(x_2),\ldots,\tilde{h}(x_n)\}$ with $x_1<x_2<\ldots<x_n$ at hand, Friedrich's coefficients are calculated by fitting a polynomial of order $m$ to these estimates.
In order to demonstrate this approach, the dynamics of a dissipative soliton before and after its drift-bifurcation is simulated (Liehr 2013: *Dissipative Solitons in Reaction-Diffusion Systems*. Springer, p. 164).
By applying the approach of Friedrich et al. for estimating the deterministic dynamics, the equilibrium velocity of the dissipative soliton is recovered.
```
from matplotlib import pylab as plt
import numpy as np
import seaborn as sbn
import pandas as pd
from tsfresh.examples.driftbif_simulation import velocity
%matplotlib inline
from tsfresh.feature_extraction import ComprehensiveFCParameters
from tsfresh.feature_extraction.feature_calculators import max_langevin_fixed_point, friedrich_coefficients
settings = ComprehensiveFCParameters()
default_params = settings['max_langevin_fixed_point'][0]
default = settings['friedrich_coefficients']
def friedrich_method(v, param):
    """Estimate and visualize the deterministic dynamics of a velocity time series.

    Following Friedrich et al. (2000), the acceleration (finite difference of the
    velocity) is binned by velocity quantiles, and a polynomial fitted by
    ``friedrich_coefficients`` recovers the deterministic part of the dynamics.

    :param v: simulated velocity time series; only the first column v[:, 0] is used
    :param param: parameter list forwarded to ``friedrich_coefficients``
    """
    # Pair each velocity sample with the subsequent increment (finite-difference acceleration).
    df = pd.DataFrame({'velocity': v[:-1,0], 'acceleration': np.diff(v[:,0])})
    # Bin the velocities into 30 equally populated quantile intervals.
    df['quantiles']=pd.qcut(df.velocity.values, 30)
    groups = df.groupby('quantiles')
    # Per-bin mean/std used for the error-bar plot below.
    result = pd.DataFrame({'a_mean': groups.acceleration.mean(),
                           'a_std': groups.acceleration.std(),
                           'v_mean': groups.velocity.mean(),
                           'v_std': groups.velocity.std()
                           })
    # Fitted polynomial of the deterministic dynamics; friedrich_coefficients
    # returns (name, value) pairs, so keep only the coefficient values.
    dynamics = friedrich_coefficients(v[:,0], param)
    dynamics = [d[1] for d in dynamics]
    # Largest fixed point of the fitted polynomial = estimated equilibrium velocity.
    # ``default_params`` is a notebook-level dict taken from ComprehensiveFCParameters.
    v0 = max_langevin_fixed_point(v[:,0], **default_params)

    # Upper panel: raw velocity trace with the equilibrium velocity marked in red.
    plt.subplot(2,1,1)
    plt.plot(v[:,0])
    plt.axhline(y=v0, color='r')
    plt.xlabel('time')
    plt.ylabel('velocity')

    # Active Brownian motion is given if the linear term of the dynamics is positive
    if dynamics[-2]>0:
        active='Active'
    else:
        active=''
    plt.title('{} Brownian Motion (largest equilibrium velocity in red)'.format(active))

    # Lower panel: binned acceleration vs. velocity with the fitted dynamics overlaid.
    plt.subplot(2,1,2)
    ax = plt.errorbar(result.v_mean,result.a_mean,
                      xerr=result.v_std,fmt='o')
    x = np.linspace(-0.004, 0.004, 201)
    print(dynamics)
    plt.plot(x, np.poly1d(dynamics)(x), label='estimated dynamics')
    plt.plot(v0,0.,'ro')
    plt.axvline(x=v0, color='r')
    plt.xlabel('mean velocity')
    plt.ylabel('mean acceleration')
```
# Beyond drift-bifurcation
```
ds = velocity(tau=3.8, delta_t=0.05, R=3e-4, seed=0)
v = ds.simulate(1000000, v0=np.zeros(1))
friedrich_method(v, default)
```
# Before drift-bifurcation
```
ds = velocity(tau=2./0.3-3.8, delta_t=0.05, R=3e-4, seed=0)
v = ds.simulate(1000000, v0=np.zeros(1))
friedrich_method(v, default)
```
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/advanced/friedrich_coefficients.ipynb | 0.670824 | 0.983955 | friedrich_coefficients.ipynb | pypi |
The Benjamini Yekutieli (BY) procedure is a multiple testing procedure that can be used to control the accumulation in type 1 errors when comparing multiple hypothesis at the same time.
In the tsfresh filtering the BY procedure is used to decide which features to keep and which to discard.
The method is based on a line, the so called rejection line, that is compared to the sequence of ordered p-values. In this notebook, we will visualize that rejection line.
```
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, load_robot_execution_failures
from tsfresh import defaults, extract_features
from tsfresh.feature_selection.relevance import calculate_relevance_table
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters
matplotlib.rcParams["figure.figsize"] = [16, 6]
matplotlib.rcParams["font.size"] = 14
matplotlib.style.use('seaborn-darkgrid')
```
## Parameter setting
```
FDR_LEVEL = defaults.FDR_LEVEL
HYPOTHESES_INDEPENDENT = defaults.HYPOTHESES_INDEPENDENT
```
## Load robot data
```
download_robot_execution_failures()
df, y = load_robot_execution_failures()
df.head()
```
## Extract Features
```
X = extract_features(df,
column_id='id', column_sort='time',
default_fc_parameters=ComprehensiveFCParameters(),
impute_function=impute)
# drop constant features
print(X.shape)
X = X.loc[:, X.apply(pd.Series.nunique) != 1]
print(X.shape)
```
## Calculate p-values and Benjamini-Yekutieli Procedure
tsfresh has implemented two different feature significance tests, the Mann-Whitney-U test and the Kolmogorov-Smirnov test. In the following, both of them are being illustrated to show a scientific report of the feature selection process and to give a comparison of the differences of both methods.
### Mann-Whitney-U
Run significance test with Mann-Whitney-U test. Returns the p-values of the features and whether they are rejected or not.
```
df_pvalues_mann = calculate_relevance_table(X, y, fdr_level=FDR_LEVEL, test_for_binary_target_real_feature='mann')
print("# total \t", len(df_pvalues_mann))
print("# relevant \t", (df_pvalues_mann["relevant"] == True).sum())
print("# irrelevant \t", (df_pvalues_mann["relevant"] == False).sum(),
"( # constant", (df_pvalues_mann["type"] == "const").sum(), ")")
df_pvalues_mann.head()
```
### Kolmogorov-Smirnov
Run significance test with Kolmogorov-Smirnov test. Returns the p-values of the features and whether they are rejected or not.
```
df_pvalues_smir = calculate_relevance_table(X, y, fdr_level=FDR_LEVEL, test_for_binary_target_real_feature='smir')
print("# total \t", len(df_pvalues_smir))
print("# relevant \t", (df_pvalues_smir["relevant"] == True).sum())
print("# irrelevant \t", (df_pvalues_smir["relevant"] == False).sum(),
"( # constant", (df_pvalues_smir["type"] == "const").sum(), ")")
df_pvalues_smir.head()
```
## Calculate rejection line
With the rejection line it is determined whether a feature is relevant or irrelevant.
```
def calc_rejection_line(df_pvalues, hypothesis_independent, fdr_level):
    """Compute the Benjamini-Hochberg/Yekutieli rejection line.

    :param df_pvalues: relevance table with a ``type`` column; rows of type
        "const" are excluded from the hypothesis count
    :param hypothesis_independent: if True use the BH line (no correction),
        otherwise apply the Benjamini-Yekutieli harmonic-number correction
    :param fdr_level: desired false discovery rate
    :return: list with one rejection threshold per ordered hypothesis
    """
    # Constant features do not count as tested hypotheses.
    num_tests = int((df_pvalues.type != "const").sum())

    if hypothesis_independent:
        correction = 1.0
    else:
        # BY correction factor: the m-th harmonic number H_m.
        correction = sum(1.0 / rank for rank in range(1, num_tests + 1))

    line = []
    for rank in range(1, num_tests + 1):
        line.append(fdr_level * rank / (num_tests * correction))
    return line
```
### Mann-Whitney-U
```
rejection_line_mann = calc_rejection_line(df_pvalues_mann, HYPOTHESES_INDEPENDENT, FDR_LEVEL)
```
### Kolmogorov-Smirnov
```
rejection_line_smir = calc_rejection_line(df_pvalues_smir, HYPOTHESES_INDEPENDENT, FDR_LEVEL)
```
## Plot ordered p-values and rejection line
In the plot, the p-values are ordered from low to high. Constant features (green points) are always irrelevant but are not considered for calculating the rejection line (red line).
For nice plotting, the p-values are divided in the three groups relevant, irrelevant and constant (which are also irrelevant).
### Mann-Whitney-U
```
df_pvalues_mann.index = pd.Series(range(0, len(df_pvalues_mann.index)))
df_pvalues_mann.p_value.where(df_pvalues_mann.relevant)\
.plot(style=".", label="relevant features")
df_pvalues_mann.p_value.where(~df_pvalues_mann.relevant & (df_pvalues_mann.type != "const"))\
.plot(style=".", label="irrelevant features")
df_pvalues_mann.p_value.fillna(1).where(df_pvalues_mann.type == "const")\
.plot(style=".", label="irrelevant (constant) features")
plt.plot(rejection_line_mann, label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Mann-Whitney-U")
plt.legend()
plt.plot()
```
### Kolmogorov-Smirnov
```
df_pvalues_smir.index = pd.Series(range(0, len(df_pvalues_smir.index)))
df_pvalues_smir.p_value.where(df_pvalues_smir.relevant)\
.plot(style=".", label="relevant features")
df_pvalues_smir.p_value.where(~df_pvalues_smir.relevant & (df_pvalues_smir.type != "const"))\
.plot(style=".", label="irrelevant features")
df_pvalues_smir.p_value.fillna(1).where(df_pvalues_smir.type == "const")\
.plot(style=".", label="irrelevant (constant) features")
plt.plot(rejection_line_smir, label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Kolmogorov-Smirnov")
plt.legend()
plt.plot()
```
## Plot zoomed ordered p-values and rejection line
Since the intersection of the ordered p-values and the rejection line is not clearly visible, a zoomed plot is provided.
### Mann-Whitney-U
```
last_rejected_index = (df_pvalues_mann["relevant"] == True).sum() - 1
margin = 20
a = max(last_rejected_index - margin, 0)
b = min(last_rejected_index + margin, len(df_pvalues_mann) - 1)
df_pvalues_mann[a:b].p_value.where(df_pvalues_mann[a:b].relevant)\
.plot(style=".", label="relevant features")
df_pvalues_mann[a:b].p_value.where(~df_pvalues_mann[a:b].relevant)\
.plot(style=".", label="irrelevant features")
plt.plot(np.arange(a, b), rejection_line_mann[a:b], label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Mann-Whitney-U")
plt.legend()
plt.plot()
```
### Kolmogorov-Smirnov
```
last_rejected_index = (df_pvalues_smir["relevant"] == True).sum() - 1
margin = 20
a = max(last_rejected_index - margin, 0)
b = min(last_rejected_index + margin, len(df_pvalues_smir) - 1)
df_pvalues_smir[a:b].p_value.where(df_pvalues_smir[a:b].relevant)\
.plot(style=".", label="relevant features")
df_pvalues_smir[a:b].p_value.where(~df_pvalues_smir[a:b].relevant)\
.plot(style=".", label="irrelevant features")
plt.plot(np.arange(a, b), rejection_line_smir[a:b], label="rejection line (FDR = " + str(FDR_LEVEL) + ")")
plt.xlabel("Feature #")
plt.ylabel("p-value")
plt.title("Kolmogorov-Smirnov")
plt.legend()
plt.plot()
```
| /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/notebooks/advanced/visualize-benjamini-yekutieli-procedure.ipynb | 0.41052 | 0.978156 | visualize-benjamini-yekutieli-procedure.ipynb | pypi |
import logging
import os
from builtins import map
import pandas as pd
import requests
_logger = logging.getLogger(__name__)
UCI_MLD_REF_MSG = (
"The example data could not be found. You need to download the Robot Execution Failures "
"LP1 Data Set from the UCI Machine Learning Repository. To do so, you can call the function "
"tsfresh.examples.robot_execution_failures.download_robot_execution_failures"
)
UCI_MLD_REF_URL = "https://raw.githubusercontent.com/MaxBenChrist/robot-failure-dataset/master/lp1.data.txt"
module_path = os.path.dirname(__file__)
data_file_name = os.path.join(module_path, "data", "robotfailure-mld", "lp1.data")
def download_robot_execution_failures(file_name=data_file_name):
    """
    Download the Robot Execution Failures LP1 Data Set[#1] from the UCI Machine Learning Repository [#2] and store it
    locally.

    :param file_name: target path for the downloaded data file
    :return: None

    Examples
    ========

    >>> from tsfresh.examples import download_robot_execution_failures
    >>> download_robot_execution_failures()
    """
    # A previous download already produced the file: nothing to do.
    if os.path.exists(file_name):
        _logger.warning(
            "You have already downloaded the Robot Execution Failures LP1 Data Set."
        )
        return

    target_dir = os.path.dirname(file_name)
    os.makedirs(target_dir, exist_ok=True)

    if not os.access(target_dir, os.W_OK):
        raise RuntimeError(
            "You don't have the necessary permissions to download the Robot Execution Failures LP1 Data "
            "Set into the module path. Consider installing the module in a virtualenv you "
            "own or run this function with appropriate permissions."
        )

    response = requests.get(UCI_MLD_REF_URL)
    if response.status_code != 200:
        raise RuntimeError(
            "Could not download the Robot Execution Failures LP1 Data Set from the UCI Machine Learning "
            "Repository. HTTP status code: {}".format(response.status_code)
        )

    with open(file_name, "w") as f:
        f.write(response.text)
def load_robot_execution_failures(multiclass=False, file_name=data_file_name):
    """
    Load the Robot Execution Failures LP1 Data Set[1].
    The time series are returned as one flat DataFrame together with a target vector.

    Examples
    ========

    >>> from tsfresh.examples import load_robot_execution_failures
    >>> df, y = load_robot_execution_failures()
    >>> print(df.shape)
    (1320, 8)

    :param multiclass: If True, return all target labels. The default returns only "normal" vs all other labels.
    :type multiclass: bool
    :return: time series data as :class:`pandas.DataFrame` and target vector as :class:`pandas.Series`
    :rtype: tuple
    """
    if not os.path.exists(file_name):
        raise RuntimeError(UCI_MLD_REF_MSG)

    targets = {}
    rows = []
    sample_id = 0
    timestep = 0

    with open(file_name) as f:
        for line in f:
            first_char = line[0]
            if first_char == "\t":
                # Measurement row: tab-separated integers after a leading tab.
                measurements = [int(token) for token in line.split("\t")[1:]]
                rows.append([sample_id, timestep] + measurements)
                timestep += 1
            elif first_char != "\n":
                # Header line: starts a new sample and names its target class.
                sample_id += 1
                timestep = 0
                label = line.strip()
                targets[sample_id] = label if multiclass else label == "normal"

    df = pd.DataFrame(
        rows, columns=["id", "time", "F_x", "F_y", "F_z", "T_x", "T_y", "T_z"]
    )
    y = pd.Series(targets)

    return df, y
import logging
import os
import shutil
from io import BytesIO
from zipfile import ZipFile
import pandas as pd
import requests
_logger = logging.getLogger(__name__)
module_path = os.path.dirname(__file__)
data_file_name = os.path.join(module_path, "data", "UCI HAR Dataset")
def download_har_dataset(folder_name=data_file_name):
    """
    Download human activity recognition dataset from UCI ML Repository and store it at /tsfresh/notebooks/data.

    :param folder_name: directory the zip archive is extracted into
    :return: None

    Examples
    ========

    >>> from tsfresh.examples import har_dataset
    >>> har_dataset.download_har_dataset()
    """
    zipurl = "https://github.com/MaxBenChrist/human-activity-dataset/blob/master/UCI%20HAR%20Dataset.zip?raw=True"

    # Bug fix: the old code always checked writability of the module path, even
    # when a custom folder_name was passed. Probe the nearest existing ancestor
    # of the actual target folder instead.
    probe = os.path.abspath(folder_name)
    while not os.path.exists(probe):
        parent = os.path.dirname(probe)
        if parent == probe:  # reached the filesystem root
            break
        probe = parent
    if not os.access(probe, os.W_OK):
        raise RuntimeError(
            "You don't have the necessary permissions to download the Human Activity Dataset "
            "Set into the module path. Consider installing the module in a virtualenv you "
            "own or run this function with appropriate permissions."
        )

    # Nothing to do if a previous download already extracted the archive.
    if os.path.exists(os.path.join(folder_name, "UCI HAR Dataset")):
        _logger.warning("You have already downloaded the Human Activity Data Set.")
        return

    os.makedirs(folder_name, exist_ok=True)

    r = requests.get(zipurl, stream=True)
    if r.status_code != 200:
        raise RuntimeError(
            "Could not download the Human Activity Data Set from GitHub."
            "HTTP status code: {}".format(r.status_code)
        )

    # Extract the archive straight from the in-memory response body.
    with ZipFile(BytesIO(r.content)) as zfile:
        zfile.extractall(path=folder_name)
def load_har_dataset(folder_name=data_file_name):
    """
    Load the training split of the ``body_acc_x`` inertial signal of the human
    activity recognition dataset.

    :param folder_name: directory the dataset was extracted into by ``download_har_dataset()``
    :return: one row per time series, one column per time step
    :rtype: pandas.DataFrame
    :raises OSError: if the dataset file has not been downloaded yet
    """
    data_file_name_dataset = os.path.join(
        folder_name,
        "UCI HAR Dataset",
        "train",
        "Inertial Signals",
        "body_acc_x_train.txt",
    )
    try:
        # sep=r"\s+" replaces delim_whitespace=True, which was deprecated in
        # pandas 2.2 and removed in pandas 3.0; the parsing is identical.
        return pd.read_csv(data_file_name_dataset, sep=r"\s+", header=None)
    except OSError:
        raise OSError(
            "File {} was not found. Have you downloaded the dataset with download_har_dataset() "
            "before?".format(data_file_name_dataset)
        )
def load_har_classes(folder_name=data_file_name):
    """
    Load the training activity labels of the human activity recognition dataset.

    :param folder_name: directory the dataset was extracted into by ``download_har_dataset()``
    :return: activity class id per time series
    :rtype: pandas.Series
    :raises OSError: if the dataset file has not been downloaded yet
    """
    data_file_name_classes = os.path.join(
        folder_name, "UCI HAR Dataset", "train", "y_train.txt"
    )
    try:
        # Bug fix: the squeeze=True keyword was removed from read_csv in
        # pandas 2.0 — use the DataFrame.squeeze method instead. sep=r"\s+"
        # replaces the deprecated delim_whitespace=True.
        return pd.read_csv(
            data_file_name_classes, sep=r"\s+", header=None
        ).squeeze("columns")
    except OSError:
        raise OSError(
            "File {} was not found. Have you downloaded the dataset with download_har_dataset() "
            "before?".format(data_file_name_classes)
        )
# Thanks to Andreas W. Kempa-Liehr for providing this snippet
import logging
import numpy as np
import pandas as pd
_logger = logging.getLogger(__name__)
class velocity:
    r"""
    Simulates the velocity of a dissipative soliton (kind of self organized particle) [6]_.

    The equilibrium velocity without noise (:math:`R=0`) for
    :math:`\tau > 1/\kappa_3` is :math:`\kappa_3^{3/2}\sqrt{(\tau - 1/\kappa_3)/Q}`;
    before the drift-bifurcation (:math:`\tau \le 1/\kappa_3`) the equilibrium
    velocity is zero.

    References
    ----------

    .. [6] Andreas Kempa-Liehr (2013, p. 159-170)
        Dynamics of Dissipative Soliton
        Dissipative Solitons in Reaction Diffusion Systems.
        Springer: Berlin


    >>> ds = velocity(tau=3.5) # Dissipative soliton with equilibrium velocity 1.5e-3
    >>> print(ds.label) # Discriminating before or beyond Drift-Bifurcation
    1

    # Equilibrium velocity
    >>> print(ds.deterministic)
    0.0015191090506254991

    # Simulated velocity as a time series with 20000 time steps being disturbed by Gaussian white noise
    >>> v = ds.simulate(20000)
    """

    def __init__(self, tau=3.8, kappa_3=0.3, Q=1950.0, R=3e-4, delta_t=0.05, seed=None):
        """
        :param tau: Bifurcation parameter determining the intrinsic velocity of the dissipative soliton,
                    which is zero for tau<=1.0/kappa_3 and np.sqrt(kappa_3**3/Q * (tau - 1.0/kappa_3)) otherwise
        :type tau: float
        :param kappa_3: Inverse bifurcation point.
        :type kappa_3: float
        :param Q: Shape parameter of dissipative soliton
        :type Q: float
        :param R: Noise amplitude
        :type R: float
        :param delta_t: temporal discretization (Euler step size)
        :type delta_t: float
        :param seed: if not None, seeds numpy's *global* random number generator
        :type seed: int
        """
        self.delta_t = delta_t
        self.kappa_3 = kappa_3
        self.Q = Q
        self.tau = tau
        # Precomputed Euler-step coefficients of the deterministic update
        # v_{t+1} = v_t * (1 + a - b * |v_t|^2), see __call__.
        self.a = self.delta_t * kappa_3 ** 2 * (tau - 1.0 / kappa_3)
        self.b = self.delta_t * Q / kappa_3
        # 1 beyond the drift-bifurcation (active Brownian motion), 0 before it.
        self.label = int(tau > 1.0 / kappa_3)
        # Noise amplitude scaled with sqrt(delta_t) (Euler-Maruyama scheme).
        self.c = np.sqrt(self.delta_t) * R
        # NOTE(review): self-assignment is redundant; kept to leave the code unchanged.
        self.delta_t = self.delta_t

        if seed is not None:
            np.random.seed(seed)

        # Equilibrium (deterministic) velocity of the soliton.
        if tau <= 1.0 / kappa_3:
            self.deterministic = 0.0
        else:
            self.deterministic = kappa_3 ** 1.5 * np.sqrt((tau - 1.0 / kappa_3) / Q)

    def __call__(self, v):
        """
        returns deterministic dynamic = acceleration (without noise)

        :param v: velocity vector at the current time step
        :rtype v: ndarray
        :return: velocity vector of next time step
        :return type: ndarray
        """
        # Deterministic Euler step: relaxation towards the equilibrium speed.
        return v * (1.0 + self.a - self.b * np.dot(v, v))

    def simulate(self, N, v0=np.zeros(2)):
        """
        :param N: number of time steps
        :type N: int
        :param v0: initial velocity vector (NOTE(review): mutable default argument;
                   safe here because it is never mutated, only referenced)
        :type v0: ndarray
        :return: time series of velocity vectors with shape (N, v0.shape[0])
        :rtype: ndarray
        """

        v = [v0]  # first value is initial condition
        n = N - 1  # Because we are returning the initial condition,
        # only (N-1) time steps are computed

        # Draw all Gaussian noise increments up front (one row per time step).
        gamma = np.random.randn(n, v0.size)
        for i in range(n):
            # Deterministic step plus scaled white noise.
            next_v = self.__call__(v[i]) + self.c * gamma[i]
            v.append(next_v)
        v_vec = np.array(v)
        return v_vec
def sample_tau(n=10, kappa_3=0.3, ratio=0.5, rel_increase=0.15):
    """
    Return list of control parameters

    :param n: number of samples
    :type n: int
    :param kappa_3: inverse bifurcation point
    :type kappa_3: float
    :param ratio: ratio (default 0.5) of samples before and beyond drift-bifurcation
    :type ratio: float
    :param rel_increase: relative increase from bifurcation point
    :type rel_increase: float
    :return: tau. List of sampled bifurcation parameter
    :rtype tau: list
    """
    assert 0 < ratio <= 1
    assert kappa_3 > 0
    assert 0 < rel_increase <= 1

    # Sampling window is centred around the bifurcation point according to `ratio`.
    bifurcation_point = 1.0 / kappa_3
    upper_bound = bifurcation_point * (1.0 + rel_increase)
    samples = bifurcation_point + (upper_bound - bifurcation_point) * (np.random.rand(n) - ratio)
    return samples.tolist()
def load_driftbif(n, length, m=2, classification=True, kappa_3=0.3, seed=False):
    """
    Simulates n time-series with length time steps each for the m-dimensional velocity of a dissipative soliton

    classification=True:
    target 0 means tau<=1/0.3, Dissipative Soliton with Brownian motion (purely noise driven)
    target 1 means tau> 1/0.3, Dissipative Soliton with Active Brownian motion (intrinsiv velocity with overlaid noise)

    classification=False:
    target is bifurcation parameter tau

    :param n: number of samples
    :type n: int
    :param length: length of the time series
    :type length: int
    :param m: number of spatial dimensions (default m=2) the dissipative soliton is propagating in
    :type m: int
    :param classification: distinguish between classification (default True) and regression target
    :type classification: bool
    :param kappa_3: inverse bifurcation parameter (default 0.3)
    :type kappa_3: float
    :param seed: random seed (default False)
    :type seed: float
    :return: X, y. Time series container and target vector
    :rtype X: pandas.DataFrame
    :rtype y: pandas.DataFrame

    NOTE(review): the default ``seed=False`` is forwarded to ``velocity``, where
    ``seed is not None`` holds, so every simulated series re-seeds numpy's global
    RNG via ``np.random.seed(False)`` (equivalent to seed 0). All n series would
    then share the same noise realization — confirm whether this is intended;
    pass ``seed=None`` for independent noise per series.
    """
    # todo: add ratio of classes
    if m > 2:
        logging.warning(
            "You set the dimension parameter for the dissipative soliton to m={}, however it is only"
            "properly defined for m=1 or m=2.".format(m)
        )

    # NOTE(review): 'id' shadows the builtin of the same name; kept as-is.
    # One row per (sample, dimension, time step): ids repeat length*m times.
    id = np.repeat(range(n), length * m)
    dimensions = list(np.repeat(range(m), length)) * n

    labels = list()
    values = list()

    # One bifurcation parameter tau per sample, drawn around the bifurcation point.
    ls_tau = sample_tau(n, kappa_3=kappa_3)

    for i, tau in enumerate(ls_tau):
        ds = velocity(tau=tau, kappa_3=kappa_3, seed=seed)
        if classification:
            labels.append(ds.label)
        else:
            labels.append(ds.tau)
        # Flatten the (length, m) trajectory dimension-major into one value column.
        values.append(ds.simulate(length, v0=np.zeros(m)).transpose().flatten())

    # Shared time axis for all samples (delta_t taken from the last simulation).
    time = np.stack([ds.delta_t * np.arange(length)] * n * m).flatten()

    df = pd.DataFrame(
        {
            "id": id,
            "time": time,
            "value": np.stack(values).flatten(),
            "dimension": dimensions,
        }
    )
    y = pd.Series(labels)
    y.index = range(n)

    return df, y
import logging
import warnings
from collections.abc import Iterable
import pandas as pd
from tsfresh import defaults
from tsfresh.feature_extraction import feature_calculators
from tsfresh.feature_extraction.data import to_tsdata
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters
from tsfresh.utilities import profiling
from tsfresh.utilities.distribution import (
ApplyDistributor,
DistributorBaseClass,
MapDistributor,
MultiprocessingDistributor,
)
from tsfresh.utilities.string_manipulation import convert_to_output_format
_logger = logging.getLogger(__name__)
def extract_features(
    timeseries_container,
    default_fc_parameters=None,
    kind_to_fc_parameters=None,
    column_id=None,
    column_sort=None,
    column_kind=None,
    column_value=None,
    chunksize=defaults.CHUNKSIZE,
    n_jobs=defaults.N_PROCESSES,
    show_warnings=defaults.SHOW_WARNINGS,
    disable_progressbar=defaults.DISABLE_PROGRESSBAR,
    impute_function=defaults.IMPUTE_FUNCTION,
    profile=defaults.PROFILING,
    profiling_filename=defaults.PROFILING_FILENAME,
    profiling_sorting=defaults.PROFILING_SORTING,
    distributor=None,
    pivot=True,
):
    """
    Extract time series features and return them as a :class:`pandas.DataFrame`.

    The input is either a single :class:`pandas.DataFrame` holding all time
    series or a dictionary of :class:`pandas.DataFrame` objects, one per kind
    of time series. Which features (and with which parameters) are calculated
    is controlled by ``default_fc_parameters`` /  ``kind_to_fc_parameters``;
    see :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters`
    for the full list. For the accepted data layouts and the meaning of the
    ``column_*`` arguments please see :ref:`data-formats-label`.

    Examples
    ========

    >>> from tsfresh.examples import load_robot_execution_failures
    >>> from tsfresh import extract_features
    >>> df, _ = load_robot_execution_failures()
    >>> X = extract_features(df, column_id='id', column_sort='time')

    :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a
            dictionary of pandas.DataFrames.
    :type timeseries_container: pandas.DataFrame or dict

    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated.
    :type default_fc_parameters: dict

    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as
            ``default_fc_parameters``. If a kind is a key here, its value is used instead of
            ``default_fc_parameters``; kinds without an entry here are ignored.
    :type kind_to_fc_parameters: dict

    :param column_id: The name of the id column to group by. Please see :ref:`data-formats-label`.
    :type column_id: str

    :param column_sort: The name of the sort column. Please see :ref:`data-formats-label`.
    :type column_sort: str

    :param column_kind: The name of the column keeping record on the kind of the value.
        Please see :ref:`data-formats-label`.
    :type column_kind: str

    :param column_value: The name for the column keeping the value itself. Please see :ref:`data-formats-label`.
    :type column_value: str

    :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
    :type n_jobs: int

    :param chunksize: The size of one chunk that is submitted to the worker process for the
        parallelisation, where one chunk is a single time series for one id and one kind.
        If None, heuristics (depending on the distributor) choose the chunksize. If you
        run out of memory, try the dask distributor and a smaller chunksize.
    :type chunksize: None or int

    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool

    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool

    :param impute_function: None, if no imputing should happen or the function to call for
        imputing the result dataframe. Imputing will never happen on the input data.
    :type impute_function: None or callable

    :param profile: Turn on profiling during feature extraction
    :type profile: bool

    :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for
           more information)
    :type profiling_sorting: basestring

    :param profiling_filename: Where to save the profiling results.
    :type profiling_filename: basestring

    :param distributor: Advanced parameter: set this to a class name that you want to use as a
             distributor. See the utilities/distribution.py for more information. Leave to None, if you want
             TSFresh to choose the best distributor.
    :type distributor: class

    :return: The (maybe imputed) DataFrame containing extracted features.
    :rtype: pandas.DataFrame
    """
    # Use the full default feature set when the user supplied no settings at
    # all; an empty default when only kind-specific settings were given (so
    # kinds without an entry in kind_to_fc_parameters get no features).
    if default_fc_parameters is None:
        if kind_to_fc_parameters is None:
            default_fc_parameters = ComprehensiveFCParameters()
        else:
            default_fc_parameters = {}

    # Optional profiling of the whole extraction run (advanced feature).
    if profile:
        profiler = profiling.start_profiling()

    with warnings.catch_warnings():
        warnings.simplefilter("ignore" if not show_warnings else "default")

        result = _do_extraction(
            df=timeseries_container,
            column_id=column_id,
            column_value=column_value,
            column_kind=column_kind,
            column_sort=column_sort,
            n_jobs=n_jobs,
            chunk_size=chunksize,
            disable_progressbar=disable_progressbar,
            show_warnings=show_warnings,
            default_fc_parameters=default_fc_parameters,
            kind_to_fc_parameters=kind_to_fc_parameters,
            distributor=distributor,
            pivot=pivot,
        )

    # Imputing only ever touches the result, never the input data.
    if impute_function is not None:
        impute_function(result)

    if profile:
        profiling.end_profiling(
            profiler, filename=profiling_filename, sorting=profiling_sorting
        )

    return result
def _do_extraction(
    df,
    column_id,
    column_value,
    column_kind,
    column_sort,
    default_fc_parameters,
    kind_to_fc_parameters,
    n_jobs,
    chunk_size,
    disable_progressbar,
    show_warnings,
    distributor,
    pivot,
):
    """
    Split the dataframe into single time series (one per id and kind) and run
    :func:`_do_extraction_on_chunk` on each of them via a distributor, then
    combine the results into one pandas DataFrame.

    Whether the work happens in parallel and whether a progress bar is shown
    depends on the given flags and the chosen distributor.

    :param df: The dataframe in the normalized format which is used for extraction.
    :type df: pd.DataFrame
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as
            ``default_fc_parameters``; per-kind overrides of the defaults.
    :type kind_to_fc_parameters: dict
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_kind: The name of the column keeping record on the kind of the value.
    :type column_kind: str
    :param column_value: The name for the column keeping the value itself.
    :type column_value: str
    :param chunk_size: The size of one chunk for the parallelization
    :type chunk_size: None or int
    :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
    :type n_jobs: int
    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool
    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool
    :param distributor: Advanced parameter: See the utilities/distribution.py for more information.
            Leave to None, if you want TSFresh to choose the best distributor.
    :type distributor: DistributorBaseClass
    :return: the extracted features
    :rtype: pd.DataFrame
    """
    data = to_tsdata(df, column_id, column_kind, column_value, column_sort)

    # Pick a sensible default distributor if the caller did not supply one:
    # dask-like (non-iterable) data uses apply, serial runs use a plain map
    # and everything else a multiprocessing pool.
    if distributor is None:
        if not isinstance(data, Iterable):
            distributor = ApplyDistributor(
                meta=[
                    (data.column_id, "int64"),
                    ("variable", "object"),
                    ("value", "float64"),
                ]
            )
        elif n_jobs in (0, 1):
            distributor = MapDistributor(
                disable_progressbar=disable_progressbar,
                progressbar_title="Feature Extraction",
            )
        else:
            distributor = MultiprocessingDistributor(
                n_workers=n_jobs,
                disable_progressbar=disable_progressbar,
                progressbar_title="Feature Extraction",
                show_warnings=show_warnings,
            )

    if not isinstance(distributor, DistributorBaseClass):
        raise ValueError("the passed distributor is not an DistributorBaseClass object")

    result = distributor.map_reduce(
        _do_extraction_on_chunk,
        data=data,
        chunk_size=chunk_size,
        function_kwargs=dict(
            default_fc_parameters=default_fc_parameters,
            kind_to_fc_parameters=kind_to_fc_parameters,
            show_warnings=show_warnings,
        ),
    )

    if pivot:
        return data.pivot(result)
    return result
def _do_extraction_on_chunk(
chunk, default_fc_parameters, kind_to_fc_parameters, show_warnings=True
):
"""
Main function of this module: use the feature calculators defined in the
default_fc_parameters or kind_to_fc_parameters parameters and extract all
features on the chunk.
The chunk consists of the chunk id, the chunk kind and the data (as a Series),
which is then converted to a numpy array - so a single time series.
Returned is a list of the extracted features. Each one is a tuple consisting of
{ the id of the chunk,
the feature name in the format <kind>__<feature>__<parameters>,
the numeric value of the feature or np.nan , }
The <parameters> are in the form described in :mod:`~tsfresh.utilities.string_manipulation`.
:param chunk: A tuple of sample_id, kind, data
:param default_fc_parameters: A dictionary of feature calculators.
:param kind_to_fc_parameters: A dictionary of fc_parameters for special kinds or None.
:param show_warnings: Surpress warnings (some feature calculators are quite verbose)
:return: A list of calculated features.
"""
sample_id, kind, data = chunk
if kind_to_fc_parameters and kind in kind_to_fc_parameters:
fc_parameters = kind_to_fc_parameters[kind]
else:
fc_parameters = default_fc_parameters
def _f():
for f_or_function_name, parameter_list in fc_parameters.items():
if callable(f_or_function_name):
func = f_or_function_name
else:
func = getattr(feature_calculators, f_or_function_name)
# If the function uses the index, pass is at as a pandas Series.
# Otherwise, convert to numpy array
if getattr(func, "input", None) == "pd.Series":
# If it has a required index type, check that the data has the right index type.
index_type = getattr(func, "index_type", None)
if index_type is not None:
try:
assert isinstance(data.index, index_type)
except AssertionError:
warnings.warn(
"{} requires the data to have a index of type {}. Results will "
"not be calculated".format(f_or_function_name, index_type)
)
continue
x = data
else:
x = data.values
if getattr(func, "fctype", None) == "combiner":
result = func(x, param=parameter_list)
else:
if parameter_list:
result = (
(convert_to_output_format(param), func(x, **param))
for param in parameter_list
)
else:
result = [("", func(x))]
for key, item in result:
feature_name = str(kind) + "__" + func.__name__
if key:
feature_name += "__" + str(key)
yield (sample_id, feature_name, item)
with warnings.catch_warnings():
if not show_warnings:
warnings.simplefilter("ignore")
else:
warnings.simplefilter("default")
return list(_f()) | /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/tsfresh/feature_extraction/extraction.py | 0.729423 | 0.385606 | extraction.py | pypi |
from builtins import range
from collections import UserDict
from inspect import getfullargspec
from itertools import product
import cloudpickle
import pandas as pd
from tsfresh.feature_extraction import feature_calculators
from tsfresh.utilities.string_manipulation import get_config_from_string
def from_columns(columns, columns_to_ignore=None):
    """
    Build a kind_to_fc_parameters mapping (kind name -> fc_parameters dict,
    i.e. feature calculator name -> parameter list) that extracts exactly the
    features named in ``columns``.

    Each column name is split at "__" into <kind>, <feature>, <parameters>;
    the feature is validated against the known calculators and its parameters
    (if any) are parsed and collected per kind.

    :param columns: containing the feature names
    :type columns: list of str
    :param columns_to_ignore: columns which do not contain tsfresh feature names
    :type columns_to_ignore: list of str

    :return: The kind_to_fc_parameters object ready to be used in the extract_features function.
    :rtype: dict
    """
    if columns_to_ignore is None:
        columns_to_ignore = []

    kind_to_fc_parameters = {}

    for col in columns:
        if col in columns_to_ignore:
            continue
        if not isinstance(col, str):
            raise TypeError("Column name {} should be a string or unicode".format(col))

        # Split according to our separator into <col_name>, <feature_name>, <feature_params>
        parts = col.split("__")
        if len(parts) == 1:
            raise ValueError(
                "Splitting of columnname {} resulted in only one part.".format(col)
            )

        kind, feature_name = parts[0], parts[1]

        if not hasattr(feature_calculators, feature_name):
            raise ValueError("Unknown feature name {}".format(feature_name))

        params_for_kind = kind_to_fc_parameters.setdefault(kind, {})

        config = get_config_from_string(parts)
        if not config:
            # Parameterless calculator.
            params_for_kind[feature_name] = None
        elif feature_name in params_for_kind:
            params_for_kind[feature_name].append(config)
        else:
            params_for_kind[feature_name] = [config]

    return kind_to_fc_parameters
class PickableSettings(UserDict):
    """Base object for all settings: a dict that stays picklable.

    Settings dictionaries may use functions as keys (user-specified feature
    calculators). Plain pickle often cannot serialize such functions, which
    breaks transport to workers in multiprocessing or multi-cloud setups.
    We therefore customize pickling so that the keys are encoded with
    cloudpickle (which handles far more callables) before the object itself
    is pickled - pickle only ever sees the already-encoded keys.
    """

    def __getstate__(self):
        """Called on pickling: encode every key via cloudpickle."""
        return {cloudpickle.dumps(key): value for key, value in self.items()}

    def __setstate__(self, state):
        """Called on un-pickling: decode every key via cloudpickle."""
        decoded = {cloudpickle.loads(key): value for key, value in state.items()}
        # UserDict stores its backing dictionary in the "data" attribute.
        self.__dict__.update(data=decoded)
# TODO: the docstrings of these classes are not completely up-to-date
class ComprehensiveFCParameters(PickableSettings):
    """
    Create a new ComprehensiveFCParameters instance. You have to pass this instance to the
    extract_feature instance.

    It is basically a dictionary (and also based on one), which is a mapping from
    string (the same names that are in the feature_calculators.py file) to a list of dictionary of parameters,
    which should be used when the function with this name is called.

    Only those strings (function names), that are keys in this dictionary, will be later used to extract
    features - so whenever you delete a key from this dict, you disable the calculation of this feature.

    You can use the settings object with

    >>> from tsfresh.feature_extraction import extract_features, ComprehensiveFCParameters
    >>> extract_features(df, default_fc_parameters=ComprehensiveFCParameters())

    to extract all features (which is the default nevertheless) or you change the ComprehensiveFCParameters
    object to other types (see below).
    """

    def __init__(self):
        name_to_param = {}

        # Start with every parameterless calculator: each callable in
        # feature_calculators that carries an "fctype" marker and takes only
        # the time series itself (exactly one positional argument).
        for name, func in feature_calculators.__dict__.items():
            if (
                callable(func)
                and hasattr(func, "fctype")
                and len(getfullargspec(func).args) == 1
            ):
                name_to_param[name] = None

        # Parameter grids for all calculators that take parameters.
        name_to_param.update(
            {
                "time_reversal_asymmetry_statistic": [
                    {"lag": lag} for lag in range(1, 4)
                ],
                "c3": [{"lag": lag} for lag in range(1, 4)],
                "cid_ce": [{"normalize": True}, {"normalize": False}],
                "symmetry_looking": [{"r": r * 0.05} for r in range(20)],
                "large_standard_deviation": [{"r": r * 0.05} for r in range(1, 20)],
                "quantile": [
                    {"q": q} for q in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9]
                ],
                "autocorrelation": [{"lag": lag} for lag in range(10)],
                "agg_autocorrelation": [
                    {"f_agg": s, "maxlag": 40} for s in ["mean", "median", "var"]
                ],
                "partial_autocorrelation": [{"lag": lag} for lag in range(10)],
                "number_cwt_peaks": [{"n": n} for n in [1, 5]],
                "number_peaks": [{"n": n} for n in [1, 3, 5, 10, 50]],
                "binned_entropy": [{"max_bins": max_bins} for max_bins in [10]],
                "index_mass_quantile": [
                    {"q": q} for q in [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9]
                ],
                "cwt_coefficients": [
                    {"widths": width, "coeff": coeff, "w": w}
                    for width in [(2, 5, 10, 20)]
                    for coeff in range(15)
                    for w in (2, 5, 10, 20)
                ],
                "spkt_welch_density": [{"coeff": coeff} for coeff in [2, 5, 8]],
                "ar_coefficient": [
                    {"coeff": coeff, "k": k} for coeff in range(10 + 1) for k in [10]
                ],
                "change_quantiles": [
                    {"ql": ql, "qh": qh, "isabs": b, "f_agg": f}
                    for ql in [0.0, 0.2, 0.4, 0.6, 0.8]
                    for qh in [0.2, 0.4, 0.6, 0.8, 1.0]
                    for b in [False, True]
                    for f in ["mean", "var"]
                    if ql < qh
                ],
                "fft_coefficient": [
                    {"coeff": k, "attr": a}
                    for a, k in product(["real", "imag", "abs", "angle"], range(100))
                ],
                "fft_aggregated": [
                    {"aggtype": s} for s in ["centroid", "variance", "skew", "kurtosis"]
                ],
                "value_count": [{"value": value} for value in [0, 1, -1]],
                "range_count": [
                    {"min": -1, "max": 1},
                    {"min": -1e12, "max": 0},
                    {"min": 0, "max": 1e12},
                ],
                "approximate_entropy": [
                    {"m": 2, "r": r} for r in [0.1, 0.3, 0.5, 0.7, 0.9]
                ],
                "friedrich_coefficients": [
                    {"coeff": coeff, "m": 3, "r": 30} for coeff in range(3 + 1)
                ],
                "max_langevin_fixed_point": [{"m": 3, "r": 30}],
                "linear_trend": [
                    {"attr": "pvalue"},
                    {"attr": "rvalue"},
                    {"attr": "intercept"},
                    {"attr": "slope"},
                    {"attr": "stderr"},
                ],
                "agg_linear_trend": [
                    {"attr": attr, "chunk_len": i, "f_agg": f}
                    for attr in ["rvalue", "intercept", "slope", "stderr"]
                    for i in [5, 10, 50]
                    for f in ["max", "min", "mean", "var"]
                ],
                "augmented_dickey_fuller": [
                    {"attr": "teststat"},
                    {"attr": "pvalue"},
                    {"attr": "usedlag"},
                ],
                "number_crossing_m": [{"m": 0}, {"m": -1}, {"m": 1}],
                "energy_ratio_by_chunks": [
                    {"num_segments": 10, "segment_focus": i} for i in range(10)
                ],
                "ratio_beyond_r_sigma": [
                    {"r": x} for x in [0.5, 1, 1.5, 2, 2.5, 3, 5, 6, 7, 10]
                ],
                "linear_trend_timewise": [
                    {"attr": "pvalue"},
                    {"attr": "rvalue"},
                    {"attr": "intercept"},
                    {"attr": "slope"},
                    {"attr": "stderr"},
                ],
                "count_above": [{"t": 0}],
                "count_below": [{"t": 0}],
                "lempel_ziv_complexity": [{"bins": x} for x in [2, 3, 5, 10, 100]],
                "fourier_entropy": [{"bins": x} for x in [2, 3, 5, 10, 100]],
                "permutation_entropy": [
                    {"tau": 1, "dimension": x} for x in [3, 4, 5, 6, 7]
                ],
                "query_similarity_count": [{"query": None, "threshold": 0.0}],
                "matrix_profile": [
                    {"threshold": 0.98, "feature": f}
                    for f in ["min", "max", "mean", "median", "25", "75"]
                ],
                # BUGFIX: this was previously a single dict literal with three
                # duplicate "number_of_maxima" keys, which Python collapses so
                # only {"number_of_maxima": 7} survived. Three separate
                # parameter sets were clearly intended.
                "mean_n_absolute_max": [
                    {"number_of_maxima": number_of_maxima}
                    for number_of_maxima in [3, 5, 7]
                ],
            }
        )

        super().__init__(name_to_param)
class MinimalFCParameters(ComprehensiveFCParameters):
    """
    A :class:`ComprehensiveFCParameters` variant that keeps only a small
    subset of calculators - those marked with a truthy ``minimal`` attribute.

    Use this class for quick tests of your setup before calculating all
    features, which could take some time depending on your data set size.

    You should use this object when calling the extract function, like so:

    >>> from tsfresh.feature_extraction import extract_features, MinimalFCParameters
    >>> extract_features(df, default_fc_parameters=MinimalFCParameters())
    """

    def __init__(self):
        super().__init__()

        # Drop every calculator that is not explicitly flagged as "minimal".
        for name, func in feature_calculators.__dict__.items():
            if name in self and not getattr(func, "minimal", False):
                del self[name]
class EfficientFCParameters(ComprehensiveFCParameters):
    """
    A :class:`ComprehensiveFCParameters` variant without the computationally
    expensive calculators - those marked with a ``high_comp_cost`` attribute
    are removed.

    You should use this object when calling the extract function, like so:

    >>> from tsfresh.feature_extraction import extract_features, EfficientFCParameters
    >>> extract_features(df, default_fc_parameters=EfficientFCParameters())
    """

    def __init__(self):
        super().__init__()

        # Collect, then drop, all calculators flagged as expensive.
        expensive = [
            name
            for name, func in feature_calculators.__dict__.items()
            if name in self and hasattr(func, "high_comp_cost")
        ]
        for name in expensive:
            del self[name]
class IndexBasedFCParameters(ComprehensiveFCParameters):
    """
    A :class:`ComprehensiveFCParameters` variant that keeps only calculators
    requiring a pd.Series as input - those with an ``input`` attribute equal
    to ``"pd.Series"``.
    """

    def __init__(self):
        super().__init__()

        # Drop every calculator that does not consume a pd.Series.
        for name, func in feature_calculators.__dict__.items():
            if name in self and getattr(func, "input", None) != "pd.Series":
                del self[name]
class TimeBasedFCParameters(ComprehensiveFCParameters):
    """
    A :class:`ComprehensiveFCParameters` variant that keeps only calculators
    requiring a DatetimeIndex - those with an ``index_type`` attribute equal
    to ``pd.DatetimeIndex``.
    """

    def __init__(self):
        super().__init__()

        # Drop every calculator that does not require a DatetimeIndex.
        for name, func in feature_calculators.__dict__.items():
            if name in self and getattr(func, "index_type", False) != pd.DatetimeIndex:
                del self[name]
import numpy as np
import pandas as pd
from tsfresh import defaults
from tsfresh.feature_selection.relevance import calculate_relevance_table
from tsfresh.utilities.dataframe_functions import check_for_nans_in_columns
def select_features(
X,
y,
test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
fdr_level=defaults.FDR_LEVEL,
hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
n_jobs=defaults.N_PROCESSES,
show_warnings=defaults.SHOW_WARNINGS,
chunksize=defaults.CHUNKSIZE,
ml_task="auto",
multiclass=False,
n_significant=1,
):
"""
Check the significance of all features (columns) of feature matrix X and return a possibly reduced feature matrix
only containing relevant features.
The feature matrix must be a pandas.DataFrame in the format:
+-------+-----------+-----------+-----+-----------+
| index | feature_1 | feature_2 | ... | feature_N |
+=======+===========+===========+=====+===========+
| A | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| B | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| ... | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| ... | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
| ... | ... | ... | ... | ... |
+-------+-----------+-----------+-----+-----------+
Each column will be handled as a feature and tested for its significance to the target.
The target vector must be a pandas.Series or numpy.array in the form
+-------+--------+
| index | target |
+=======+========+
| A | ... |
+-------+--------+
| B | ... |
+-------+--------+
| . | ... |
+-------+--------+
| . | ... |
+-------+--------+
and must contain all id's that are in the feature matrix. If y is a numpy.array without index, it is assumed
that y has the same order and length than X and the rows correspond to each other.
Examples
========
>>> from tsfresh.examples import load_robot_execution_failures
>>> from tsfresh import extract_features, select_features
>>> df, y = load_robot_execution_failures()
>>> X_extracted = extract_features(df, column_id='id', column_sort='time')
>>> X_selected = select_features(X_extracted, y)
:param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features.
It can contain both binary or real-valued features at the same time.
:type X: pandas.DataFrame
:param y: Target vector which is needed to test which features are relevant. Can be binary or real-valued.
:type y: pandas.Series or numpy.ndarray
:param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
(currently unused)
:type test_for_binary_target_binary_feature: str
:param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
:type test_for_binary_target_real_feature: str
:param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)
:type test_for_real_target_binary_feature: str
:param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
:type test_for_real_target_real_feature: str
:param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant
features among all created features.
:type fdr_level: float
:param hypotheses_independent: Can the significance of the features be assumed to be independent?
Normally, this should be set to False as the features are never
independent (e.g. mean and median)
:type hypotheses_independent: bool
:param n_jobs: Number of processes to use during the p-value calculation
:type n_jobs: int
:param show_warnings: Show warnings during the p-value calculation (needed for debugging of calculators).
:type show_warnings: bool
:param chunksize: The size of one chunk that is submitted to the worker
process for the parallelisation. Where one chunk is defined as
the data for one feature. If you set the chunksize
to 10, then it means that one task is to filter 10 features.
If it is set it to None, depending on distributor,
heuristics are used to find the optimal chunksize. If you get out of
memory exceptions, you can try it with the dask distributor and a
smaller chunksize.
:type chunksize: None or int
:param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
Defaults to `'auto'`, meaning the intended task is inferred from `y`.
If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
else regression.
:type ml_task: str
:param multiclass: Whether the problem is multiclass classification. This modifies the way in which features
are selected. Multiclass requires the features to be statistically significant for
predicting n_significant features.
:type multiclass: bool
:param n_significant: The number of classes for which features should be statistically significant predictors
to be regarded as 'relevant'. Only specify when multiclass=True
:type n_significant: int
:return: The same DataFrame as X, but possibly with reduced number of columns ( = features).
:rtype: pandas.DataFrame
:raises: ``ValueError`` when the target vector does not fit to the feature matrix
or `ml_task` is not one of `'auto'`, `'classification'` or `'regression'`.
"""
assert isinstance(X, pd.DataFrame), "Please pass features in X as pandas.DataFrame."
check_for_nans_in_columns(X)
assert isinstance(y, (pd.Series, np.ndarray)), (
"The type of target vector y must be one of: " "pandas.Series, numpy.ndarray"
)
assert len(y) > 1, "y must contain at least two samples."
assert len(X) == len(y), "X and y must contain the same number of samples."
assert (
len(set(y)) > 1
), "Feature selection is only possible if more than 1 label/class is provided"
if isinstance(y, pd.Series) and set(X.index) != set(y.index):
raise ValueError("Index of X and y must be identical if provided")
if isinstance(y, np.ndarray):
y = pd.Series(y, index=X.index)
relevance_table = calculate_relevance_table(
X,
y,
ml_task=ml_task,
multiclass=multiclass,
n_significant=n_significant,
n_jobs=n_jobs,
show_warnings=show_warnings,
chunksize=chunksize,
test_for_binary_target_real_feature=test_for_binary_target_real_feature,
fdr_level=fdr_level,
hypotheses_independent=hypotheses_independent,
)
relevant_features = relevance_table[relevance_table.relevant].feature
return X.loc[:, relevant_features] | /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/tsfresh/feature_selection/selection.py | 0.77081 | 0.455199 | selection.py | pypi |
import warnings
from functools import partial, reduce
from multiprocessing import Pool
import numpy as np
import pandas as pd
from statsmodels.stats.multitest import multipletests
from tsfresh import defaults
from tsfresh.feature_selection.significance_tests import (
target_binary_feature_binary_test,
target_binary_feature_real_test,
target_real_feature_binary_test,
target_real_feature_real_test,
)
from tsfresh.utilities.distribution import initialize_warnings_in_workers
def calculate_relevance_table(
    X,
    y,
    ml_task="auto",
    multiclass=False,
    n_significant=1,
    n_jobs=defaults.N_PROCESSES,
    show_warnings=defaults.SHOW_WARNINGS,
    chunksize=defaults.CHUNKSIZE,
    test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
    test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
    test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
    test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
    fdr_level=defaults.FDR_LEVEL,
    hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
):
    """
    Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`.
    The relevance table is calculated for the intended machine learning task `ml_task`.
    To accomplish this for each feature from the input pandas.DataFrame an univariate feature significance test
    is conducted. Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to
    decide which features to keep and which to delete.
    We are testing
    :math:`H_0` = the Feature is not relevant and should not be added
    against
    :math:`H_1` = the Feature is relevant and should be kept
    or in other words
    :math:`H_0` = Target and Feature are independent / the Feature has no influence on the target
    :math:`H_1` = Target and Feature are associated / dependent
    When the target is binary this becomes
    :math:`H_0 = \\left( F_{\\text{target}=1} = F_{\\text{target}=0} \\right)`
    :math:`H_1 = \\left( F_{\\text{target}=1} \\neq F_{\\text{target}=0} \\right)`
    Where :math:`F` is the distribution of the target.
    In the same way we can state the hypothesis when the feature is binary
    :math:`H_0 = \\left( T_{\\text{feature}=1} = T_{\\text{feature}=0} \\right)`
    :math:`H_1 = \\left( T_{\\text{feature}=1} \\neq T_{\\text{feature}=0} \\right)`
    Here :math:`T` is the distribution of the target.
    TODO: And for real valued?
    :param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features.
              It can contain both binary or real-valued features at the same time.
    :type X: pandas.DataFrame
    :param y: Target vector which is needed to test which features are relevant. Can be binary or real-valued.
    :type y: pandas.Series or numpy.ndarray
    :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
                    Defaults to `'auto'`, meaning the intended task is inferred from `y`.
                    If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
                    else regression.
    :type ml_task: str
    :param multiclass: Whether the problem is multiclass classification. This modifies the way in which features
                       are selected. Multiclass requires the features to be statistically significant for
                       predicting n_significant classes.
    :type multiclass: bool
    :param n_significant: The number of classes for which features should be statistically significant predictors
                          to be regarded as 'relevant'
    :type n_significant: int
    :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
                                                  (currently unused)
    :type test_for_binary_target_binary_feature: str
    :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
    :type test_for_binary_target_real_feature: str
    :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)
    :type test_for_real_target_binary_feature: str
    :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
    :type test_for_real_target_real_feature: str
    :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant
                      features among all created features.
    :type fdr_level: float
    :param hypotheses_independent: Can the significance of the features be assumed to be independent?
                                   Normally, this should be set to False as the features are never
                                   independent (e.g. mean and median)
    :type hypotheses_independent: bool
    :param n_jobs: Number of processes to use during the p-value calculation
    :type n_jobs: int
    :param show_warnings: Show warnings during the p-value calculation (needed for debugging of calculators).
    :type show_warnings: bool
    :param chunksize: The size of one chunk that is submitted to the worker
        process for the parallelisation. Where one chunk is defined as
        the data for one feature. If you set the chunksize
        to 10, then it means that one task is to filter 10 features.
        If it is set it to None, depending on distributor,
        heuristics are used to find the optimal chunksize. If you get out of
        memory exceptions, you can try it with the dask distributor and a
        smaller chunksize.
    :type chunksize: None or int
    :return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance
             of this particular feature. The DataFrame has the columns
             "feature",
             "type" (binary, real or const),
             "p_value" (the significance of this feature as a p-value, lower means more significant)
             "relevant" (True if the Benjamini Hochberg procedure rejected the null hypothesis [the feature is
             not relevant] for this feature).
             If the problem is `multiclass` with n classes, the DataFrame will contain n
             columns named "p_value_CLASSID" instead of the "p_value" column.
             `CLASSID` refers here to the different values set in `y`.
             There will also be n columns named `relevant_CLASSID`, indicating whether
             the feature is relevant for that class.
    :rtype: pandas.DataFrame
    """
    # Make sure X and y both have the exact same indices
    y = y.sort_index()
    X = X.sort_index()
    assert list(y.index) == list(X.index), "The index of X and y need to be the same"
    if ml_task not in ["auto", "classification", "regression"]:
        raise ValueError(
            "ml_task must be one of: 'auto', 'classification', 'regression'"
        )
    elif ml_task == "auto":
        ml_task = infer_ml_task(y)
    if multiclass:
        assert (
            ml_task == "classification"
        ), "ml_task must be classification for multiclass problem"
        assert (
            len(y.unique()) >= n_significant
        ), "n_significant must not exceed the total number of classes"
        if len(y.unique()) <= 2:
            warnings.warn(
                "Two or fewer classes, binary feature selection will be used (multiclass = False)"
            )
            multiclass = False
    with warnings.catch_warnings():
        if not show_warnings:
            warnings.simplefilter("ignore")
        else:
            warnings.simplefilter("default")
        # Sequential execution uses the builtin map; otherwise a process pool
        # distributes the per-feature significance tests.
        if n_jobs == 0 or n_jobs == 1:
            map_function = map
        else:
            pool = Pool(
                processes=n_jobs,
                initializer=initialize_warnings_in_workers,
                initargs=(show_warnings,),
            )
            map_function = partial(pool.map, chunksize=chunksize)
        relevance_table = pd.DataFrame(index=pd.Series(X.columns, name="feature"))
        relevance_table["feature"] = relevance_table.index
        relevance_table["type"] = pd.Series(
            map_function(
                get_feature_type, [X[feature] for feature in relevance_table.index]
            ),
            index=relevance_table.index,
        )
        table_real = relevance_table[relevance_table.type == "real"].copy()
        table_binary = relevance_table[relevance_table.type == "binary"].copy()
        # Constant features can never be relevant; np.nan marks that no
        # significance test was run for them. (np.NaN was removed in NumPy 2.0,
        # np.nan is the supported spelling.)
        table_const = relevance_table[relevance_table.type == "constant"].copy()
        table_const["p_value"] = np.nan
        table_const["relevant"] = False
        if not table_const.empty:
            warnings.warn(
                "[test_feature_significance] Constant features: {}".format(
                    ", ".join(map(str, table_const.feature))
                ),
                RuntimeWarning,
            )
        # If every feature is constant, nothing has to be tested: clean up the
        # pool (it was created whenever n_jobs was not 0 or 1) and return early.
        if len(table_const) == len(relevance_table):
            if n_jobs < 0 or n_jobs > 1:
                pool.close()
                pool.terminate()
                pool.join()
            return table_const
        if ml_task == "classification":
            tables = []
            # One-vs-rest: test each class label separately against a binary target.
            for label in y.unique():
                _test_real_feature = partial(
                    target_binary_feature_real_test,
                    y=(y == label),
                    test=test_for_binary_target_real_feature,
                )
                _test_binary_feature = partial(
                    target_binary_feature_binary_test, y=(y == label)
                )
                tmp = _calculate_relevance_table_for_implicit_target(
                    table_real,
                    table_binary,
                    X,
                    _test_real_feature,
                    _test_binary_feature,
                    hypotheses_independent,
                    fdr_level,
                    map_function,
                )
                if multiclass:
                    # Keep per-class columns apart: p_value_<label>, relevant_<label>.
                    tmp = tmp.reset_index(drop=True)
                    tmp.columns = tmp.columns.map(
                        lambda x: x + "_" + str(label)
                        if x != "feature" and x != "type"
                        else x
                    )
                tables.append(tmp)
            if multiclass:
                relevance_table = reduce(
                    lambda left, right: pd.merge(
                        left, right, on=["feature", "type"], how="outer"
                    ),
                    tables,
                )
                # A feature is relevant if it is significant for at least
                # n_significant of the classes.
                relevance_table["n_significant"] = relevance_table.filter(
                    regex="^relevant_", axis=1
                ).sum(axis=1)
                relevance_table["relevant"] = (
                    relevance_table["n_significant"] >= n_significant
                )
                relevance_table.index = relevance_table["feature"]
            else:
                relevance_table = combine_relevance_tables(tables)
        elif ml_task == "regression":
            _test_real_feature = partial(target_real_feature_real_test, y=y)
            _test_binary_feature = partial(target_real_feature_binary_test, y=y)
            relevance_table = _calculate_relevance_table_for_implicit_target(
                table_real,
                table_binary,
                X,
                _test_real_feature,
                _test_binary_feature,
                hypotheses_independent,
                fdr_level,
                map_function,
            )
        if n_jobs < 0 or n_jobs > 1:
            pool.close()
            pool.terminate()
            pool.join()
        # set constant features to be irrelevant for all classes in multiclass case
        if multiclass:
            for column in relevance_table.filter(regex="^relevant_", axis=1).columns:
                table_const[column] = False
            table_const["n_significant"] = 0
            table_const.drop(columns=["p_value"], inplace=True)
        relevance_table = pd.concat([relevance_table, table_const], axis=0)
        if sum(relevance_table["relevant"]) == 0:
            warnings.warn(
                "No feature was found relevant for {} for fdr level = {} (which corresponds to the maximal "
                "percentage of irrelevant features), consider using a higher fdr level or adding other features.".format(
                    ml_task, fdr_level
                ),
                RuntimeWarning,
            )
    return relevance_table
def _calculate_relevance_table_for_implicit_target(
    table_real,
    table_binary,
    X,
    test_real_feature,
    test_binary_feature,
    hypotheses_independent,
    fdr_level,
    map_function,
):
    """
    Run the significance tests for all real and binary features and mark the
    relevant ones with the Benjamini-Hochberg/Yekutieli procedure.

    The p-values are written into ``table_real`` / ``table_binary`` in place;
    the combined table, sorted by ascending p-value, is returned.
    """
    # Fill in a p-value per feature, real features first, then binary ones.
    for table, significance_test in (
        (table_real, test_real_feature),
        (table_binary, test_binary_feature),
    ):
        feature_columns = [X[feature] for feature in table.index]
        table["p_value"] = pd.Series(
            map_function(significance_test, feature_columns), index=table.index
        )
    combined_table = pd.concat([table_real, table_binary])
    # "fdr_bh" assumes independent hypotheses, "fdr_by" is the conservative
    # variant for dependent ones.
    if hypotheses_independent:
        correction_method = "fdr_bh"
    else:
        correction_method = "fdr_by"
    combined_table["relevant"] = multipletests(
        combined_table.p_value, fdr_level, correction_method
    )[0]
    return combined_table.sort_values("p_value")
def infer_ml_task(y):
    """
    Infer the machine learning task to select for.
    The result will be either `'regression'` or `'classification'`.
    If the target vector only consists of integer typed values or objects, we assume the task is `'classification'`.
    Else `'regression'`.
    :param y: The target vector y.
    :type y: pandas.Series
    :return: 'classification' or 'regression'
    :rtype: str
    """
    # `np.object` was removed in NumPy 1.24; comparing against the builtin
    # `object` is the supported, equivalent spelling.
    if y.dtype.kind in np.typecodes["AllInteger"] or y.dtype == object:
        ml_task = "classification"
    else:
        ml_task = "regression"
    return ml_task
def combine_relevance_tables(relevance_tables):
    """
    Fold a list of relevance tables into a single one.

    A feature is relevant in the result if it was relevant in any input table
    (logical OR); its p-value is the minimum over all tables (missing values
    are treated as 1, i.e. maximally insignificant).

    :param relevance_tables: A list of relevance tables
    :type relevance_tables: List[pd.DataFrame]
    :return: The combined relevance table
    :rtype: pandas.DataFrame
    """

    def _merge_into(accumulated, other):
        # The first table of the list accumulates the result in place.
        accumulated.relevant |= other.relevant
        accumulated.p_value = accumulated.p_value.combine(other.p_value, min, 1)
        return accumulated

    return reduce(_merge_into, relevance_tables)
def get_feature_type(feature_column):
    """
    Classify a feature column as "constant", "binary" or "real".

    "binary" means that exactly two distinct values occur in the column;
    a single distinct value makes it "constant", anything else "real".

    :param feature_column: The feature column
    :type feature_column: pandas.Series
    :return: 'constant', 'binary' or 'real'
    """
    distinct_values = set(feature_column.values)
    if len(distinct_values) == 1:
        return "constant"
    if len(distinct_values) == 2:
        return "binary"
    return "real"
import pandas as pd
from tsfresh import defaults
from tsfresh.feature_extraction import extract_features
from tsfresh.feature_selection import select_features
from tsfresh.utilities.dataframe_functions import (
get_ids,
impute,
restrict_input_to_index,
)
def extract_relevant_features(
    timeseries_container,
    y,
    X=None,
    default_fc_parameters=None,
    kind_to_fc_parameters=None,
    column_id=None,
    column_sort=None,
    column_kind=None,
    column_value=None,
    show_warnings=defaults.SHOW_WARNINGS,
    disable_progressbar=defaults.DISABLE_PROGRESSBAR,
    profile=defaults.PROFILING,
    profiling_filename=defaults.PROFILING_FILENAME,
    profiling_sorting=defaults.PROFILING_SORTING,
    test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
    test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
    test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
    test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
    fdr_level=defaults.FDR_LEVEL,
    hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
    n_jobs=defaults.N_PROCESSES,
    distributor=None,
    chunksize=defaults.CHUNKSIZE,
    ml_task="auto",
):
    """
    High level convenience function to extract time series features from `timeseries_container`. Then return feature
    matrix `X` possibly augmented with relevant features with respect to target vector `y`.
    For more details see the documentation of :func:`~tsfresh.feature_extraction.extraction.extract_features` and
    :func:`~tsfresh.feature_selection.selection.select_features`.
    Examples
    ========
    >>> from tsfresh.examples import load_robot_execution_failures
    >>> from tsfresh import extract_relevant_features
    >>> df, y = load_robot_execution_failures()
    >>> X = extract_relevant_features(df, y, column_id='id', column_sort='time')
    :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a
            dictionary of pandas.DataFrames.
            See :func:`~tsfresh.feature_extraction.extraction.extract_features`.
    :param X: A DataFrame containing additional features
    :type X: pandas.DataFrame
    :param y: The target vector
    :type y: pandas.Series
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters.
    :type kind_to_fc_parameters: dict
    :param column_id: The name of the id column to group by. Please see :ref:`data-formats-label`.
    :type column_id: str
    :param column_sort: The name of the sort column. Please see :ref:`data-formats-label`.
    :type column_sort: str
    :param column_kind: The name of the column keeping record on the kind of the value.
        Please see :ref:`data-formats-label`.
    :type column_kind: str
    :param column_value: The name for the column keeping the value itself. Please see :ref:`data-formats-label`.
    :type column_value: str
    :param chunksize: The size of one chunk that is submitted to the worker
        process for the parallelisation. Where one chunk is defined as a
        singular time series for one id and one kind. If you set the chunksize
        to 10, then it means that one task is to calculate all features for 10
        time series. If it is set it to None, depending on distributor,
        heuristics are used to find the optimal chunksize. If you get out of
        memory exceptions, you can try it with the dask distributor and a
        smaller chunksize.
    :type chunksize: None or int
    :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
    :type n_jobs: int
    :param distributor: Advanced parameter: set this to a class name that you want to use as a
             distributor. See the utilities/distribution.py for more information. Leave to None, if you want
             TSFresh to choose the best distributor.
    :type distributor: class
    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool
    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool
    :param profile: Turn on profiling during feature extraction
    :type profile: bool
    :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for
           more information)
    :type profiling_sorting: basestring
    :param profiling_filename: Where to save the profiling results.
    :type profiling_filename: basestring
    :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
                                                  (currently unused)
    :type test_for_binary_target_binary_feature: str
    :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
    :type test_for_binary_target_real_feature: str
    :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)
    :type test_for_real_target_binary_feature: str
    :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
    :type test_for_real_target_real_feature: str
    :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant
                      features among all created features.
    :type fdr_level: float
    :param hypotheses_independent: Can the significance of the features be assumed to be independent?
                                   Normally, this should be set to False as the features are never
                                   independent (e.g. mean and median)
    :type hypotheses_independent: bool
    :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
                    Defaults to `'auto'`, meaning the intended task is inferred from `y`.
                    If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
                    else regression.
    :type ml_task: str
    :return: Feature matrix X, possibly extended with relevant time series features.
    """
    assert isinstance(
        y, pd.Series
    ), "y needs to be a pandas.Series, received type: {}.".format(type(y))
    assert (
        len(set(y)) > 1
    ), "Feature selection is only possible if more than 1 label/class is provided"
    # If an additional feature matrix X was given, only extract features for
    # the ids that actually appear in it.
    if X is not None:
        timeseries_container = restrict_input_to_index(
            timeseries_container, column_id, X.index
        )
    # The ids in the time series container and in y must match exactly, so the
    # extracted feature rows can be aligned with the target values.
    ids_container = get_ids(df_or_dict=timeseries_container, column_id=column_id)
    ids_y = set(y.index)
    if ids_container != ids_y:
        if len(ids_container - ids_y) > 0:
            raise ValueError(
                "The following ids are in the time series container but are missing in y: "
                "{}".format(ids_container - ids_y)
            )
        if len(ids_y - ids_container) > 0:
            raise ValueError(
                "The following ids are in y but are missing inside the time series container: "
                "{}".format(ids_y - ids_container)
            )
    # Step 1: extract all configured features. NaNs are imputed
    # (impute_function=impute) so that the significance tests in the selection
    # step can handle every column.
    X_ext = extract_features(
        timeseries_container,
        default_fc_parameters=default_fc_parameters,
        kind_to_fc_parameters=kind_to_fc_parameters,
        show_warnings=show_warnings,
        disable_progressbar=disable_progressbar,
        profile=profile,
        profiling_filename=profiling_filename,
        profiling_sorting=profiling_sorting,
        n_jobs=n_jobs,
        column_id=column_id,
        column_sort=column_sort,
        column_kind=column_kind,
        column_value=column_value,
        distributor=distributor,
        impute_function=impute,
    )
    # Step 2: keep only the features that are statistically relevant for y.
    X_sel = select_features(
        X_ext,
        y,
        test_for_binary_target_binary_feature=test_for_binary_target_binary_feature,
        test_for_binary_target_real_feature=test_for_binary_target_real_feature,
        test_for_real_target_binary_feature=test_for_real_target_binary_feature,
        test_for_real_target_real_feature=test_for_real_target_real_feature,
        fdr_level=fdr_level,
        hypotheses_independent=hypotheses_independent,
        n_jobs=n_jobs,
        show_warnings=show_warnings,
        chunksize=chunksize,
        ml_task=ml_task,
    )
    if X is None:
        X = X_sel
    else:
        # Augment the user-provided feature matrix with the selected features.
        X = pd.merge(X, X_sel, left_index=True, right_index=True, how="left")
    return X
from functools import partial
import pandas as pd
from tsfresh.feature_extraction.extraction import _do_extraction_on_chunk
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters
def _feature_extraction_on_chunk_helper(
    df,
    column_id,
    column_kind,
    column_sort,
    column_value,
    default_fc_parameters,
    kind_to_fc_parameters,
):
    """
    Wrap ``_do_extraction_on_chunk`` so that it accepts a plain dataframe group
    and returns a pandas dataframe with the columns ``column_id``, "variable"
    and "value".

    Used by the dask and spark convenience bindings; see those functions for
    the parameter descriptions.
    """
    # Fall back to the full calculator set only when no settings at all were
    # given; an explicit kind_to_fc_parameters alone means "nothing by default".
    if default_fc_parameters is None:
        if kind_to_fc_parameters is None:
            default_fc_parameters = ComprehensiveFCParameters()
        else:
            default_fc_parameters = {}

    if column_sort is not None:
        df = df.sort_values(column_sort)

    # A chunk is (id, kind, values) for exactly one time series.
    chunk = df[column_id].iloc[0], df[column_kind].iloc[0], df[column_value]
    extracted = _do_extraction_on_chunk(
        chunk,
        default_fc_parameters=default_fc_parameters,
        kind_to_fc_parameters=kind_to_fc_parameters,
    )
    extracted = pd.DataFrame(extracted, columns=[column_id, "variable", "value"])
    extracted["value"] = extracted["value"].astype("double")
    return extracted[[column_id, "variable", "value"]]
def dask_feature_extraction_on_chunk(
    df,
    column_id,
    column_kind,
    column_value,
    column_sort=None,
    default_fc_parameters=None,
    kind_to_fc_parameters=None,
):
    """
    Extract features on a grouped dask dataframe given the column names and the extraction settings.
    This wrapper function should only be used if you have a dask dataframe as input.
    All format handling (input and output) needs to be done before or after that.
    Examples
    ========
    For example if you want to extract features on the robot example dataframe (stored as csv):
    Import statements:
    >>> from dask import dataframe as dd
    >>> from tsfresh.convenience.bindings import dask_feature_extraction_on_chunk
    >>> from tsfresh.feature_extraction.settings import MinimalFCParameters
    Read in the data
    >>> df = dd.read_csv("robot.csv")
    Prepare the data into correct format.
    The format needs to be a grouped dataframe (grouped by time series id and feature kind),
    where each group chunk consists of a dataframe with exactly 4 columns: ``column_id``,
    ``column_kind``, ``column_sort`` and ``column_value``.
    You can find the description of the columns in :ref:`data-formats-label`.
    Please note: for this function to work you need to have all columns present!
    If necessary create the columns and fill them with dummy values.
    >>> df = df.melt(id_vars=["id", "time"],
    ...              value_vars=["F_x", "F_y", "F_z", "T_x", "T_y", "T_z"],
    ...              var_name="kind", value_name="value")
    >>> df_grouped = df.groupby(["id", "kind"])
    Call the feature extraction
    >>> features = dask_feature_extraction_on_chunk(df_grouped, column_id="id", column_kind="kind",
    ...                                             column_sort="time", column_value="value",
    ...                                             default_fc_parameters=MinimalFCParameters())
    Write out the data in a tabular format
    >>> features = features.categorize(columns=["variable"])
    >>> features = features.reset_index(drop=True) \\
    ...     .pivot_table(index="id", columns="variable", values="value", aggfunc="mean")
    >>> features.to_csv("output")
    :param df: A dask dataframe grouped by id and kind.
    :type df: dask.dataframe.groupby.DataFrameGroupBy
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters. This means
            that kinds, for which kind_to_fc_parameters does not have any entries, will be ignored by
            the feature selection.
    :type kind_to_fc_parameters: dict
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_sort: The name of the sort column.
    :type column_sort: str or None
    :param column_kind: The name of the column keeping record on the kind of the value.
    :type column_kind: str
    :param column_value: The name for the column keeping the value itself.
    :type column_value: str
    :return: A dask dataframe with the columns ``column_id``, "variable" and "value". The index is taken
             from the grouped dataframe.
    :rtype: dask.dataframe.DataFrame (id int64, variable object, value float64)
    """
    feature_extraction = partial(
        _feature_extraction_on_chunk_helper,
        column_id=column_id,
        column_kind=column_kind,
        column_sort=column_sort,
        column_value=column_value,
        default_fc_parameters=default_fc_parameters,
        kind_to_fc_parameters=kind_to_fc_parameters,
    )
    # ``meta`` declares the schema of the per-group result to dask.
    return df.apply(
        feature_extraction,
        meta=[(column_id, "int64"), ("variable", "object"), ("value", "float64")],
    )
def spark_feature_extraction_on_chunk(
    df,
    column_id,
    column_kind,
    column_value,
    column_sort=None,
    default_fc_parameters=None,
    kind_to_fc_parameters=None,
):
    """
    Extract features on a grouped spark dataframe given the column names and the extraction settings.
    This wrapper function should only be used if you have a spark dataframe as input.
    All format handling (input and output) needs to be done before or after that.
    Examples
    ========
    For example if you want to extract features on the robot example dataframe (stored as csv):
    Import statements:
    >>> from tsfresh.convenience.bindings import spark_feature_extraction_on_chunk
    >>> from tsfresh.feature_extraction.settings import MinimalFCParameters
    Read in the data
    >>> df = spark.read(...)
    Prepare the data into correct format.
    The format needs to be a grouped dataframe (grouped by time series id and feature kind),
    where each group chunk consists of a dataframe with exactly 4 columns: ``column_id``,
    ``column_kind``, ``column_sort`` and ``column_value``.
    You can find the description of the columns in :ref:`data-formats-label`.
    Please note: for this function to work you need to have all columns present!
    If necessary create the columns and fill them with dummy values.
    >>> df = ...
    >>> df_grouped = df.groupby(["id", "kind"])
    Call the feature extraction
    >>> features = spark_feature_extraction_on_chunk(df_grouped, column_id="id", column_kind="kind",
    ...                                              column_sort="time", column_value="value",
    ...                                              default_fc_parameters=MinimalFCParameters())
    Write out the data in a tabular format
    >>> features = features.groupby("id").pivot("variable").sum("value")
    >>> features.write.csv("output")
    :param df: A spark dataframe grouped by id and kind.
    :type df: pyspark.sql.group.GroupedData
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters.
            This means that kinds, for which kind_to_fc_parameters does not have any entries,
            will be ignored by the feature selection.
    :type kind_to_fc_parameters: dict
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_sort: The name of the sort column.
    :type column_sort: str or None
    :param column_kind: The name of the column keeping record on the kind of the value.
    :type column_kind: str
    :param column_value: The name for the column keeping the value itself.
    :type column_value: str
    :return: A spark dataframe with the columns ``column_id``, "variable" and "value".
    :rtype: pyspark.sql.DataFrame[id: bigint, variable: string, value: double]
    """
    # Imported lazily so the module can be used without a pyspark installation.
    from pyspark.sql.functions import PandasUDFType, pandas_udf
    feature_extraction = partial(
        _feature_extraction_on_chunk_helper,
        column_id=column_id,
        column_kind=column_kind,
        column_sort=column_sort,
        column_value=column_value,
        default_fc_parameters=default_fc_parameters,
        kind_to_fc_parameters=kind_to_fc_parameters,
    )
    # The UDF return schema must match the dataframe built by the helper.
    type_string = "{column_id} long, variable string, value double".format(
        column_id=column_id
    )
    # GROUPED_MAP applies the pandas function once per group.
    feature_extraction_udf = pandas_udf(type_string, PandasUDFType.GROUPED_MAP)(
        feature_extraction
    )
    return df.apply(feature_extraction_udf)
import argparse
import os
import sys
import pandas as pd
from tsfresh import extract_features
def _preprocess(df):
"""
given a DataFrame where records are stored row-wise, rearrange it
such that records are stored column-wise.
"""
df = df.stack()
df.index.rename(["id", "time"], inplace=True) # .reset_index()
df.name = "value"
df = df.reset_index()
return df
def main(console_args=None):
    """
    Console entry point: parse the command line arguments, read the input CSV,
    extract time series features and write them to an output CSV.

    :param console_args: The command line arguments to parse instead of
        ``sys.argv`` (useful for testing). If None, argparse falls back to
        ``sys.argv``.
    :type console_args: list[str] or None
    """
    parser = argparse.ArgumentParser(
        description="Extract features from time series stored in a CSV file and "
        "write them back into another CSV file. The time series in the CSV "
        "file should either have one of the dataframe-formats described in "
        "http://tsfresh.readthedocs.io/en/latest/text/data_formats.html, "
        "which means you have to supply the --csv-with-headers flag "
        "or should be in the form "
        "[time series 1 values ..., time series 2 values ...] "
        "where you should not add the --csv-with-headers flag. "
        "The CSV is expected to be space-separated."
    )
    parser.add_argument(
        "input_file_name", help="File name of the input CSV file to read in."
    )
    parser.add_argument(
        "--output-file-name",
        help="File name of the output CSV file to write to. "
        "Defaults to input_file_name.features.csv",
        default=None,
    )
    parser.add_argument(
        "--column-sort",
        help="Column name to be used to sort the rows. "
        "Only available when --csv-with-headers is enabled.",
        default=None,
    )
    parser.add_argument(
        "--column-kind",
        help="Column name where the kind column can be found."
        "Only available when --csv-with-headers is enabled.",
        default=None,
    )
    parser.add_argument(
        "--column-value",
        help="Column name where the values can be found."
        "Only available when --csv-with-headers is enabled.",
        default=None,
    )
    parser.add_argument(
        "--column-id",
        help="Column name where the ids can be found."
        "Only available when --csv-with-headers is enabled.",
        default=None,
    )
    parser.add_argument("--csv-with-headers", action="store_true", help="")
    # NOTE: a stray debug ``print(console_args)`` used to live here; it was
    # removed so the tool does not pollute stdout.
    args = parser.parse_args(console_args)
    # The column selection flags only make sense together with --csv-with-headers.
    if (
        args.column_id or args.column_kind or args.column_sort or args.column_value
    ) and (not args.csv_with_headers):
        raise AttributeError(
            "You can only pass in column-value, column-kind, column-id or column-sort if "
            "--csv-with-headers is enabled."
        )
    if args.csv_with_headers:
        column_kind = args.column_kind
        column_sort = args.column_sort
        column_value = args.column_value
        column_id = args.column_id
        header = 0
    else:
        # Headerless files use the fixed wide format and are reshaped below.
        column_kind = None
        column_sort = "time"
        column_value = "value"
        column_id = "id"
        header = None
    # Read in CSV file
    input_file_name = args.input_file_name
    df = pd.read_csv(input_file_name, delim_whitespace=True, header=header)
    if not args.csv_with_headers:
        df = _preprocess(df)
    df_features = extract_features(
        df,
        column_kind=column_kind,
        column_sort=column_sort,
        column_value=column_value,
        column_id=column_id,
    )
    # re-cast index from float to int
    df_features.index = df_features.index.astype("int")
    # write to disk
    default_out_file_name = os.path.splitext(input_file_name)[0] + ".features.csv"
    output_file_name = args.output_file_name or default_out_file_name
    df_features.to_csv(output_file_name)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
import json
from time import time
import b2luigi as luigi
import numpy as np
import pandas as pd
from tsfresh.feature_extraction import (
ComprehensiveFCParameters,
MinimalFCParameters,
extract_features,
)
class DataCreationTask(luigi.Task):
    """Create a CSV file of random time series data for benchmarking."""

    num_ids = luigi.IntParameter(default=100)
    time_series_length = luigi.IntParameter()
    random_seed = luigi.IntParameter()

    def output(self):
        yield self.add_to_output("data.csv")

    def run(self):
        # Seed once so runs with the same parameters are reproducible.
        np.random.seed(self.random_seed)
        length = self.time_series_length
        frames = (
            pd.DataFrame(
                {
                    "id": [sample_id] * length,
                    "time": range(length),
                    "value": np.random.randn(length),
                }
            )
            for sample_id in range(self.num_ids)
        )
        df = pd.concat(frames)
        with self._get_output_target("data.csv").open("w") as f:
            df.to_csv(f)
@luigi.requires(DataCreationTask)
class TimingTask(luigi.Task):
    """Time one tsfresh feature-extraction run for a single feature setting."""
    feature_parameter = luigi.DictParameter(hashed=True)
    n_jobs = luigi.IntParameter()
    try_number = luigi.IntParameter()

    def output(self):
        yield self.add_to_output("result.json")

    def run(self):
        data_target = self._get_input_targets("data.csv")[0]
        with data_target.open("r") as f:
            df = pd.read_csv(f)

        start = time()
        extract_features(
            df,
            column_id="id",
            column_sort="time",
            n_jobs=self.n_jobs,
            default_fc_parameters=self.feature_parameter,
            disable_progressbar=True,
        )
        elapsed = time() - start

        # feature_parameter holds exactly one calculator; pull out its name/params.
        feature_name = next(iter(self.feature_parameter.keys()))
        feature_params = self.feature_parameter[feature_name]
        # num_ids is presumably inherited from DataCreationTask via
        # @luigi.requires -- confirm against the b2luigi documentation.
        result_json = {
            "time": elapsed,
            "n_ids": self.num_ids,
            "n_jobs": self.n_jobs,
            "feature": feature_name,
            "number_parameters": len(feature_params) if feature_params else 0,
            "time_series_length": int((df["id"] == 0).sum()),
            "try_number": self.try_number,
        }
        with self._get_output_target("result.json").open("w") as f:
            json.dump(result_json, f)
@luigi.requires(DataCreationTask)
class FullTimingTask(luigi.Task):
    """Time a tsfresh run with the full default set of feature calculators."""
    n_jobs = luigi.IntParameter()

    def output(self):
        yield self.add_to_output("result.json")

    def run(self):
        data_target = self._get_input_targets("data.csv")[0]
        with data_target.open("r") as f:
            df = pd.read_csv(f)

        start = time()
        extract_features(
            df,
            column_id="id",
            column_sort="time",
            n_jobs=self.n_jobs,
            disable_progressbar=True,
        )
        elapsed = time() - start

        # num_ids is presumably inherited from DataCreationTask via
        # @luigi.requires -- confirm against the b2luigi documentation.
        result_json = {
            "time": elapsed,
            "n_ids": self.num_ids,
            "n_jobs": self.n_jobs,
            "time_series_length": int((df["id"] == 0).sum()),
        }
        with self._get_output_target("result.json").open("w") as f:
            json.dump(result_json, f)
class CombinerTask(luigi.Task):
    """Gather all timing results into a single results.csv file."""

    def complete(self):
        # Never considered done, so the combination step always re-runs and
        # picks up any newly produced results.
        return False

    def requires(self):
        settings = ComprehensiveFCParameters()
        for n_jobs in [0, 1, 4]:
            for length in [100, 500, 1000, 5000]:
                # Full-calculator baselines for a small and a large id count.
                yield FullTimingTask(
                    time_series_length=length,
                    n_jobs=n_jobs,
                    num_ids=10,
                    random_seed=42,
                )
                yield FullTimingTask(
                    time_series_length=length,
                    n_jobs=n_jobs,
                    num_ids=100,
                    random_seed=42,
                )
                # Per-feature timings: one large run plus three repeats on
                # smaller data to estimate variance.
                for feature_name in settings:
                    yield TimingTask(
                        feature_parameter={feature_name: settings[feature_name]},
                        time_series_length=length,
                        n_jobs=n_jobs,
                        num_ids=100,
                        try_number=0,
                        random_seed=42,
                    )
                    for try_number in range(3):
                        yield TimingTask(
                            feature_parameter={feature_name: settings[feature_name]},
                            n_jobs=n_jobs,
                            try_number=try_number,
                            num_ids=10,
                            time_series_length=length,
                            random_seed=42,
                        )

    def output(self):
        yield self.add_to_output("results.csv")

    def run(self):
        collected = []
        for result_target in self._get_input_targets("result.json"):
            with result_target.open("r") as f:
                collected.append(json.load(f))
        df = pd.DataFrame(collected)
        with self._get_output_target("results.csv").open("w") as f:
            df.to_csv(f)
if __name__ == "__main__":
luigi.set_setting("result_path", "results")
luigi.process(CombinerTask()) | /rtm-tsfresh-1.1.102.tar.gz/rtm-tsfresh-1.1.102/tsfresh/scripts/measure_execution_time.py | 0.446977 | 0.242183 | measure_execution_time.py | pypi |
import itertools
import math
import warnings
from collections.abc import Generator, Iterable
from functools import partial
from itertools import islice, repeat, takewhile
from multiprocessing import Pool
from tqdm import tqdm
from tsfresh.feature_extraction.data import TsData
def _function_with_partly_reduce(chunk_list, map_function, kwargs):
"""
Small helper function to call a function (map_function)
on a list of data chunks (chunk_list) and convert the results into
a flattened list.
This function is used to send chunks of data with a size larger than 1 to
the workers in parallel and process these on the worker.
:param chunk_list: A list of data chunks to process.
:type chunk_list: list
:param map_function: A function, which is called on each chunk in the list separately.
:type map_function: callable
:return: A list of the results of the function evaluated on each chunk and flattened.
:rtype: list
"""
kwargs = kwargs or {}
results = (map_function(chunk, **kwargs) for chunk in chunk_list)
results = list(itertools.chain.from_iterable(results))
return results
def initialize_warnings_in_workers(show_warnings):
    """
    Small helper function to initialize the warnings module in multiprocessing workers.

    On Windows, Python spawns fresh processes which do not inherit the warnings
    state, so warnings must be enabled/disabled before running computations.

    :param show_warnings: whether to show warnings or not.
    :type show_warnings: bool
    """
    # NOTE: a previous version called ``warnings.catch_warnings()`` here without
    # entering the returned context manager, which had no effect; the call was
    # removed. ``simplefilter`` resets the filter list as intended.
    if show_warnings:
        warnings.simplefilter("default")
    else:
        warnings.simplefilter("ignore")
class DistributorBaseClass:
    """
    Abstract base class for all distributors.

    A distributor evaluates a function (the ``map_function``) on a collection
    of data items. Depending on the concrete subclass this happens
    sequentially, in parallel on one machine, or on a cluster of nodes.
    """

    def map_reduce(
        self,
        map_function,
        data,
        function_kwargs=None,
        chunk_size=None,
        data_length=None,
    ):
        """
        Apply ``map_function`` to every element of ``data`` and return the
        flattened list of results. Concrete subclasses must implement this.

        :param map_function: a function to apply to each data item.
        :type map_function: callable
        :param data: the data to use in the calculation
        :type data: iterable
        :param function_kwargs: parameters for the map function
        :type function_kwargs: dict of string to parameter
        :param chunk_size: If given, chunk the data according to this size. If not given, use an empirical value.
        :type chunk_size: int
        :param data_length: If the data is a generator, you have to set the length here. If it is none, the
            length is deduced from the len of the data.
        :type data_length: int
        :return: the calculated results
        :rtype: list
        """
        raise NotImplementedError
class IterableDistributorBaseClass(DistributorBaseClass):
    """
    Distributor Base Class that can handle all iterable items and calculate
    a map_function on each item separately.
    This is done on chunks of the data, meaning, that the DistributorBaseClass classes will chunk the data into chunks,
    distribute the data and apply the map_function functions on the items separately.
    Dependent on the implementation of the distribute function, this is done in parallel or using a cluster of nodes.
    """
    @staticmethod
    def partition(data, chunk_size):
        """
        This generator partitions an iterable into slices of length `chunk_size`.
        If the chunk size is not a divider of the data length, the last slice will be shorter.
        Taken from
        https://stackoverflow.com/questions/1915170/split-a-generator-iterable-every-n-items-in-python-splitevery
        The important part here is, that the iterable is only
        traversed once and the chunks are produced one at a time.
        This is good for both memory as well as speed.
        :param data: The data to partition.
        :type data: Iterable
        :param chunk_size: The chunk size. The last chunk might be smaller.
        :type chunk_size: int
        :return: A generator producing the chunks of data.
        :rtype: Generator[Iterable]
        """
        # Make sure we have an iterable
        iterator = iter(data)
        # takewhile(true, ...) generates an iterator until the items are empty
        # (= we have reached the end)
        # The islice(iterator, n) gets the next n elements from the iterator.
        # The list(...) makes sure we do not pass
        # (once the iterator is exhausted, islice yields nothing, the list is
        # empty and therefore falsy, and takewhile(bool, ...) stops.)
        return takewhile(
            bool, (list(islice(iterator, chunk_size)) for _ in repeat(None))
        )
    def __init__(self):
        """
        Constructs the DistributorBaseClass class
        """
        # Abstract: concrete subclasses must implement their own __init__ and
        # set the attributes this class relies on (n_workers for
        # calculate_best_chunk_size, optionally progressbar_title /
        # disable_progressbar for the progress bar in map_reduce).
        raise NotImplementedError
    def calculate_best_chunk_size(self, data_length):
        """
        Calculates the best chunk size for a list of length data_length. The current implemented formula is more or
        less an empirical result for multiprocessing case on one machine.
        :param data_length: A length which defines how many calculations there need to be.
        :type data_length: int
        :return: the calculated chunk size
        :rtype: int
        TODO: Investigate which is the best chunk size for different settings.
        """
        # Aim for roughly 5 chunks per worker; round up so no items are lost.
        chunk_size, extra = divmod(data_length, self.n_workers * 5)
        if extra:
            chunk_size += 1
        return chunk_size
    def map_reduce(
        self,
        map_function,
        data,
        function_kwargs=None,
        chunk_size=None,
        data_length=None,
    ):
        """
        This method contains the core functionality of the DistributorBaseClass class.
        It maps the map_function to each element of the data and reduces the results to return a flattened list.
        How the jobs are calculated, is determined by the classes
        :func:`tsfresh.utilities.distribution.DistributorBaseClass.distribute` method,
        which can distribute the jobs in multiple threads, across multiple processing units etc.
        To not transport each element of the data individually, the data is split into chunks, according to the chunk
        size (or an empirical guess if none is given). By this, worker processes not tiny but adequate sized parts of
        the data.
        :param map_function: a function to apply to each data item.
        :type map_function: callable
        :param data: the data to use in the calculation
        :type data: iterable
        :param function_kwargs: parameters for the map function
        :type function_kwargs: dict of string to parameter
        :param chunk_size: If given, chunk the data according to this size. If not given, use an empirical value.
        :type chunk_size: int
        :param data_length: If the data is a generator, you have to set the length here. If it is none, the
            length is deduced from the len of the data.
        :type data_length: int
        :return: the calculated results
        :rtype: list
        """
        if not isinstance(data, Iterable):
            raise ValueError(
                "You passed data, which can not be handled by this distributor!"
            )
        if data_length is None:
            data_length = len(data)
        if not chunk_size:
            chunk_size = self.calculate_best_chunk_size(data_length)
        chunk_generator = self.partition(data, chunk_size=chunk_size)
        map_kwargs = {"map_function": map_function, "kwargs": function_kwargs}
        if hasattr(self, "progressbar_title"):
            total_number_of_expected_results = math.ceil(data_length / chunk_size)
            # Progress bar ticks once per chunk result produced by distribute().
            result = tqdm(
                self.distribute(
                    _function_with_partly_reduce, chunk_generator, map_kwargs
                ),
                total=total_number_of_expected_results,
                desc=self.progressbar_title,
                disable=self.disable_progressbar,
            )
        else:
            # NOTE(review): subclasses without a progressbar_title (the dask
            # distributors in this module) return an already-flattened list
            # from distribute(); wrapping it in a 1-tuple makes the
            # chain.from_iterable below unwrap exactly that one level, so the
            # items are not flattened a second time. Confirm before changing.
            result = (
                self.distribute(
                    _function_with_partly_reduce, chunk_generator, map_kwargs
                ),
            )
        result = list(itertools.chain.from_iterable(result))
        self.close()
        return result
    def distribute(self, func, partitioned_chunks, kwargs):
        """
        This abstract base function distributes the work among workers, which can be threads or nodes in a cluster.
        Must be implemented in the derived classes.
        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter
        :return: The result of the calculation as a list - each item should be the result of the application of func
            to a single element.
        """
        raise NotImplementedError
    def close(self):
        """
        Abstract base function to clean the DistributorBaseClass after use, e.g. close the connection to a DaskScheduler
        """
        pass
class MapDistributor(IterableDistributorBaseClass):
    """
    Distributor that evaluates every job sequentially with Python's built-in
    ``map``.
    """

    def __init__(
        self, disable_progressbar=False, progressbar_title="Feature Extraction"
    ):
        """
        Create a new MapDistributor instance.

        :param disable_progressbar: whether to show a progressbar or not.
        :type disable_progressbar: bool
        :param progressbar_title: the title of the progressbar
        :type progressbar_title: basestring
        """
        self.disable_progressbar = disable_progressbar
        self.progressbar_title = progressbar_title

    def distribute(self, func, partitioned_chunks, kwargs):
        """
        Calculate the features sequentially via ``map``.

        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter
        :return: lazy iterable of per-chunk results.
        """
        bound_func = partial(func, **kwargs)
        return map(bound_func, partitioned_chunks)

    def calculate_best_chunk_size(self, data_length):
        """
        Sequential execution gains nothing from batching, so use chunks of 1.

        :param data_length: A length which defines how many calculations there need to be.
        :type data_length: int
        """
        return 1
class LocalDaskDistributor(IterableDistributorBaseClass):
    """
    Distributor running on a dask cluster started locally, using in-process
    communication.
    """

    def __init__(self, n_workers):
        """
        Initiates a LocalDaskDistributor instance.

        :param n_workers: How many workers should the local dask cluster have?
        :type n_workers: int
        """
        import tempfile
        from distributed import Client, LocalCluster

        # .local_dir_ is the path where the local dask workers store temporary files
        self.local_dir_ = tempfile.mkdtemp()
        local_cluster = LocalCluster(
            n_workers=n_workers, processes=False, local_directory=self.local_dir_
        )
        self.client = Client(local_cluster)
        self.n_workers = n_workers

    def distribute(self, func, partitioned_chunks, kwargs):
        """
        Calculate the features in parallel on the local dask workers.

        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter
        :return: flattened list of all worker results.
        """
        if isinstance(partitioned_chunks, Iterable):
            # since dask 2.0.0 client.map no longer accepts plain iterables
            partitioned_chunks = list(partitioned_chunks)
        futures = self.client.map(partial(func, **kwargs), partitioned_chunks)
        gathered = self.client.gather(futures)
        return [item for chunk_result in gathered for item in chunk_result]

    def close(self):
        """
        Closes the connection to the local Dask Scheduler.
        """
        self.client.close()
class ClusterDaskDistributor(IterableDistributorBaseClass):
    """
    Distributor that spreads the calculation over an existing dask cluster.
    """

    def __init__(self, address):
        """
        Connect to a running Dask Scheduler that will distribute the feature calculation.

        :param address: the ip address and port number of the Dask Scheduler
        :type address: str
        """
        from distributed import Client

        self.client = Client(address=address)

    def calculate_best_chunk_size(self, data_length):
        """
        Derive the chunk size from the number of dask workers available at
        execution time (roughly 5 chunks per worker, rounded up).

        :param data_length: A length which defines how many calculations there need to be.
        :type data_length: int
        """
        worker_count = len(self.client.scheduler_info()["workers"])
        chunk_size, remainder = divmod(data_length, worker_count * 5)
        return chunk_size + 1 if remainder else chunk_size

    def distribute(self, func, partitioned_chunks, kwargs):
        """
        Calculate the features in parallel on the dask cluster workers.

        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter
        :return: flattened list of all worker results.
        """
        if isinstance(partitioned_chunks, Iterable):
            # since dask 2.0.0 client.map no longer accepts plain iterables
            partitioned_chunks = list(partitioned_chunks)
        futures = self.client.map(partial(func, **kwargs), partitioned_chunks)
        gathered = self.client.gather(futures)
        return [item for chunk_result in gathered for item in chunk_result]

    def close(self):
        """
        Closes the connection to the Dask Scheduler.
        """
        self.client.close()
class MultiprocessingDistributor(IterableDistributorBaseClass):
    """
    Distributor that parallelizes the jobs over a ``multiprocessing.Pool`` on
    the local machine.
    """

    def __init__(
        self,
        n_workers,
        disable_progressbar=False,
        progressbar_title="Feature Extraction",
        show_warnings=True,
    ):
        """
        Creates a new MultiprocessingDistributor instance.

        :param n_workers: How many workers should the multiprocessing pool have?
        :type n_workers: int
        :param disable_progressbar: whether to show a progressbar or not.
        :type disable_progressbar: bool
        :param progressbar_title: the title of the progressbar
        :type progressbar_title: basestring
        :param show_warnings: whether to show warnings or not.
        :type show_warnings: bool
        """
        # Each worker process runs initialize_warnings_in_workers on startup,
        # so its warnings filter matches the parent's show_warnings choice.
        self.pool = Pool(
            processes=n_workers,
            initializer=initialize_warnings_in_workers,
            initargs=(show_warnings,),
        )
        self.n_workers = n_workers
        self.disable_progressbar = disable_progressbar
        self.progressbar_title = progressbar_title

    def distribute(self, func, partitioned_chunks, kwargs):
        """
        Calculate the features in parallel by handing the chunks to the pool.

        :param func: the function to send to each worker.
        :type func: callable
        :param partitioned_chunks: The list of data chunks - each element is again
            a list of chunks - and should be processed by one worker.
        :type partitioned_chunks: iterable
        :param kwargs: parameters for the map function
        :type kwargs: dict of string to parameter
        :return: lazy, unordered iterable of per-chunk results.
        """
        bound_func = partial(func, **kwargs)
        return self.pool.imap_unordered(bound_func, partitioned_chunks)

    def close(self):
        """
        Collects the result from the workers and closes the process pool.
        """
        self.pool.close()
        self.pool.terminate()
        self.pool.join()
class ApplyDistributor(DistributorBaseClass):
    """
    Distributor that delegates the whole computation to ``data.apply``,
    passing along a ``meta`` description of the expected output (presumably a
    dask-style dataframe API -- confirm with the callers).
    """

    def __init__(self, meta):
        # meta describes the output structure expected by data.apply
        self.meta = meta

    def map_reduce(
        self,
        map_function,
        data,
        function_kwargs=None,
        chunk_size=None,
        data_length=None,
    ):
        """Run ``map_function`` via ``data.apply``; chunking parameters are ignored."""
        kwargs = function_kwargs
        return data.apply(map_function, meta=self.meta, **kwargs)
import ast
import numpy as np
def get_config_from_string(parts):
    """
    Helper function to extract the configuration of a certain function from the column name.
    The column name parts (split by "__") should be passed to this function. It will skip the
    kind name and the function name and only use the parameter parts. These parts will be split up on "_"
    into the parameter name and the parameter value. This value is transformed into a python object
    (for example is "(1, 2, 3)" transformed into a tuple consisting of the ints 1, 2 and 3).
    Returns None if no parameters are in the column name.

    :param parts: The column name split up on "__"
    :type parts: list
    :return: a dictionary with all parameters, which are encoded in the column name.
    :rtype: dict
    """
    relevant_parts = parts[2:]
    if not relevant_parts:
        return None
    # Split each "name_value" part at the LAST underscore so parameter names
    # may themselves contain underscores.
    config_kwargs = [s.rsplit("_", 1)[0] for s in relevant_parts]
    config_values = [s.rsplit("_", 1)[1] for s in relevant_parts]
    dict_if_configs = {}
    for key, value in zip(config_kwargs, config_values):
        lowered = value.lower()
        # Special-case the float sentinels, which ast.literal_eval cannot parse.
        # np.NaN / np.NINF / np.PINF were removed in NumPy 2.0; the lowercase
        # aliases below exist in every NumPy version and are plain Python floats.
        if lowered == "nan":
            dict_if_configs[key] = np.nan
        elif lowered == "-inf":
            dict_if_configs[key] = -np.inf
        elif lowered == "inf":
            dict_if_configs[key] = np.inf
        else:
            dict_if_configs[key] = ast.literal_eval(value)
    return dict_if_configs
def convert_to_output_format(param):
    """
    Helper function to convert parameters into a valid string usable in a
    column name. Inverse of the parsing performed by the from_columns function.

    The parameters are sorted by name and rendered as
    <param name>_<param value>__<param name>_<param value>__ ...

    String values are wrapped in double quotes.

    :param param: The dictionary of parameters to write out
    :type param: dict
    :return: The string of parsed parameters
    :rtype: str
    """

    def render_value(value):
        if isinstance(value, str):
            return '"' + str(value) + '"'
        return str(value)

    pieces = [str(key) + "_" + render_value(param[key]) for key in sorted(param.keys())]
    return "__".join(pieces)
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from tsfresh.utilities.dataframe_functions import (
get_range_values_per_column,
impute_dataframe_range,
)
class PerColumnImputer(BaseEstimator, TransformerMixin):
    """
    Sklearn-compatible estimator, for column-wise imputing DataFrames by replacing all ``NaNs`` and ``infs``
    with average/extreme values from the same columns. It is basically a wrapper around
    :func:`~tsfresh.utilities.dataframe_functions.impute`.

    Each occurring ``inf`` or ``NaN`` in the DataFrame is replaced by

    * ``-inf`` -> ``min``
    * ``+inf`` -> ``max``
    * ``NaN`` -> ``median``

    This estimator - as most of the sklearn estimators - works in a two step procedure. First, the ``.fit``
    function is called where for each column the min, max and median are computed.
    Secondly, the ``.transform`` function is called which replaces the occurances of ``NaNs`` and ``infs`` using
    the column-wise computed min, max and median values.
    """

    def __init__(
        self,
        col_to_NINF_repl_preset=None,
        col_to_PINF_repl_preset=None,
        col_to_NAN_repl_preset=None,
    ):
        """
        Create a new PerColumnImputer instance, optionally with dictionaries containing preset replacements
        for ``NaNs`` and ``infs`` that override the values computed in ``fit``.

        :param col_to_NINF_repl_preset: Dictionary mapping column names to ``-inf`` replacement values
        :type col_to_NINF_repl_preset: dict
        :param col_to_PINF_repl_preset: Dictionary mapping column names to ``+inf`` replacement values
        :type col_to_PINF_repl_preset: dict
        :param col_to_NAN_repl_preset: Dictionary mapping column names to ``NaN`` replacement values
        :type col_to_NAN_repl_preset: dict
        """
        # The fitted replacement dictionaries; None until fit() has run.
        self._col_to_NINF_repl = None
        self._col_to_PINF_repl = None
        self._col_to_NAN_repl = None
        self.col_to_NINF_repl_preset = col_to_NINF_repl_preset
        self.col_to_PINF_repl_preset = col_to_PINF_repl_preset
        self.col_to_NAN_repl_preset = col_to_NAN_repl_preset

    @staticmethod
    def _merge_preset(computed, preset, preset_name, columns):
        """
        Overwrite entries of the computed replacement dict with a user preset,
        validating that the preset does not reference unknown columns.

        :raise ValueError: if the preset contains keys not present in ``columns``.
        """
        if preset is not None:
            if not set(columns) >= set(preset.keys()):
                raise ValueError(
                    "Preset dictionary '%s' contain more keys "
                    "than the column names in X" % preset_name
                )
            computed.update(preset)
        return computed

    def fit(self, X, y=None):
        """
        Compute the min, max and median for all columns in the DataFrame. For more information,
        please see the :func:`~tsfresh.utilities.dataframe_functions.get_range_values_per_column` function.

        :param X: DataFrame to calculate min, max and median values on
        :type X: pandas.DataFrame
        :param y: Unneeded.
        :type y: Any

        :return: the estimator with the computed min, max and median values
        :rtype: Imputer
        """
        if not isinstance(X, pd.DataFrame):
            X = pd.DataFrame(X)

        col_to_max, col_to_min, col_to_median = get_range_values_per_column(X)

        # Presets (if given) take precedence over the computed values.
        self._col_to_NINF_repl = self._merge_preset(
            col_to_min, self.col_to_NINF_repl_preset, "col_to_NINF_repl_preset", X.columns
        )
        self._col_to_PINF_repl = self._merge_preset(
            col_to_max, self.col_to_PINF_repl_preset, "col_to_PINF_repl_preset", X.columns
        )
        self._col_to_NAN_repl = self._merge_preset(
            col_to_median, self.col_to_NAN_repl_preset, "col_to_NAN_repl_preset", X.columns
        )

        return self

    def transform(self, X):
        """
        Column-wise replace all ``NaNs``, ``-inf`` and ``+inf`` in the DataFrame `X` with average/extreme
        values from the dictionaries computed during ``fit``.

        :param X: DataFrame to impute
        :type X: pandas.DataFrame

        :return: imputed DataFrame
        :rtype: pandas.DataFrame
        :raise NotFittedError: if the replacement dictionaries are still of None type.
            This can happen if the transformer was not fitted.
        """
        if not isinstance(X, pd.DataFrame):
            X = pd.DataFrame(X)

        if (
            self._col_to_NINF_repl is None
            or self._col_to_PINF_repl is None
            or self._col_to_NAN_repl is None
        ):
            raise NotFittedError("PerColumnImputer is not fitted")

        return impute_dataframe_range(
            X, self._col_to_PINF_repl, self._col_to_NINF_repl, self._col_to_NAN_repl
        )
from functools import partial
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from tsfresh import defaults
from tsfresh.feature_extraction.settings import from_columns
from tsfresh.transformers.feature_augmenter import FeatureAugmenter
from tsfresh.transformers.feature_selector import FeatureSelector
from tsfresh.utilities.dataframe_functions import (
get_range_values_per_column,
impute_dataframe_range,
)
# Pro: It offers more control
# Contra: The Transformer is more than an Augmenter
class RelevantFeatureAugmenter(BaseEstimator, TransformerMixin):
"""
Sklearn-compatible estimator to calculate relevant features out of a time series and add them to a data sample.
As many other sklearn estimators, this estimator works in two steps:
In the fit phase, all possible time series features are calculated using the time series, that is set by the
set_timeseries_container function (if the features are not manually changed by handing in a
feature_extraction_settings object). Then, their significance and relevance to the target is computed using
statistical methods and only the relevant ones are selected using the Benjamini Hochberg procedure. These features
are stored internally.
In the transform step, the information on which features are relevant from the fit step is used and those features
are extracted from the time series. These extracted features are then added to the input data sample.
This estimator is a wrapper around most of the functionality in the tsfresh package. For more information on the
subtasks, please refer to the single modules and functions, which are:
* Settings for the feature extraction: :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters`
* Feature extraction method: :func:`~tsfresh.feature_extraction.extraction.extract_features`
* Extracted features: :mod:`~tsfresh.feature_extraction.feature_calculators`
* Feature selection: :func:`~tsfresh.feature_selection.feature_selector.check_fs_sig_bh`
This estimator works analogue to the :class:`~tsfresh.transformers.feature_augmenter.FeatureAugmenter` with
the difference that this estimator does only output and calculate the relevant features,
whereas the other outputs all features.
Also for this estimator, two datasets play a crucial role:
1. the time series container with the timeseries data. This container (for the format see
:mod:`~tsfresh.feature_extraction.extraction`) contains the data which is used for calculating the
features. It must be groupable by ids which are used to identify which feature should be attached to which row
in the second dataframe:
2. the input data, where the features will be added to.
Imagine the following situation: You want to classify 10 different financial shares and you have their development
in the last year as a time series. You would then start by creating features from the metainformation of the
shares, e.g. how long they were on the market etc. and filling up a table - the features of one stock in one row.
>>> # Fill in the information of the stocks and the target
>>> X_train, X_test, y_train = pd.DataFrame(), pd.DataFrame(), pd.Series()
You can then extract all the relevant features from the time development of the shares, by using this estimator:
>>> train_time_series, test_time_series = read_in_timeseries() # get the development of the shares
>>> from tsfresh.transformers import RelevantFeatureAugmenter
>>> augmenter = RelevantFeatureAugmenter()
>>> augmenter.set_timeseries_container(train_time_series)
>>> augmenter.fit(X_train, y_train)
>>> augmenter.set_timeseries_container(test_time_series)
>>> X_test_with_features = augmenter.transform(X_test)
X_test_with_features will then contain the same information as X_test (with all the meta information you have
probably added) plus some relevant time series features calculated on the time series you handed in.
Please keep in mind that the time series you hand in before fit or transform must contain data for the rows that are
present in X.
If your set filter_only_tsfresh_features to True, your manually-created features that were present in X_train (or
X_test) before using this estimator are not touched. Otherwise, also those features are evaluated and may be
rejected from the data sample, because they are irrelevant.
For a description what the parameters column_id, column_sort, column_kind and column_value mean, please see
:mod:`~tsfresh.feature_extraction.extraction`.
You can control the feature extraction in the fit step (the feature extraction in the transform step is done
automatically) as well as the feature selection in the fit step by handing in settings.
However, the default settings which are used if you pass no flags are often quite sensible.
"""
    def __init__(
        self,
        filter_only_tsfresh_features=True,
        default_fc_parameters=None,
        kind_to_fc_parameters=None,
        column_id=None,
        column_sort=None,
        column_kind=None,
        column_value=None,
        timeseries_container=None,
        chunksize=defaults.CHUNKSIZE,
        n_jobs=defaults.N_PROCESSES,
        show_warnings=defaults.SHOW_WARNINGS,
        disable_progressbar=defaults.DISABLE_PROGRESSBAR,
        profile=defaults.PROFILING,
        profiling_filename=defaults.PROFILING_FILENAME,
        profiling_sorting=defaults.PROFILING_SORTING,
        test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
        test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
        test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
        test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
        fdr_level=defaults.FDR_LEVEL,
        hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT,
        ml_task="auto",
        multiclass=False,
        n_significant=1,
        multiclass_p_values="min",
    ):
        """
        Create a new RelevantFeatureAugmenter instance.
        :param filter_only_tsfresh_features: Whether to touch the manually-created features during feature selection or
            not.
        :type filter_only_tsfresh_features: bool
        :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
            which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
            more information.
        :type default_fc_parameters: dict
        :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters. This means that kinds,
            for which kind_to_fc_parameters does not have any entries, will be ignored by the feature selection.
        :type kind_to_fc_parameters: dict
        :param column_id: The column with the id. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_id: basestring
        :param column_sort: The column with the sort data. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_sort: basestring
        :param column_kind: The column with the kind data. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_kind: basestring
        :param column_value: The column with the values. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_value: basestring
        :param chunksize: The size of one chunk that is submitted to the worker
            process for the parallelisation. Where one chunk is defined as a
            singular time series for one id and one kind. If you set the chunksize
            to 10, then it means that one task is to calculate all features for 10
            time series. If it is set it to None, depending on distributor,
            heuristics are used to find the optimal chunksize. If you get out of
            memory exceptions, you can try it with the dask distributor and a
            smaller chunksize.
        :type chunksize: None or int
        :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
        :type n_jobs: int
        :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
        :type show_warnings: bool
        :param disable_progressbar: Do not show a progressbar while doing the calculation.
        :type disable_progressbar: bool
        :param profile: Turn on profiling during feature extraction
        :type profile: bool
        :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for
            more information)
        :type profiling_sorting: basestring
        :param profiling_filename: Where to save the profiling results.
        :type profiling_filename: basestring
        :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
            (currently unused)
        :type test_for_binary_target_binary_feature: str
        :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
        :type test_for_binary_target_real_feature: str
        :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature
            (currently unused)
        :type test_for_real_target_binary_feature: str
        :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
        :type test_for_real_target_real_feature: str
        :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage
            of irrelevant features among all created features.
        :type fdr_level: float
        :param hypotheses_independent: Can the significance of the features be assumed to be independent?
            Normally, this should be set to False as the features are never
            independent (e.g. mean and median)
        :type hypotheses_independent: bool
        :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
            Defaults to `'auto'`, meaning the intended task is inferred from `y`.
            If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
            else regression.
        :type ml_task: str
        :param multiclass: Whether the problem is multiclass classification. This modifies the way in which features
            are selected. Multiclass requires the features to be statistically significant for
            predicting n_significant classes.
        :type multiclass: bool
        :param n_significant: The number of classes for which features should be statistically significant predictors
            to be regarded as 'relevant'
        :type n_significant: int
        :param multiclass_p_values: The desired method for choosing how to display multiclass p-values for each feature.
            Either `'avg'`, `'max'`, `'min'`, `'all'`. Defaults to `'min'`, meaning the p-value
            with the highest significance is chosen. When set to `'all'`, the attributes
            `self.feature_importances_` and `self.p_values` are of type pandas.DataFrame, where
            each column corresponds to a target class.
        :type multiclass_p_values: str
        """
        # scikit-learn convention: __init__ only stores the passed parameters
        # unchanged; all validation and derived state happens in fit().
        self.filter_only_tsfresh_features = filter_only_tsfresh_features
        self.default_fc_parameters = default_fc_parameters
        self.kind_to_fc_parameters = kind_to_fc_parameters
        self.column_id = column_id
        self.column_sort = column_sort
        self.column_kind = column_kind
        self.column_value = column_value
        self.timeseries_container = timeseries_container
        self.chunksize = chunksize
        self.n_jobs = n_jobs
        self.show_warnings = show_warnings
        self.disable_progressbar = disable_progressbar
        self.profile = profile
        self.profiling_filename = profiling_filename
        self.profiling_sorting = profiling_sorting
        self.test_for_binary_target_binary_feature = (
            test_for_binary_target_binary_feature
        )
        self.test_for_binary_target_real_feature = test_for_binary_target_real_feature
        self.test_for_real_target_binary_feature = test_for_real_target_binary_feature
        self.test_for_real_target_real_feature = test_for_real_target_real_feature
        self.fdr_level = fdr_level
        self.hypotheses_independent = hypotheses_independent
        self.ml_task = ml_task
        self.multiclass = multiclass
        self.n_significant = n_significant
        self.multiclass_p_values = multiclass_p_values
        # attributes created during fit() (remain None until fit is called)
        self.feature_extractor = None
        self.feature_selector = None
def set_timeseries_container(self, timeseries_container):
"""
Set the timeseries, with which the features will be calculated. For a format of the time series container,
please refer to :mod:`~tsfresh.feature_extraction.extraction`. The timeseries must contain the same indices
as the later DataFrame, to which the features will be added (the one you will pass to :func:`~transform` or
:func:`~fit`). You can call this function as often as you like, to change the timeseries later
(e.g. if you want to extract for different ids).
:param timeseries_container: The timeseries as a pandas.DataFrame or a dict. See
:mod:`~tsfresh.feature_extraction.extraction` for the format.
:type timeseries_container: pandas.DataFrame or dict
:return: None
:rtype: None
"""
self.timeseries_container = timeseries_container
def fit(self, X, y):
"""
Use the given timeseries from :func:`~set_timeseries_container` and calculate features from it and add them
to the data sample X (which can contain other manually-designed features).
Then determine which of the features of X are relevant for the given target y.
Store those relevant features internally to only extract them in the transform step.
If filter_only_tsfresh_features is True, only reject newly, automatically added features. If it is False,
also look at the features that are already present in the DataFrame.
:param X: The data frame without the time series features. The index rows should be present in the timeseries
and in the target vector.
:type X: pandas.DataFrame or numpy.array
:param y: The target vector to define, which features are relevant.
:type y: pandas.Series or numpy.array
:return: the fitted estimator with the information, which features are relevant.
:rtype: RelevantFeatureAugmenter
"""
self._fit_and_augment(X, y)
return self
    def transform(self, X):
        """
        After the fit step, it is known which features are relevant. Only extract those from the time series handed in
        with the function :func:`~set_timeseries_container`.
        If filter_only_tsfresh_features is False, also delete the irrelevant,
        already present features in the data frame.
        :param X: the data sample to add the relevant (and delete the irrelevant) features to.
        :type X: pandas.DataFrame or numpy.array
        :return: a data sample with the same information as X, but with added relevant time series features and
            deleted irrelevant information (only if filter_only_tsfresh_features is False).
        :rtype: pandas.DataFrame
        """
        # Guard clauses: transform needs a time series container and a completed fit.
        if self.timeseries_container is None:
            raise RuntimeError(
                "You have to provide a time series using the set_timeseries_container function before."
            )
        if self.feature_selector is None:
            raise RuntimeError("You have to call fit before calling transform.")
        if self.feature_selector.relevant_features is None:
            raise RuntimeError("You have to call fit before calling transform.")
        self.feature_extractor.set_timeseries_container(self.timeseries_container)
        # Only the relevant features that are not already columns of X still
        # need to be extracted.
        relevant_time_series_features = set(
            self.feature_selector.relevant_features
        ) - set(pd.DataFrame(X).columns)
        relevant_extraction_settings = from_columns(relevant_time_series_features)
        # Set imputing strategy: re-use the per-column ranges captured during
        # fit() so imputation is consistent between fit and transform.
        impute_function = partial(
            impute_dataframe_range,
            col_to_max=self.col_to_max,
            col_to_min=self.col_to_min,
            col_to_median=self.col_to_median,
        )
        # Throwaway augmenter restricted to exactly the still-missing relevant
        # calculators; all other settings are copied from the fitted extractor.
        relevant_feature_extractor = FeatureAugmenter(
            kind_to_fc_parameters=relevant_extraction_settings,
            default_fc_parameters={},
            column_id=self.feature_extractor.column_id,
            column_sort=self.feature_extractor.column_sort,
            column_kind=self.feature_extractor.column_kind,
            column_value=self.feature_extractor.column_value,
            chunksize=self.feature_extractor.chunksize,
            n_jobs=self.feature_extractor.n_jobs,
            show_warnings=self.feature_extractor.show_warnings,
            disable_progressbar=self.feature_extractor.disable_progressbar,
            impute_function=impute_function,
            profile=self.feature_extractor.profile,
            profiling_filename=self.feature_extractor.profiling_filename,
            profiling_sorting=self.feature_extractor.profiling_sorting,
        )
        relevant_feature_extractor.set_timeseries_container(
            self.feature_extractor.timeseries_container
        )
        X_augmented = relevant_feature_extractor.transform(X)
        if self.filter_only_tsfresh_features:
            # Keep the relevant extracted features plus all original columns of X.
            return X_augmented.copy().loc[
                :, self.feature_selector.relevant_features + X.columns.tolist()
            ]
        else:
            # Keep only the relevant features; irrelevant manual columns are dropped.
            return X_augmented.copy().loc[:, self.feature_selector.relevant_features]
    def fit_transform(self, X, y):
        """
        Equivalent to :func:`~fit` followed by :func:`~transform`; however, this is faster than performing those steps
        separately, because it avoids re-extracting relevant features for training data.
        :param X: The data frame without the time series features. The index rows should be present in the timeseries
            and in the target vector.
        :type X: pandas.DataFrame or numpy.array
        :param y: The target vector to define, which features are relevant.
        :type y: pandas.Series or numpy.array
        :return: a data sample with the same information as X, but with added relevant time series features and
            deleted irrelevant information (only if filter_only_tsfresh_features is False).
        :rtype: pandas.DataFrame
        """
        X_augmented = self._fit_and_augment(X, y)
        # Keep only the columns that the selector deemed relevant during fit.
        selected_features = X_augmented.copy().loc[
            :, self.feature_selector.relevant_features
        ]
        if self.filter_only_tsfresh_features:
            # Re-attach the untouched manual features of X (they were excluded
            # from extraction/selection in _fit_and_augment).
            selected_features = pd.merge(
                selected_features, X, left_index=True, right_index=True, how="left"
            )
        return selected_features
    def _fit_and_augment(self, X, y):
        """
        Helper for the :func:`~fit` and :func:`~fit_transform` functions, which does most of the work described in
        :func:`~fit`.
        :param X: The data frame without the time series features. The index rows should be present in the timeseries
            and in the target vector.
        :type X: pandas.DataFrame or numpy.array
        :param y: The target vector to define, which features are relevant.
        :type y: pandas.Series or numpy.array
        :return: a data sample with the extracted time series features. If filter_only_tsfresh_features is False
            the data sample will also include the information in X.
        :rtype: pandas.DataFrame
        """
        if self.timeseries_container is None:
            raise RuntimeError(
                "You have to provide a time series using the set_timeseries_container function before."
            )
        # A fresh extractor and selector are created on every fit call, so
        # re-fitting the estimator never leaks state from a previous fit.
        self.feature_extractor = FeatureAugmenter(
            default_fc_parameters=self.default_fc_parameters,
            kind_to_fc_parameters=self.kind_to_fc_parameters,
            column_id=self.column_id,
            column_sort=self.column_sort,
            column_kind=self.column_kind,
            column_value=self.column_value,
            timeseries_container=self.timeseries_container,
            chunksize=self.chunksize,
            n_jobs=self.n_jobs,
            show_warnings=self.show_warnings,
            disable_progressbar=self.disable_progressbar,
            profile=self.profile,
            profiling_filename=self.profiling_filename,
            profiling_sorting=self.profiling_sorting,
        )
        self.feature_selector = FeatureSelector(
            test_for_binary_target_binary_feature=self.test_for_binary_target_binary_feature,
            test_for_binary_target_real_feature=self.test_for_binary_target_real_feature,
            test_for_real_target_binary_feature=self.test_for_real_target_binary_feature,
            test_for_real_target_real_feature=self.test_for_real_target_real_feature,
            fdr_level=self.fdr_level,
            hypotheses_independent=self.hypotheses_independent,
            n_jobs=self.n_jobs,
            chunksize=self.chunksize,
            ml_task=self.ml_task,
            multiclass=self.multiclass,
            n_significant=self.n_significant,
            multiclass_p_values=self.multiclass_p_values,
        )
        if self.filter_only_tsfresh_features:
            # Do not merge the time series features to the old features
            X_tmp = pd.DataFrame(index=X.index)
        else:
            X_tmp = X
        X_augmented = self.feature_extractor.transform(X_tmp)
        # Remember the per-column value ranges so transform() can impute new
        # data consistently with the training data.
        (
            self.col_to_max,
            self.col_to_min,
            self.col_to_median,
        ) = get_range_values_per_column(X_augmented)
        X_augmented = impute_dataframe_range(
            X_augmented,
            col_to_max=self.col_to_max,
            col_to_median=self.col_to_median,
            col_to_min=self.col_to_min,
        )
        self.feature_selector.fit(X_augmented, y)
        return X_augmented
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
import tsfresh.defaults
from tsfresh.feature_extraction import extract_features
from tsfresh.utilities.dataframe_functions import restrict_input_to_index
class FeatureAugmenter(BaseEstimator, TransformerMixin):
    """
    Sklearn-compatible estimator, for calculating and adding many features calculated from a given time series
    to the data. It is basically a wrapper around :func:`~tsfresh.feature_extraction.extract_features`.
    The features include basic ones like min, max or median, and advanced features like fourier
    transformations or statistical tests. For a list of all possible features, see the module
    :mod:`~tsfresh.feature_extraction.feature_calculators`. The column name of each added feature contains the name
    of the function of that module, which was used for the calculation.
    For this estimator, two datasets play a crucial role:
    1. the time series container with the timeseries data. This container (for the format see :ref:`data-formats-label`)
    contains the data which is used for calculating the
    features. It must be groupable by ids which are used to identify which feature should be attached to which row
    in the second dataframe.
    2. the input data X, where the features will be added to. Its rows are identified by the index and each index in
    X must be present as an id in the time series container.
    Imagine the following situation: You want to classify 10 different financial shares and you have their development
    in the last year as a time series. You would then start by creating features from the metainformation of the
    shares, e.g. how long they were on the market etc. and filling up a table - the features of one stock in one row.
    This is the input array X, with each row identified by e.g. the stock name as an index.
    >>> df = pandas.DataFrame(index=["AAA", "BBB", ...])
    >>> # Fill in the information of the stocks
    >>> df["started_since_days"] = ... # add a feature
    You can then extract all the features from the time development of the shares, by using this estimator.
    The time series container must include a column of ids, which are the same as the index of X.
    >>> time_series = read_in_timeseries() # get the development of the shares
    >>> from tsfresh.transformers import FeatureAugmenter
    >>> augmenter = FeatureAugmenter(column_id="id")
    >>> augmenter.set_timeseries_container(time_series)
    >>> df_with_time_series_features = augmenter.transform(df)
    The settings for the feature calculation can be controlled with the settings object.
    If you pass ``None``, the default settings are used.
    Please refer to :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters` for
    more information.
    This estimator does not select the relevant features, but calculates and adds all of them to the DataFrame. See the
    :class:`~tsfresh.transformers.relevant_feature_augmenter.RelevantFeatureAugmenter` for calculating and selecting
    features.
    For a description what the parameters column_id, column_sort, column_kind and column_value mean, please see
    :mod:`~tsfresh.feature_extraction.extraction`.
    """
    def __init__(
        self,
        default_fc_parameters=None,
        kind_to_fc_parameters=None,
        column_id=None,
        column_sort=None,
        column_kind=None,
        column_value=None,
        timeseries_container=None,
        chunksize=tsfresh.defaults.CHUNKSIZE,
        n_jobs=tsfresh.defaults.N_PROCESSES,
        show_warnings=tsfresh.defaults.SHOW_WARNINGS,
        disable_progressbar=tsfresh.defaults.DISABLE_PROGRESSBAR,
        impute_function=tsfresh.defaults.IMPUTE_FUNCTION,
        profile=tsfresh.defaults.PROFILING,
        profiling_filename=tsfresh.defaults.PROFILING_FILENAME,
        profiling_sorting=tsfresh.defaults.PROFILING_SORTING,
    ):
        """
        Create a new FeatureAugmenter instance.
        :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
            which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
            more information.
        :type default_fc_parameters: dict
        :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters. This means that kinds,
            for which kind_to_fc_parameters does not have any entries, will be ignored by the feature selection.
        :type kind_to_fc_parameters: dict
        :param column_id: The column with the id. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_id: basestring
        :param column_sort: The column with the sort data. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_sort: basestring
        :param column_kind: The column with the kind data. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_kind: basestring
        :param column_value: The column with the values. See :mod:`~tsfresh.feature_extraction.extraction`.
        :type column_value: basestring
        :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
        :type n_jobs: int
        :param chunksize: The size of one chunk that is submitted to the worker
            process for the parallelisation. Where one chunk is defined as a
            singular time series for one id and one kind. If you set the chunksize
            to 10, then it means that one task is to calculate all features for 10
            time series. If it is set it to None, depending on distributor,
            heuristics are used to find the optimal chunksize. If you get out of
            memory exceptions, you can try it with the dask distributor and a
            smaller chunksize.
        :type chunksize: None or int
        :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
        :type show_warnings: bool
        :param disable_progressbar: Do not show a progressbar while doing the calculation.
        :type disable_progressbar: bool
        :param impute_function: None, if no imputing should happen or the function to call for imputing
            the result dataframe. Imputing will never happen on the input data.
        :type impute_function: None or function
        :param profile: Turn on profiling during feature extraction
        :type profile: bool
        :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for
            more information)
        :type profiling_sorting: basestring
        :param profiling_filename: Where to save the profiling results.
        :type profiling_filename: basestring
        """
        # scikit-learn convention: only store the constructor arguments here.
        self.default_fc_parameters = default_fc_parameters
        self.kind_to_fc_parameters = kind_to_fc_parameters
        self.column_id = column_id
        self.column_sort = column_sort
        self.column_kind = column_kind
        self.column_value = column_value
        self.n_jobs = n_jobs
        self.chunksize = chunksize
        self.show_warnings = show_warnings
        self.disable_progressbar = disable_progressbar
        self.impute_function = impute_function
        self.profile = profile
        self.profiling_filename = profiling_filename
        self.profiling_sorting = profiling_sorting
        self.timeseries_container = timeseries_container
    def set_timeseries_container(self, timeseries_container):
        """
        Set the timeseries, with which the features will be calculated. For a format of the time series container,
        please refer to :mod:`~tsfresh.feature_extraction.extraction`. The timeseries must contain the same indices
        as the later DataFrame, to which the features will be added (the one you will pass to :func:`~transform`). You
        can call this function as often as you like, to change the timeseries later (e.g. if you want to extract for
        different ids).
        :param timeseries_container: The timeseries as a pandas.DataFrame or a dict. See
            :mod:`~tsfresh.feature_extraction.extraction` for the format.
        :type timeseries_container: pandas.DataFrame or dict
        :return: None
        :rtype: None
        """
        self.timeseries_container = timeseries_container
    def fit(self, X=None, y=None):
        """
        The fit function is not needed for this estimator. It just does nothing and is here for compatibility reasons.
        :param X: Unneeded.
        :type X: Any
        :param y: Unneeded.
        :type y: Any
        :return: The estimator instance itself
        :rtype: FeatureAugmenter
        """
        return self
    def transform(self, X):
        """
        Add the features calculated using the timeseries_container and add them to the corresponding rows in the input
        pandas.DataFrame X.
        To save some computing time, you should only include those time serieses in the container, that you
        need. You can set the timeseries container with the method :func:`set_timeseries_container`.
        :param X: the DataFrame to which the calculated timeseries features will be added. This is *not* the
            dataframe with the timeseries itself.
        :type X: pandas.DataFrame
        :return: The input DataFrame, but with added features.
        :rtype: pandas.DataFrame
        """
        if self.timeseries_container is None:
            raise RuntimeError(
                "You have to provide a time series using the set_timeseries_container function before."
            )
        # Extract only features for the IDs in X.index
        timeseries_container_X = restrict_input_to_index(
            self.timeseries_container, self.column_id, X.index
        )
        extracted_features = extract_features(
            timeseries_container_X,
            default_fc_parameters=self.default_fc_parameters,
            kind_to_fc_parameters=self.kind_to_fc_parameters,
            column_id=self.column_id,
            column_sort=self.column_sort,
            column_kind=self.column_kind,
            column_value=self.column_value,
            chunksize=self.chunksize,
            n_jobs=self.n_jobs,
            show_warnings=self.show_warnings,
            disable_progressbar=self.disable_progressbar,
            impute_function=self.impute_function,
            profile=self.profile,
            profiling_filename=self.profiling_filename,
            profiling_sorting=self.profiling_sorting,
        )
        # Left-join on the index so every row of X is kept, even if no
        # features could be extracted for its id (those become NaN).
        X = pd.merge(
            X, extracted_features, left_index=True, right_index=True, how="left"
        )
        return X
__version__ = '0.1.4'
import sounddevice as _sd
from pa_ringbuffer import init as _init_ringbuffer
from _rtmixer import ffi as _ffi, lib as _lib
# Ring buffer class bound to this extension module's FFI instance.
RingBuffer = _init_ringbuffer(_ffi, _lib)
# Get constants from C library:
# every upper-case symbol of the compiled extension (e.g. PLAY_BUFFER,
# ULONG_MAX) is re-exported as a module-level constant.
for _k, _v in vars(_lib).items():
    if _k.isupper():
        globals()[_k] = _v
class _Base(_sd._StreamBase):
    """Base class for Mixer et al."""
    def __init__(self, kind, qsize=16, **kwargs):
        """Create the stream with the C callback and the action/result queues.

        :param kind: stream kind for sounddevice ('input', 'output' or 'duplex').
        :param qsize: capacity (in actions) of the action and result queues.
        :param kwargs: forwarded to sounddevice's stream constructor.
        """
        # The realtime work is done by a callback implemented in C.
        callback = _ffi.addressof(_lib, 'callback')
        # Queues for passing "struct action" pointers into (action_q) and
        # back out of (result_q) the realtime callback.
        self._action_q = RingBuffer(_ffi.sizeof('struct action*'), qsize)
        self._result_q = RingBuffer(_ffi.sizeof('struct action*'), qsize)
        # Shared state struct handed to the C callback as userdata.
        self._state = _ffi.new('struct state*', dict(
            input_channels=0,
            output_channels=0,
            samplerate=0,
            action_q=self._action_q._ptr,
            result_q=self._result_q._ptr,
            actions=_ffi.NULL,
        ))
        _sd._StreamBase.__init__(
            self, kind=kind, dtype='float32',
            callback=callback, userdata=self._state, **kwargs)
        # The actual samplerate is only known after the stream was opened.
        self._state.samplerate = self.samplerate
        # Maps action cdata -> Python object kept alive while the callback
        # may still access it (buffer or ring buffer, or None).
        self._actions = {}
        # Scratch pointer-to-pointer used for all queue reads/writes.
        self._temp_action_ptr = _ffi.new('struct action**')
    @property
    def actions(self):
        """The set of active "actions"."""
        self._drain_result_q()
        return self._actions.keys()
    @property
    def stats(self):
        """Get over-/underflow statistics from an *inactive* stream.
        To get statistics from an :attr:`~sounddevice.Stream.active`
        stream, use `fetch_and_reset_stats()`.
        """
        if self.active:
            raise RuntimeError('Accessing .stats on an active stream')
        return _ffi.new('struct stats*', self._state.stats)
    def cancel(self, action, time=0, allow_belated=True):
        """Initiate stopping a running action.
        This creates another action that is sent to the callback in
        order to stop the given *action*.
        This function typically returns before the *action* is actually
        stopped. Use `wait()` (on either one of the two actions) to
        wait until it's done.
        """
        cancel_action = _ffi.new('struct action*', dict(
            type=CANCEL,
            # actual_time < 0 signals to the callback that late execution is OK.
            actual_time=-1.0 if allow_belated else 0.0,
            requested_time=time,
            action=action,
        ))
        self._enqueue(cancel_action)
        return cancel_action
    def fetch_and_reset_stats(self, time=0, allow_belated=True):
        """Fetch and reset over-/underflow statistics of the stream.
        The statistics will be available in the ``stats`` field of the
        returned action.
        """
        action = _ffi.new('struct action*', dict(
            type=FETCH_AND_RESET_STATS,
            actual_time=-1.0 if allow_belated else 0.0,
            requested_time=time,
        ))
        self._enqueue(action)
        return action
    def wait(self, action=None, sleeptime=10):
        """Wait for *action* to be finished.
        Between repeatedly checking if the action is finished, this
        waits for *sleeptime* milliseconds.
        If no *action* is given, this waits for all actions.
        """
        if action is None:
            while self.actions:
                _sd.sleep(sleeptime)
        else:
            while action in self.actions:
                _sd.sleep(sleeptime)
    def _check_channels(self, channels, kind):
        """Check if number of channels or mapping was given."""
        assert kind in ('input', 'output')
        try:
            # A sequence means an explicit (1-based) channel mapping was given.
            channels, mapping = len(channels), channels
        except TypeError:
            # A plain number means channels 1..N in order.
            mapping = tuple(range(1, channels + 1))
        # _split() returns the (input, output) pair; pick the relevant side.
        max_channels = _sd._split(self.channels)[kind == 'output']
        if max(mapping) > max_channels:
            raise ValueError('Channel number too large')
        if min(mapping) < 1:
            raise ValueError('Channel numbers start with 1')
        return channels, mapping
    def _enqueue(self, action, keep_alive=None):
        """Hand *action* to the callback, keeping *keep_alive* referenced."""
        self._drain_result_q()
        self._temp_action_ptr[0] = action
        ret = self._action_q.write(self._temp_action_ptr)
        if ret != 1:
            raise RuntimeError('Action queue is full')
        assert action not in self._actions
        self._actions[action] = keep_alive
    def _drain_result_q(self):
        """Get actions from the result queue and discard them."""
        while self._result_q.readinto(self._temp_action_ptr):
            try:
                del self._actions[self._temp_action_ptr[0]]
            except KeyError:
                # The callback must only ever return actions we enqueued.
                assert False
class Mixer(_Base):
    """PortAudio output stream for realtime mixing.
    Takes the same keyword arguments as `sounddevice.OutputStream`,
    except *callback* (a callback function implemented in C is used
    internally) and *dtype* (which is always ``'float32'``).
    Uses default values from `sounddevice.default` (except *dtype*,
    which is always ``'float32'``).
    Has the same methods and attributes as `sounddevice.OutputStream`
    (except :meth:`~sounddevice.Stream.write` and
    :attr:`~sounddevice.Stream.write_available`), plus the following:
    """
    def __init__(self, **kwargs):
        _Base.__init__(self, kind='output', **kwargs)
        self._state.output_channels = self.channels
    def play_buffer(self, buffer, channels, start=0, allow_belated=True):
        """Send a buffer to the callback to be played back.
        After calling this, the *buffer* must not be written to anymore.
        """
        channels, mapping = self._check_channels(channels, 'output')
        buffer = _ffi.from_buffer(buffer)
        # _split() yields (input, output) sample sizes; output is relevant here.
        _, samplesize = _sd._split(self.samplesize)
        action = _ffi.new('struct action*', dict(
            type=PLAY_BUFFER,
            actual_time=-1.0 if allow_belated else 0.0,
            requested_time=start,
            buffer=_ffi.cast('float*', buffer),
            # len(buffer) is in bytes, hence the division by samplesize.
            total_frames=len(buffer) // channels // samplesize,
            channels=channels,
            mapping=mapping,
        ))
        # keep_alive prevents the buffer from being garbage-collected while
        # the C callback still reads from it.
        self._enqueue(action, keep_alive=buffer)
        return action
    def play_ringbuffer(self, ringbuffer, channels=None, start=0,
                        allow_belated=True):
        """Send a `RingBuffer` to the callback to be played back.
        By default, the number of channels is obtained from the ring
        buffer's :attr:`~RingBuffer.elementsize`.
        """
        _, samplesize = _sd._split(self.samplesize)
        if channels is None:
            channels = ringbuffer.elementsize // samplesize
        channels, mapping = self._check_channels(channels, 'output')
        if ringbuffer.elementsize != samplesize * channels:
            raise ValueError('Incompatible elementsize')
        action = _ffi.new('struct action*', dict(
            type=PLAY_RINGBUFFER,
            actual_time=-1.0 if allow_belated else 0.0,
            requested_time=start,
            ringbuffer=ringbuffer._ptr,
            # A ring buffer plays "forever"; cancel() stops it.
            total_frames=ULONG_MAX,
            channels=channels,
            mapping=mapping,
        ))
        self._enqueue(action, keep_alive=ringbuffer)
        return action
class Recorder(_Base):
    """PortAudio input stream for realtime recording.
    Takes the same keyword arguments as `sounddevice.InputStream`,
    except *callback* (a callback function implemented in C is used
    internally) and *dtype* (which is always ``'float32'``).
    Uses default values from `sounddevice.default` (except *dtype*,
    which is always ``'float32'``).
    Has the same methods and attributes as `Mixer`, except that
    `play_buffer()` and `play_ringbuffer()` are replaced by:
    """
    def __init__(self, **kwargs):
        _Base.__init__(self, kind='input', **kwargs)
        self._state.input_channels = self.channels
    def record_buffer(self, buffer, channels, start=0, allow_belated=True):
        """Send a buffer to the callback to be recorded into.
        """
        channels, mapping = self._check_channels(channels, 'input')
        buffer = _ffi.from_buffer(buffer)
        # _split() yields (input, output) sample sizes; input is relevant here.
        samplesize, _ = _sd._split(self.samplesize)
        action = _ffi.new('struct action*', dict(
            type=RECORD_BUFFER,
            actual_time=-1.0 if allow_belated else 0.0,
            requested_time=start,
            buffer=_ffi.cast('float*', buffer),
            # len(buffer) is in bytes, hence the division by samplesize.
            total_frames=len(buffer) // channels // samplesize,
            channels=channels,
            mapping=mapping,
        ))
        # keep_alive prevents the buffer from being garbage-collected while
        # the C callback still writes into it.
        self._enqueue(action, keep_alive=buffer)
        return action
    def record_ringbuffer(self, ringbuffer, channels=None, start=0,
                          allow_belated=True):
        """Send a `RingBuffer` to the callback to be recorded into.
        By default, the number of channels is obtained from the ring
        buffer's :attr:`~RingBuffer.elementsize`.
        """
        samplesize, _ = _sd._split(self.samplesize)
        if channels is None:
            channels = ringbuffer.elementsize // samplesize
        channels, mapping = self._check_channels(channels, 'input')
        if ringbuffer.elementsize != samplesize * channels:
            raise ValueError('Incompatible elementsize')
        action = _ffi.new('struct action*', dict(
            type=RECORD_RINGBUFFER,
            actual_time=-1.0 if allow_belated else 0.0,
            requested_time=start,
            ringbuffer=ringbuffer._ptr,
            # A ring buffer records "forever"; cancel() stops it.
            total_frames=ULONG_MAX,
            channels=channels,
            mapping=mapping,
        ))
        self._enqueue(action, keep_alive=ringbuffer)
        return action
class MixerAndRecorder(Mixer, Recorder):
    """PortAudio stream for realtime mixing and recording.

    Takes the same keyword arguments as `sounddevice.Stream`, except
    *callback* (a callback function implemented in C is used internally)
    and *dtype* (which is always ``'float32'``).

    Uses default values from `sounddevice.default` (except *dtype*,
    which is always ``'float32'``).

    Inherits all methods and attributes from `Mixer` and `Recorder`.
    """

    def __init__(self, **kwargs):
        # Call _Base.__init__ directly (not super()) so that neither
        # Mixer.__init__ nor Recorder.__init__ re-runs stream setup.
        _Base.__init__(self, kind='duplex', **kwargs)
        # For a duplex stream self.channels is an (input, output) pair.
        in_channels, out_channels = self.channels
        self._state.input_channels = in_channels
        self._state.output_channels = out_channels
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float): the mean value of the distribution
        stdev (float): the standard deviation of the distribution
        data (list of floats): data extracted from a data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Calculate the mean of the data set.

        Returns:
            float: mean of the data set
        """
        self.mean = sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) for a sample standard deviation
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        squared_diffs = sum((d - mean) ** 2 for d in self.data)
        self.stdev = math.sqrt(squared_diffs / n)
        return self.stdev

    def plot_histogram(self):
        """Output a histogram of the instance variable data using
        matplotlib's pyplot library.
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function for the Gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        coefficient = 1.0 / (self.stdev * math.sqrt(2 * math.pi))
        exponent = -0.5 * ((x - self.mean) / self.stdev) ** 2
        return coefficient * math.exp(exponent)

    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized histogram of the data and the probability
        density function over the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # interval between consecutive x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: the original re-labelled axes[0] here, leaving the pdf
        # subplot (axes[1]) without a y-axis label.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two independent Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution of the sum
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # variances (not stdevs) add for independent Gaussians
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Output the characteristics of the Gaussian instance.

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import random
from .connection_gene import ConnectionGene
from .node_gene import NodeGene, Type
from .counter import Counter
def random_bool():
    """Return True or False with equal probability."""
    return random.choice([True, False])
class Genome:
    """A NEAT genome: a collection of node genes and connection genes.

    Bug fix: the original stored ``nodes``/``connections``/``hidden_nodes``
    as *class* attributes, so every Genome instance silently shared the
    same containers.  They are now created per instance.
    """

    def __init__(self):
        self.nodes = {}         # node ID -> NodeGene
        self.connections = {}   # innovation number -> ConnectionGene
        self.hidden_nodes = []  # NodeGene objects of Type.HIDDEN

    def addNode(self, node: NodeGene):
        """Register a node gene, keyed by its ID."""
        self.nodes[node.ID] = node

    def remNode(self, node: NodeGene):
        """Remove a node and disable every connection touching it."""
        del self.nodes[node.ID]
        for connection in self.connections.values():
            if node.ID in (connection.inNode, connection.outNode):
                connection.disable()

    def addConnection(self, connection: ConnectionGene):
        """Add a connection unless it would create a cycle.

        If an equal connection already exists it is re-enabled.
        """
        if connection in self.connections.values():
            self.connections[connection.innovation].enable()
        if not creates_loop(self.connections, connection):
            self.connections[connection.innovation] = connection

    def remConnection(self, connection: ConnectionGene):
        """Disable (soft-remove) the given connection."""
        self.connections[connection.innovation].disable()

    def addInput(self, counter):
        """Add a new input node, numbered by *counter*."""
        self.addNode(NodeGene(Type.INPUT, counter.getNumber()))

    def addHidden(self, counter):
        """Add a new hidden node, numbered by *counter*."""
        node = NodeGene(Type.HIDDEN, counter.getNumber())
        self.addNode(node)
        self.hidden_nodes.append(node)

    def addOutput(self, counter):
        """Add a new output node, numbered by *counter*."""
        self.addNode(NodeGene(Type.OUTPUT, counter.getNumber()))

    def addConnectionMutation(self, innovation: Counter):
        """Mutation: connect two random, previously unconnected nodes."""
        node1 = random.choice(list(self.nodes.values()))
        node2 = random.choice(list(self.nodes.values()))
        while node2 == node1:
            node2 = random.choice(list(self.nodes.values()))
        # Feed-forward only: flip the pair when it would point "backwards"
        # (hidden->input, output->hidden, output->input).
        reverse = (
            (node1.TYPE == Type.HIDDEN and node2.TYPE == Type.INPUT)
            or (node1.TYPE == Type.OUTPUT and node2.TYPE == Type.HIDDEN)
            or (node1.TYPE == Type.OUTPUT and node2.TYPE == Type.INPUT)
        )
        for con in self.connections.values():
            # already connected in either direction -> nothing to do
            if {con.inNode, con.outNode} == {node1.ID, node2.ID}:
                return
        new_con = ConnectionGene(node2.ID if reverse else node1.ID,
                                 node1.ID if reverse else node2.ID,
                                 random.uniform(-1, 1), True,
                                 innovation.getNumber())
        self.addConnection(new_con)

    def remConnectionMutation(self):
        """Mutation: disable one random connection.

        Bug fix: the original picked a random *key* (innovation number)
        and passed it to remConnection(), which expects a ConnectionGene.
        """
        if self.connections:
            self.remConnection(random.choice(list(self.connections.values())))

    def addNodeMutation(self, nCounter: Counter, iCounter: Counter):
        """Mutation: split a random connection with a new hidden node."""
        if self.connections:
            con = random.choice(list(self.connections.values()))
            inNode = self.nodes[con.inNode]
            outNode = self.nodes[con.outNode]
            con.disable()
            newNode = NodeGene(Type.HIDDEN, nCounter.getNumber())
            # Standard NEAT: in->new gets weight 1.0, new->out inherits
            # the old connection's weight.
            inToNew = ConnectionGene(inNode.ID, newNode.ID, 1.0, True,
                                     iCounter.getNumber())
            newToOut = ConnectionGene(newNode.ID, outNode.ID, con.weight,
                                      True, iCounter.getNumber())
            self.addNode(newNode)
            # Bug fix: keep hidden_nodes in sync (the original never
            # appended, so remNodeMutation could not see this node).
            self.hidden_nodes.append(newNode)
            self.addConnection(inToNew)
            self.addConnection(newToOut)

    def remNodeMutation(self):
        """Mutation: remove one random hidden node (if any exist).

        Bug fix: the original guarded on ``self.nodes`` but chose from
        ``self.hidden_nodes`` (IndexError when empty) and never removed
        the node from the hidden list.
        """
        if self.hidden_nodes:
            node = random.choice(self.hidden_nodes)
            self.hidden_nodes.remove(node)
            self.remNode(node)

    def addWeightMutation(self, strength: float):
        """Mutation: perturb one random connection weight by up to ±strength."""
        if self.connections:
            con = random.choice(list(self.connections.values()))
            con.weight += random.uniform(-1, 1) * strength

    # NOTE(review): the static helpers below originally took a spurious
    # leading ``self`` parameter, which made compatibilityDistance()'s
    # internal two-argument calls raise TypeError.  The parameter was
    # removed; all four are now plain two-genome static methods.

    @staticmethod
    def countMatchingGenes(genome1, genome2):
        """Count genes (nodes + connections) present in both genomes."""
        matching = 0
        last = max(max(genome1.nodes), max(genome2.nodes))
        # range(last + 1): the original's range(last) skipped the highest
        # innovation number.
        for i in range(last + 1):
            if i in genome1.nodes and i in genome2.nodes:
                matching += 1
        last = max(max(genome1.connections), max(genome2.connections))
        for i in range(last + 1):
            if i in genome1.connections and i in genome2.connections:
                matching += 1
        return matching

    @staticmethod
    def countDisjointGenes(genome1, genome2):
        """Count disjoint genes: present in one genome, missing from the
        other, and *inside* the other's innovation range."""
        disjoint = 0
        hi1 = max(genome1.nodes)
        hi2 = max(genome2.nodes)
        for i in range(max(hi1, hi2) + 1):
            in1 = i in genome1.nodes
            in2 = i in genome2.nodes
            if (not in1 and in2 and hi1 > i) or (in1 and not in2 and hi2 > i):
                disjoint += 1
        # Bug fix: the original omitted connection genes here, although
        # countMatchingGenes/countExcessGenes do include them.
        hi1 = max(genome1.connections)
        hi2 = max(genome2.connections)
        for i in range(max(hi1, hi2) + 1):
            in1 = i in genome1.connections
            in2 = i in genome2.connections
            if (not in1 and in2 and hi1 > i) or (in1 and not in2 and hi2 > i):
                disjoint += 1
        return disjoint

    @staticmethod
    def countExcessGenes(genome1, genome2):
        """Count excess genes: present in one genome *beyond* the other's
        highest innovation number."""
        excess = 0
        hi1 = max(genome1.nodes)
        hi2 = max(genome2.nodes)
        for i in range(max(hi1, hi2) + 1):
            in1 = i in genome1.nodes
            in2 = i in genome2.nodes
            if (not in1 and in2 and hi1 < i) or (in1 and not in2 and hi2 < i):
                excess += 1
        hi1 = max(genome1.connections)
        hi2 = max(genome2.connections)
        for i in range(max(hi1, hi2) + 1):
            in1 = i in genome1.connections
            in2 = i in genome2.connections
            if (not in1 and in2 and hi1 < i) or (in1 and not in2 and hi2 < i):
                excess += 1
        return excess

    @staticmethod
    def averageWeightDiff(genome1, genome2):
        """Average absolute weight difference over matching connections.

        Bug fix: the original indexed the connection dicts with boolean
        membership results, raising KeyError; it also divided by zero
        when no genes matched (now returns 0.0).
        """
        matching = 0
        weight_diff = 0.0
        last = max(max(genome1.connections), max(genome2.connections))
        for i in range(last + 1):
            if i in genome1.connections and i in genome2.connections:
                matching += 1
                weight_diff += abs(genome1.connections[i].weight
                                   - genome2.connections[i].weight)
        return weight_diff / matching if matching else 0.0

    @staticmethod
    def compatibilityDistance(genome1, genome2, c1: int, c2: int, c3: int):
        """NEAT compatibility distance: c1*E/N + c2*D/N + c3*W."""
        E = Genome.countExcessGenes(genome1, genome2)
        D = Genome.countDisjointGenes(genome1, genome2)
        W = Genome.averageWeightDiff(genome1, genome2)
        N = 1  # genome-size normaliser (hard-coded to 1, as originally)
        return c1 * E / N + c2 * D / N + c3 * W

    def __str__(self):
        nodes = str(self.nodes)
        cons = str(self.connections)
        return f"""Genome:
    - Nodes: {len(self.nodes)} {nodes}
    - Connections: {len(self.connections)} {cons}
    """
@staticmethod
def crossover(parent1, parent2):
    """Produce a child genome from two parents (parent1 = fitter parent).

    Nodes are copied from parent1.  For each of parent1's connections:
    matching genes are inherited randomly from either parent, while
    disjoint/excess genes come from parent1 (NEAT convention).

    Bug fixes vs. the original:
      * ``random_bool`` was referenced without calling it, so the truthy
        function object meant parent1's gene was *always* chosen;
      * the spurious leading ``self`` parameter was dropped.
    """
    child = Genome()
    for node in parent1.nodes.values():
        child.addNode(node.copy())
    for con in parent1.connections.values():
        if con.innovation in parent2.connections:  # matching gene
            source = con if random_bool() else parent2.connections[con.innovation]
            child.addConnection(source.copy())
        else:  # disjoint / excess gene
            child.addConnection(con.copy())
    return child
def creates_loop(connections: dict, con):
    """Return True if adding *con* to *connections* would form a cycle.

    Performs a simple reachability search: starting from con.outNode,
    repeatedly follow existing edges; if con.inNode becomes reachable,
    adding the edge would close a loop.
    """
    start, target = con.outNode, con.inNode
    if start == target:
        return True  # self-loop
    edges = [(c.inNode, c.outNode) for c in connections.values()]
    reachable = {start}
    while True:
        grown = 0
        for src, dst in edges:
            if src in reachable and dst not in reachable:
                if dst == target:
                    return True
                reachable.add(dst)
                grown += 1
        if not grown:
            return False
# Attach the module-level crossover helper to the Genome class.
setattr(Genome, 'crossover', crossover)
import os
import shutil
import logging
from automover.utils import get_size
class Torrent(object):
    """A torrent file as returned by a torrent client."""

    def __init__(self, client, torrent_id, finish_time, ratio, path, is_complete):
        """A torrent file.

        :param client: Torrent client to interact with.
        :type client: Client.
        :param torrent_id: The ID to identify the torrent by.
        :type torrent_id: str.
        :param finish_time: Time the torrent was finished.
        :type finish_time: datetime.datetime.
        :param ratio: Upload/Download ratio.
        :type ratio: float.
        :param path: Path to the torrent file.
        :type path: str.
        :param is_complete: Torrent is complete.
        :type is_complete: bool.
        """
        self.client = client
        self.torrent_id = torrent_id
        self.finish_time = finish_time
        self.ratio = ratio
        self.path = path
        self.is_complete = is_complete
        self.logger = logging.getLogger(__name__)
        self.name = os.path.split(path)[1]

    def _move(self, source_path, destination_path):
        """Move torrent data on disk.

        Bug fix: the original ignored its first argument and always moved
        from ``self.path``; the parameter is now honoured.
        """
        shutil.move(source_path, destination_path)

    def move(self, destination):
        """Move the torrent's data into *destination* directory.

        Stops the torrent, updates the client's path, moves the data and
        restarts the torrent.  Returns True on success, False when the
        destination already exists.
        """
        destination_path = os.path.join(destination, self.name)
        if os.path.isfile(destination_path) or os.path.isdir(destination_path):
            self.logger.error('Unable to move %s because %s already exists',
                              self.name, destination_path)
            return False
        self.client.stop(self)
        self.client.set_path(self, destination)
        self._move(self.path, destination_path)
        # Bug fix: track the file's new *full* path.  The original stored
        # the destination directory, desynchronising name/get_size().
        self.path = destination_path
        self.client.start(self)
        self.logger.info('Moved %s to %s', self.name, destination)
        return True

    def delete(self):
        """Delete this torrent via the client."""
        return self.client.delete(self)

    def trackers(self):
        """Return the torrent's tracker list via the client."""
        return self.client.trackers(self)

    def get_size(self):
        """Total on-disk size of the torrent data."""
        return get_size(self.path)

    def __repr__(self):
        return 'Torrent("%s", "%s")' % (self.torrent_id, self.path)
class Client(object):
    """Generic torrent client interface.

    Subclasses must implement every method.  Bug fix: the original did
    ``raise NotImplemented()`` throughout — ``NotImplemented`` is a
    non-callable sentinel, so those lines raised TypeError instead of
    the intended exception.  ``NotImplementedError`` is now raised.
    """

    def list(self):
        """Return a list of all torrents in the client with information.

        :returns: list of Torrent.
        """
        raise NotImplementedError

    def delete(self, torrent):
        """Delete a torrent.

        :param torrent: A torrent file object.
        :type torrent: Torrent.
        :returns: bool -- Delete was successful.
        """
        raise NotImplementedError

    def stop(self, torrent):
        """Stop a torrent.

        :param torrent: A torrent file object.
        :type torrent: Torrent.
        :returns: bool -- Stop was successful.
        """
        raise NotImplementedError

    def start(self, torrent):
        """Start a torrent.

        :param torrent: A torrent file object.
        :type torrent: Torrent.
        :returns: bool -- Start was successful.
        """
        raise NotImplementedError

    def set_path(self, torrent, path):
        """Set the data path for a torrent.

        :param torrent: A torrent file object.
        :type torrent: Torrent.
        :param path: Path to file location.
        :type path: str.
        :returns: bool -- Change path was successful.
        """
        raise NotImplementedError

    def trackers(self, torrent):
        """Return the tracker list for a torrent.

        :param torrent: A torrent file object.
        :type torrent: Torrent.
        :returns: list of strs -- List of torrent trackers.
        """
        raise NotImplementedError
import argparse
import os
import re
from typing import Dict, MutableMapping, NoReturn, Optional, Union
import warnings
import benparse
FilePath = Union[bytes, str]
"""Type used for file paths"""
class RTorrentMigrate:
    """Class for bulk-converting the data dir or session dir of
    rTorrent torrents.

    The data dir is the dir that the torrent contents are saved to.
    The session dir is the rtorrent session dir that the .torrent file
    is located in.

    :param data_old: the current data dir that will be replaced
    :param data_new: the new data dir that ``data_old`` will be
        converted to
    :param session_old: the current session dir that will be replaced
    :param session_new: the new session dir that ``session_old`` will
        be converted to
    :param file_regex: only files that match this regex will be changed
        (used by :func:`migrate_dir`);
        default: ``'.+\\.torrent\\.rtorrent'``
    :param verbose: verbose output
    :param dry_run: do not save any changes made to disk

    Either ``data_old`` + ``data_new`` or ``session_old`` +
    ``session_new`` must be given.  If both are given, both are replaced.

    :Example:

    >>> migrator = RTorrentMigrate(data_old='/torrents', \
data_new='/nas/torrents', verbose=True)
    >>> migrator.migrate_dir('.rtorrent_session', ignore_errors=True)
    """

    data_old: bytes
    data_new: bytes
    session_old: bytes
    session_new: bytes
    file_regex: str = r'.+\.torrent\.rtorrent'
    verbose: bool
    dry_run: bool

    def migrate(self, old_file: str, new_file: Optional[str] = None) -> None:
        """Convert a single file.

        :param old_file: the file to read
        :param new_file: the file to save the modified output to;
            if not given, ``old_file`` is overwritten
        """
        if new_file is None:
            new_file = old_file

        if self.verbose:
            print(f'reading file: {old_file}')
        with open(old_file, 'rb') as file:
            # assume that the bencode is formatted correctly
            bencode: Dict[bytes, bytes] = benparse.load(file)  # type: ignore

        data_changed = False
        session_changed = False
        # The data dir lives under b'directory'; the session dir under
        # b'loaded_file'.  Both branches share _replace_field (factored
        # out of the original's duplicated code).
        if hasattr(self, 'data_old'):
            data_changed = self._replace_field(
                bencode, b'directory', self.data_old, self.data_new,
                'data path'
            )
        if hasattr(self, 'session_old'):
            session_changed = self._replace_field(
                bencode, b'loaded_file', self.session_old, self.session_new,
                'session path'
            )

        if (data_changed or session_changed) and not self.dry_run:
            if self.verbose:
                print(f'\twriting to: {new_file}')
            with open(new_file, 'wb') as file:
                benparse.dump(bencode, file)

    def _replace_field(
        self, bencode: MutableMapping[bytes, bytes], key: bytes,
        old: bytes, new: bytes, label: str
    ) -> bool:
        """Replace the ``old`` prefix of ``bencode[key]`` with ``new``,
        logging before/after values when verbose.

        Returns ``True`` if the value was changed.
        """
        if self.verbose:
            current_value = self._decode_bytes(bencode[key])
            # '!r' is used because mypy complains about it being omitted
            # when the value is a byte string
            print(f'\tcurrent {label}: {current_value!r}')
        changed = self._bencode_replace(bencode, key, old, new)
        if self.verbose and changed:
            new_value = self._decode_bytes(bencode[key])
            print(f'\t\tchanged to: {new_value!r}')
        return changed

    def migrate_dir(
        self, torrent_dir: str, *,
        file_regex: Optional[str] = None, ignore_errors: bool = False
    ) -> None:
        """Convert all files in a directory.

        :param torrent_dir: the directory to walk through
        :param file_regex: only files matching this regex will be
            converted; defaults to the value from the constructor.
            Only the base filename is matched (dir names are ignored)
            and the regex must match the entire filename
        :param ignore_errors: if ``True``, errors encountered when
            reading/parsing/writing files are not fatal and the walk
            moves on to the next file
        """
        if file_regex is None:
            file_regex = self.file_regex
        onerror = self._warn_oserror if ignore_errors else self._raise_oserror

        if self.verbose:
            print(f'reading dir: {torrent_dir}')
        for root, _, files in os.walk(torrent_dir, onerror=onerror):
            for file in files:
                if re.fullmatch(file_regex, file):
                    try:
                        self.migrate(os.path.join(root, file))
                    except Exception as exception:
                        if not ignore_errors:
                            raise
                        warnings.warn(f'{file}: {exception}')

    @staticmethod
    def _raise_oserror(exception: OSError) -> NoReturn:
        """Raise any errors generated by ``os.walk`` in :func:`migrate_dir`."""
        raise exception

    @staticmethod
    def _warn_oserror(exception: OSError) -> None:
        """Warn (and continue) for errors generated by ``os.walk``."""
        warnings.warn(f'{exception.filename}: {exception}')

    @staticmethod
    def _decode_bytes(bytes_: bytes) -> Union[str, bytes]:
        """Attempt to decode the given byte string as UTF-8.

        If decoding fails, the original byte string is returned instead.
        Used only for verbose output, so the encoding is hard-coded and
        does not affect operation.
        """
        try:
            return bytes_.decode()
        except UnicodeError:
            return bytes_

    @staticmethod
    def _bencode_replace(
        bencode: MutableMapping[bytes, bytes], key: bytes,
        old: bytes, new: bytes
    ) -> bool:
        """Replace the value of ``key`` in ``bencode`` with ``new`` if it
        starts with ``old``; only the prefix is replaced.

        Raises ``KeyError`` if ``key`` is missing.  Returns ``True`` when
        the bencode was changed, otherwise ``False``.
        """
        value = bencode[key]
        if value.startswith(old):
            bencode[key] = new + value[len(old):]
            return True
        return False

    @staticmethod
    def _format_path(path: 'FilePath') -> bytes:
        """Convert ``path`` to bytes and add a trailing slash.

        The trailing slash avoids partially replacing a dir name.
        """
        if isinstance(path, str):
            path = path.encode()
        if not path.endswith(b'/'):
            path += b'/'
        return path

    def __init__(
        self, *,
        data_old: Optional['FilePath'] = None,
        data_new: Optional['FilePath'] = None,
        session_old: Optional['FilePath'] = None,
        session_new: Optional['FilePath'] = None,
        file_regex: Optional[str] = None,
        verbose: bool = False, dry_run: bool = False
    ):
        # the old/new halves of each pair must be given together
        if (data_old is None) != (data_new is None):
            raise ValueError('data_old and data_new must both be defined')
        if (session_old is None) != (session_new is None):
            raise ValueError(
                'session_old and session_new must both be defined'
            )
        if data_old is None and session_old is None:
            raise ValueError(
                'not enough arguments. must provide data_old+data_new and/or '
                'session_old+session_new'
            )

        if data_old is not None:
            self.data_old = self._format_path(data_old)
        if data_new is not None:
            self.data_new = self._format_path(data_new)
        if session_old is not None:
            self.session_old = self._format_path(session_old)
        if session_new is not None:
            self.session_new = self._format_path(session_new)
        if file_regex is not None:
            self.file_regex = file_regex
        self.verbose = verbose
        self.dry_run = dry_run
def _get_parser(include_epilog: bool = False) -> argparse.ArgumentParser:
    """Return the arg parser used by :func:`main`.

    If ``include_epilog`` is `True`, the parser epilog is set to a usage
    example; otherwise there is no epilog.  This defaults to `False`
    because Sphinx has a dedicated page for examples, so sphinx-argparse
    doesn't need one.
    """
    epilog = None
    if include_epilog:
        epilog = (
            'example: '
            '%(prog)s /path/to/dir -v --data /torrents /nas/torrents '
            '--session /torrents/.session /nas/.rtorrent'
        )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Migrate rTorrent torrents to a new path',
        epilog=epilog
    )
    # declarative table of (flags, options) pairs, registered below
    argument_specs = [
        (('rtorrent_files',),
         dict(nargs='+',
              help='.rtorrent files to be changed. also accepts directories')),
        (('-v', '--verbose'),
         dict(action='store_true', help='enables verbose output')),
        (('-i', '--ignore'),
         dict(action='store_true',
              help=('ignore errors when reading/writing files. continue to '
                    'the next file instead of aborting'))),
        (('-n', '--dry-run'),
         dict(action='store_true',
              help='perform a trial run with no changes made')),
        (('-d', '--data'),
         dict(nargs=2, metavar=('OLD', 'NEW'),
              help='directory where the torrent contents are located')),
        (('-s', '--session'),
         dict(nargs=2, metavar=('OLD', 'NEW'),
              help='rtorrent session directory where the .torrent files '
                   'are located')),
        (('-r', '--regex'),
         dict(default=RTorrentMigrate.file_regex,
              help='only files whose filename match this regex will '
                   'be changed')),
    ]
    for flags, options in argument_specs:
        parser.add_argument(*flags, **options)
    return parser
def _main() -> None:
    """Command-line entry point: parse args and run the migrator."""
    parser = _get_parser(True)
    args = parser.parse_args()

    if args.data is None and args.session is None:
        parser.error('--data and/or --session must be provided')

    kwargs = {
        'verbose': args.verbose,
        'dry_run': args.dry_run,
        'file_regex': args.regex,
    }
    if args.data:
        kwargs['data_old'], kwargs['data_new'] = args.data
    if args.session:
        kwargs['session_old'], kwargs['session_new'] = args.session

    migrator = RTorrentMigrate(**kwargs)
    for path in args.rtorrent_files:
        if os.path.isdir(path):
            migrator.migrate_dir(path, ignore_errors=args.ignore)
            continue
        try:
            migrator.migrate(path)
        except Exception as exception:
            if not args.ignore:
                raise
            warnings.warn(f'{path}: {exception}')
from rtorrent.compat import is_py3
import os.path
import re
import rtorrent.lib.bencode as bencode
import hashlib
if is_py3():
from urllib.request import urlopen # @UnresolvedImport @UnusedImport
else:
from urllib2 import urlopen # @UnresolvedImport @Reimport
class TorrentParser():
    """Decode and parse a torrent given as a URL, file path or raw data."""

    def __init__(self, torrent):
        """Decode and parse given torrent.

        @param torrent: handles: urls, file paths, string of torrent data
        @type torrent: str

        @raise AssertionError: Can be raised for a couple reasons:
            - If _get_raw_torrent() couldn't figure out what X{torrent} is
            - if X{torrent} isn't a valid bencoded torrent file
        """
        self.torrent = torrent
        self._raw_torrent = None      # raw bencoded bytes
        self._torrent_decoded = None  # decoded dict
        self.file_type = None         # "raw", "file" or "url"

        self._get_raw_torrent()
        assert self._raw_torrent is not None, "Couldn't get raw_torrent."
        if self._torrent_decoded is None:
            self._decode_torrent()
        assert isinstance(self._torrent_decoded, dict), "Invalid torrent file."
        self._parse_torrent()

    def _is_raw(self):
        """Return True when self.torrent is already raw bencoded data."""
        raw = False
        if isinstance(self.torrent, (str, bytes)):
            if isinstance(self._decode_torrent(self.torrent), dict):
                raw = True
            else:
                # reset self._torrent_decoded (decoding produced a
                # non-dict) so __init__ decodes the real data later
                self._torrent_decoded = None
        return raw

    def _get_raw_torrent(self):
        """Get raw torrent data by determining what self.torrent is."""
        # already raw?
        if self._is_raw():
            self.file_type = "raw"
            self._raw_torrent = self.torrent
            return
        # local file?
        if os.path.isfile(self.torrent):
            self.file_type = "file"
            # Bug fix: close the file handle (the original leaked it).
            with open(self.torrent, "rb") as torrent_file:
                self._raw_torrent = torrent_file.read()
        # url?
        elif re.search(r"^(http|ftp)s?:\/\/", self.torrent, re.I):
            self.file_type = "url"
            self._raw_torrent = urlopen(self.torrent).read()

    def _decode_torrent(self, raw_torrent=None):
        """Bdecode raw data (defaults to self._raw_torrent), caching the
        result in self._torrent_decoded."""
        if raw_torrent is None:
            raw_torrent = self._raw_torrent
        self._torrent_decoded = bencode.decode(raw_torrent)
        return self._torrent_decoded

    def _calc_info_hash(self):
        """SHA1 of the bencoded info dict (uppercase hex), or None."""
        self.info_hash = None
        if "info" in self._torrent_decoded.keys():
            info_encoded = bencode.encode(self._torrent_decoded["info"])
            if info_encoded:
                self.info_hash = hashlib.sha1(info_encoded).hexdigest().upper()
        return self.info_hash

    def _parse_torrent(self):
        """Expose decoded top-level keys as attributes
        (spaces become underscores, names are lowercased)."""
        for k in self._torrent_decoded:
            key = k.replace(" ", "_").lower()
            setattr(self, key, self._torrent_decoded[k])
        self._calc_info_hash()
class NewTorrentParser(object):
    """Simpler torrent parser: accepts a path, raw data or file object."""

    @staticmethod
    def _read_file(fp):
        """Read and return the full contents of a file object."""
        return fp.read()

    @staticmethod
    def _write_file(fp):
        # NOTE(review): fp.write() is called without data, so this helper
        # raises TypeError if ever used; it appears unfinished and is
        # unused — left in place to preserve the class interface.
        fp.write()
        return fp

    @staticmethod
    def _decode_torrent(data):
        """Bdecode raw torrent bytes into a dict."""
        return bencode.decode(data)

    def __init__(self, input):
        self.input = input
        self._raw_torrent = None
        self._decoded_torrent = None
        self._hash_outdated = False

        if isinstance(self.input, (str, bytes)):
            # path to file?
            if os.path.isfile(self.input):
                # Bug fix: close the file handle (the original leaked it).
                with open(self.input, "rb") as fp:
                    self._raw_torrent = self._read_file(fp)
            else:
                # assume input was the raw torrent data (do we really
                # want this?)
                self._raw_torrent = self.input
        # file-like object?  Bug fix: the original called
        # self.input.hasattr("read"), which raises AttributeError.
        elif hasattr(self.input, "read"):
            self._raw_torrent = self._read_file(self.input)

        assert self._raw_torrent is not None, \
            "Invalid input: input must be a path or a file-like object"
        self._decoded_torrent = self._decode_torrent(self._raw_torrent)
        assert isinstance(
            self._decoded_torrent, dict), "File could not be decoded"

    def _calc_info_hash(self):
        """SHA1 of the bencoded info dict (uppercase hex)."""
        self.info_hash = None
        # Bug fix: the original read self._torrent_decoded, an attribute
        # that does not exist on this class.
        info_dict = self._decoded_torrent["info"]
        self.info_hash = hashlib.sha1(bencode.encode(
            info_dict)).hexdigest().upper()
        return self.info_hash

    def set_tracker(self, tracker):
        """Set the announce URL in the decoded torrent."""
        self._decoded_torrent["announce"] = tracker

    def get_tracker(self):
        """Return the announce URL, or None when absent."""
        return self._decoded_torrent.get("announce")
from rtorrent.common import _py3, cmd_exists, find_torrent, \
is_valid_port, bool_to_int, convert_version_tuple_to_str
from rtorrent.lib.torrentparser import TorrentParser
from rtorrent.rpc import Method
from rtorrent.torrent import Torrent
import os.path
import rtorrent.rpc #@UnresolvedImport
import sys
import time
if _py3:
import xmlrpc.client as xmlrpclib #@UnresolvedImport
from urllib.request import urlopen #@UnresolvedImport
else:
import xmlrpclib #@UnresolvedImport @Reimport
from urllib2 import urlopen #@UnresolvedImport @Reimport
__version__ = "0.2.9"
__author__ = "Chris Lucas"
__contact__ = "chris@chrisjlucas.com"
__license__ = "MIT"
MIN_RTORRENT_VERSION = (0, 8, 1)
MIN_RTORRENT_VERSION_STR = convert_version_tuple_to_str(MIN_RTORRENT_VERSION)
class RTorrent:
""" Create a new rTorrent connection """
rpc_prefix = None
    def __init__(self, url, _verbose=False):
        """Connect to rTorrent, verify the server and fetch initial state.

        @param url: XMLRPC endpoint of the rTorrent server
        @param _verbose: pass verbose flag through to xmlrpclib
        """
        self.url = url  #: From X{__init__(self, url)}
        self._verbose = _verbose
        self.torrents = []  #: List of L{Torrent} instances
        self._rpc_methods = []  #: List of rTorrent RPC methods
        self._torrent_cache = []

        # NOTE(review): assert is stripped under ``python -O``; these
        # validations would then silently disappear.
        assert self._verify_conn(self._get_xmlrpc_conn()) \
            is True, "rTorrent connection failed"

        # e.g. "0.9.6" -> (0, 9, 6)
        self.client_version_tuple = tuple([int(i) for i in \
            self._get_xmlrpc_conn().system.client_version().split(".")])
        assert self._meets_version_requirement() is True, \
            "Error: Minimum rTorrent version required is {0}".format(
                MIN_RTORRENT_VERSION_STR)

        # populate global state and the initial torrent list
        self.update()
        self.get_torrents()
def _get_xmlrpc_conn(self):
"""Get ServerProxy instance"""
return(xmlrpclib.ServerProxy(self.url, verbose=self._verbose))
    def _verify_conn(self, conn):
        """Verify given ServerProxy connection is to an rTorrent XMLRPC server"""
        try:
            # also caches the supported method list for get_rpc_methods()
            self._rpc_methods = conn.system.listMethods()
        except xmlrpclib.ProtocolError as err:
            sys.stderr.write("*** Exception caught: ProtocolError\n")
            sys.stderr.write("URL: {0}\n".format(err.url))
            sys.stderr.write("Error code: {0}\n".format(err.errcode))
            sys.stderr.write("Error message: {0}\n".format(err.errmsg))
            return(False)
        except xmlrpclib.ResponseError:
            sys.stderr.write("*** Exception caught: ResponseError")
            return(False)

        # simple check, probably sufficient
        if "system.client_version" not in self._rpc_methods \
                or "system.library_version" not in self._rpc_methods:
            return(False)
        else:
            return(True)
def _meets_version_requirement(self):
"""Check if rTorrent version is meets requirements"""
if hasattr(self, "client_version_tuple"):
return(self.client_version_tuple >= MIN_RTORRENT_VERSION)
else:
return(False)
def get_rpc_methods(self):
"""Get list of raw RPC commands supported by rTorrent
@return: raw RPC commands
@rtype: list
"""
return(self._rpc_methods)
def get_torrents(self, view="main"):
"""Get list of all torrents in specified view
@return: list of L{Torrent} instances
@rtype: list
@todo: add validity check for specified view
"""
self.torrents = []
methods = rtorrent.torrent.methods
retriever_methods = [m for m in methods \
if m.is_retriever() and m.is_available(self)]
m = rtorrent.rpc.Multicall(self)
m.add("d.multicall", view, "d.get_hash=",
*[method.rpc_call + "=" for method in retriever_methods])
results = m.call()[0] # only sent one call, only need first result
for result in results:
results_dict = {}
# build results_dict
for m, r in zip(retriever_methods, result[1:]): # result[0] is the info_hash
results_dict[m.varname] = rtorrent.rpc.process_result(m, r)
self.torrents.append(
Torrent(self, info_hash=result[0], **results_dict)
)
self._manage_torrent_cache()
return(self.torrents)
def _manage_torrent_cache(self):
"""Carry tracker/peer/file lists over to new torrent list"""
for torrent in self._torrent_cache:
new_torrent = rtorrent.common.find_torrent(torrent.info_hash,
self.torrents)
if new_torrent != -1:
new_torrent.files = torrent.files
new_torrent.peers = torrent.peers
new_torrent.trackers = torrent.trackers
self._torrent_cache = self.torrents
def _get_load_function(self, file_type, start, verbose):
"""Determine correct "load torrent" RPC method"""
func_name = None
if file_type == "url":
# url strings can be input directly
if start and verbose: func_name = "load_start_verbose"
elif start: func_name = "load_start"
elif verbose: func_name = "load_verbose"
else: func_name = "load"
elif file_type in ["file", "raw"]:
if start and verbose: func_name = "load_raw_start_verbose"
elif start: func_name = "load_raw_start"
elif verbose: func_name = "load_raw_verbose"
else: func_name = "load_raw"
return(func_name)
    def load_torrent(self, torrent, start=False, verbose=False, verify_load=True):
        """
        Loads torrent into rTorrent (with various enhancements)

        @param torrent: can be a url, a path to a local file, or the raw data
        of a torrent file
        @type torrent: str

        @param start: start torrent when loaded
        @type start: bool

        @param verbose: print error messages to rTorrent log
        @type verbose: bool

        @param verify_load: verify that torrent was added to rTorrent successfully
        @type verify_load: bool

        @return: Depends on verify_load:
                 - if verify_load is True, (and the torrent was
                 loaded successfully), it'll return a L{Torrent} instance
                 - if verify_load is False, it'll return None

        @rtype: L{Torrent} instance or None

        @raise AssertionError: If the torrent wasn't successfully added to rTorrent
                               - Check L{TorrentParser} for the AssertionError's
                               it raises

        @note: Because this function includes url verification (if a url was input)
        as well as verification as to whether the torrent was successfully added,
        this function doesn't execute instantaneously. If that's what you're
        looking for, use load_torrent_simple() instead.
        """
        p = self._get_xmlrpc_conn()
        # TorrentParser normalises url/file/raw inputs into raw bytes
        tp = TorrentParser(torrent)
        torrent = xmlrpclib.Binary(tp._raw_torrent)
        info_hash = tp.info_hash

        # always send raw data since TorrentParser already fetched it
        func_name = self._get_load_function("raw", start, verbose)

        # load torrent
        getattr(p, func_name)(torrent)

        if verify_load:
            # poll until the new info hash shows up in the torrent list
            MAX_RETRIES = 3
            i = 0
            while i < MAX_RETRIES:
                self.get_torrents()
                if info_hash in [t.info_hash for t in self.torrents]: break
                time.sleep(1)  # was still getting AssertionErrors, delay should help
                i += 1

            assert info_hash in [t.info_hash for t in self.torrents], "Adding torrent was unsuccessful."

        return(find_torrent(info_hash, self.torrents))
def load_torrent_simple(self, torrent, file_type,
                        start=False, verbose=False):
    """Loads torrent into rTorrent
    @param torrent: can be a url, a path to a local file, or the raw data
    of a torrent file
    @type torrent: str
    @param file_type: valid options: "url", "file", or "raw"
    @type file_type: str
    @param start: start torrent when loaded
    @type start: bool
    @param verbose: print error messages to rTorrent log
    @type verbose: bool
    @return: None
    @raise AssertionError: if incorrect file_type is specified
    @note: This function was written for speed, it includes no enhancements.
    If you input a url, it won't check if it's valid. You also can't get
    verification that the torrent was successfully added to rTorrent.
    Use load_torrent() if you would like these features.
    """
    p = self._get_xmlrpc_conn()
    assert file_type in ["raw", "file", "url"], \
        "Invalid file_type, options are: 'url', 'file', 'raw'."
    func_name = self._get_load_function(file_type, start, verbose)
    if file_type == "file":
        # since we have to assume we're connected to a remote rTorrent
        # client, we have to read the file and send it to rT as raw
        assert os.path.isfile(torrent), \
            "Invalid path: \"{0}\"".format(torrent)
        # context manager guarantees the handle is closed
        # (previously leaked via open(torrent, "rb").read())
        with open(torrent, "rb") as f:
            torrent = f.read()
    if file_type in ["raw", "file"]:
        finput = xmlrpclib.Binary(torrent)
    elif file_type == "url":
        finput = torrent
    getattr(p, func_name)(finput)
def set_dht_port(self, port):
    """Set DHT port
    @param port: port
    @type port: int
    @raise AssertionError: if invalid port is given
    """
    assert is_valid_port(port), "Valid port range is 0-65535"
    # store whatever the server reports back after applying the setting
    self.dht_port = self._p.set_dht_port(port)
def enable_check_hash(self):
    """Alias for set_check_hash(True)"""
    self.set_check_hash(True)
def disable_check_hash(self):
    """Alias for set_check_hash(False)"""
    self.set_check_hash(False)
def find_torrent(self, info_hash):
    """Frontend for rtorrent.common.find_torrent

    Refreshes the torrent list (via get_torrents()) before searching.
    """
    return(rtorrent.common.find_torrent(info_hash, self.get_torrents()))
def poll(self):
    """Poll rTorrent for the latest torrent/peer/tracker/file information.

    @note: This essentially refreshes every aspect of the rTorrent
    connection, so it can be very slow if working with a remote
    connection that has a lot of torrents loaded.
    @return: None
    """
    self.update()
    for torrent in self.get_torrents():
        torrent.poll()
def update(self):
    """Refresh rTorrent client info
    @note: All fields are stored as attributes to self.
    @return: None
    """
    multicall = rtorrent.rpc.Multicall(self)
    # `methods` is the module-level RTorrent Method list defined below
    retriever_methods = [m for m in methods \
                         if m.is_retriever() and m.is_available(self)]
    for method in retriever_methods:
        multicall.add(method)
    # NOTE(review): results appear to be stored as attributes on self by
    # Multicall.call() — confirm against rtorrent.rpc
    multicall.call()
def _build_class_methods(class_obj):
    """Attach a multicall_add() convenience method to the given class."""
    def caller(self, multicall, method, *args):
        # forward to Multicall.add(), inserting this object's rpc_id;
        # return its result to match the original lambda's behavior
        return multicall.add(method, self.rpc_id, *args)
    caller.__doc__ = """Same as Multicall.add(), but with automatic inclusion
    of the rpc_id
    @param multicall: A L{Multicall} instance
    @type: multicall: Multicall
    @param method: L{Method} instance or raw rpc method
    @type: Method or str
    @param args: optional arguments to pass
    """
    setattr(class_obj, "multicall_add", caller)
def __compare_rpc_methods(rt_new, rt_old):
    """Debug helper: print the RPC-method diff between two clients."""
    from pprint import pprint
    new_set = set(rt_new.get_rpc_methods())
    old_set = set(rt_old.get_rpc_methods())
    print("New Methods:")
    pprint(new_set - old_set)
    print("Methods not in new rTorrent:")
    pprint(old_set - new_set)
def __check_supported_methods(rt):
    """Debug helper: print which server RPC methods this library covers."""
    from pprint import pprint
    local_methods = (methods
                     + rtorrent.file.methods
                     + rtorrent.torrent.methods
                     + rtorrent.tracker.methods
                     + rtorrent.peer.methods)
    supported_methods = set(m.rpc_call for m in local_methods)
    all_methods = set(rt.get_rpc_methods())
    print("Methods NOT in supported methods")
    pprint(all_methods - supported_methods)
    print("Supported methods NOT in all methods")
    pprint(supported_methods - all_methods)
methods = [
# RETRIEVERS
Method(RTorrent, 'get_xmlrpc_size_limit', 'get_xmlrpc_size_limit'),
Method(RTorrent, 'get_proxy_address', 'get_proxy_address'),
Method(RTorrent, 'get_split_suffix', 'get_split_suffix'),
Method(RTorrent, 'get_up_limit', 'get_upload_rate'),
Method(RTorrent, 'get_max_memory_usage', 'get_max_memory_usage'),
Method(RTorrent, 'get_max_open_files', 'get_max_open_files'),
Method(RTorrent, 'get_min_peers_seed', 'get_min_peers_seed'),
Method(RTorrent, 'get_use_udp_trackers', 'get_use_udp_trackers'),
Method(RTorrent, 'get_preload_min_size', 'get_preload_min_size'),
Method(RTorrent, 'get_max_uploads', 'get_max_uploads'),
Method(RTorrent, 'get_max_peers', 'get_max_peers'),
Method(RTorrent, 'get_timeout_sync', 'get_timeout_sync'),
Method(RTorrent, 'get_receive_buffer_size', 'get_receive_buffer_size'),
Method(RTorrent, 'get_split_file_size', 'get_split_file_size'),
Method(RTorrent, 'get_dht_throttle', 'get_dht_throttle'),
Method(RTorrent, 'get_max_peers_seed', 'get_max_peers_seed'),
Method(RTorrent, 'get_min_peers', 'get_min_peers'),
Method(RTorrent, 'get_tracker_numwant', 'get_tracker_numwant'),
Method(RTorrent, 'get_max_open_sockets', 'get_max_open_sockets'),
Method(RTorrent, 'get_session', 'get_session'),
Method(RTorrent, 'get_ip', 'get_ip'),
Method(RTorrent, 'get_scgi_dont_route', 'get_scgi_dont_route'),
Method(RTorrent, 'get_hash_read_ahead', 'get_hash_read_ahead'),
Method(RTorrent, 'get_http_cacert', 'get_http_cacert'),
Method(RTorrent, 'get_dht_port', 'get_dht_port'),
Method(RTorrent, 'get_handshake_log', 'get_handshake_log'),
Method(RTorrent, 'get_preload_type', 'get_preload_type'),
Method(RTorrent, 'get_max_open_http', 'get_max_open_http'),
Method(RTorrent, 'get_http_capath', 'get_http_capath'),
Method(RTorrent, 'get_max_downloads_global', 'get_max_downloads_global'),
Method(RTorrent, 'get_name', 'get_name'),
Method(RTorrent, 'get_session_on_completion', 'get_session_on_completion'),
Method(RTorrent, 'get_down_limit', 'get_download_rate'),
Method(RTorrent, 'get_down_total', 'get_down_total'),
Method(RTorrent, 'get_up_rate', 'get_up_rate'),
Method(RTorrent, 'get_hash_max_tries', 'get_hash_max_tries'),
Method(RTorrent, 'get_peer_exchange', 'get_peer_exchange'),
Method(RTorrent, 'get_down_rate', 'get_down_rate'),
Method(RTorrent, 'get_connection_seed', 'get_connection_seed'),
Method(RTorrent, 'get_http_proxy', 'get_http_proxy'),
Method(RTorrent, 'get_stats_preloaded', 'get_stats_preloaded'),
Method(RTorrent, 'get_timeout_safe_sync', 'get_timeout_safe_sync'),
Method(RTorrent, 'get_hash_interval', 'get_hash_interval'),
Method(RTorrent, 'get_port_random', 'get_port_random'),
Method(RTorrent, 'get_directory', 'get_directory'),
Method(RTorrent, 'get_port_open', 'get_port_open'),
Method(RTorrent, 'get_max_file_size', 'get_max_file_size'),
Method(RTorrent, 'get_stats_not_preloaded', 'get_stats_not_preloaded'),
Method(RTorrent, 'get_memory_usage', 'get_memory_usage'),
Method(RTorrent, 'get_connection_leech', 'get_connection_leech'),
Method(RTorrent, 'get_check_hash', 'get_check_hash',
boolean=True,
),
Method(RTorrent, 'get_session_lock', 'get_session_lock'),
Method(RTorrent, 'get_preload_required_rate', 'get_preload_required_rate'),
Method(RTorrent, 'get_max_uploads_global', 'get_max_uploads_global'),
Method(RTorrent, 'get_send_buffer_size', 'get_send_buffer_size'),
Method(RTorrent, 'get_port_range', 'get_port_range'),
Method(RTorrent, 'get_max_downloads_div', 'get_max_downloads_div'),
Method(RTorrent, 'get_max_uploads_div', 'get_max_uploads_div'),
Method(RTorrent, 'get_safe_sync', 'get_safe_sync'),
Method(RTorrent, 'get_bind', 'get_bind'),
Method(RTorrent, 'get_up_total', 'get_up_total'),
Method(RTorrent, 'get_client_version', 'system.client_version'),
Method(RTorrent, 'get_library_version', 'system.library_version'),
Method(RTorrent, 'get_api_version', 'system.api_version',
min_version=(0, 9, 1)
),
Method(RTorrent, "get_system_time", "system.time",
docstring="""Get the current time of the system rTorrent is running on
@return: time (posix)
@rtype: int""",
),
# MODIFIERS
Method(RTorrent, 'set_http_proxy', 'set_http_proxy'),
Method(RTorrent, 'set_max_memory_usage', 'set_max_memory_usage'),
Method(RTorrent, 'set_max_file_size', 'set_max_file_size'),
Method(RTorrent, 'set_bind', 'set_bind',
docstring="""Set address bind
@param arg: ip address
@type arg: str
""",
),
Method(RTorrent, 'set_up_limit', 'set_upload_rate',
docstring="""Set global upload limit (in bytes)
@param arg: speed limit
@type arg: int
""",
),
Method(RTorrent, 'set_port_random', 'set_port_random'),
Method(RTorrent, 'set_connection_leech', 'set_connection_leech'),
Method(RTorrent, 'set_tracker_numwant', 'set_tracker_numwant'),
Method(RTorrent, 'set_max_peers', 'set_max_peers'),
Method(RTorrent, 'set_min_peers', 'set_min_peers'),
Method(RTorrent, 'set_max_uploads_div', 'set_max_uploads_div'),
Method(RTorrent, 'set_max_open_files', 'set_max_open_files'),
Method(RTorrent, 'set_max_downloads_global', 'set_max_downloads_global'),
Method(RTorrent, 'set_session_lock', 'set_session_lock'),
Method(RTorrent, 'set_session', 'set_session'),
Method(RTorrent, 'set_split_suffix', 'set_split_suffix'),
Method(RTorrent, 'set_hash_interval', 'set_hash_interval'),
Method(RTorrent, 'set_handshake_log', 'set_handshake_log'),
Method(RTorrent, 'set_port_range', 'set_port_range'),
Method(RTorrent, 'set_min_peers_seed', 'set_min_peers_seed'),
Method(RTorrent, 'set_scgi_dont_route', 'set_scgi_dont_route'),
Method(RTorrent, 'set_preload_min_size', 'set_preload_min_size'),
Method(RTorrent, 'set_log.tracker', 'set_log.tracker'),
Method(RTorrent, 'set_max_uploads_global', 'set_max_uploads_global'),
Method(RTorrent, 'set_down_limit', 'set_download_rate',
docstring="""Set global download limit (in bytes)
@param arg: speed limit
@type arg: int
""",
),
Method(RTorrent, 'set_preload_required_rate', 'set_preload_required_rate'),
Method(RTorrent, 'set_hash_read_ahead', 'set_hash_read_ahead'),
Method(RTorrent, 'set_max_peers_seed', 'set_max_peers_seed'),
Method(RTorrent, 'set_max_uploads', 'set_max_uploads'),
Method(RTorrent, 'set_session_on_completion', 'set_session_on_completion'),
Method(RTorrent, 'set_max_open_http', 'set_max_open_http'),
Method(RTorrent, 'set_directory', 'set_directory'),
Method(RTorrent, 'set_http_cacert', 'set_http_cacert'),
Method(RTorrent, 'set_dht_throttle', 'set_dht_throttle'),
Method(RTorrent, 'set_hash_max_tries', 'set_hash_max_tries'),
Method(RTorrent, 'set_proxy_address', 'set_proxy_address'),
Method(RTorrent, 'set_split_file_size', 'set_split_file_size'),
Method(RTorrent, 'set_receive_buffer_size', 'set_receive_buffer_size'),
Method(RTorrent, 'set_use_udp_trackers', 'set_use_udp_trackers'),
Method(RTorrent, 'set_connection_seed', 'set_connection_seed'),
Method(RTorrent, 'set_xmlrpc_size_limit', 'set_xmlrpc_size_limit'),
Method(RTorrent, 'set_xmlrpc_dialect', 'set_xmlrpc_dialect'),
Method(RTorrent, 'set_safe_sync', 'set_safe_sync'),
Method(RTorrent, 'set_http_capath', 'set_http_capath'),
Method(RTorrent, 'set_send_buffer_size', 'set_send_buffer_size'),
Method(RTorrent, 'set_max_downloads_div', 'set_max_downloads_div'),
Method(RTorrent, 'set_name', 'set_name'),
Method(RTorrent, 'set_port_open', 'set_port_open'),
Method(RTorrent, 'set_timeout_sync', 'set_timeout_sync'),
Method(RTorrent, 'set_peer_exchange', 'set_peer_exchange'),
Method(RTorrent, 'set_ip', 'set_ip',
docstring="""Set IP
@param arg: ip address
@type arg: str
""",
),
Method(RTorrent, 'set_timeout_safe_sync', 'set_timeout_safe_sync'),
Method(RTorrent, 'set_preload_type', 'set_preload_type'),
Method(RTorrent, 'set_check_hash', 'set_check_hash',
docstring="""Enable/Disable hash checking on finished torrents
@param arg: True to enable, False to disable
@type arg: bool
""",
boolean=True,
),
]
# Master list of every Method definition across the library (used by the
# debug helpers above).
_all_methods_list = [methods,
                     rtorrent.file.methods,
                     rtorrent.torrent.methods,
                     rtorrent.tracker.methods,
                     rtorrent.peer.methods,
                     ]
# Maps each wrapper class to the Method definitions that belong to it.
class_methods_pair = {
    RTorrent : methods,
    rtorrent.file.File : rtorrent.file.methods,
    rtorrent.torrent.Torrent : rtorrent.torrent.methods,
    rtorrent.tracker.Tracker : rtorrent.tracker.methods,
    rtorrent.peer.Peer : rtorrent.peer.methods,
}
# At import time, attach the generated RPC accessors and the
# multicall_add() convenience wrapper to every class.
for c in class_methods_pair.keys():
    rtorrent.rpc._build_rpc_methods(c, class_methods_pair[c])
    _build_class_methods(c)
from rtorrent.common import _py3
import os.path
import re
import rtorrent.lib.bencode as bencode
import hashlib
if _py3: from urllib.request import urlopen #@UnresolvedImport @UnusedImport
else: from urllib2 import urlopen #@UnresolvedImport @Reimport
class TorrentParser():
    """Parse a torrent supplied as a url, a local file path, or raw bencoded data."""

    def __init__(self, torrent):
        """Decode and parse given torrent
        @param torrent: handles: urls, file paths, string of torrent data
        @type torrent: str
        @raise AssertionError: Can be raised for a couple reasons:
        - If _get_raw_torrent() couldn't figure out
        what X{torrent} is
        - if X{torrent} isn't a valid bencoded torrent file
        """
        self.torrent = torrent
        self._raw_torrent = None  # raw bencoded torrent data
        self._torrent_decoded = None  # decoded torrent as a dict
        self.file_type = None  # "raw", "file", or "url"
        self._get_raw_torrent()
        assert self._raw_torrent is not None, "Couldn't get raw_torrent."
        if self._torrent_decoded is None:
            self._decode_torrent()
        assert isinstance(self._torrent_decoded, dict), "Invalid torrent file."
        self._parse_torrent()

    def _is_raw(self):
        """Return True if self.torrent already holds raw bencoded data."""
        raw = False
        if isinstance(self.torrent, (str, bytes)):
            if isinstance(self._decode_torrent(self.torrent), dict):
                raw = True
            else:
                # reset self._torrent_decoded (holds the failed decode result)
                self._torrent_decoded = None
        return(raw)

    def _get_raw_torrent(self):
        """Get raw torrent data by determining what self.torrent is"""
        # already raw?
        if self._is_raw():
            self.file_type = "raw"
            self._raw_torrent = self.torrent
            return
        # local file?
        if os.path.isfile(self.torrent):
            self.file_type = "file"
            # context manager closes the handle (previously leaked via
            # open(...).read())
            with open(self.torrent, "rb") as f:
                self._raw_torrent = f.read()
        # url?
        # raw string fixes the invalid "\/" escape in the old pattern;
        # "/" needs no escaping in a regex, so the match is unchanged
        elif re.search(r"^(http|ftp)://", self.torrent, re.I):
            self.file_type = "url"
            self._raw_torrent = urlopen(self.torrent).read()

    def _decode_torrent(self, raw_torrent=None):
        """Bdecode raw torrent data (defaults to self._raw_torrent)."""
        if raw_torrent is None:
            raw_torrent = self._raw_torrent
        self._torrent_decoded = bencode.decode(raw_torrent)
        return(self._torrent_decoded)

    def _calc_info_hash(self):
        """Compute the uppercase hex SHA1 info hash from the "info" dict."""
        self.info_hash = None
        if "info" in self._torrent_decoded.keys():
            info_dict = self._torrent_decoded["info"]
            self.info_hash = hashlib.sha1(bencode.encode(info_dict)).hexdigest().upper()
        return(self.info_hash)

    def _parse_torrent(self):
        """Expose each top-level torrent key as an attribute on self."""
        for k in self._torrent_decoded:
            key = k.replace(" ", "_").lower()
            setattr(self, key, self._torrent_decoded[k])
        self._calc_info_hash()
import time
import urllib.parse
import xmlrpc.client
from collections.abc import Iterable
from typing import Any, Literal, Protocol, TypeAlias, TypedDict
from urllib.parse import quote
import bencodepy
from typing_extensions import NotRequired
from .scgi import SCGIServerProxy
__all__ = ["RTorrent", "MultiCall"]
# Placeholder alias for values whose precise type hasn't been pinned down yet.
Unknown: TypeAlias = Any
class MultiCall(TypedDict):
    """One call entry for XML-RPC ``system.multicall``."""
    # fully qualified RPC method name, e.g. "d.stop"
    methodName: str
    # positional parameters for the call; may be omitted
    params: NotRequired[Any]
def _encode_tags(tags: Iterable[str] | None) -> str:
    """Normalize tags into a comma-separated string.

    Strips whitespace, deduplicates, drops empties, percent-quotes each
    tag, and sorts the result. Returns "" for None or an empty iterable.
    """
    if not tags:
        return ""
    unique = {tag.strip() for tag in tags}
    encoded = [quote(tag) for tag in unique if tag]
    return ",".join(sorted(encoded))
class _DownloadRpc(Protocol):
    """this is not a real class, it's a typing protocol for rpc typing"""

    def save_resume(self, info_hash: str) -> None:
        """save resume data"""

    def multicall2(self, _: Literal[""], view: str, *commands: str) -> Any:
        """run multiple rpc calls"""
class _SystemRpc(Protocol):
    """this is not a real class, it's a typing protocol for rpc typing"""

    def multicall(self, commands: list[MultiCall]) -> Any:
        """run multiple rpc calls"""
class _TrackerRpc(Protocol):
    """this is not a real class, it's a typing protocol for rpc typing"""

    def multicall(self, info_hash: str, _: Literal[""], *commands: str) -> None:
        """run multiple rpc calls"""

    def is_enabled(self, tracker_id: str) -> int:
        """tracker_id is in format ``{info_hash}:t{index}``"""
class RTorrent:
    """
    RTorrent rpc client
    .. code-block:: python
        rt = RTorrent('scgi://127.0.0.1:5000')
        rt = RTorrent('scgi:///home/ubuntu/.local/rtorrent.sock')
    If you are using ruTorrent or nginx scgi proxy, http(s) protocol are also supported
    .. code-block:: python
        rt = RTorrent('http://127.0.0.1')
    Underling ``xmlrpc.client.ServerProxy`` are exposed as instance property ``.rpc``,
    you can use ``rt.rpc`` for direct rpc call.
    :param address: rtorrent rpc address
    :param rutorrent_compatibility: compatibility for ruTorrent or flood.
    """

    rpc: xmlrpc.client.ServerProxy

    def __init__(self, address: str, rutorrent_compatibility: bool = True):
        u = urllib.parse.urlparse(address)
        if u.scheme == "scgi":
            # plain xmlrpc.client cannot speak SCGI, use the bundled proxy
            self.rpc = SCGIServerProxy(address)
        else:
            self.rpc = xmlrpc.client.ServerProxy(address)
        self.rutorrent_compatibility: bool = rutorrent_compatibility

    def get_session_path(self) -> str:
        """get current rtorrent session path"""
        return self.rpc.session.path()  # type: ignore

    def add_torrent_by_file(
        self,
        content: bytes,
        directory: str,
        tags: list[str] | None = None,
    ) -> None:
        """
        Add a torrent to the client by providing the torrent file content as bytes.
        Args:
            content: The content of the torrent file as bytes.
            directory: The base directory where the downloaded files will be saved.
            tags: A list of tags associated with the torrent. Defaults to None.
                This argument is compatible with ruTorrent and flood.
        """
        params: list[str | bytes] = [
            "",
            content,
            'd.tied_to_file.set=""',
            f'd.directory_base.set="{directory}"',
            # custom.addtime is used by ruTorrent and flood.
            f"d.custom.set=addtime,{int(time.time())}",
        ]
        if tags:
            params.append(f'd.custom1.set="{_encode_tags(tags)}"')
        if self.rutorrent_compatibility:
            # ruTorrent stores the torrent comment in custom2 with this marker prefix
            t = bencodepy.bdecode(content)
            if b"comment" in t:
                params.append(
                    f'd.custom2.set="VRS24mrker{quote(t[b"comment"].decode().strip())}"'
                )
        self.rpc.load.raw_start_verbose(*params)  # type: ignore

    def stop_torrent(self, info_hash: str) -> None:
        """
        Stop and close a torrent.
        Args:
            info_hash (str): The info hash of the torrent to be stopped.
        Returns:
            None: This function does not return anything.
        """
        # batched in a single round trip via system.multicall
        self.system.multicall(
            [
                MultiCall(methodName="d.stop", params=[info_hash]),
                MultiCall(methodName="d.close", params=[info_hash]),
            ]
        )

    def start_torrent(self, info_hash: str) -> None:
        """Open and start a torrent by info hash (single multicall round trip)."""
        self.system.multicall(
            [
                MultiCall(methodName="d.open", params=[info_hash]),
                MultiCall(methodName="d.start", params=[info_hash]),
            ]
        )

    def download_list(self) -> list[str]:
        """get list of info hash for current downloads"""
        return self.rpc.download_list()  # type: ignore

    @property
    def system(self) -> _SystemRpc:
        """method call with ``system`` prefix
        Example:
            .. code-block:: python
                rt.system.listMethods(...)
        """
        return self.rpc.system  # type: ignore

    def system_list_methods(self) -> list[str]:
        """get supported methods"""
        return self.rpc.system.listMethods()  # type: ignore

    @property
    def d(self) -> _DownloadRpc:
        """method call with ``d`` prefix
        Example:
            .. code-block:: python
                rt.d.save_resume(...)
                rt.d.open(...)
        """
        return self.rpc.d  # type: ignore

    def d_set_torrent_base_directory(self, info_hash: str, directory: str) -> None:
        """change base directory of a download.
        you may need to stop/close torrent first.
        """
        self.rpc.d.directory_base.set(info_hash, directory)

    def d_save_resume(self, info_hash: str) -> None:
        """alias of ``d.save_resume``"""
        self.rpc.d.save_resume(info_hash)

    def d_set_tags(self, info_hash: str, tags: Iterable[str]) -> None:
        """set download tags, work with flood and ruTorrent."""
        self.rpc.d.custom1.set(info_hash, _encode_tags(tags))

    def d_tracker_send_scrape(self, info_hash: str, delay: Unknown) -> None:
        """send a scrape request for the torrent's trackers"""
        self.rpc.d.tracker.send_scrape(info_hash, delay)

    @property
    def t(self) -> _TrackerRpc:
        """method call with ``t`` prefix
        Example:
            .. code-block:: python
                rt.t.is_enabled(...)
        """
        return self.rpc.t  # type: ignore

    def t_enable_tracker(self, info_hash: str, tracker_index: int) -> None:
        """enable a tracker of download"""
        # tracker target format is "{info_hash}:t{index}"
        self.rpc.t.is_enabled.set(f"{info_hash}:t{tracker_index}", 1)

    def t_disable_tracker(self, info_hash: str, tracker_index: int) -> None:
        """disable a tracker of download"""
        self.rpc.t.is_enabled.set(f"{info_hash}:t{tracker_index}", 0)

    def d_add_tracker(self, info_hash: str, url: str, *, group: int = 0) -> None:
        """add a tracker to download"""
        self.rpc.d.tracker.insert(info_hash, group, url)
_methods = [
"system.methodExist",
"system.methodHelp",
"system.methodSignature",
"system.multicall",
"system.shutdown",
"system.capabilities",
"system.getCapabilities",
"add_peer",
"and",
"bind",
"build_branch",
"cat",
"catch",
"check_hash",
"choke_group.down.heuristics",
"choke_group.down.heuristics.set",
"choke_group.down.max",
"choke_group.down.max.set",
"choke_group.down.max.unlimited",
"choke_group.down.queued",
"choke_group.down.rate",
"choke_group.down.total",
"choke_group.down.unchoked",
"choke_group.general.size",
"choke_group.index_of",
"choke_group.insert",
"choke_group.list",
"choke_group.size",
"choke_group.tracker.mode",
"choke_group.tracker.mode.set",
"choke_group.up.heuristics",
"choke_group.up.heuristics.set",
"choke_group.up.max",
"choke_group.up.max.set",
"choke_group.up.max.unlimited",
"choke_group.up.queued",
"choke_group.up.rate",
"choke_group.up.total",
"choke_group.up.unchoked",
"close_low_diskspace",
"close_untied",
"compare",
"connection_leech",
"connection_seed",
"convert.date",
"convert.elapsed_time",
"convert.gm_date",
"convert.gm_time",
"convert.kb",
"convert.mb",
"convert.throttle",
"convert.time",
"convert.xb",
"d.accepting_seeders",
"d.accepting_seeders.disable",
"d.accepting_seeders.enable",
"d.base_filename",
"d.base_path",
"d.bitfield",
"d.bytes_done",
"d.check_hash",
"d.chunk_size",
"d.chunks_hashed",
"d.chunks_seen",
"d.close",
"d.close.directly",
"d.complete",
"d.completed_bytes",
"d.completed_chunks",
"d.connection_current",
"d.connection_current.set",
"d.connection_leech",
"d.connection_leech.set",
"d.connection_seed",
"d.connection_seed.set",
"d.create_link",
"d.creation_date",
"d.custom",
"d.custom.if_z",
"d.custom.items",
"d.custom.keys",
"d.custom.set",
"d.custom1",
"d.custom1.set",
"d.custom2",
"d.custom2.set",
"d.custom3",
"d.custom3.set",
"d.custom4",
"d.custom4.set",
"d.custom5",
"d.custom5.set",
"d.custom_throw",
"d.delete_link",
"d.delete_tied",
"d.directory",
"d.directory.set",
"d.directory_base",
"d.directory_base.set",
"d.disconnect.seeders",
"d.down.choke_heuristics",
"d.down.choke_heuristics.leech",
"d.down.choke_heuristics.seed",
"d.down.choke_heuristics.set",
"d.down.rate",
"d.down.sequential",
"d.down.sequential.set",
"d.down.total",
"d.downloads_max",
"d.downloads_max.set",
"d.downloads_min",
"d.downloads_min.set",
"d.erase",
"d.free_diskspace",
"d.group",
"d.group.name",
"d.group.set",
"d.hash",
"d.hashing",
"d.hashing_failed",
"d.hashing_failed.set",
"d.ignore_commands",
"d.ignore_commands.set",
"d.incomplete",
"d.is_active",
"d.is_hash_checked",
"d.is_hash_checking",
"d.is_meta",
"d.is_multi_file",
"d.is_not_partially_done",
"d.is_open",
"d.is_partially_done",
"d.is_pex_active",
"d.is_private",
"d.left_bytes",
"d.load_date",
"d.loaded_file",
"d.local_id",
"d.local_id_html",
"d.max_file_size",
"d.max_file_size.set",
"d.max_size_pex",
"d.message",
"d.message.set",
"d.mode",
"d.multicall.filtered",
"d.multicall2",
"d.name",
"d.open",
"d.pause",
"d.peer_exchange",
"d.peer_exchange.set",
"d.peers_accounted",
"d.peers_complete",
"d.peers_connected",
"d.peers_max",
"d.peers_max.set",
"d.peers_min",
"d.peers_min.set",
"d.peers_not_connected",
"d.priority",
"d.priority.set",
"d.priority_str",
"d.ratio",
"d.resume",
"d.save_full_session",
"d.save_resume",
"d.size_bytes",
"d.size_chunks",
"d.size_files",
"d.size_pex",
"d.skip.rate",
"d.skip.total",
"d.start",
"d.state",
"d.state_changed",
"d.state_counter",
"d.stop",
"d.throttle_name",
"d.throttle_name.set",
"d.tied_to_file",
"d.tied_to_file.set",
"d.timestamp.finished",
"d.timestamp.last_active",
"d.timestamp.started",
"d.tracker.insert",
"d.tracker.send_scrape",
"d.tracker_announce",
"d.tracker_announce.force",
"d.tracker_focus",
"d.tracker_numwant",
"d.tracker_numwant.set",
"d.tracker_size",
"d.try_close",
"d.try_start",
"d.try_stop",
"d.up.choke_heuristics",
"d.up.choke_heuristics.leech",
"d.up.choke_heuristics.seed",
"d.up.choke_heuristics.set",
"d.up.rate",
"d.up.total",
"d.update_priorities",
"d.uploads_max",
"d.uploads_max.set",
"d.uploads_min",
"d.uploads_min.set",
"d.views",
"d.views.has",
"d.views.push_back",
"d.views.push_back_unique",
"d.views.remove",
"d.wanted_chunks",
"dht",
"dht.add_bootstrap",
"dht.add_node",
"dht.mode.set",
"dht.port",
"dht.port.set",
"dht.statistics",
"dht.throttle.name",
"dht.throttle.name.set",
"dht_port",
"directory",
"directory.default",
"directory.default.set",
"directory.watch.added",
"download_list",
"download_rate",
"elapsed.greater",
"elapsed.less",
"encoding.add",
"encoding_list",
"encryption",
"equal",
"event.download.active",
"event.download.closed",
"event.download.erased",
"event.download.finished",
"event.download.hash_done",
"event.download.hash_failed",
"event.download.hash_final_failed",
"event.download.hash_queued",
"event.download.hash_removed",
"event.download.inactive",
"event.download.inserted",
"event.download.inserted_new",
"event.download.inserted_session",
"event.download.opened",
"event.download.paused",
"event.download.resumed",
"event.system.shutdown",
"event.system.startup_done",
"event.view.hide",
"event.view.show",
"execute",
"execute.capture",
"execute.capture_nothrow",
"execute.nothrow",
"execute.nothrow.bg",
"execute.raw",
"execute.raw.bg",
"execute.raw_nothrow",
"execute.raw_nothrow.bg",
"execute.throw",
"execute.throw.bg",
"execute2",
"d.completed_chunks",
"d.frozen_path",
"d.is_create_queued",
"d.is_created",
"d.is_open",
"d.is_resize_queued",
"d.last_touched",
"d.match_depth_next",
"d.match_depth_prev",
"d.multicall",
"d.offset",
"d.path",
"d.path_components",
"d.path_depth",
"d.prioritize_first",
"d.prioritize_first.disable",
"d.prioritize_first.enable",
"d.prioritize_last",
"d.prioritize_last.disable",
"d.prioritize_last.enable",
"d.priority",
"d.priority.set",
"d.range_first",
"d.range_second",
"d.set_create_queued",
"d.set_resize_queued",
"d.size_bytes",
"d.size_chunks",
"d.unset_create_queued",
"d.unset_resize_queued",
"false",
"fi.filename_last",
"fi.is_file",
"file.append",
"file.prioritize_toc",
"file.prioritize_toc.first",
"file.prioritize_toc.first.push_back",
"file.prioritize_toc.first.set",
"file.prioritize_toc.last",
"file.prioritize_toc.last.push_back",
"file.prioritize_toc.last.set",
"file.prioritize_toc.set",
"fs.homedir",
"fs.homedir.nothrow",
"fs.mkdir",
"fs.mkdir.nothrow",
"fs.mkdir.recursive",
"fs.mkdir.recursive.nothrow",
"greater",
"group.insert",
"group.insert_persistent_view",
"group.seeding.ratio.command",
"group.seeding.ratio.disable",
"group.seeding.ratio.enable",
"group2.seeding.ratio.max",
"group2.seeding.ratio.max.set",
"group2.seeding.ratio.min",
"group2.seeding.ratio.min.set",
"group2.seeding.ratio.upload",
"group2.seeding.ratio.upload.set",
"group2.seeding.view",
"group2.seeding.view.set",
"if",
"import",
"ip",
"key_layout",
"keys.layout",
"keys.layout.set",
"less",
"load.normal",
"load.raw",
"load.raw_start",
"load.raw_start_verbose",
"load.raw_verbose",
"load.start",
"load.start_throw",
"load.start_verbose",
"load.throw",
"load.verbose",
"log.add_output",
"log.append_file",
"log.append_gz_file",
"log.close",
"log.execute",
"log.open_file",
"log.open_file_pid",
"log.open_gz_file",
"log.open_gz_file_pid",
"log.rpc",
"log.vmmap.dump",
"match",
"math.add",
"math.avg",
"math.cnt",
"math.div",
"math.max",
"math.med",
"math.min",
"math.mod",
"math.mul",
"math.sub",
"max_downloads",
"max_downloads_div",
"max_downloads_global",
"max_memory_usage",
"max_peers",
"max_peers_seed",
"max_uploads",
"max_uploads_div",
"max_uploads_global",
"method.const",
"method.const.enable",
"method.erase",
"method.get",
"method.has_key",
"method.insert",
"method.insert.bool",
"method.insert.c_simple",
"method.insert.list",
"method.insert.s_c_simple",
"method.insert.simple",
"method.insert.string",
"method.insert.value",
"method.list_keys",
"method.redirect",
"method.rlookup",
"method.rlookup.clear",
"method.set",
"method.set_key",
"method.use_deprecated",
"method.use_deprecated.set",
"method.use_intermediate",
"method.use_intermediate.set",
"min_downloads",
"min_peers",
"min_peers_seed",
"min_uploads",
"network.bind_address",
"network.bind_address.set",
"network.http.cacert",
"network.http.cacert.set",
"network.http.capath",
"network.http.capath.set",
"network.http.current_open",
"network.http.dns_cache_timeout",
"network.http.dns_cache_timeout.set",
"network.http.max_open",
"network.http.max_open.set",
"network.http.proxy_address",
"network.http.proxy_address.set",
"network.http.ssl_verify_host",
"network.http.ssl_verify_host.set",
"network.http.ssl_verify_peer",
"network.http.ssl_verify_peer.set",
"network.listen.backlog",
"network.listen.backlog.set",
"network.listen.port",
"network.local_address",
"network.local_address.set",
"network.max_open_files",
"network.max_open_files.set",
"network.max_open_sockets",
"network.max_open_sockets.set",
"network.open_files",
"network.open_sockets",
"network.port_open",
"network.port_open.set",
"network.port_random",
"network.port_random.set",
"network.port_range",
"network.port_range.set",
"network.proxy_address",
"network.proxy_address.set",
"network.receive_buffer.size",
"network.receive_buffer.size.set",
"network.scgi.dont_route",
"network.scgi.dont_route.set",
"network.scgi.open_local",
"network.scgi.open_port",
"network.send_buffer.size",
"network.send_buffer.size.set",
"network.tos.set",
"network.total_handshakes",
"network.xmlrpc.dialect.set",
"network.xmlrpc.size_limit",
"network.xmlrpc.size_limit.set",
"not",
"on_ratio",
"or",
"p.address",
"p.banned",
"p.banned.set",
"p.call_target",
"p.client_version",
"p.completed_percent",
"p.disconnect",
"p.disconnect_delayed",
"p.down_rate",
"p.down_total",
"p.id",
"p.id_html",
"p.is_encrypted",
"p.is_incoming",
"p.is_obfuscated",
"p.is_preferred",
"p.is_snubbed",
"p.is_unwanted",
"p.multicall",
"p.options_str",
"p.peer_rate",
"p.peer_total",
"p.port",
"p.snubbed",
"p.snubbed.set",
"p.up_rate",
"p.up_total",
"pieces.hash.on_completion",
"pieces.hash.on_completion.set",
"pieces.hash.queue_size",
"pieces.memory.block_count",
"pieces.memory.current",
"pieces.memory.max",
"pieces.memory.max.set",
"pieces.memory.sync_queue",
"pieces.preload.min_rate",
"pieces.preload.min_rate.set",
"pieces.preload.min_size",
"pieces.preload.min_size.set",
"pieces.preload.type",
"pieces.preload.type.set",
"pieces.stats.total_size",
"pieces.stats_not_preloaded",
"pieces.stats_preloaded",
"pieces.sync.always_safe",
"pieces.sync.always_safe.set",
"pieces.sync.queue_size",
"pieces.sync.safe_free_diskspace",
"pieces.sync.timeout",
"pieces.sync.timeout.set",
"pieces.sync.timeout_safe",
"pieces.sync.timeout_safe.set",
"port_random",
"port_range",
"print",
"protocol.choke_heuristics.down.leech",
"protocol.choke_heuristics.down.leech.set",
"protocol.choke_heuristics.down.seed",
"protocol.choke_heuristics.down.seed.set",
"protocol.choke_heuristics.up.leech",
"protocol.choke_heuristics.up.leech.set",
"protocol.choke_heuristics.up.seed",
"protocol.choke_heuristics.up.seed.set",
"protocol.connection.leech",
"protocol.connection.leech.set",
"protocol.connection.seed",
"protocol.connection.seed.set",
"protocol.encryption.set",
"protocol.pex",
"protocol.pex.set",
"proxy_address",
"ratio.disable",
"ratio.enable",
"ratio.max",
"ratio.max.set",
"ratio.min",
"ratio.min.set",
"ratio.upload",
"ratio.upload.set",
"remove_untied",
"scgi_local",
"scgi_port",
"schedule",
"schedule2",
"schedule_remove",
"schedule_remove2",
"scheduler.max_active",
"scheduler.max_active.set",
"scheduler.simple.added",
"scheduler.simple.removed",
"scheduler.simple.update",
"session",
"session.name",
"session.name.set",
"session.on_completion",
"session.on_completion.set",
"session.path",
"session.path.set",
"session.save",
"session.use_lock",
"session.use_lock.set",
"start_tied",
"stop_untied",
"strings.choke_heuristics",
"strings.choke_heuristics.download",
"strings.choke_heuristics.upload",
"strings.connection_type",
"strings.encryption",
"strings.ip_filter",
"strings.ip_tos",
"strings.log_group",
"strings.tracker_event",
"strings.tracker_mode",
"system.api_version",
"system.client_version",
"system.cwd",
"system.cwd.set",
"system.daemon",
"system.daemon.set",
"system.env",
"system.file.allocate",
"system.file.allocate.set",
"system.file.max_size",
"system.file.max_size.set",
"system.file.split_size",
"system.file.split_size.set",
"system.file.split_suffix",
"system.file.split_suffix.set",
"system.file_status_cache.prune",
"system.file_status_cache.size",
"system.files.closed_counter",
"system.files.failed_counter",
"system.files.opened_counter",
"system.hostname",
"system.library_version",
"system.pid",
"system.shutdown.normal",
"system.shutdown.quick",
"system.time",
"system.time_seconds",
"system.time_usec",
"system.umask.set",
"t.activity_time_last",
"t.activity_time_next",
"t.can_scrape",
"t.disable",
"t.enable",
"t.failed_counter",
"t.failed_time_last",
"t.failed_time_next",
"t.group",
"t.id",
"t.is_busy",
"t.is_enabled",
"t.is_enabled.set",
"t.is_extra_tracker",
"t.is_open",
"t.is_usable",
"t.latest_event",
"t.latest_new_peers",
"t.latest_sum_peers",
"t.min_interval",
"t.multicall",
"t.normal_interval",
"t.scrape_complete",
"t.scrape_counter",
"t.scrape_downloaded",
"t.scrape_incomplete",
"t.scrape_time_last",
"t.success_counter",
"t.success_time_last",
"t.success_time_next",
"t.type",
"t.url",
"throttle.down",
"throttle.down.max",
"throttle.down.rate",
"throttle.global_down.max_rate",
"throttle.global_down.max_rate.set",
"throttle.global_down.max_rate.set_kb",
"throttle.global_down.rate",
"throttle.global_down.total",
"throttle.global_up.max_rate",
"throttle.global_up.max_rate.set",
"throttle.global_up.max_rate.set_kb",
"throttle.global_up.rate",
"throttle.global_up.total",
"throttle.ip",
"throttle.max_downloads",
"throttle.max_downloads.div",
"throttle.max_downloads.div._val",
"throttle.max_downloads.div._val.set",
"throttle.max_downloads.div.set",
"throttle.max_downloads.global",
"throttle.max_downloads.global._val",
"throttle.max_downloads.global._val.set",
"throttle.max_downloads.global.set",
"throttle.max_downloads.set",
"throttle.max_peers.normal",
"throttle.max_peers.normal.set",
"throttle.max_peers.seed",
"throttle.max_peers.seed.set",
"throttle.max_unchoked_downloads",
"throttle.max_unchoked_uploads",
"throttle.max_uploads",
"throttle.max_uploads.div",
"throttle.max_uploads.div._val",
"throttle.max_uploads.div._val.set",
"throttle.max_uploads.div.set",
"throttle.max_uploads.global",
"throttle.max_uploads.global._val",
"throttle.max_uploads.global._val.set",
"throttle.max_uploads.global.set",
"throttle.max_uploads.set",
"throttle.min_downloads",
"throttle.min_downloads.set",
"throttle.min_peers.normal",
"throttle.min_peers.normal.set",
"throttle.min_peers.seed",
"throttle.min_peers.seed.set",
"throttle.min_uploads",
"throttle.min_uploads.set",
"throttle.unchoked_downloads",
"throttle.unchoked_uploads",
"throttle.up",
"throttle.up.max",
"throttle.up.rate",
"to_date",
"to_elapsed_time",
"to_gm_date",
"to_gm_time",
"to_kb",
"to_mb",
"to_throttle",
"to_time",
"to_xb",
"torrent_list_layout",
"trackers.disable",
"trackers.enable",
"trackers.numwant",
"trackers.numwant.set",
"trackers.use_udp",
"trackers.use_udp.set",
"true",
"try",
"try_import",
"ui.current_view",
"ui.current_view.set",
"ui.input.history.clear",
"ui.input.history.size",
"ui.input.history.size.set",
"ui.status.throttle.down.set",
"ui.status.throttle.up.set",
"ui.throttle.global.step.large",
"ui.throttle.global.step.large.set",
"ui.throttle.global.step.medium",
"ui.throttle.global.step.medium.set",
"ui.throttle.global.step.small",
"ui.throttle.global.step.small.set",
"ui.torrent_list.layout",
"ui.torrent_list.layout.set",
"ui.unfocus_download",
"upload_rate",
"value",
"view.add",
"view.event_added",
"view.event_removed",
"view.filter",
"view.filter.temp",
"view.filter.temp.excluded",
"view.filter.temp.excluded.set",
"view.filter.temp.log",
"view.filter.temp.log.set",
"view.filter_all",
"view.filter_download",
"view.filter_on",
"view.list",
"view.persistent",
"view.set",
"view.set_not_visible",
"view.set_visible",
"view.size",
"view.size_not_visible",
"view.sort",
"view.sort_current",
"view.sort_new",
"system.startup_time",
"d.data_path",
"d.session_file",
"cfg.scrape_interval.active",
"cfg.scrape_interval.active.set",
"cfg.scrape_interval.idle",
"cfg.scrape_interval.idle.set",
"d.last_scrape.send_set",
] | /rtorrent_rpc-0.0.12-py3-none-any.whl/rtorrent_rpc/__init__.py | 0.776114 | 0.221561 | __init__.py | pypi |
from __future__ import annotations
from .utfUtils import SUPPORTED_ENCODINGS, utfEncode, utfDecode
class LengthError(Exception):
    """Raised when payload data has an invalid length."""
class RTPPayload_TTML:
    '''
    A data structure for storing TTML RTP payloads as defined by RFC 8759.

    The serialised wire format is::

        reserved (16 bits) | length (16 bits, big-endian) | userDataWords

    Attributes:
        reserved (bytearray): The reserved bits. MUST be set to ``0``.
        userDataWords (str): The TTML document.
        encoding (str): One of UTF-8, UTF-16, UTF-16LE, and UTF-16BE
        bom (bool): Should encoded documents start with a byte-order mark
    '''

    def __init__(
       self,
       reserved: bytearray | None = None,
       userDataWords: str = "",
       encoding: str = "UTF-8",
       bom: bool = False) -> None:
        '''
        :param reserved: Reserved header bits; must equal ``b'\\x00\\x00'``.
            Defaults to a fresh two-byte zero bytearray per instance.
        :param userDataWords: The TTML document as a string.
        :param encoding: One of the encodings in ``SUPPORTED_ENCODINGS``.
        :param bom: Whether encoded documents start with a byte-order mark.
        :raises AttributeError: If ``encoding`` is not supported.
        :raises LengthError: If the encoded document is ``>= 2**16`` bytes.
        '''
        if reserved is None:
            # Create a fresh bytearray per instance: a bytearray literal used
            # directly as the default would be evaluated once and shared
            # (mutated) across every instance constructed without `reserved`.
            reserved = bytearray(b'\x00\x00')
        self._userDataWords: bytearray
        self.reserved = reserved
        self._bom = bom
        if encoding in SUPPORTED_ENCODINGS:
            self._encoding = encoding
        else:
            # Join with ", " so the supported encodings are listed readably
            # (an empty-separator join ran the names together).
            raise AttributeError("Encoding must be one of {}".format(
                ", ".join(SUPPORTED_ENCODINGS)))
        self.userDataWords = userDataWords

    def __eq__(self, other: object) -> bool:
        # Equality covers the document, reserved bits, encoding, and BOM flag.
        if not isinstance(other, RTPPayload_TTML):
            return NotImplemented
        return (
            (type(self) == type(other)) and
            (self.reserved == other.reserved) and
            (self.userDataWords == other.userDataWords) and
            (self._encoding == other._encoding) and
            (self._bom == other._bom))

    @property
    def reserved(self) -> bytearray:
        '''The reserved header bits (always ``b'\\x00\\x00'`` per RFC 8759).'''
        return self._reserved

    @reserved.setter
    def reserved(self, p: bytearray) -> None:
        if not isinstance(p, bytearray):
            raise AttributeError("Payload value must be bytearray")
        if p != bytearray(b'\x00\x00'):
            raise ValueError("Reserved bits must be '\x00\x00' under RFC 8759")
        else:
            self._reserved = p

    @property
    def userDataWords(self) -> str:
        '''The TTML document, decoded from the stored payload bytes.'''
        return utfDecode(self._userDataWords, self._encoding)

    @userDataWords.setter
    def userDataWords(self, p: str) -> None:
        workingUDW = utfEncode(p, self._encoding, self._bom)
        # The on-wire length field is 16 bits, so the encoded document must
        # fit in fewer than 2**16 bytes.
        if len(workingUDW) >= 2**16:
            raise LengthError(
                "userDataWords must be fewer than 2**16 bytes")
        self._userDataWords = workingUDW

    def fromBytearray(self, packet: bytearray) -> RTPPayload_TTML:
        '''
        Populate this instance from a serialised payload.

        :param packet: reserved (2 bytes) + length (2 bytes, big-endian)
            + userDataWords.
        :raises LengthError: If the length field does not match the actual
            length of the userDataWords section.
        :returns: ``self``, for call chaining.
        '''
        self.reserved = packet[0:2]
        length = int.from_bytes(packet[2:4], byteorder='big')
        self._userDataWords = packet[4:]
        if length != len(self._userDataWords):
            raise LengthError(
                "Length field does not match length of userDataWords")
        return self

    def toBytearray(self) -> bytearray:
        '''
        Serialise this payload as reserved + 16-bit big-endian length
        + userDataWords.
        '''
        packetLen = 4 + len(self._userDataWords)
        packet = bytearray(packetLen)
        packet[0:2] = self.reserved
        packet[2:4] = len(self._userDataWords).to_bytes(2, byteorder='big')
        packet[4:] = self._userDataWords
        return packet

    def __bytes__(self) -> bytes:
        '''Immutable ``bytes`` form of :meth:`toBytearray`.'''
        return bytes(self.toBytearray())
# rtreelib
Pluggable R-tree implementation in pure Python.
## Overview
Since the original R-tree data structure was initially proposed in 1984, there have been
many variations introduced over the years optimized for various use cases [1]. However, when
working in Python (one of the most popular languages for spatial data processing), there is
no easy way to quickly compare how these various implementations behave on real data.
The aim of this library is to provide a "pluggable" R-tree implementation that allows swapping
out the various strategies for insertion, node deletion, and other behaviors so that their
impact can be easily compared (without having to install separate libraries and having to
make code changes to accommodate for API differences). Several of the more common R-tree
variations will soon be provided as ready-built implementations (see the **Status** section
below).
In addition, this library also provides utilities for inspecting the R-tree structure. It
allows creating diagrams (using matplotlib and graphviz) that show the R-tree nodes and
entries (including all the intermediate, non-leaf nodes), along with plots of their
corresponding bounding boxes. It also allows exporting the R-tree to PostGIS so it could
be examined using a GIS viewer like QGIS.
## Status
This library is currently in early development. The table below shows which R-tree variants
have been implemented, along with which operations they currently support:
| R-Tree Variant | Insert | Update | Delete |
|-----------------------|-----------------------|-----------------------|-----------------------|
| **Guttman** [2] | :heavy_check_mark: | :black_square_button: | :black_square_button: |
| **R\*-Tree** [3] | :heavy_check_mark: | :black_square_button: | :black_square_button: |
The library has a framework in place for swapping out the various strategies, making it
possible to add a new R-tree variant. However, given that this library is still early in
development, it is anticipated that this framework may need to be extended, resulting in
breaking changes.
Contributions for implementing additional strategies and operations are welcome. See
the section on **Extending** below.
## Setup
This package is available on PyPI and can be installed using pip:
```
pip install rtreelib
```
This package requires Python 3.6+.
There are additional optional dependencies you can install if you want to be able to
create diagrams or export the R-tree data to PostGIS. See the corresponding sections
below for additional setup information.
## Usage
To instantiate the default implementation and insert some entries:
```python
from rtreelib import RTree, Rect
t = RTree()
t.insert('a', Rect(0, 0, 3, 3))
t.insert('b', Rect(2, 2, 4, 4))
t.insert('c', Rect(1, 1, 2, 4))
t.insert('d', Rect(8, 8, 10, 10))
t.insert('e', Rect(7, 7, 9, 9))
```
The first parameter to the `insert` method represents the data, and can be of any data type
(though you will want to stick to strings, numbers, and other basic data types that can be
easily and succinctly represented as a string if you want to create diagrams). The second
parameter represents the minimum bounding rectangle (MBR) of the associated data element.
The default implementation uses Guttman's original strategies for insertion, node splitting,
and deletion, as outlined in his paper from 1984 [2].
To use the R* implementation instead:
```python
from rtreelib import RStarTree, Rect
t = RStarTree()
t.insert('a', Rect(0, 0, 3, 3))
t.insert('b', Rect(2, 2, 4, 4))
t.insert('c', Rect(1, 1, 2, 4))
t.insert('d', Rect(8, 8, 10, 10))
t.insert('e', Rect(7, 7, 9, 9))
```
You can also create a custom implementation by inheriting from `RTreeBase` and providing
your own implementations for the various behaviors (insert, overflow, etc.). See the
following section for more information.
## Querying
Use the `query` method to find entries at a given location. The library supports querying
by either a point or a rectangle, and returns an iterable of matching entries that
intersect the given location.
To query using `Point`:
```python
entries = t.query(Point(2, 4))
```
Alternatively, you can also pass a tuple or list of 2 coordinates (`x` and `y`):
```python
entries = t.query((2, 4))
```
When querying by point, note that points that lie on the border (rather than the
interior) of a bounding rectangle are considered to intersect the rectangle.
To query using `Rect`:
```python
entries = t.query(Rect(2, 1, 4, 5))
```
Alternatively, you can also pass a tuple or list of 4 coordinates (the order is the
same as when using `Rect`, namely `min_x`, `min_y`, `max_x`, and `max_y`):
```python
entries = t.query((2, 1, 4, 5))
```
When querying by rectangle, note that the rectangles must have a non-zero intersection
area. Rectangles that intersect at the border but whose interiors do not overlap will
*not* match the query.
Note the above methods return entries rather than nodes. To get an iterable of leaf
nodes instead, use `query_nodes`:
```python
nodes = t.query_nodes(Rect(2, 1, 4, 5))
```
By default, this method will only return leaf-level nodes. To include all
intermediate-level nodes (including the root), set the optional `leaves` parameter
to `False` (it defaults to `True` if not passed in):
```python
all_nodes = t.query_nodes(Rect(2, 1, 4, 5), leaves=False)
```
## Extending
As noted above, the purpose of this library is to provide a pluggable R-tree implementation
where the various behaviors can be swapped out and customized to allow comparison. To that
end, this library provides a framework for achieving this.
As an example, the [`RTreeGuttman`](https://github.com/sergkr/rtreelib/blob/master/rtreelib/strategies/guttman.py)
class (aliased as `RTree`) simply inherits from `RTreeBase`, providing an implementation
for the `insert`, `choose_leaf`, `adjust_tree`, and `overflow_strategy` behaviors as follows:
```python
class RTreeGuttman(RTreeBase[T]):
"""R-Tree implementation that uses Guttman's strategies for insertion, splitting, and deletion."""
def __init__(self, max_entries: int = DEFAULT_MAX_ENTRIES, min_entries: int = None):
"""
Initializes the R-Tree using Guttman's strategies for insertion, splitting, and deletion.
:param max_entries: Maximum number of entries per node.
:param min_entries: Minimum number of entries per node. Defaults to ceil(max_entries/2).
"""
super().__init__(
max_entries=max_entries,
min_entries=min_entries,
insert=insert,
choose_leaf=guttman_choose_leaf,
adjust_tree=adjust_tree_strategy,
overflow_strategy=quadratic_split
)
```
Each behavior should be a function that implements a specific signature and performs a given
task. Here are the behaviors that are currently required to be specified:
* **`insert`**: Strategy used for inserting a single new entry into the tree.
* Signature: `(tree: RTreeBase[T], data: T, rect: Rect) → RTreeEntry[T]`
* Arguments:
* `tree: RTreeBase[T]`: R-tree instance.
* `data: T`: Data stored in this entry.
* `rect: Rect`: Bounding rectangle.
* Returns: `RTreeEntry[T]`
* This function should return the newly inserted entry.
* **`choose_leaf`**: Strategy used for choosing a leaf node when inserting a new entry.
* Signature: `(tree: RTreeBase[T], entry: RTreeEntry[T]) → RTreeNode[T]`
* Arguments:
* `tree: RTreeBase[T]`: R-tree instance.
* `entry: RTreeEntry[T]`: Entry being inserted.
* Returns: `RTreeNode[T]`
* This function should return the leaf node where the new entry should be inserted. This
node may or may not have the capacity for the new entry. If the insertion of the new node
results in the node overflowing, then `overflow_strategy` will be invoked on the node.
* **`adjust_tree`**: Strategy used for balancing the tree, including propagating node splits,
updating bounding boxes on all nodes and entries as necessary, and growing the tree by
creating a new root if necessary. This strategy is executed after inserting or deleting an
entry.
* Signature: `(tree: RTreeBase[T], node: RTreeNode[T], split_node: RTreeNode[T]) → None`
* Arguments:
* `tree: RTreeBase[T]`: R-tree instance.
* `node: RTreeNode[T]`: Node where a newly-inserted entry has just been added.
* `split_node: RTreeNode[T]`: If the insertion of a new entry has caused the node to
split, this is the newly-created split node. Otherwise, this will be `None`.
* Returns: `None`
* **`overflow_strategy`**: Strategy used for handling an overflowing node (a node that
contains more than `max_entries`). Depending on the implementation, this may involve
splitting the node and potentially growing the tree (Guttman), performing a forced
reinsert of entries (R*), or some other strategy.
* Signature: `(tree: RTreeBase[T], node: RTreeNode[T]) → RTreeNode[T]`
* Arguments:
* `tree: RTreeBase[T]`: R-tree instance.
* `node: RTreeNode[T]`: Overflowing node.
* Returns: `RTreeNode[T]`
* Depending on the implementation, this function may return a newly-created split
node whose entries are a subset of the original node's entries (Guttman), or simply
return `None`.
## Creating R-tree Diagrams
This library provides a set of utility functions that can be used to create diagrams of the
entire R-tree structure, including the root and all intermediate and leaf level nodes and
entries.
These features are optional, and the required dependencies are *not* automatically installed
when installing this library. Therefore, you must install them manually. This includes the
following Python dependencies which can be installed using pip:
```
pip install matplotlib pydot tqdm
```
This also includes the following system-level dependencies:
* TkInter
* Graphviz
On Ubuntu, these can be installed using:
```
sudo apt install python3-tk graphviz
```
Once the above dependencies are installed, you can create an R-tree diagram as follows:
```python
from rtreelib import RTree, Rect
from rtreelib.diagram import create_rtree_diagram
# Create an RTree instance with some sample data
t = RTree(max_entries=4)
t.insert('a', Rect(0, 0, 3, 3))
t.insert('b', Rect(2, 2, 4, 4))
t.insert('c', Rect(1, 1, 2, 4))
t.insert('d', Rect(8, 8, 10, 10))
t.insert('e', Rect(7, 7, 9, 9))
# Create a diagram of the R-tree structure
create_rtree_diagram(t)
```
This creates a diagram like the following:

The diagram is created in a temp directory as a PNG file, and the default viewer
is automatically launched for convenience. Each box in the main diagram represents a node
(except at the leaf level, where it represents the leaf entry), and contains a plot that
depicts all of the data spatially. The bounding boxes of each node are represented using
tan rectangles with a dashed outline. The bounding box corresponding to the current node
is highlighted in pink.
The bounding boxes for the original data entries themselves are depicted in blue, and are
labeled using the value that was passed in to `insert`. At the leaf level, the corresponding
data element is highlighted in pink.
The entries contained in each node are depicted along the bottom of the node's box, and
point to either a child node (for non-leaf nodes), or to the data entries (for leaf nodes).
As can be seen in the above screenshot, the diagram depicts the entire tree structure, which
can be quite large depending on the number of nodes and entries. It may also take a while to
generate, since it launches matplotlib to plot the data spatially for each node and entry, and
then graphviz to generate the overall diagram. Given the size and execution time required to
generate these diagrams, it's only practical for R-trees containing a relatively small
amount of data (e.g., no more than about a dozen total entries). To analyze the resulting
R-tree structure when working with a large amount of data, it is recommended to export the
data to PostGIS and use a viewer like QGIS (as explained in the following section).
## Exporting to PostGIS
In addition to creating diagrams, this library also allows exporting R-trees to a
PostGIS database.
To do so, you will first need to install the [psycopg2](http://initd.org/psycopg/) driver.
This is an optional dependency, so it is not automatically installed when you install
this package. Refer to the
[installation instructions for psycopg2](http://initd.org/psycopg/docs/install.html) to
ensure that you have all the necessary system-wide prerequisites installed (C compiler,
Python header files, etc.). Then, install `psycopg2` using the following command (passing
the `--no-binary` flag to ensure that it is built from source, and also to avoid a console
warning when using `psycopg2`):
```
pip install psycopg2 --no-binary psycopg2
```
Once `psycopg2` is installed, you should be able to import the functions you need from the
`rtreelib.pg` module:
```python
from rtreelib.pg import init_db_pool, create_rtree_tables, export_to_postgis
```
The subsections below guide you through how to use this library to export R-trees to the
database. You will first need to decide on your preferred method for connecting to the
database, as well as create the necessary tables to store the R-tree data. Once these
prerequisites are met, exporting the R-tree can be done using a simple function call.
Finally, this guide shows how you can visualize the exported data using QGIS, a popular
and freely-available GIS viewer.
### Initializing a Connection Pool
When working with the `rtreelib.pg` module, there are three ways of passing database
connection information:
1. Initialize a connection pool by calling `init_db_pool`. This allows using the other
functions in this module without having to pass around connection info.
2. Manually open the connection yourself, and pass in the connection object to the
function.
3. Pass in keyword arguments that can be used to establish the database connection.
The first method is generally the easiest - you just have to call it once, and not
have to worry about passing in connection information to the other functions. This
section explains this method, and the following sections assume that you are using
it. However, the other methods are also explained later on in this guide.
`init_db_pool` accepts the same parameters as the
[psycopg2.connect](http://initd.org/psycopg/docs/module.html#psycopg2.connect) function.
For example, you can pass in a connection string:
```python
init_db_pool("dbname=mydb user=postgres password=temp123!")
```
Alternatively, using the URL syntax:
```python
init_db_pool("postgresql://localhost/mydb?user=postgres&password=temp123!")
```
Or keyword arguments:
```python
init_db_pool(user="postgres", password="temp123!", host="localhost", database="mydb")
```
Next, before you can export an R-tree, you first need to create a few database
tables to store the data. The following section explains how to achieve this.
### Creating Tables to Store R-tree Data
When exporting an R-tree using this library, the data is populated inside three
tables:
* `rtree`: This table simply contains the ID of each R-tree that was exported.
This library allows you to export multiple R-trees at once, and they are
differentiated by ID (you can also clear the contents of all tables using
`clear_rtree_tables`).
* `rtree_node`: Contains information about each node in the R-tree, including
its bounding box (as a PostGIS geometry column), a pointer to the parent entry
containing this node, and the level of this node (starting at 0 for the root).
The node also contains a reference to the `rtree` that it is a part of.
* `rtree_entry`: Contains information about each entry in the R-tree, including
its bounding box (as a PostGIS geometry column) and a pointer to the node
containing this entry. For leaf entries, this also contains the value of the
data element.
These tables can be created using the `create_rtree_tables` function. This is
something you only need to do once.
This function can be called without any arguments if you have established the
connection pool, and your data does not use a spatial reference system (`srid`).
However, generally when working with spatial data, you will have a particular
SRID that your data is in, in which case you should pass it in to ensure that
all geometry columns use the correct SRID:
```python
create_rtree_tables(srid=4326)
```
You can also choose to create the tables in a different schema (other than `public`):
```python
create_rtree_tables(srid=4326, schema="temp")
```
However, in this case, be sure to pass in the same schema to the other functions in
this module.
You can also pass in a `datatype`, which indicates the type of data stored in the leaf
entries (i.e., the type of the data you pass in to the `insert` method of `RTree`).
This can either be a string containing a PostgreSQL column type:
```python
create_rtree_tables(srid=4326, datatype='VARCHAR(255)')
```
Or a Python type, in which case an appropriate PostgreSQL data type will be inferred:
```python
create_rtree_tables(srid=4326, datatype=int)
```
If you don't pass anything in, or an appropriate PostgreSQL data type cannot be
determined from the Python type, the column type will default to `TEXT`, which allows
storing arbitrary-length strings.
When passing a string containing a PostgreSQL column type, you also have the option
of adding a modifier such as `NOT NULL`, or even a foreign key constraint:
```python
create_rtree_tables(srid=4326, datatype='INT REFERENCES my_other_table (my_id_column)')
```
### Exporting the R-tree
To export the R-tree once the tables have been created, simply call the
`export_to_postgis` function, passing in the R-tree instance (and optionally an SRID):
```python
rtree_id = export_to_postgis(tree, srid=4326)
```
This function populates the `rtree`, `rtree_node`, and `rtree_entry` tables with
the data from the R-tree, and returns the ID of the newly-inserted R-tree in the
`rtree` table.
Note that if you used a schema other than `public` when calling
`create_rtree_tables`, you will need to pass in the same schema when calling
`export_to_postgis`:
```python
rtree_id = export_to_postgis(tree, srid=4326, schema='temp')
```
### Viewing the Data Using QGIS
[QGIS](https://qgis.org/en/site/) is a popular and freely-available GIS viewer which
can be used to visualize the exported R-tree data. To do so, launch QGIS and create
a new project. Then, follow these steps to add the exported R-tree data as a layer:
* Go to Layer → Add Layer → Add PostGIS Layers
* Connect to the database where you exported the data
* Select either the `rtree_node` or `rtree_entry` table, depending on which part of
the structure you wish to visualize. For this example, we will be looking at the
nodes, so select `rtree_node`.
* Optionally, you can set a layer filter to only include the nodes belonging to a
particular tree (if you exported multiple R-trees). To do so, click the
**Set Filter** button, and enter a filter expression (such as `rtree_id=1`).
* Click **Add**
At this point, the layer will be displaying all nodes at every level of the tree,
which may be a bit hard to decipher if you have a lot of data. After adjusting the
layer style to make it partially transparent, here is an example of what an R-tree
with a couple hundred leaf entries might look like (41 nodes across 3 levels):

To make it easier to understand the structure, it might help to be able to view each
level of the tree independently. To do this, double click the layer in the Layers
panel, switch to the Style tab, and change the style type at the top from
"Single symbol" (the default) to "Categorized". Then in the Column dropdown, select
the "level" column. You can optionally assign a color ramp or use random colors so
that each level gets a different color. Then click **Classify** to automatically
create a separate style for each layer:

Now in the layers panel, each level will be shown as a separate entry and can be
toggled on and off, making it possible to explore the R-tree structure one level
at a time:

The advantage with exporting the data to QGIS is you can also bring in your
original dataset as a layer to see how it was partitioned spatially. Further,
you can import multiple R-trees as separate layers and be able to compare them
side by side.
Below, I am using a subset of the FAA airspace data for a portion of the
Northeastern US, and then toggling each level of the `rtree_node` layer individually
so we can examine the resulting R-tree structure one level at a time. After
compositing these together, you can see how the Guttman R-Tree performs against
R*.
**Guttman**:

**R\*-Tree**:

It is evident that R* has resulted in more square-like bounding rectangles with
less overlap at the intermediate levels, compared to Guttman. The areas of overlap
are made especially evident when using a partially transparent fill. Ideally, the
spatial partitioning scheme should aim to minimize this overlap, since a query to
find the leaf entry for a given point would require visiting multiple subtrees if
that point happens to land in one of these darker shaded areas of overlap.
You can also write a query to analyze the amount of overlap that resulted in each
level of the tree. For example, the query below returns the total amount of overlap
area of all nodes at level 2 of an exported R-tree having ID 1:
```postgresql
SELECT ST_Area(ST_Union(ST_Intersection(n1.bbox, n2.bbox))) AS OverlapArea
FROM temp.rtree t
INNER JOIN temp.rtree_node n1 ON n1.rtree_id = t.id
INNER JOIN temp.rtree_node n2 ON n2.rtree_id = t.id AND n1.level = n2.level
WHERE
t.id = 1
AND n1.level = 2
AND ST_Overlaps(n1.bbox, n2.bbox)
AND n1.id <> n2.id;
```
Extending this even further, you can compare the total overlap area of multiple exported
R-trees by level:
```postgresql
SELECT
CASE t.id
WHEN 1 THEN 'Guttman'
WHEN 2 THEN 'R*'
END AS tree,
n.level,
ST_Area(ST_Union(ST_Intersection(n.bbox, n2.bbox))) AS OverlapArea
FROM temp.rtree t
INNER JOIN temp.rtree_node n ON n.rtree_id = t.id
INNER JOIN temp.rtree_node n2 ON n2.rtree_id = t.id AND n.level = n2.level
WHERE
ST_Overlaps(n.bbox, n2.bbox)
AND n.id <> n2.id
GROUP BY
t.id,
n.level
ORDER BY
t.id,
n.level;
```
The above query may return a result like the following:
tree | level | OverlapArea |
--------|-------|-------------|
Guttman | 1 | 7.89e+11 |
Guttman | 2 | 9.12e+11 |
Guttman | 3 | 4.75e+11 |
R* | 1 | 3.97e+11 |
R* | 2 | 4.35e+11 |
R* | 3 | 1.80e+11 |
In the above example, the R*-Tree (`id`=2) achieved a smaller overlap area at
every level of the tree compared to Guttman (`id`=1).
### Cleaning Up
As mentioned above, when you call `export_to_postgis`, the existing data in the
tables is *not* cleared. This allows you to export multiple R-trees at once and
compare them side-by-side.
However, for simplicity, you may wish to clear out the existing data prior to
exporting new data. To do so, call `clear_rtree_tables`:
```python
clear_rtree_tables()
```
This will perform a SQL `TRUNCATE` on all R-tree tables.
Note that if you created the tables in a different schema (other than `public`),
you will need to pass in that same schema to this function:
```python
clear_rtree_tables(schema='temp')
```
You may also wish to completely drop all the tables that were created by
`create_rtree_tables`. To do so, call `drop_rtree_tables`:
```python
drop_rtree_tables()
```
Again, you may need to pass in a schema if it is something other than `public`:
```python
drop_rtree_tables(schema='temp')
```
### Alternate Database Connection Handling Methods
As mentioned earlier in this guide, instead of initializing a connection pool,
you have other options for how to handle establishing database connections when
using this library. You can choose to handle opening and closing the connection
yourself and pass in the connection object; alternatively, you can pass in the
connection information as keyword arguments.
To establish the database connection yourself, the typical usage scenario might
look like this:
```python
import psycopg2
from rtreelib import RTree, Rect
from rtreelib.pg import init_db_pool, create_rtree_tables, clear_rtree_tables, export_to_postgis, drop_rtree_tables
# Create an RTree instance with some sample data
t = RTree(max_entries=4)
t.insert('a', Rect(0, 0, 3, 3))
t.insert('b', Rect(2, 2, 4, 4))
t.insert('c', Rect(1, 1, 2, 4))
t.insert('d', Rect(8, 8, 10, 10))
t.insert('e', Rect(7, 7, 9, 9))
# Export R-tree to PostGIS (using explicit connection)
conn = None
try:
conn = psycopg2.connect(user="postgres", password="temp123!", host="localhost", database="mydb")
create_rtree_tables(conn, schema='temp')
rtree_id = export_to_postgis(t, conn=conn, schema='temp')
print(rtree_id)
finally:
if conn:
conn.close()
```
You can also pass in the database connection information separately to each method as
keyword arguments. These keyword arguments should be the same ones as required by the
[psycopg2.connect](http://initd.org/psycopg/docs/module.html#psycopg2.connect) function:
```python
rtree_id = export_to_postgis(tree, schema='temp', user="postgres", password="temp123!", host="localhost", database="mydb")
```
## References
[1]: Nanopoulos, Alexandros & Papadopoulos, Apostolos (2003):
["R-Trees Have Grown Everywhere"](https://pdfs.semanticscholar.org/4e07/e800fe71505fbad686b08334abb49d41fcda.pdf)
[2]: Guttman, A. (1984):
["R-trees: a Dynamic Index Structure for Spatial Searching"](http://www-db.deis.unibo.it/courses/SI-LS/papers/Gut84.pdf)
(PDF), *Proceedings of the 1984 ACM SIGMOD international conference on Management of data – SIGMOD
'84.* p. 47.
[3]: Beckmann, Norbert, et al.
["The R*-tree: an efficient and robust access method for points and rectangles."](https://infolab.usc.edu/csci599/Fall2001/paper/rstar-tree.pdf)
*Proceedings of the 1990 ACM SIGMOD international conference on Management of data.* 1990.
| /rtreelib-0.2.0.tar.gz/rtreelib-0.2.0/README.md | 0.70028 | 0.99178 | README.md | pypi |
from __future__ import unicode_literals
from .base_node_renderer import BaseNodeRenderer
class BaseBlockRenderer(BaseNodeRenderer):
    """Base renderer for block-level nodes.

    Renders every child node and wraps the concatenated result in the
    HTML tag returned by :attr:`_render_tag`.
    """

    def render(self, node):
        tag = self._render_tag
        return "<{0}>{1}</{0}>".format(tag, self._render_content(node))

    def _render_content(self, node):
        """Render each child for which a renderer is registered; children
        without a matching renderer are silently skipped."""
        rendered = []
        for child in node["content"]:
            child_renderer = self._find_renderer(child)
            if child_renderer is not None:
                rendered.append(child_renderer.render(child))
        return "".join(rendered)

    @property
    def _render_tag(self):
        # Generic fallback; concrete block renderers override this property.
        return "div"
class HeadingOneRenderer(BaseBlockRenderer):
    """Renders a level-1 heading node as <h1>."""
    @property
    def _render_tag(self):
        return "h1"
class HeadingTwoRenderer(BaseBlockRenderer):
    """Renders a level-2 heading node as <h2>."""
    @property
    def _render_tag(self):
        return "h2"
class HeadingThreeRenderer(BaseBlockRenderer):
    """Renders a level-3 heading node as <h3>."""
    @property
    def _render_tag(self):
        return "h3"
class HeadingFourRenderer(BaseBlockRenderer):
    """Renders a level-4 heading node as <h4>."""
    @property
    def _render_tag(self):
        return "h4"
class HeadingFiveRenderer(BaseBlockRenderer):
    """Renders a level-5 heading node as <h5>."""
    @property
    def _render_tag(self):
        return "h5"
class HeadingSixRenderer(BaseBlockRenderer):
    """Renders a level-6 heading node as <h6>."""
    @property
    def _render_tag(self):
        return "h6"
class ParagraphRenderer(BaseBlockRenderer):
    """Renders a paragraph node as <p>."""
    @property
    def _render_tag(self):
        return "p"
class OrderedListRenderer(BaseBlockRenderer):
    """Renders an ordered-list node as <ol>."""
    @property
    def _render_tag(self):
        return "ol"
class UnorderedListRenderer(BaseBlockRenderer):
    """Renders an unordered-list node as <ul>."""
    @property
    def _render_tag(self):
        return "ul"
class ListItemRenderer(BaseBlockRenderer):
    """Renders a list-item node as <li>."""
    @property
    def _render_tag(self):
        return "li"
class BlockQuoteRenderer(BaseBlockRenderer):
    """Renders a block-quote node as <blockquote>."""
    @property
    def _render_tag(self):
        return "blockquote"
class HrRenderer(BaseNodeRenderer):
    """Renders a horizontal-rule node as a void <hr /> tag (no content)."""
    def render(self, _node):
        return "<hr />"
class HyperlinkRenderer(BaseBlockRenderer):
    """Renders a hyperlink node as an anchor tag pointing at its URI."""

    def render(self, node):
        uri = node["data"]["uri"]
        inner = self._render_content(node)
        return '<a href="{0}">{1}</a>'.format(uri, inner)
class EntryBlockRenderer(BaseNodeRenderer):
    """Renders an embedded entry by wrapping its target in a <div>.

    NOTE(review): str.format() stringifies the target, so this relies on the
    target having a useful __str__ -- confirm what callers put in "target".
    """
    def render(self, node):
        return "<div>{0}</div>".format(node["data"]["target"])
class AssetHyperlinkRenderer(BaseBlockRenderer):
    """Renders a hyperlink whose target is a Contentful asset.

    Accepts either an SDK ``Asset`` object or a plain dict carrying the raw
    ``fields``/``file`` structure and emits an ``<a>`` tag.
    """

    ANCHOR_HTML = '<a href="{0}">{1}</a>'

    def render(self, node):
        asset = node["data"]["target"]
        # Check by class name instead of instance type to
        # avoid depending on the Contentful SDK.
        if asset.__class__.__name__ == "Asset":
            return self._render_asset(asset, node)
        elif isinstance(asset, dict):
            # Bug fix: the original combined these checks with ``and``, so a
            # dict that had "fields" but no "file" slipped through and later
            # crashed with a bare KeyError instead of this descriptive error.
            if "fields" not in asset or "file" not in asset["fields"]:
                raise Exception("Node target is not an asset - Node: {0}".format(node))
            return self._render_hash(asset, node)
        else:
            raise Exception("Node target is not an asset - Node: {0}".format(node))

    def _render_asset(self, asset, node=None):
        # With a node, its content becomes the (rendered) link text;
        # otherwise fall back to the asset title as plain text.
        return self._render(
            self.__class__.ANCHOR_HTML,
            asset.url(),
            node if node is not None else asset.title,
            bool(node),
        )

    def _render_hash(self, asset, node=None):
        # Same fallback logic as _render_asset, but for the raw dict shape.
        return self._render(
            self.__class__.ANCHOR_HTML,
            asset["fields"]["file"]["url"],
            node if node is not None else asset["fields"]["title"],
            bool(node),
        )

    def _render(self, markup, url, text, formatted=True):
        """Fill *markup* with *url* and *text*; when ``formatted`` is true,
        *text* is a rich-text node and is rendered through the child
        renderers first."""
        if formatted:
            text = self._render_content(text)
        return markup.format(url, text)
class AssetBlockRenderer(AssetHyperlinkRenderer):
    """Renders an embedded asset: images become <img> tags, everything else
    falls back to the hyperlink markup of the parent class."""

    IMAGE_HTML = '<img src="{0}" alt="{1}" />'

    def _render_asset(self, asset, node=None):
        if "contentType" in asset.file and "image" in asset.file["contentType"]:
            return self._render(
                self.__class__.IMAGE_HTML, asset.url(), asset.title, False
            )
        # Bug fix: forward the node so the parent can use its content as the
        # link text (the original dropped it, always falling back to title).
        return super(AssetBlockRenderer, self)._render_asset(asset, node)

    def _render_hash(self, asset, node=None):
        if (
            "contentType" in asset["fields"]["file"]
            and "image" in asset["fields"]["file"]["contentType"]
        ):
            return self._render(
                self.__class__.IMAGE_HTML,
                asset["fields"]["file"]["url"],
                asset["fields"]["title"],
                False,
            )
        # Bug fix: forward the node here as well (original lost it).
        return super(AssetBlockRenderer, self)._render_hash(asset, node)
class TableCellRenderer(BaseBlockRenderer):
    """Renders a table-cell node as <td>."""
    @property
    def _render_tag(self):
        return "td"
class TableHeaderCellRenderer(BaseBlockRenderer):
    """Renders a table-header-cell node as <th>."""
    @property
    def _render_tag(self):
        return "th"
class TableRowRenderer(BaseBlockRenderer):
    """Renders a table-row node as <tr>."""
    @property
    def _render_tag(self):
        return "tr"
class TableRenderer(BaseBlockRenderer):
    """Renders a table node as <table>."""
    @property
    def _render_tag(self):
        return "table"
from __future__ import unicode_literals
from .base_node_renderer import BaseNodeRenderer
class BaseBlockRenderer(BaseNodeRenderer):
    """Base renderer for block-level nodes: renders all children and wraps
    the result in the tag returned by :attr:`_render_tag`."""
    def render(self, node):
        return "<{0}>{1}</{0}>".format(self._render_tag, self._render_content(node))
    def _render_content(self, node):
        """Render each child that has a registered renderer; children without
        one are silently skipped."""
        result = []
        for c in node["content"]:
            renderer = self._find_renderer(c)
            if renderer is None:
                continue
            result.append(renderer.render(c))
        return "".join(result)
    @property
    def _render_tag(self):
        # Generic fallback; concrete block renderers override this property.
        return "div"
class HeadingOneRenderer(BaseBlockRenderer):
    """Renders a level-1 heading node as <h1>."""
    @property
    def _render_tag(self):
        return "h1"
class HeadingTwoRenderer(BaseBlockRenderer):
    """Renders a level-2 heading node as <h2>."""
    @property
    def _render_tag(self):
        return "h2"
class HeadingThreeRenderer(BaseBlockRenderer):
    """Renders a level-3 heading node as <h3>."""
    @property
    def _render_tag(self):
        return "h3"
class HeadingFourRenderer(BaseBlockRenderer):
    """Renders a level-4 heading node as <h4>."""
    @property
    def _render_tag(self):
        return "h4"
class HeadingFiveRenderer(BaseBlockRenderer):
    """Renders a level-5 heading node as <h5>."""
    @property
    def _render_tag(self):
        return "h5"
class HeadingSixRenderer(BaseBlockRenderer):
    """Renders a level-6 heading node as <h6>."""
    @property
    def _render_tag(self):
        return "h6"
class ParagraphRenderer(BaseBlockRenderer):
    """Renders a paragraph node as <p>."""
    @property
    def _render_tag(self):
        return "p"
class OrderedListRenderer(BaseBlockRenderer):
    """Renders an ordered-list node as <ol>."""
    @property
    def _render_tag(self):
        return "ol"
class UnorderedListRenderer(BaseBlockRenderer):
    """Renders an unordered-list node as <ul>."""
    @property
    def _render_tag(self):
        return "ul"
class ListItemRenderer(BaseBlockRenderer):
    """Renders a list-item node as <li>."""
    @property
    def _render_tag(self):
        return "li"
class BlockQuoteRenderer(BaseBlockRenderer):
    """Renders a block-quote node as <blockquote>."""
    @property
    def _render_tag(self):
        return "blockquote"
class HrRenderer(BaseNodeRenderer):
    """Renders a horizontal-rule node as a void <hr /> tag (no content)."""
    def render(self, _node):
        return "<hr />"
class HyperlinkRenderer(BaseBlockRenderer):
    """Renders a hyperlink node as an anchor tag pointing at its URI."""
    def render(self, node):
        return '<a href="{0}">{1}</a>'.format(
            node["data"]["uri"], self._render_content(node)
        )
class EntryBlockRenderer(BaseNodeRenderer):
    """Renders an embedded entry by wrapping its target in a <div>.

    NOTE(review): str.format() stringifies the target, so this relies on the
    target having a useful __str__ -- confirm what callers put in "target".
    """
    def render(self, node):
        return "<div>{0}</div>".format(node["data"]["target"])
class AssetHyperlinkRenderer(BaseBlockRenderer):
    """Renders a hyperlink whose target is a Contentful asset.

    Accepts either an SDK ``Asset`` object or a plain dict carrying the raw
    ``fields``/``file`` structure and emits an ``<a>`` tag.
    """

    ANCHOR_HTML = '<a href="{0}">{1}</a>'

    def render(self, node):
        asset = node["data"]["target"]
        # Check by class name instead of instance type to
        # avoid depending on the Contentful SDK.
        if asset.__class__.__name__ == "Asset":
            return self._render_asset(asset, node)
        elif isinstance(asset, dict):
            # Bug fix: the original combined these checks with ``and``, so a
            # dict that had "fields" but no "file" slipped through and later
            # crashed with a bare KeyError instead of this descriptive error.
            if "fields" not in asset or "file" not in asset["fields"]:
                raise Exception("Node target is not an asset - Node: {0}".format(node))
            return self._render_hash(asset, node)
        else:
            raise Exception("Node target is not an asset - Node: {0}".format(node))

    def _render_asset(self, asset, node=None):
        # With a node, its content becomes the (rendered) link text;
        # otherwise fall back to the asset title as plain text.
        return self._render(
            self.__class__.ANCHOR_HTML,
            asset.url(),
            node if node is not None else asset.title,
            bool(node),
        )

    def _render_hash(self, asset, node=None):
        # Same fallback logic as _render_asset, but for the raw dict shape.
        return self._render(
            self.__class__.ANCHOR_HTML,
            asset["fields"]["file"]["url"],
            node if node is not None else asset["fields"]["title"],
            bool(node),
        )

    def _render(self, markup, url, text, formatted=True):
        """Fill *markup* with *url* and *text*; when ``formatted`` is true,
        *text* is a rich-text node and is rendered through the child
        renderers first."""
        if formatted:
            text = self._render_content(text)
        return markup.format(url, text)
class AssetBlockRenderer(AssetHyperlinkRenderer):
    """Renders an embedded asset: images become <img> tags, everything else
    falls back to the hyperlink markup of the parent class."""

    IMAGE_HTML = '<img src="{0}" alt="{1}" />'

    def _render_asset(self, asset, node=None):
        if "contentType" in asset.file and "image" in asset.file["contentType"]:
            return self._render(
                self.__class__.IMAGE_HTML, asset.url(), asset.title, False
            )
        # Bug fix: forward the node so the parent can use its content as the
        # link text (the original dropped it, always falling back to title).
        return super(AssetBlockRenderer, self)._render_asset(asset, node)

    def _render_hash(self, asset, node=None):
        if (
            "contentType" in asset["fields"]["file"]
            and "image" in asset["fields"]["file"]["contentType"]
        ):
            return self._render(
                self.__class__.IMAGE_HTML,
                asset["fields"]["file"]["url"],
                asset["fields"]["title"],
                False,
            )
        # Bug fix: forward the node here as well (original lost it).
        return super(AssetBlockRenderer, self)._render_hash(asset, node)
class TableCellRenderer(BaseBlockRenderer):
    """Renders a table-cell node as <td>."""
    @property
    def _render_tag(self):
        return "td"
class TableHeaderCellRenderer(BaseBlockRenderer):
    """Renders a table-header-cell node as <th>."""
    @property
    def _render_tag(self):
        return "th"
class TableRowRenderer(BaseBlockRenderer):
    """Renders a table-row node as <tr>."""
    @property
    def _render_tag(self):
        return "tr"
class TableRenderer(BaseBlockRenderer):
    """Renders a table node as <table>."""
    @property
    def _render_tag(self):
        return "table"
from __future__ import unicode_literals
from .base_node_renderer import BaseNodeRenderer
class BaseBlockRenderer(BaseNodeRenderer):
    """Base renderer for block-level nodes: renders all children and wraps
    the result in the tag returned by :attr:`_render_tag`."""
    def render(self, node):
        return "<{0}>{1}</{0}>".format(self._render_tag, self._render_content(node))
    def _render_content(self, node):
        """Render each child that has a registered renderer; children without
        one are silently skipped."""
        result = []
        for c in node["content"]:
            renderer = self._find_renderer(c)
            if renderer is None:
                continue
            result.append(renderer.render(c))
        return "".join(result)
    @property
    def _render_tag(self):
        # Generic fallback; concrete block renderers override this property.
        return "div"
class HeadingOneRenderer(BaseBlockRenderer):
    """Renders a level-1 heading node as <h1>."""
    @property
    def _render_tag(self):
        return "h1"
class HeadingTwoRenderer(BaseBlockRenderer):
    """Renders a level-2 heading node as <h2>."""
    @property
    def _render_tag(self):
        return "h2"
class HeadingThreeRenderer(BaseBlockRenderer):
    """Renders a level-3 heading node as <h3>."""
    @property
    def _render_tag(self):
        return "h3"
class HeadingFourRenderer(BaseBlockRenderer):
    """Renders a level-4 heading node as <h4>."""
    @property
    def _render_tag(self):
        return "h4"
class HeadingFiveRenderer(BaseBlockRenderer):
    """Renders a level-5 heading node as <h5>."""
    @property
    def _render_tag(self):
        return "h5"
class HeadingSixRenderer(BaseBlockRenderer):
    """Renders a level-6 heading node as <h6>."""
    @property
    def _render_tag(self):
        return "h6"
class ParagraphRenderer(BaseBlockRenderer):
    """Renders a paragraph node as <p>."""
    @property
    def _render_tag(self):
        return "p"
class OrderedListRenderer(BaseBlockRenderer):
    """Renders an ordered-list node as <ol>."""
    @property
    def _render_tag(self):
        return "ol"
class UnorderedListRenderer(BaseBlockRenderer):
    """Renders an unordered-list node as <ul>."""
    @property
    def _render_tag(self):
        return "ul"
class ListItemRenderer(BaseBlockRenderer):
    """Renders a list-item node as <li>."""
    @property
    def _render_tag(self):
        return "li"
class BlockQuoteRenderer(BaseBlockRenderer):
    """Renders a block-quote node as <blockquote>."""
    @property
    def _render_tag(self):
        return "blockquote"
class HrRenderer(BaseNodeRenderer):
    """Renders a horizontal-rule node as a void <hr /> tag (no content)."""
    def render(self, _node):
        return "<hr />"
class HyperlinkRenderer(BaseBlockRenderer):
    """Renders a hyperlink node as an anchor tag pointing at its URI."""
    def render(self, node):
        return '<a href="{0}">{1}</a>'.format(
            node["data"]["uri"], self._render_content(node)
        )
class EntryBlockRenderer(BaseNodeRenderer):
    """Renders an embedded entry by wrapping its target in a <div>.

    NOTE(review): str.format() stringifies the target, so this relies on the
    target having a useful __str__ -- confirm what callers put in "target".
    """
    def render(self, node):
        return "<div>{0}</div>".format(node["data"]["target"])
class AssetHyperlinkRenderer(BaseBlockRenderer):
    """Renders a hyperlink whose target is a Contentful asset.

    Accepts either an SDK ``Asset`` object or a plain dict carrying the raw
    ``fields``/``file`` structure and emits an ``<a>`` tag.
    """

    ANCHOR_HTML = '<a href="{0}">{1}</a>'

    def render(self, node):
        asset = node["data"]["target"]
        # Check by class name instead of instance type to
        # avoid depending on the Contentful SDK.
        if asset.__class__.__name__ == "Asset":
            return self._render_asset(asset, node)
        elif isinstance(asset, dict):
            # Bug fix: the original combined these checks with ``and``, so a
            # dict that had "fields" but no "file" slipped through and later
            # crashed with a bare KeyError instead of this descriptive error.
            if "fields" not in asset or "file" not in asset["fields"]:
                raise Exception("Node target is not an asset - Node: {0}".format(node))
            return self._render_hash(asset, node)
        else:
            raise Exception("Node target is not an asset - Node: {0}".format(node))

    def _render_asset(self, asset, node=None):
        # With a node, its content becomes the (rendered) link text;
        # otherwise fall back to the asset title as plain text.
        return self._render(
            self.__class__.ANCHOR_HTML,
            asset.url(),
            node if node is not None else asset.title,
            bool(node),
        )

    def _render_hash(self, asset, node=None):
        # Same fallback logic as _render_asset, but for the raw dict shape.
        return self._render(
            self.__class__.ANCHOR_HTML,
            asset["fields"]["file"]["url"],
            node if node is not None else asset["fields"]["title"],
            bool(node),
        )

    def _render(self, markup, url, text, formatted=True):
        """Fill *markup* with *url* and *text*; when ``formatted`` is true,
        *text* is a rich-text node and is rendered through the child
        renderers first."""
        if formatted:
            text = self._render_content(text)
        return markup.format(url, text)
class AssetBlockRenderer(AssetHyperlinkRenderer):
    """Renders an embedded asset: images become <img> tags, everything else
    falls back to the hyperlink markup of the parent class."""

    IMAGE_HTML = '<img src="{0}" alt="{1}" />'

    def _render_asset(self, asset, node=None):
        if "contentType" in asset.file and "image" in asset.file["contentType"]:
            return self._render(
                self.__class__.IMAGE_HTML, asset.url(), asset.title, False
            )
        # Bug fix: forward the node so the parent can use its content as the
        # link text (the original dropped it, always falling back to title).
        return super(AssetBlockRenderer, self)._render_asset(asset, node)

    def _render_hash(self, asset, node=None):
        if (
            "contentType" in asset["fields"]["file"]
            and "image" in asset["fields"]["file"]["contentType"]
        ):
            return self._render(
                self.__class__.IMAGE_HTML,
                asset["fields"]["file"]["url"],
                asset["fields"]["title"],
                False,
            )
        # Bug fix: forward the node here as well (original lost it).
        return super(AssetBlockRenderer, self)._render_hash(asset, node)
class TableCellRenderer(BaseBlockRenderer):
    """Renders a table-cell node as <td>."""
    @property
    def _render_tag(self):
        return "td"
class TableHeaderCellRenderer(BaseBlockRenderer):
    """Renders a table-header-cell node as <th>."""
    @property
    def _render_tag(self):
        return "th"
class TableRowRenderer(BaseBlockRenderer):
    """Renders a table-row node as <tr>."""
    @property
    def _render_tag(self):
        return "tr"
class TableRenderer(BaseBlockRenderer):
    """Renders a table node as <table>."""
    @property
    def _render_tag(self):
        return "table"
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, DateTime, String, MetaData, ForeignKey
from datetime import datetime
from sqlalchemy.orm import mapper, sessionmaker
import datetime
class ClientDB:
    """Client-side local database (SQLite via SQLAlchemy).

    Tables:
        KnownUsers - list of known users
        Messages   - local message store
    """

    class KnownUsers:
        """Row object mapped to the ``known_users`` table."""
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return f'User: {self.name}'

    class Messages:
        """Row object mapped to the ``messages`` table."""
        def __init__(self, mdatetime, user_id, direction, message):
            self.mdatetime = mdatetime
            self.user_id = user_id
            self.direction = direction
            self.message = message
        # Possible extensions: delivery/read receipts, scheduled delivery, ...

    def __init__(self, user_name):
        self.sql_engine = None
        try:
            self.sql_engine = create_engine(f'sqlite:///client_{user_name}.db3',
                                            pool_recycle=3600,
                                            connect_args={'check_same_thread': False})
        except Exception as e:
            print(e)
        self.metadata = MetaData()
        known_users = Table('known_users', self.metadata,
                            Column('id', Integer, primary_key=True),
                            Column('name', String, nullable=False, unique=True)
                            )
        messages = Table('messages', self.metadata,
                         Column('id', Integer, primary_key=True),
                         # Bug fix: pass the callable, not its result -- the
                         # original called now() once at definition time, so
                         # every row got the same timestamp.
                         Column('mdatetime', DateTime, default=datetime.datetime.now),
                         Column('user_id', ForeignKey('known_users.id')),
                         Column('direction', Integer),  # In = 1, Out = 2
                         Column('message', String)
                         )
        self.metadata.create_all(self.sql_engine)
        mapper(self.KnownUsers, known_users)
        mapper(self.Messages, messages)
        Session = sessionmaker(bind=self.sql_engine)
        self.session = Session()

    def append_user(self, name_user):
        """Add *name_user* to the known-users table if not already present."""
        if not self.session.query(self.KnownUsers).filter_by(name=name_user).count():
            known_users_row = self.KnownUsers(name_user)
            self.session.add(known_users_row)
            self.session.commit()

    def get_user_id(self, name_user):
        """Return the id of *name_user* from the local (!) DB, or None.

        Bug fix: the original returned the Query object itself, which
        append_message then stored as the foreign-key value.
        """
        return self.session.query(self.KnownUsers.id).filter_by(name=name_user).scalar()

    def delete_known_user(self, name_user):
        """Remove *name_user* from the known-users table."""
        self.session.query(self.KnownUsers).filter_by(name=name_user).delete()
        self.session.commit()
        # NOTE(review): messages referencing the deleted user are kept, which
        # breaks referential integrity locally; they all still exist on the
        # server and can be re-requested if the UI ever needs them.

    def append_message(self, name_user, direction, message):
        """Store a message to/from *name_user* (direction: 1 = in, 2 = out)."""
        user_id = self.get_user_id(name_user)
        messages_row = self.Messages(datetime.datetime.now(), user_id, direction, message)
        self.session.add(messages_row)
        self.session.commit()
from copy import deepcopy
from rtsgame.src.Server import Map
from rtsgame.src.utility.constants import *
class WorldState:
    """Authoritative game state: entities, the map and per-player fog-of-war."""

    def __init__(self, game_mode='Singleplayer'):
        self.game_mode = game_mode
        # Bug fix: player_dead was only created inside set_game_mode(), so
        # any read of world.player_dead before the mode was (re)set raised
        # AttributeError.  Initialise it here to match the current mode.
        self.player_dead = [False] if game_mode == 'Singleplayer' else [False, False]
        self.entity = {}                 # entity_id -> entity object
        self.movable_entities = set()
        self.enemies = set()
        self.projectiles = set()
        self.dead_entities = []
        self.map = Map.Map(width=WIDTH, height=HEIGHT, max_rooms=ROOMS,
                           min_room_len=MIN_ROOM_LEN,
                           max_room_len=MAX_ROOM_LEN,
                           random_connections=RANDOM_CONNECTIONS)
        # Per-player fog-of-war: independent copies of the level grid.
        self.first_player_glare = deepcopy(self.map.level)
        self.second_player_glare = deepcopy(self.map.level)
        self.first_player_id = 0
        self.second_player_id = 1

    def get_game_mode(self):
        return self.game_mode

    def set_game_mode(self, game_mode):
        """Switch game mode and reset the per-player alive flags."""
        self.game_mode = game_mode
        if game_mode == 'Singleplayer':
            self.player_dead = [False]
        else:
            self.player_dead = [False, False]

    def get_first_player_id(self):
        return self.first_player_id

    def get_second_player_id(self):
        return self.second_player_id

    def get_position(self, entity_id):
        """Center of the entity's hitbox as an (x, y) tuple."""
        return (
            self.get_box(entity_id).centerx, self.get_box(entity_id).centery)

    def get_direction(self, entity_id):
        return self.entity[entity_id].get_direction()

    def get_velocity(self, entity_id):
        return self.entity[entity_id].get_velocity()

    def get_damage(self, entity_id):
        return self.entity[entity_id].get_damage()

    def get_health(self, entity_id):
        return self.entity[entity_id].get_health()

    def get_box(self, entity_id):
        return self.entity[entity_id].get_box()

    def get_last_attack(self, entity_id):
        return self.entity[entity_id].get_last_attack()

    def get_attack_reload(self, entity_id):
        return self.entity[entity_id].get_attack_reload()

    def set_last_attack(self, entity_id, last_attack):
        self.entity[entity_id].set_last_attack(last_attack)

    def set_attack_reload(self, entity_id, attack_reload):
        self.entity[entity_id].set_attack_reload(attack_reload)

    def set_box(self, entity_id, box):
        self.entity[entity_id].set_box(box)

    def set_position(self, entity_id, position):
        self.entity[entity_id].set_position(position)

    def set_direction(self, entity_id, direction):
        self.entity[entity_id].set_direction(direction)

    def set_velocity(self, entity_id, velocity):
        self.entity[entity_id].set_velocity(velocity)

    def set_damage(self, entity_id, damage):
        self.entity[entity_id].set_damage(damage)

    def set_health(self, entity_id, health):
        self.entity[entity_id].set_health(health)

    def delete_entity(self, entity_id):
        """Remove the entity from the registry and every category set."""
        self.movable_entities.discard(entity_id)
        del self.entity[entity_id]
        self.enemies.discard(entity_id)
        self.projectiles.discard(entity_id)
# Module-level singleton holding the current game world.
world = WorldState()
def recreate_world():
    """Replace the global world with a fresh WorldState (e.g. on new game)."""
    global world
    world = WorldState()
from math import sin, cos, inf
from rtsgame.src.Server.Entity import PlayerEntity, Enemy
from rtsgame.src.Server.WorldState import world
from rtsgame.src.utility.constants import *
class GeometrySystem:
    """Spatial logic: visibility ray-casting, distances, NPC steering and
    collision checks against the world map."""

    def get_visible_tiles(self, entity_id):
        """Cast 360 one-degree rays from the entity and collect the tiles
        reached before (and including) the first wall.

        Side effect: marks non-wall visible tiles on the owning player's
        glare (fog-of-war) map.
        """
        glare_map = None
        visible_tiles = []
        if entity_id == world.first_player_id:
            glare_map = world.first_player_glare
        if entity_id == world.second_player_id:
            # Bug fix: the original assigned world.second_player_id (an int)
            # instead of the glare map, so player 2's fog-of-war was never
            # updated.
            glare_map = world.second_player_glare
        for i in range(360):
            deg = i * 3.1415 / 180
            x0 = world.get_box(entity_id).centerx
            y0 = world.get_box(entity_id).centery
            x = round(cos(deg) * VISION_RANGE) + world.get_box(
                entity_id).centerx
            y = round(sin(deg) * VISION_RANGE) + world.get_box(
                entity_id).centery
            # Walk the ray in diag_dist steps (Chebyshev distance).
            diag_dist = max(abs(x - x0), abs(y - y0))
            for j in range(diag_dist):
                tx = round(x0 + (j / diag_dist) * (x - x0))
                ty = round(y0 + (j / diag_dist) * (y - y0))
                if (tx < 0 or tx >= world.map.width * MAP_SCALE) or (
                        ty < 0 or ty >= world.map.height * MAP_SCALE):
                    break
                if world.map.level[tx][ty] == WALL:
                    # The wall tile itself is visible but stops the ray.
                    visible_tiles.append((tx, ty))
                    break
                visible_tiles.append((tx, ty))
                if glare_map is not None:
                    glare_map[tx][ty] = 1
        return visible_tiles

    @staticmethod
    def get_squared_distance(entity_id1, entity_id2):
        """Squared euclidean distance between two entities' hitbox centers."""
        return (world.get_box(entity_id1).centerx - world.get_box(entity_id2).centerx) ** 2 + \
               (world.get_box(entity_id1).centery - world.get_box(entity_id2).centery) ** 2

    # TODO: proper vision system (these are plain radius checks for now)
    @staticmethod
    def _is_visible(position1: tuple, position2: tuple) -> bool:
        return ((position1[0] - position2[0]) ** 2 + (
                position1[1] - position2[1]) ** 2) ** 0.5 < VISION_RANGE

    @staticmethod
    def _is_attackable(position1: tuple, position2: tuple) -> bool:
        return ((position1[0] - position2[0]) ** 2 + (
                position1[1] - position2[1]) ** 2) ** 0.5 < ATTACK_RANGE

    def get_visible_entities(self, entity_id) -> list:
        """Ids of all entities within VISION_RANGE (the entity itself included)."""
        entities_id = []
        for other_entity_id in world.entity.keys():
            if self._is_visible(world.get_position(entity_id),
                                world.get_position(other_entity_id)):
                entities_id.append(other_entity_id)
        return entities_id

    def get_attackable_entites(self, entity_id) -> list:
        """Ids of hostile entities within ATTACK_RANGE of *entity_id*."""
        entities_id = []
        EnemyClass = None
        if isinstance(world.entity[entity_id], PlayerEntity):
            EnemyClass = Enemy
        if isinstance(world.entity[entity_id], Enemy):
            EnemyClass = PlayerEntity
        if EnemyClass is None:
            # Robustness fix: entities that are neither players nor enemies
            # (e.g. projectiles) have no hostile class; the original crashed
            # here with an unbound EnemyClass.
            return entities_id
        for other_entity_id in world.entity.keys():
            if isinstance(world.entity[other_entity_id], EnemyClass):
                if self._is_attackable(world.get_position(entity_id), world.get_position(other_entity_id)) \
                        and other_entity_id != entity_id:
                    entities_id.append(other_entity_id)
        return entities_id

    def generate_npc_movement(self, npc_id):
        """Greedy one-step direction that brings the NPC closest to its aim;
        (0, 0) when it has no aim."""
        if world.entity[npc_id].get_aim() is None:
            return (0, 0)
        target = world.entity[npc_id].get_aim()
        current = world.get_position(npc_id)
        directions = [(0, 0), (0, 1), (1, 1), (1, 0), (-1, 0), (-1, -1), (0, -1), (1, -1), (-1, 1)]
        ans = (0, 0)
        min_dist = 10 ** 10
        for direction in directions:
            new_cur = (current[0] + direction[0], current[1] + direction[1])
            dist = (new_cur[0] - target[0]) ** 2 + (new_cur[1] - target[1]) ** 2
            if dist < min_dist:
                min_dist = dist
                ans = direction
        return ans

    # Hitbox intersection check
    @staticmethod
    def collide(box1, box2):
        return box1.colliderect(box2)

    @staticmethod
    def collide_with_wall(box):
        """True if any corner of the hitbox lies on a non-passable tile."""
        non_passable_textures = {WALL, STONE}
        return world.map.get(
            *box.topleft) in non_passable_textures or world.map.get(
            *box.topright) \
               in non_passable_textures or world.map.get(
            *box.bottomleft) in non_passable_textures or \
               world.map.get(*box.bottomright) in non_passable_textures

    def find_aim(self, entity_id):
        """Set the entity's aim to the nearest living player in VISION_RANGE."""
        if world.game_mode == "Multiplayer":
            if not world.player_dead[world.get_first_player_id()]:
                dist_to_first_player = self.get_squared_distance(entity_id, world.get_first_player_id())
            else:
                dist_to_first_player = inf
            if not world.player_dead[world.get_second_player_id()]:
                dist_to_second_player = self.get_squared_distance(entity_id, world.get_second_player_id())
            else:
                dist_to_second_player = inf
            dist_to_aim = min(dist_to_first_player, dist_to_second_player)
            if dist_to_first_player < dist_to_second_player:
                probable_aim = world.get_box(world.get_first_player_id()).center
            else:
                probable_aim = world.get_box(world.get_second_player_id()).center
            if dist_to_aim < VISION_RANGE ** 2:
                world.entity[entity_id].set_aim(probable_aim)
        if world.game_mode == "Singleplayer":
            dist_to_first_player = self.get_squared_distance(entity_id, world.get_first_player_id())
            if dist_to_first_player < VISION_RANGE ** 2:
                world.entity[entity_id].set_aim(world.get_box(world.get_first_player_id()).center)
from ..utility.constants import *
class Entity:
    """Base game object: an id, a hitbox and a type tag.

    Setters return ``self`` so calls can be chained (fluent style).
    """

    def __init__(self):
        self._id = None
        self.box = None  # hitbox (a pygame.Rect once geometry is assigned)
        self._type = "entity"

    def accept(self, visitor):
        """Visitor dispatch; concrete entity types must override this."""
        raise NotImplementedError

    def set_id(self, id):
        self._id = id
        return self

    def get_id(self):
        return self._id

    def set_position(self, position):
        self._position = position
        return self

    def get_position(self):
        return self._position

    def set_box(self, box):
        self.box = box
        return self

    def get_box(self):
        return self.box

    def get_type(self):
        return self._type
class MovableEntity(Entity):
    """Entity that can move: adds a scalar velocity and a (dx, dy) direction."""

    def __init__(self):
        super().__init__()
        self._velocity = None
        self._direction = None

    def set_velocity(self, velocity):
        self._velocity = velocity
        return self

    def get_velocity(self):
        return self._velocity

    def set_direction(self, direction):
        self._direction = direction
        return self

    def get_direction(self):
        return self._direction

    def move(self):
        """Advance the hitbox in place by direction * velocity."""
        step_x = self._direction[0] * self._velocity
        step_y = self._direction[1] * self._velocity
        self.box.move_ip(step_x, step_y)
class Enemy(MovableEntity):
    """Hostile movable entity: health, damage, attack cooldown and an aim."""

    def __init__(self):
        super().__init__()
        self._health = None
        self._damage = None
        self._attack_reload = None
        self._last_attack = None
        self._aim_position = None

    # --- health / damage --------------------------------------------------
    def set_health(self, health):
        self._health = health
        return self

    def get_health(self):
        return self._health

    def set_damage(self, damage):
        self._damage = damage
        return self

    def get_damage(self):
        return self._damage

    # --- attack timing ----------------------------------------------------
    def set_last_attack(self, last_attack):
        self._last_attack = last_attack
        return self

    def get_last_attack(self):
        return self._last_attack

    def set_attack_reload(self, attack_reload):
        self._attack_reload = attack_reload
        return self

    def get_attack_reload(self):
        return self._attack_reload

    def attack(self):
        """Concrete enemy types define their attack behaviour."""
        raise NotImplementedError

    # --- aim ----------------------------------------------------------------
    def get_aim(self):
        return self._aim_position

    def set_aim(self, position):
        self._aim_position = position
class Projectile(MovableEntity):
    """A fired projectile: moves, carries damage, and has health fixed at 0."""

    def __init__(self):
        super().__init__()
        self._damage = None
        self._health = 0

    def accept(self, visitor):
        return visitor.visit_projectile(self)

    def set_damage(self, damage):
        self._damage = damage
        return self

    def get_damage(self):
        return self._damage

    def get_health(self):
        return self._health
# TODO MAKE LOADING ANIMATIONS MORE ROBUST
class MeleeEnemy(Enemy):
    """Close-range enemy; dispatched via visitor.visit_melee_enemy."""
    def __init__(self):
        super().__init__()
        self._type = 'melee'
    def accept(self, visitor):
        return visitor.visit_melee_enemy(self)
    def attack(self):
        # No attack logic here yet (no-op).
        pass
class RangedEnemy(Enemy):
    """Ranged enemy; dispatched via visitor.visit_ranged_enemy.

    NOTE(review): unlike MeleeEnemy, _type is never set here, so get_type()
    reports the Enemy default -- presumably it should be 'ranged'; confirm.
    """
    def __init__(self):
        super().__init__()
    def accept(self, visitor):
        return visitor.visit_ranged_enemy(self)
    def attack(self):
        # No attack logic here yet (no-op).
        pass
class PlayerEntity(MovableEntity):
    """The player-controlled entity.

    Starts with the damage/health constants from utility.constants; attack
    timing fields are filled in later by the game systems.
    """

    def __init__(self):
        super().__init__()
        self._type = "player"
        self._damage = PLAYER_START_DAMAGE
        self._health = PLAYER_HEALTH
        self._attack_reload = None
        self._last_attack = None

    def accept(self, visitor):
        return visitor.visit_player(self)

    def set_type(self, type):
        self._type = type
        return self

    def get_type(self):
        return self._type

    def set_damage(self, damage):
        self._damage = damage
        return self

    def get_damage(self):
        return self._damage

    def set_health(self, health):
        self._health = health
        return self

    def get_health(self):
        return self._health

    def set_last_attack(self, last_attack):
        self._last_attack = last_attack
        return self

    def get_last_attack(self):
        return self._last_attack

    def set_attack_reload(self, attack_reload):
        self._attack_reload = attack_reload
        return self

    def get_attack_reload(self):
        return self._attack_reload
import pygame
from rtsgame.src.Client.Sprite import Sprite
from rtsgame.src.utility.constants import WALL, STONE, FLOOR, PIXEL_SCALE
from rtsgame.src.utility.utilities import Vector
# Pre-rendered tile surfaces indexed as _surfaces[tile_type][tile_state],
# where state 0 is the default look and state 1 is the "revealed" variant.
_surfaces = {
    WALL: {
        0: pygame.Surface((PIXEL_SCALE, PIXEL_SCALE)),
        1: pygame.Surface((PIXEL_SCALE, PIXEL_SCALE))
    },
    FLOOR: {
        0: pygame.Surface((PIXEL_SCALE, PIXEL_SCALE)),
        1: pygame.Surface((PIXEL_SCALE, PIXEL_SCALE))
    },
    STONE: {
        0: pygame.Surface((PIXEL_SCALE, PIXEL_SCALE)),
        1: pygame.Surface((PIXEL_SCALE, PIXEL_SCALE))
    }
}
# Flat placeholder colours per tile type (both states start identical).
_surfaces[WALL][0].fill((100, 100, 100))
_surfaces[WALL][1].fill((100, 100, 100))
_surfaces[FLOOR][0].fill((100, 0, 0))
_surfaces[FLOOR][1].fill((100, 0, 0))
_surfaces[STONE][0].fill((100, 100, 0))
_surfaces[STONE][1].fill((100, 100, 0))
# Overlay marking the state-1 variants.  NOTE(review): the red rect covers the
# whole tile, fully replacing the base colour -- confirm this placeholder art
# is intentional.
pygame.draw.rect(_surfaces[WALL][1], (255, 0, 0),
                 (0, 0, PIXEL_SCALE, PIXEL_SCALE))
pygame.draw.rect(_surfaces[FLOOR][1], (255, 0, 0),
                 (0, 0, PIXEL_SCALE, PIXEL_SCALE))
pygame.draw.rect(_surfaces[STONE][1], (255, 0, 0),
                 (0, 0, PIXEL_SCALE, PIXEL_SCALE))
# NOTE(review): convert_alpha() runs at import time and needs an initialised
# display; this module must be imported after pygame.display.set_mode() --
# confirm the import order in the client entry point.
_surfaces[WALL][0] = _surfaces[WALL][0].convert_alpha()
_surfaces[WALL][1] = _surfaces[WALL][1].convert_alpha()
_surfaces[FLOOR][0] = _surfaces[FLOOR][0].convert_alpha()
_surfaces[FLOOR][1] = _surfaces[FLOOR][1].convert_alpha()
_surfaces[STONE][0] = _surfaces[STONE][0].convert_alpha()
_surfaces[STONE][1] = _surfaces[STONE][1].convert_alpha()
class TileSprite(Sprite):
    """Drawable map tile; tile_state 1 marks a tile revealed to the player."""

    def __init__(self, tile_type, col, row, tile_state=0):
        super().__init__()
        self.tile_type = tile_type
        self.tile_state = tile_state
        self.col = col
        self.row = row
        # Hitbox in pixel space; note the row maps to x and the column to y.
        left = self.row * PIXEL_SCALE
        top = self.col * PIXEL_SCALE
        self.box = pygame.Rect(left, top, PIXEL_SCALE, PIXEL_SCALE)

    def set_visible(self):
        """Mark the tile as revealed (switches to the state-1 surface)."""
        self.tile_state = 1

    @property
    def x(self):
        return self.box.left

    @property
    def y(self):
        return self.box.top

    @property
    def position(self):
        return Vector(*self.box.topleft)

    @property
    def sprite(self):
        # Look up the shared pre-rendered surface for this type/state.
        return _surfaces[self.tile_type][self.tile_state]

    def draw(self, surface: pygame.Surface, abs_position: Vector):
        surface.blit(self.sprite, abs_position)
        super().draw(surface, abs_position)
import pygame
import os
from rtsgame.src.utility.utilities import join_paths, Vector
class Animation:
    """An ordered sequence of frames loaded from image files.

    Frames are loaded relative to the package root, run through the given
    transform chain and converted for alpha blitting.
    """

    def __init__(self, filenames, transforms=None, offset=None):
        if transforms is None:
            transforms = Transforms()
        self.offset = None if offset is None else Vector(*offset)
        base_dir = os.path.dirname(__file__)
        loaded = [pygame.image.load(os.path.join(base_dir, "../../" + name))
                  for name in filenames]
        self.images = [transforms(frame).convert_alpha() for frame in loaded]

    def __getitem__(self, item: int) -> pygame.Surface:
        return self.images[item]

    def __len__(self):
        return len(self.images)
def parse_descriptions(descriptions):
    """Build a name -> Animation mapping from animation description rows."""
    animations = {}
    for row in descriptions:
        animations[row['name']] = Animation(
            join_paths(row['folder'], row['sprites'], ),
            transforms=get_transforms(row),
            offset=row['offset'],
        )
    return animations
class Transform:
    """Marker base class for image transforms (callables on surfaces)."""
    pass


class Transforms(Transform):
    """A composite transform that applies its members left to right."""

    def __init__(self, transforms=None):
        # Keep the caller's list object (no copy) to preserve aliasing.
        if transforms is None:
            transforms = []
        self.transforms = transforms

    def append(self, transform):
        self.transforms.append(transform)

    def __call__(self, image: pygame.Surface):
        result = image
        for step in self.transforms:
            result = step(result)
        return result
class Flip(Transform):
    """Mirrors an image horizontally and/or vertically."""
    def __init__(self, flip_x, flip_y):
        self.flip_x = flip_x
        self.flip_y = flip_y
    def __call__(self, image):
        return pygame.transform.flip(image, self.flip_x, self.flip_y)
class Scale(Transform):
    """Scales an image by a uniform factor."""

    def __init__(self, factor):
        self.factor = factor

    def __call__(self, img):
        # Bug fix: pygame.transform.scale takes (surface, (width, height));
        # the original passed three positional arguments (and swapped the
        # width/height order), raising a TypeError whenever applied.
        return pygame.transform.scale(
            img, (img.get_width() * self.factor, img.get_height() * self.factor)
        )
class Resize(Transform):
    """Resizes an image to an explicit (width, height) size.

    Fix: inherit from Transform rather than the composite Transforms --
    Resize is a leaf transform and never used the inherited transform list.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, img):
        return pygame.transform.scale(img, self.size)
def get_transforms(config_row):
    """Build the transform chain described by an animation config row."""
    transforms = Transforms()
    # A Flip is added when either flip key is present (even with a False
    # value), matching how rows declare their intent.
    has_flip = 'flip_x' in config_row or 'flip_y' in config_row
    if has_flip:
        transforms.append(Flip(config_row.get('flip_x', False),
                               config_row.get('flip_y', False)))
    if 'resize' in config_row:
        transforms.append(Resize(config_row['resize']))
    return transforms
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.