repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
beregond/super_state_machine
|
super_state_machine/utils.py
|
generate_checker
|
python
|
def generate_checker(value):
    """Generate state checker for given value.

    :param value: enum member the generated checker tests against.
    :returns: read-only property reporting whether the machine can
        transit to ``value`` from its current state.
    """
    @property
    @wraps(can_be_)
    def checker(self):
        # Delegate to the machine's generic ``can_be_`` with the bound value.
        return self.can_be_(value)
    return checker
|
Generate state checker for given value.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L74-L81
| null |
"""Utilities for core."""
from enum import Enum, unique
from functools import wraps
from .errors import TransitionError
def is_(self, state):
    """Check if machine is in given state."""
    wanted = self._meta['translator'].translate(state)
    return wanted == self.actual_state
def can_be_(self, state):
    """Check if machine can transit to given state."""
    target = self._meta['translator'].translate(state)
    # A "complete" machine or a machine with no state yet allows anything.
    if self._meta['complete'] or self.actual_state is None:
        return True
    allowed = self._meta['transitions'][self.actual_state]
    return target in allowed
def force_set(self, state):
    """Set new state without checking if transition is allowed."""
    translated = self._meta['translator'].translate(state)
    setattr(self, self._meta['state_attribute_name'], translated)
def set_(self, state):
    """Set new state for machine.

    Raises ``TransitionError`` when the transition is not allowed.
    """
    if self.can_be_(state):
        self.force_set(state)
        return
    target = self._meta['translator'].translate(state)
    raise TransitionError(
        "Cannot transit from '{actual_value}' to '{value}'."
        .format(actual_value=self.actual_state.value, value=target.value)
    )
def state_getter(self):
    """Get actual state as value.

    Returns ``None`` when no state (or no ``value`` attribute) is present.
    """
    try:
        current = self.actual_state
    except AttributeError:
        return None
    return getattr(current, 'value', None)
def state_setter(self, value):
    """Set new state for machine (delegates to ``set_``)."""
    self.set_(value)
def generate_getter(value):
    """Generate getter for given value.

    Returns a read-only property reporting whether the machine is
    currently in state ``value``.
    """
    def check_state(self):
        return self.is_(value)
    return property(wraps(is_)(check_state))
def generate_setter(value):
    """Generate setter for given value.

    Returns a method that transits the machine to state ``value``.
    """
    def apply_state(self):
        self.set_(value)
    return wraps(set_)(apply_state)
# Combined read/write ``state`` property installed on generated machines.
state_property = property(state_getter, state_setter)
@property
def actual_state(self):
    """Actual state as `None` or `enum` instance."""
    return getattr(self, self._meta['state_attribute_name'])
@property
def as_enum(self):
    """Return actual state as enum (alias for ``actual_state``)."""
    current = self.actual_state
    return current
class EnumValueTranslator(object):

    """Helps to find enum element by its value."""

    def __init__(self, base_enum):
        """Init.

        :param enum base_enum: Enum, to which elements values are translated.
        """
        # ``unique`` asserts there are no aliased values, so the value ->
        # member mapping below is unambiguous.
        self.base_enum = unique(base_enum)
        self.generate_search_table()

    def generate_search_table(self):
        """Build the value -> enum member lookup table."""
        self.search_table = {member.value: member for member in self.base_enum}

    def translate(self, value):
        """Translate value to enum instance.

        If value is already enum instance, check if this value belongs to base
        enum.
        """
        if self._check_if_already_proper(value):
            return value
        if value not in self.search_table:
            raise ValueError("Value {value} doesn't match any state.".format(
                value=value
            ))
        return self.search_table[value]

    def _check_if_already_proper(self, value):
        """Return True when ``value`` is a member of the base enum.

        Raises ``ValueError`` for enum members of a different enum.
        """
        if not isinstance(value, Enum):
            return False
        if value in self.base_enum:
            return True
        raise ValueError(
            "Given value ('{value}') doesn't belong to states enum."
            .format(value=value)
        )
|
beregond/super_state_machine
|
super_state_machine/utils.py
|
generate_setter
|
python
|
def generate_setter(value):
@wraps(set_)
def setter(self):
self.set_(value)
return setter
|
Generate setter for given value.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L84-L90
| null |
"""Utilities for core."""
from enum import Enum, unique
from functools import wraps
from .errors import TransitionError
def is_(self, state):
"""Check if machine is in given state."""
translator = self._meta['translator']
state = translator.translate(state)
return self.actual_state == state
def can_be_(self, state):
"""Check if machine can transit to given state."""
translator = self._meta['translator']
state = translator.translate(state)
if self._meta['complete']:
return True
if self.actual_state is None:
return True
transitions = self._meta['transitions'][self.actual_state]
return state in transitions
def force_set(self, state):
"""Set new state without checking if transition is allowed."""
translator = self._meta['translator']
state = translator.translate(state)
attr = self._meta['state_attribute_name']
setattr(self, attr, state)
def set_(self, state):
"""Set new state for machine."""
if not self.can_be_(state):
state = self._meta['translator'].translate(state)
raise TransitionError(
"Cannot transit from '{actual_value}' to '{value}'."
.format(actual_value=self.actual_state.value, value=state.value)
)
self.force_set(state)
def state_getter(self):
"""Get actual state as value."""
try:
return self.actual_state.value
except AttributeError:
return None
def state_setter(self, value):
"""Set new state for machine."""
self.set_(value)
def generate_getter(value):
"""Generate getter for given value."""
@property
@wraps(is_)
def getter(self):
return self.is_(value)
return getter
def generate_checker(value):
"""Generate state checker for given value."""
@property
@wraps(can_be_)
def checker(self):
return self.can_be_(value)
return checker
state_property = property(state_getter, state_setter)
@property
def actual_state(self):
"""Actual state as `None` or `enum` instance."""
attr = self._meta['state_attribute_name']
return getattr(self, attr)
@property
def as_enum(self):
"""Return actual state as enum."""
return self.actual_state
class EnumValueTranslator(object):
"""Helps to find enum element by its value."""
def __init__(self, base_enum):
"""Init.
:param enum base_enum: Enum, to which elements values are translated.
"""
base_enum = unique(base_enum)
self.base_enum = base_enum
self.generate_search_table()
def generate_search_table(self):
self.search_table = dict(
(item.value, item) for item in list(self.base_enum)
)
def translate(self, value):
"""Translate value to enum instance.
If value is already enum instance, check if this value belongs to base
enum.
"""
if self._check_if_already_proper(value):
return value
try:
return self.search_table[value]
except KeyError:
raise ValueError("Value {value} doesn't match any state.".format(
value=value
))
def _check_if_already_proper(self, value):
if isinstance(value, Enum):
if value in self.base_enum:
return True
raise ValueError(
"Given value ('{value}') doesn't belong to states enum."
.format(value=value)
)
return False
|
beregond/super_state_machine
|
super_state_machine/utils.py
|
EnumValueTranslator.translate
|
python
|
def translate(self, value):
if self._check_if_already_proper(value):
return value
try:
return self.search_table[value]
except KeyError:
raise ValueError("Value {value} doesn't match any state.".format(
value=value
))
|
Translate value to enum instance.
If value is already enum instance, check if this value belongs to base
enum.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/utils.py#L127-L142
|
[
"def _check_if_already_proper(self, value):\n if isinstance(value, Enum):\n if value in self.base_enum:\n return True\n raise ValueError(\n \"Given value ('{value}') doesn't belong to states enum.\"\n .format(value=value)\n )\n return False\n"
] |
class EnumValueTranslator(object):
"""Helps to find enum element by its value."""
def __init__(self, base_enum):
"""Init.
:param enum base_enum: Enum, to which elements values are translated.
"""
base_enum = unique(base_enum)
self.base_enum = base_enum
self.generate_search_table()
def generate_search_table(self):
self.search_table = dict(
(item.value, item) for item in list(self.base_enum)
)
def _check_if_already_proper(self, value):
if isinstance(value, Enum):
if value in self.base_enum:
return True
raise ValueError(
"Given value ('{value}') doesn't belong to states enum."
.format(value=value)
)
return False
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._set_up_context
|
python
|
def _set_up_context(cls):
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
|
Create context to keep all needed variables in.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L64-L69
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._check_states_enum
|
python
|
def _check_states_enum(cls):
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
|
Check if states enum exists and is proper one.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L72-L90
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._check_if_states_are_strings
|
python
|
def _check_if_states_are_strings(cls):
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
|
Check if all states are strings.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L93-L100
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._check_state_value
|
python
|
def _check_state_value(cls):
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
|
Check initial state value - if is proper and translate it.
Initial state is required.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L103-L122
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._add_standard_attributes
|
python
|
def _add_standard_attributes(cls):
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
|
Add attributes common to all state machines.
These are methods for setting and checking state etc.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L125-L142
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._generate_standard_transitions
|
python
|
def _generate_standard_transitions(cls):
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
|
Generate methods used for transitions.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L145-L161
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._generate_standard_methods
|
python
|
def _generate_standard_methods(cls):
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
|
Generate standard setters, getters and checkers.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L164-L179
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._add_new_methods
|
python
|
def _add_new_methods(cls):
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
|
Add all generated methods to result class.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L235-L244
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
@classmethod
def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
beregond/super_state_machine
|
super_state_machine/machines.py
|
StateMachineMetaclass._set_complete_option
|
python
|
def _set_complete_option(cls):
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete
|
Check and set complete option.
|
train
|
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L247-L258
| null |
class StateMachineMetaclass(type):
"""Metaclass for state machine, to build all its logic."""
def __new__(cls, name, bases, attrs):
"""Create state machine and add all logic and methods to it."""
cls._set_up_context()
new_class = super(cls, cls).__new__(cls, name, bases, attrs)
cls.context.new_class = new_class
parents = [b for b in bases if isinstance(b, cls)]
if not parents:
return cls.context.new_class
cls._set_up_config_getter()
cls._check_states_enum()
cls._check_if_states_are_strings()
cls._set_up_translator()
cls._calculate_state_name()
cls._check_state_value()
cls._add_standard_attributes()
cls._generate_standard_transitions()
cls._generate_standard_methods()
cls._generate_named_checkers()
cls._generate_named_transitions()
cls._add_new_methods()
cls._set_complete_option()
cls._complete_meta_for_new_class()
new_class = cls.context.new_class
del cls.context
return new_class
@classmethod
def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {}
@classmethod
def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide enum instance to define available states.')
@classmethod
def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not string. Only strings are allowed.'
.format(name=item.name)
)
@classmethod
def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value
@classmethod
def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_)
@classmethod
def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set()
@classmethod
def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set
@classmethod
def _generate_named_checkers(cls):
named_checkers = cls.context.get_config('named_checkers', None) or []
for method, key in named_checkers:
if method in cls.context.new_methods:
raise ValueError(
"Name collision for named checker '{checker}' - this "
"name is reserved for other auto generated method."
.format(checker=method)
)
key = cls.context.new_meta['translator'].translate(key)
cls.context.new_methods[method] = utils.generate_checker(key.value)
@classmethod
def _generate_named_transitions(cls):
named_transitions = (
cls.context.get_config('named_transitions', None) or [])
translator = cls.context.new_meta['translator']
for item in named_transitions:
method, key, from_values = cls._unpack_named_transition_tuple(item)
if method in cls.context.new_methods:
raise ValueError(
"Name collision for transition '{transition}' - this name "
"is reserved for other auto generated method."
.format(transition=method)
)
key = translator.translate(key)
cls.context.new_methods[method] = utils.generate_setter(key)
if from_values:
from_values = [translator.translate(k) for k in from_values]
for s in cls.context.states_enum:
if s in from_values:
cls.context.new_transitions[s].add(key)
@classmethod
def _unpack_named_transition_tuple(cls, item):
try:
method, key = item
from_values = cls.context['states_enum']
except ValueError:
method, key, from_values = item
if from_values is None:
from_values = []
if not isinstance(from_values, list):
from_values = list((from_values,))
return method, key, from_values
@classmethod
def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name)
)
setattr(cls.context.new_class, name, method)
@classmethod
@classmethod
def _set_up_config_getter(cls):
meta = getattr(cls.context.new_class, 'Meta', DefaultMeta)
cls.context.get_config = partial(get_config, meta)
@classmethod
def _set_up_translator(cls):
translator = utils.EnumValueTranslator(cls.context['states_enum'])
cls.context.new_meta['translator'] = translator
@classmethod
def _calculate_state_name(cls):
cls.context.state_name = 'state'
new_state_name = '_' + cls.context.state_name
cls.context.new_meta['state_attribute_name'] = new_state_name
@classmethod
def _complete_meta_for_new_class(cls):
cls.context.new_meta['transitions'] = cls.context.new_transitions
cls.context.new_meta['config_getter'] = cls.context['get_config']
setattr(cls.context.new_class, '_meta', cls.context['new_meta'])
|
codeforamerica/three
|
three/api.py
|
city
|
python
|
def city(name=None):
info = find_info(name)
os.environ['OPEN311_CITY_INFO'] = dumps(info)
return Three(**info)
|
Store the city that will be queried against.
>>> three.city('sf')
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/api.py#L23-L31
|
[
"def find_info(name=None):\n \"\"\"Find the needed city server information.\"\"\"\n if not name:\n return list(servers.keys()) \n name = name.lower()\n if name in servers:\n info = servers[name]\n else:\n raise CityNotFound(\"Could not find the specified city: %s\" % name)\n return info\n"
] |
"""
Simple, top-level functions for working with the Open311 API.
"""
import os
from simplejson import dumps
from .cities import find_info
from .core import Three
def key(key=None):
"""
Save your API key to the global environment.
>>> three.api_key('my_api_key')
"""
if key:
os.environ['OPEN311_API_KEY'] = key
return os.environ['OPEN311_API_KEY']
def cities():
"""Return a list of available cities."""
info = find_info()
return info
def dev(endpoint, **kwargs):
"""
Use an endpoint and any additional keyword arguments rather than one
of the pre-defined cities. Similar to the `city` function, but useful for
development.
"""
kwargs['endpoint'] = endpoint
os.environ['OPEN311_CITY_INFO'] = dumps(kwargs)
return Three(**kwargs)
def discovery(path=None, **kwargs):
"""
Check a city's Open311 discovery endpoint.
>>> three.city('sf')
>>> three.discovery()
"""
return Three().discovery(path, **kwargs)
def post(code=None, **kwargs):
"""
Send a POST service request to a city's Open311 endpoint.
>>> three.city('sf')
>>> three.post('123', address='155 9th St', name='Zach Williams',
... phone='555-5555', description='My issue description'.)
{'successful': {'request': 'post'}}
"""
return Three().post(code, **kwargs)
def request(code, **kwargs):
"""
Find a specific request in a city.
>>> three.city('sf')
>>> three.request('12345')
"""
return Three().request(code, **kwargs)
def requests(code=None, **kwargs):
"""
Find service requests for a city.
>>> three.city('sf')
>>> three.requests()
"""
return Three().requests(code, **kwargs)
def services(code=None, **kwargs):
"""
Find services for a given city.
>>> three.city('sf')
>>> three.services()
"""
return Three().services(code, **kwargs)
def token(code, **kwargs):
"""
Find service request information for a specific token.
>>> three.city('sf')
>>> three.token('123abc')
"""
return Three().token(code, **kwargs)
|
codeforamerica/three
|
three/api.py
|
dev
|
python
|
def dev(endpoint, **kwargs):
kwargs['endpoint'] = endpoint
os.environ['OPEN311_CITY_INFO'] = dumps(kwargs)
return Three(**kwargs)
|
Use an endpoint and any additional keyword arguments rather than one
of the pre-defined cities. Similar to the `city` function, but useful for
development.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/api.py#L40-L48
| null |
"""
Simple, top-level functions for working with the Open311 API.
"""
import os
from simplejson import dumps
from .cities import find_info
from .core import Three
def key(key=None):
"""
Save your API key to the global environment.
>>> three.api_key('my_api_key')
"""
if key:
os.environ['OPEN311_API_KEY'] = key
return os.environ['OPEN311_API_KEY']
def city(name=None):
"""
Store the city that will be queried against.
>>> three.city('sf')
"""
info = find_info(name)
os.environ['OPEN311_CITY_INFO'] = dumps(info)
return Three(**info)
def cities():
"""Return a list of available cities."""
info = find_info()
return info
def discovery(path=None, **kwargs):
"""
Check a city's Open311 discovery endpoint.
>>> three.city('sf')
>>> three.discovery()
"""
return Three().discovery(path, **kwargs)
def post(code=None, **kwargs):
"""
Send a POST service request to a city's Open311 endpoint.
>>> three.city('sf')
>>> three.post('123', address='155 9th St', name='Zach Williams',
... phone='555-5555', description='My issue description'.)
{'successful': {'request': 'post'}}
"""
return Three().post(code, **kwargs)
def request(code, **kwargs):
"""
Find a specific request in a city.
>>> three.city('sf')
>>> three.request('12345')
"""
return Three().request(code, **kwargs)
def requests(code=None, **kwargs):
"""
Find service requests for a city.
>>> three.city('sf')
>>> three.requests()
"""
return Three().requests(code, **kwargs)
def services(code=None, **kwargs):
"""
Find services for a given city.
>>> three.city('sf')
>>> three.services()
"""
return Three().services(code, **kwargs)
def token(code, **kwargs):
"""
Find service request information for a specific token.
>>> three.city('sf')
>>> three.token('123abc')
"""
return Three().token(code, **kwargs)
|
codeforamerica/three
|
three/cities.py
|
find_info
|
python
|
def find_info(name=None):
if not name:
return list(servers.keys())
name = name.lower()
if name in servers:
info = servers[name]
else:
raise CityNotFound("Could not find the specified city: %s" % name)
return info
|
Find the needed city server information.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/cities.py#L10-L19
| null |
"""
A dict of information needed to query city Open311 servers.
"""
class CityNotFound(Exception):
pass
servers = {
'bainbridge': {
'endpoint': 'http://seeclickfix.com/bainbridge-island/open311/'
},
'baltimore': {
'endpoint': 'http://311.baltimorecity.gov/open311/v2/'
},
'bloomington': {
'endpoint': 'https://bloomington.in.gov/crm/open311/v2/'
},
'boston': {
'endpoint': 'https://mayors24.cityofboston.gov/open311/v2/'
},
'brookline': {
'endpoint': 'http://spot.brooklinema.gov/open311/v2/'
},
'chicago': {
'endpoint': 'http://311api.cityofchicago.org/open311/v2/',
'discovery': 'http://311api.cityofchicago.org/open311/discovery.json'
},
'corona': {
'endpoint': 'http://seeclickfix.com/corona/open311/'
},
'darwin': {
'endpoint': 'http://seeclickfix.com/aus_darwin/open311/'
},
'dc': {
'endpoint': 'http://app.311.dc.gov/CWI/Open311/v2/',
'format': 'xml',
'jurisdiction': 'dc.gov'
},
'district of columbia': {
'endpoint': 'http://app.311.dc.gov/CWI/Open311/v2/',
'format': 'xml',
'jurisdiction': 'dc.gov'
},
'deleon': {
'endpoint': 'http://seeclickfix.com/de-leon/open311/'
},
'dunwoody': {
'endpoint': 'http://seeclickfix.com/dunwoody_ga/open311/'
},
'fontana': {
'endpoint': 'http://seeclickfix.com/fontana/open311/'
},
'grand rapids': {
'endpoint': 'http://grcity.spotreporters.com/open311/v2/'
},
'hillsborough': {
'endpoint': 'http://seeclickfix.com/hillsborough/open311/'
},
'howard county': {
'endpoint': 'http://seeclickfix.com/md_howard-county/open311/'
},
'huntsville': {
'endpoint': 'http://seeclickfix.com/huntsville/open311/'
},
'macon': {
'endpoint': 'http://seeclickfix.com/macon/open311/'
},
'manor': {
'endpoint': 'http://seeclickfix.com/manor/open311/'
},
'new haven': {
'endpoint': 'http://seeclickfix.com/new-haven/open311/'
},
'newark': {
'endpoint': 'http://seeclickfix.com/newark_2/open311/'
},
'newberg': {
'endpoint': 'http://seeclickfix.com/newberg/open311/'
},
'newnan': {
'endpoint': 'http://seeclickfix.com/newnan/open311/'
},
'olathe': {
'endpoint': 'http://seeclickfix.com/olathe/open311/'
},
'raleigh': {
'endpoint': 'http://seeclickfix.com/raleigh/open311/'
},
'richmond': {
'endpoint': 'http://seeclickfix.com/richmond/open311/'
},
'roosevelt island': {
'endpoint': 'http://seeclickfix.com/roosevelt-island/open311/'
},
'russell springs': {
'endpoint': 'http://seeclickfix.com/russell-springs/open311/'
},
'san francisco': {
'endpoint': 'https://open311.sfgov.org/V2/',
'format': 'xml',
'jurisdiction': 'sfgov.org'
},
'sf': {
'endpoint': 'https://open311.sfgov.org/V2/',
'format': 'xml',
'jurisdiction': 'sfgov.org'
},
'toronto': {
'endpoint': 'https://secure.toronto.ca/webwizard/ws/',
'jurisdiction': 'toronto.ca'
},
'tucson': {
'endpoint': 'http://seeclickfix.com/tucson/open311/'
},
}
|
codeforamerica/three
|
three/core.py
|
Three.configure
|
python
|
def configure(self, endpoint=None, **kwargs):
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
|
Configure a previously initialized instance of the class.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L66-L87
|
[
"def _global_api_key(self):\n \"\"\"\n If a global Open311 API key is available as an environment variable,\n then it will be used when querying.\n \"\"\"\n if 'OPEN311_API_KEY' in os.environ:\n api_key = os.environ['OPEN311_API_KEY']\n else:\n api_key = ''\n return api_key\n",
"def _configure_endpoint(self, endpoint):\n \"\"\"Configure the endpoint with a schema and end slash.\"\"\"\n if not endpoint.startswith('http'):\n endpoint = 'https://' + endpoint\n if not endpoint.endswith('/'):\n endpoint += '/'\n return endpoint\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three._configure_endpoint
|
python
|
def _configure_endpoint(self, endpoint):
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
|
Configure the endpoint with a schema and end slash.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L89-L95
| null |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.get
|
python
|
def get(self, *args, **kwargs):
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
|
Perform a get request.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L107-L118
|
[
"def _create_path(self, *args):\n \"\"\"Create URL path for endpoint and args.\"\"\"\n args = filter(None, args)\n path = self.endpoint + '/'.join(args) + '.%s' % (self.format)\n return path\n",
"def _get_keywords(self, **kwargs):\n \"\"\"Format GET request parameters and keywords.\"\"\"\n if self.jurisdiction and 'jurisdiction_id' not in kwargs:\n kwargs['jurisdiction_id'] = self.jurisdiction\n if 'count' in kwargs:\n kwargs['page_size'] = kwargs.pop('count')\n if 'start' in kwargs:\n start = kwargs.pop('start')\n if 'end' in kwargs:\n end = kwargs.pop('end')\n else:\n end = date.today().strftime('%m-%d-%Y')\n start, end = self._format_dates(start, end)\n kwargs['start_date'] = start\n kwargs['end_date'] = end\n elif 'between' in kwargs:\n start, end = kwargs.pop('between')\n start, end = self._format_dates(start, end)\n kwargs['start_date'] = start\n kwargs['end_date'] = end\n return kwargs\n",
"def convert(self, content, conversion):\n \"\"\"Convert content to Python data structures.\"\"\"\n if not conversion:\n data = content\n elif self.format == 'json':\n data = json.loads(content)\n elif self.format == 'xml':\n content = xml(content)\n first = list(content.keys())[0]\n data = content[first]\n else:\n data = content\n return data\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three._get_keywords
|
python
|
def _get_keywords(self, **kwargs):
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
|
Format GET request parameters and keywords.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L120-L140
| null |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three._format_dates
|
python
|
def _format_dates(self, start, end):
start = self._split_date(start)
end = self._split_date(end)
return start, end
|
Format start and end dates.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L142-L146
| null |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three._split_date
|
python
|
def _split_date(self, time):
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
|
Split apart a date string.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L148-L156
| null |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.convert
|
python
|
def convert(self, content, conversion):
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
|
Convert content to Python data structures.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L158-L170
| null |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.discovery
|
python
|
def discovery(self, url=None):
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
|
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L172-L192
|
[
"def get(self, *args, **kwargs):\n \"\"\"Perform a get request.\"\"\"\n if 'convert' in kwargs:\n conversion = kwargs.pop('convert')\n else:\n conversion = True\n kwargs = self._get_keywords(**kwargs)\n url = self._create_path(*args)\n request = self.session.get(url, params=kwargs)\n content = request.content\n self._request = request\n return self.convert(content, conversion)\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.services
|
python
|
def services(self, code=None, **kwargs):
data = self.get('services', code, **kwargs)
return data
|
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L194-L205
|
[
"def get(self, *args, **kwargs):\n \"\"\"Perform a get request.\"\"\"\n if 'convert' in kwargs:\n conversion = kwargs.pop('convert')\n else:\n conversion = True\n kwargs = self._get_keywords(**kwargs)\n url = self._create_path(*args)\n request = self.session.get(url, params=kwargs)\n content = request.content\n self._request = request\n return self.convert(content, conversion)\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.requests
|
python
|
def requests(self, code=None, **kwargs):
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
|
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L207-L220
|
[
"def get(self, *args, **kwargs):\n \"\"\"Perform a get request.\"\"\"\n if 'convert' in kwargs:\n conversion = kwargs.pop('convert')\n else:\n conversion = True\n kwargs = self._get_keywords(**kwargs)\n url = self._create_path(*args)\n request = self.session.get(url, params=kwargs)\n content = request.content\n self._request = request\n return self.convert(content, conversion)\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.request
|
python
|
def request(self, id, **kwargs):
data = self.get('requests', id, **kwargs)
return data
|
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L222-L230
|
[
"def get(self, *args, **kwargs):\n \"\"\"Perform a get request.\"\"\"\n if 'convert' in kwargs:\n conversion = kwargs.pop('convert')\n else:\n conversion = True\n kwargs = self._get_keywords(**kwargs)\n url = self._create_path(*args)\n request = self.session.get(url, params=kwargs)\n content = request.content\n self._request = request\n return self.convert(content, conversion)\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.post
|
python
|
def post(self, service_code='0', **kwargs):
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
|
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L232-L257
|
[
"def _create_path(self, *args):\n \"\"\"Create URL path for endpoint and args.\"\"\"\n args = filter(None, args)\n path = self.endpoint + '/'.join(args) + '.%s' % (self.format)\n return path\n",
"def convert(self, content, conversion):\n \"\"\"Convert content to Python data structures.\"\"\"\n if not conversion:\n data = content\n elif self.format == 'json':\n data = json.loads(content)\n elif self.format == 'xml':\n content = xml(content)\n first = list(content.keys())[0]\n data = content[first]\n else:\n data = content\n return data\n",
"def _post_keywords(self, **kwargs):\n \"\"\"Configure keyword arguments for Open311 POST requests.\"\"\"\n if self.jurisdiction and 'jurisdiction_id' not in kwargs:\n kwargs['jurisdiction_id'] = self.jurisdiction\n if 'address' in kwargs:\n address = kwargs.pop('address')\n kwargs['address_string'] = address\n if 'name' in kwargs:\n first, last = kwargs.pop('name').split(' ')\n kwargs['first_name'] = first\n kwargs['last_name'] = last\n if 'api_key' not in kwargs:\n kwargs['api_key'] = self.api_key\n return kwargs\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three._post_keywords
|
python
|
def _post_keywords(self, **kwargs):
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
|
Configure keyword arguments for Open311 POST requests.
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L259-L272
| null |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def token(self, id, **kwargs):
"""
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
"""
data = self.get('tokens', id, **kwargs)
return data
|
codeforamerica/three
|
three/core.py
|
Three.token
|
python
|
def token(self, id, **kwargs):
data = self.get('tokens', id, **kwargs)
return data
|
Retrieve a service request ID from a token.
>>> Three('api.city.gov').token('12345')
{'service_request_id': {'for': {'token': '12345'}}}
|
train
|
https://github.com/codeforamerica/three/blob/67b4a4b233a57aa7995d01f6b0f69c2e85aea6c0/three/core.py#L274-L282
|
[
"def get(self, *args, **kwargs):\n \"\"\"Perform a get request.\"\"\"\n if 'convert' in kwargs:\n conversion = kwargs.pop('convert')\n else:\n conversion = True\n kwargs = self._get_keywords(**kwargs)\n url = self._create_path(*args)\n request = self.session.get(url, params=kwargs)\n content = request.content\n self._request = request\n return self.convert(content, conversion)\n"
] |
class Three(object):
"""The main class for interacting with the Open311 API."""
def __init__(self, endpoint=None, **kwargs):
keywords = defaultdict(str)
keywords.update(kwargs)
if endpoint:
endpoint = self._configure_endpoint(endpoint)
keywords['endpoint'] = endpoint
elif 'OPEN311_CITY_INFO' in os.environ:
info = json.loads(os.environ['OPEN311_CITY_INFO'])
endpoint = info['endpoint']
endpoint = self._configure_endpoint(endpoint)
keywords.update(info)
keywords['endpoint'] = endpoint
self._keywords = keywords
self.configure()
def _global_api_key(self):
"""
If a global Open311 API key is available as an environment variable,
then it will be used when querying.
"""
if 'OPEN311_API_KEY' in os.environ:
api_key = os.environ['OPEN311_API_KEY']
else:
api_key = ''
return api_key
def configure(self, endpoint=None, **kwargs):
"""Configure a previously initialized instance of the class."""
if endpoint:
kwargs['endpoint'] = endpoint
keywords = self._keywords.copy()
keywords.update(kwargs)
if 'endpoint' in kwargs:
# Then we need to correctly format the endpoint.
endpoint = kwargs['endpoint']
keywords['endpoint'] = self._configure_endpoint(endpoint)
self.api_key = keywords['api_key'] or self._global_api_key()
self.endpoint = keywords['endpoint']
self.format = keywords['format'] or 'json'
self.jurisdiction = keywords['jurisdiction']
self.proxy = keywords['proxy']
self.discovery_url = keywords['discovery'] or None
# Use a custom requests session and set the correct SSL version if
# specified.
self.session = requests.Session()
if 'ssl_version' in keywords:
self.session.mount('https://', SSLAdapter(keywords['ssl_version']))
def _configure_endpoint(self, endpoint):
"""Configure the endpoint with a schema and end slash."""
if not endpoint.startswith('http'):
endpoint = 'https://' + endpoint
if not endpoint.endswith('/'):
endpoint += '/'
return endpoint
def reset(self):
"""Reset the class back to the original keywords and values."""
self.configure()
def _create_path(self, *args):
"""Create URL path for endpoint and args."""
args = filter(None, args)
path = self.endpoint + '/'.join(args) + '.%s' % (self.format)
return path
def get(self, *args, **kwargs):
"""Perform a get request."""
if 'convert' in kwargs:
conversion = kwargs.pop('convert')
else:
conversion = True
kwargs = self._get_keywords(**kwargs)
url = self._create_path(*args)
request = self.session.get(url, params=kwargs)
content = request.content
self._request = request
return self.convert(content, conversion)
def _get_keywords(self, **kwargs):
"""Format GET request parameters and keywords."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'count' in kwargs:
kwargs['page_size'] = kwargs.pop('count')
if 'start' in kwargs:
start = kwargs.pop('start')
if 'end' in kwargs:
end = kwargs.pop('end')
else:
end = date.today().strftime('%m-%d-%Y')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
elif 'between' in kwargs:
start, end = kwargs.pop('between')
start, end = self._format_dates(start, end)
kwargs['start_date'] = start
kwargs['end_date'] = end
return kwargs
def _format_dates(self, start, end):
"""Format start and end dates."""
start = self._split_date(start)
end = self._split_date(end)
return start, end
def _split_date(self, time):
"""Split apart a date string."""
if isinstance(time, str):
month, day, year = [int(t) for t in re.split(r'-|/', time)]
if year < 100:
# Quick hack for dates < 2000.
year += 2000
time = date(year, month, day)
return time.strftime('%Y-%m-%dT%H:%M:%SZ')
def convert(self, content, conversion):
"""Convert content to Python data structures."""
if not conversion:
data = content
elif self.format == 'json':
data = json.loads(content)
elif self.format == 'xml':
content = xml(content)
first = list(content.keys())[0]
data = content[first]
else:
data = content
return data
def discovery(self, url=None):
"""
Retrieve the standard discovery file that provides routing
information.
>>> Three().discovery()
{'discovery': 'data'}
"""
if url:
data = self.session.get(url).content
elif self.discovery_url:
response = self.session.get(self.discovery_url)
if self.format == 'xml':
# Because, SF doesn't follow the spec.
data = xml(response.text)
else:
# Spec calls for discovery always allowing JSON.
data = response.json()
else:
data = self.get('discovery')
return data
def services(self, code=None, **kwargs):
"""
Retrieve information about available services. You can also enter a
specific service code argument.
>>> Three().services()
{'all': {'service_code': 'data'}}
>>> Three().services('033')
{'033': {'service_code': 'data'}}
"""
data = self.get('services', code, **kwargs)
return data
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data
def request(self, id, **kwargs):
"""
Retrieve a specific request using its service code ID.
>>> Three('api.city.gov').request('12345')
{'request': {'service_code': {'12345': 'data'}}}
"""
data = self.get('requests', id, **kwargs)
return data
def post(self, service_code='0', **kwargs):
"""
Post a new Open311 request.
>>> t = Three('api.city.gov')
>>> t.post('123', address='123 Any St', name='Zach Williams',
... phone='555-5555', description='My issue description.',
... media=open('photo.png', 'rb'))
{'successful': {'request': 'post'}}
"""
kwargs['service_code'] = service_code
kwargs = self._post_keywords(**kwargs)
media = kwargs.pop('media', None)
if media:
files = {'media': media}
else:
files = None
url = self._create_path('requests')
self.post_response = self.session.post(url,
data=kwargs, files=files)
content = self.post_response.content
if self.post_response.status_code >= 500:
conversion = False
else:
conversion = True
return self.convert(content, conversion)
def _post_keywords(self, **kwargs):
"""Configure keyword arguments for Open311 POST requests."""
if self.jurisdiction and 'jurisdiction_id' not in kwargs:
kwargs['jurisdiction_id'] = self.jurisdiction
if 'address' in kwargs:
address = kwargs.pop('address')
kwargs['address_string'] = address
if 'name' in kwargs:
first, last = kwargs.pop('name').split(' ')
kwargs['first_name'] = first
kwargs['last_name'] = last
if 'api_key' not in kwargs:
kwargs['api_key'] = self.api_key
return kwargs
|
malramsay64/experi
|
src/experi/scheduler.py
|
parse_setup
|
python
|
def parse_setup(options: Union[List, str]) -> str:
if isinstance(options, str):
return options
return "\n".join(options)
|
Convert potentially a list of commands into a single string.
This creates a single string with newlines between each element of the list
so that they will all run after each other in a bash script.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/scheduler.py#L267-L276
| null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Generate scheduler files for submission on batch systems.
This will generate a .pbs file with all the information required to run a single command
from the list of commands. The variables will be generated and iterated over using the
job array feature of pbs. """
import logging
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Dict, List, Union
from .commands import Job
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
SCHEDULER_TEMPLATE = """
cd "{workdir}"
{setup}
COMMAND={command_list}
bash -c ${{COMMAND[{array_index}]}}
"""
class SchedulerOptions(ABC):
name: str = "Experi_Job"
resources: OrderedDict
time: OrderedDict
project: str = ""
log_dir: str = ""
email: str = ""
leftovers: OrderedDict
def __init__(self, **kwargs) -> None:
# Initialise data structures with default values
self.resources = OrderedDict(select=1, ncpus=1)
self.time = OrderedDict(walltime="1:00")
self.leftovers = OrderedDict()
for key, value in kwargs.items():
if key in ["name"]:
self.name = value
elif key in ["select", "nodes"]:
self.resources["select"] = value
elif key in ["ncpus", "cpus"]:
self.resources["ncpus"] = value
elif key in ["mem", "memory"]:
self.resources["mem"] = value
elif key in ["gpus", "ngpus"]:
self.resources["ngpus"] = value
elif key in ["walltime", "cputime"]:
self.time[key] = value
elif key in ["project", "account"]:
self.project = value
elif key in ["log", "logs", "output"]:
self.log_dir = value
elif key in ["email", "mail"]:
if isinstance(value, list):
self.email = ",".join(value)
else:
self.email = value
else:
self.leftovers[key] = value
def create_header(self) -> str:
header_string = "#!/bin/bash\n"
header_string += self.get_name()
header_string += self.get_resources()
header_string += self.get_times()
header_string += self.get_logging()
header_string += self.get_mail()
header_string += self.get_arbitrary_keys()
return header_string
@abstractmethod
def get_name(self):
pass
@abstractmethod
def get_resources(self):
pass
@abstractmethod
def get_times(self):
pass
@abstractmethod
def get_logging(self):
pass
@abstractmethod
def get_mail(self):
pass
@abstractmethod
def get_arbitrary_keys(self):
pass
class ShellOptions(SchedulerOptions):
def get_resources(self) -> str:
resource_str = ":".join(
["{}={}".format(key, val) for key, val in self.resources.items()]
)
return "#SHELL Resources: {}\n".format(resource_str)
def get_times(self) -> str:
time_str = ":".join(
["{}={}".format(key, val) for key, val in self.time.items()]
)
return "#SHELL Time Resources: {}\n".format(time_str)
def get_project(self) -> str:
return "#SHELL Project: {}\n".format(self.project)
def get_logging(self) -> str:
return "#SHELL Log: {}\n".format(self.log_dir)
def get_arbitrary_keys(self) -> str:
output = ""
for key, val in self.leftovers.items():
output += "#SHELL {}: {}\n".format(key, val)
return output
def get_name(self) -> str:
return "#SHELL Name: {}\n".format(self.name)
def get_mail(self) -> str:
if self.email:
return "#SHELL Email: {}\n".format(self.email)
return ""
def create_header(self) -> str:
header_string = "#!/bin/bash\n"
header_string += self.get_name()
header_string += self.get_resources()
header_string += self.get_times()
header_string += self.get_logging()
header_string += self.get_mail()
header_string += self.get_arbitrary_keys()
return header_string
class PBSOptions(SchedulerOptions):
def get_resources(self) -> str:
resource_str = ":".join(
["{}={}".format(key, val) for key, val in self.resources.items()]
)
return "#PBS -l {}\n".format(resource_str)
def get_times(self) -> str:
time_str = ":".join(
["{}={}".format(key, val) for key, val in self.time.items()]
)
return "#PBS -l {}\n".format(time_str)
def get_project(self) -> str:
if self.project:
return "#PBS -P {}\n".format(self.project)
return ""
def get_logging(self) -> str:
if self.log_dir:
log_str = "#PBS -o {}\n".format(self.log_dir)
# Join the output and the error to the one file
# This is what I would consider a sensible default value since it is the
# same as what you would see in the terminal and is the default behaviour in
# slurm
log_str += "#PBS -j oe\n"
return log_str
return ""
def get_arbitrary_keys(self) -> str:
output = ""
for key, val in self.leftovers.items():
if len(key) > 1:
output += "#PBS --{} {}\n".format(key, val)
else:
output += "#PBS -{} {}\n".format(key, val)
return output
def get_name(self) -> str:
return "#PBS -N {}\n".format(self.name)
def get_mail(self) -> str:
if self.email:
email_str = "#PBS -M {}\n".format(self.email)
# Email when the job is finished
# This is a sensible default value, providing a notification in the form of
# an email when a job is complete and further investigation is required.
email_str += "#PBS -m ae\n"
return email_str
return ""
class SLURMOptions(SchedulerOptions):
def get_resources(self) -> str:
resource_str = "#SBATCH --cpus-per-task {}\n".format(
self.resources.get("ncpus")
)
if self.resources.get("mem"):
resource_str += "#SBATCH --mem-per-task {}\n".format(self.resources["mem"])
if self.resources.get("ngpus"):
resource_str += "#SBATCH --gres=gpu:{}\n".format(self.resources["ngpus"])
return resource_str
def get_times(self) -> str:
return "#SBATCH --time {}\n".format(self.time["walltime"])
def get_project(self) -> str:
if self.project:
return "#SBATCH --account {}\n".format(self.project)
return ""
def get_logging(self) -> str:
if self.log_dir:
log_str = "#SBATCH --output {}/slurm-%A_%a.out\n".format(self.log_dir)
return log_str
return ""
def get_arbitrary_keys(self) -> str:
output = ""
for key, val in self.leftovers.items():
if len(key) > 1:
output += "#SBATCH --{} {}\n".format(key, val)
else:
output += "#SBATCH -{} {}\n".format(key, val)
return output
def get_name(self) -> str:
return "#SBATCH --job-name {}\n".format(self.name)
def get_mail(self) -> str:
if self.email:
email_str = "#SBATCH --mail-user {}\n".format(self.email)
# Email when the job is finished
# This is a sensible default value, providing a notification in the form of
# an email when a job is complete and further investigation is required.
email_str += "#SBATCH --mail-type END,FAIL\n"
return email_str
return ""
def create_header_string(scheduler: str, **kwargs) -> str:
assert isinstance(scheduler, str)
if scheduler.upper() == "PBS":
return PBSOptions(**kwargs).create_header()
if scheduler.upper() == "SLURM":
return SLURMOptions(**kwargs).create_header()
raise ValueError("Scheduler needs to be one of PBS or SLURM.")
def get_array_string(scheduler: str, num_commands: int) -> str:
if scheduler.upper() == "SLURM":
if num_commands > 1:
header_string = "#SBATCH -J 0-{}\n".format(num_commands - 1)
else:
header_string = "SLURM_ARRAY_TASK_ID=0\n"
elif scheduler.upper() == "PBS":
if num_commands > 1:
header_string = "#PBS -J 0-{}\n".format(num_commands - 1)
else:
header_string = "PBS_ARRAY_INDEX=0\n"
else:
raise ValueError("scheduler not recognised, must be one of [pbs|slurm]")
return header_string
def create_scheduler_file(scheduler: str, job: Job) -> str:
"""Substitute values into a template scheduler file."""
logger.debug("Create Scheduler File Function")
if job.scheduler_options is None:
scheduler_options: Dict[str, Any] = {}
else:
scheduler_options = deepcopy(job.scheduler_options)
try:
setup_string = parse_setup(scheduler_options["setup"])
del scheduler_options["setup"]
except KeyError:
setup_string = ""
# Create header
header_string = create_header_string(scheduler, **scheduler_options)
header_string += get_array_string(scheduler, len(job))
if scheduler.upper() == "SLURM":
workdir = r"$SLURM_SUBMIT_DIR"
array_index = r"$SLURM_ARRAY_TASK_ID"
elif scheduler.upper() == "PBS":
workdir = r"$PBS_O_WORKDIR"
array_index = r"$PBS_ARRAY_INDEX"
return header_string + SCHEDULER_TEMPLATE.format(
workdir=workdir,
command_list=job.as_bash_array(),
setup=setup_string,
array_index=array_index,
)
|
malramsay64/experi
|
src/experi/scheduler.py
|
create_scheduler_file
|
python
|
def create_scheduler_file(scheduler: str, job: Job) -> str:
logger.debug("Create Scheduler File Function")
if job.scheduler_options is None:
scheduler_options: Dict[str, Any] = {}
else:
scheduler_options = deepcopy(job.scheduler_options)
try:
setup_string = parse_setup(scheduler_options["setup"])
del scheduler_options["setup"]
except KeyError:
setup_string = ""
# Create header
header_string = create_header_string(scheduler, **scheduler_options)
header_string += get_array_string(scheduler, len(job))
if scheduler.upper() == "SLURM":
workdir = r"$SLURM_SUBMIT_DIR"
array_index = r"$SLURM_ARRAY_TASK_ID"
elif scheduler.upper() == "PBS":
workdir = r"$PBS_O_WORKDIR"
array_index = r"$PBS_ARRAY_INDEX"
return header_string + SCHEDULER_TEMPLATE.format(
workdir=workdir,
command_list=job.as_bash_array(),
setup=setup_string,
array_index=array_index,
)
|
Substitute values into a template scheduler file.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/scheduler.py#L304-L333
|
[
"def parse_setup(options: Union[List, str]) -> str:\n \"\"\"Convert potentially a list of commands into a single string.\n\n This creates a single string with newlines between each element of the list\n so that they will all run after each other in a bash script.\n\n \"\"\"\n if isinstance(options, str):\n return options\n return \"\\n\".join(options)\n",
"def create_header_string(scheduler: str, **kwargs) -> str:\n assert isinstance(scheduler, str)\n if scheduler.upper() == \"PBS\":\n return PBSOptions(**kwargs).create_header()\n if scheduler.upper() == \"SLURM\":\n return SLURMOptions(**kwargs).create_header()\n raise ValueError(\"Scheduler needs to be one of PBS or SLURM.\")\n",
"def get_array_string(scheduler: str, num_commands: int) -> str:\n if scheduler.upper() == \"SLURM\":\n if num_commands > 1:\n header_string = \"#SBATCH -J 0-{}\\n\".format(num_commands - 1)\n else:\n header_string = \"SLURM_ARRAY_TASK_ID=0\\n\"\n elif scheduler.upper() == \"PBS\":\n if num_commands > 1:\n header_string = \"#PBS -J 0-{}\\n\".format(num_commands - 1)\n else:\n header_string = \"PBS_ARRAY_INDEX=0\\n\"\n else:\n raise ValueError(\"scheduler not recognised, must be one of [pbs|slurm]\")\n return header_string\n",
"def as_bash_array(self) -> str:\n \"\"\"Return a representation as a bash array.\n\n This creates a string formatted as a bash array containing all the commands in the job.\n\n \"\"\"\n return_string = \"( \\\\\\n\"\n for command in self:\n return_string += '\"' + str(command) + '\" \\\\\\n'\n return_string += \")\"\n return return_string\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Generate scheduler files for submission on batch systems.
This will generate a .pbs file with all the information required to run a single command
from the list of commands. The variables will be generated and iterated over using the
job array feature of pbs. """
import logging
from abc import ABC, abstractmethod
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Dict, List, Union
from .commands import Job
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
SCHEDULER_TEMPLATE = """
cd "{workdir}"
{setup}
COMMAND={command_list}
bash -c ${{COMMAND[{array_index}]}}
"""
class SchedulerOptions(ABC):
name: str = "Experi_Job"
resources: OrderedDict
time: OrderedDict
project: str = ""
log_dir: str = ""
email: str = ""
leftovers: OrderedDict
def __init__(self, **kwargs) -> None:
# Initialise data structures with default values
self.resources = OrderedDict(select=1, ncpus=1)
self.time = OrderedDict(walltime="1:00")
self.leftovers = OrderedDict()
for key, value in kwargs.items():
if key in ["name"]:
self.name = value
elif key in ["select", "nodes"]:
self.resources["select"] = value
elif key in ["ncpus", "cpus"]:
self.resources["ncpus"] = value
elif key in ["mem", "memory"]:
self.resources["mem"] = value
elif key in ["gpus", "ngpus"]:
self.resources["ngpus"] = value
elif key in ["walltime", "cputime"]:
self.time[key] = value
elif key in ["project", "account"]:
self.project = value
elif key in ["log", "logs", "output"]:
self.log_dir = value
elif key in ["email", "mail"]:
if isinstance(value, list):
self.email = ",".join(value)
else:
self.email = value
else:
self.leftovers[key] = value
def create_header(self) -> str:
header_string = "#!/bin/bash\n"
header_string += self.get_name()
header_string += self.get_resources()
header_string += self.get_times()
header_string += self.get_logging()
header_string += self.get_mail()
header_string += self.get_arbitrary_keys()
return header_string
@abstractmethod
def get_name(self):
pass
@abstractmethod
def get_resources(self):
pass
@abstractmethod
def get_times(self):
pass
@abstractmethod
def get_logging(self):
pass
@abstractmethod
def get_mail(self):
pass
@abstractmethod
def get_arbitrary_keys(self):
pass
class ShellOptions(SchedulerOptions):
def get_resources(self) -> str:
resource_str = ":".join(
["{}={}".format(key, val) for key, val in self.resources.items()]
)
return "#SHELL Resources: {}\n".format(resource_str)
def get_times(self) -> str:
time_str = ":".join(
["{}={}".format(key, val) for key, val in self.time.items()]
)
return "#SHELL Time Resources: {}\n".format(time_str)
def get_project(self) -> str:
return "#SHELL Project: {}\n".format(self.project)
def get_logging(self) -> str:
return "#SHELL Log: {}\n".format(self.log_dir)
def get_arbitrary_keys(self) -> str:
output = ""
for key, val in self.leftovers.items():
output += "#SHELL {}: {}\n".format(key, val)
return output
def get_name(self) -> str:
return "#SHELL Name: {}\n".format(self.name)
def get_mail(self) -> str:
if self.email:
return "#SHELL Email: {}\n".format(self.email)
return ""
def create_header(self) -> str:
header_string = "#!/bin/bash\n"
header_string += self.get_name()
header_string += self.get_resources()
header_string += self.get_times()
header_string += self.get_logging()
header_string += self.get_mail()
header_string += self.get_arbitrary_keys()
return header_string
class PBSOptions(SchedulerOptions):
def get_resources(self) -> str:
resource_str = ":".join(
["{}={}".format(key, val) for key, val in self.resources.items()]
)
return "#PBS -l {}\n".format(resource_str)
def get_times(self) -> str:
time_str = ":".join(
["{}={}".format(key, val) for key, val in self.time.items()]
)
return "#PBS -l {}\n".format(time_str)
def get_project(self) -> str:
if self.project:
return "#PBS -P {}\n".format(self.project)
return ""
def get_logging(self) -> str:
if self.log_dir:
log_str = "#PBS -o {}\n".format(self.log_dir)
# Join the output and the error to the one file
# This is what I would consider a sensible default value since it is the
# same as what you would see in the terminal and is the default behaviour in
# slurm
log_str += "#PBS -j oe\n"
return log_str
return ""
def get_arbitrary_keys(self) -> str:
output = ""
for key, val in self.leftovers.items():
if len(key) > 1:
output += "#PBS --{} {}\n".format(key, val)
else:
output += "#PBS -{} {}\n".format(key, val)
return output
def get_name(self) -> str:
return "#PBS -N {}\n".format(self.name)
def get_mail(self) -> str:
if self.email:
email_str = "#PBS -M {}\n".format(self.email)
# Email when the job is finished
# This is a sensible default value, providing a notification in the form of
# an email when a job is complete and further investigation is required.
email_str += "#PBS -m ae\n"
return email_str
return ""
class SLURMOptions(SchedulerOptions):
def get_resources(self) -> str:
resource_str = "#SBATCH --cpus-per-task {}\n".format(
self.resources.get("ncpus")
)
if self.resources.get("mem"):
resource_str += "#SBATCH --mem-per-task {}\n".format(self.resources["mem"])
if self.resources.get("ngpus"):
resource_str += "#SBATCH --gres=gpu:{}\n".format(self.resources["ngpus"])
return resource_str
def get_times(self) -> str:
return "#SBATCH --time {}\n".format(self.time["walltime"])
def get_project(self) -> str:
if self.project:
return "#SBATCH --account {}\n".format(self.project)
return ""
def get_logging(self) -> str:
if self.log_dir:
log_str = "#SBATCH --output {}/slurm-%A_%a.out\n".format(self.log_dir)
return log_str
return ""
def get_arbitrary_keys(self) -> str:
output = ""
for key, val in self.leftovers.items():
if len(key) > 1:
output += "#SBATCH --{} {}\n".format(key, val)
else:
output += "#SBATCH -{} {}\n".format(key, val)
return output
def get_name(self) -> str:
return "#SBATCH --job-name {}\n".format(self.name)
def get_mail(self) -> str:
if self.email:
email_str = "#SBATCH --mail-user {}\n".format(self.email)
# Email when the job is finished
# This is a sensible default value, providing a notification in the form of
# an email when a job is complete and further investigation is required.
email_str += "#SBATCH --mail-type END,FAIL\n"
return email_str
return ""
def parse_setup(options: Union[List, str]) -> str:
"""Convert potentially a list of commands into a single string.
This creates a single string with newlines between each element of the list
so that they will all run after each other in a bash script.
"""
if isinstance(options, str):
return options
return "\n".join(options)
def create_header_string(scheduler: str, **kwargs) -> str:
assert isinstance(scheduler, str)
if scheduler.upper() == "PBS":
return PBSOptions(**kwargs).create_header()
if scheduler.upper() == "SLURM":
return SLURMOptions(**kwargs).create_header()
raise ValueError("Scheduler needs to be one of PBS or SLURM.")
def get_array_string(scheduler: str, num_commands: int) -> str:
if scheduler.upper() == "SLURM":
if num_commands > 1:
header_string = "#SBATCH -J 0-{}\n".format(num_commands - 1)
else:
header_string = "SLURM_ARRAY_TASK_ID=0\n"
elif scheduler.upper() == "PBS":
if num_commands > 1:
header_string = "#PBS -J 0-{}\n".format(num_commands - 1)
else:
header_string = "PBS_ARRAY_INDEX=0\n"
else:
raise ValueError("scheduler not recognised, must be one of [pbs|slurm]")
return header_string
|
malramsay64/experi
|
src/experi/commands.py
|
Command.get_variables
|
python
|
def get_variables(self) -> Set[str]:
variables = set()
for cmd in self._cmd:
for var in self.__formatter.parse(cmd):
logger.debug("Checking variable: %s", var)
# creates and requires are special class values
if var[1] is not None and var[1] not in ["creates", "requires"]:
variables.add(var[1])
return variables
|
Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/commands.py#L53-L67
| null |
class Command:
"""A command to be run for an experiment."""
_cmd: List[str]
variables: Dict[str, Any]
_creates: str = ""
_requires: str = ""
__formatter = Formatter()
def __init__(
self,
cmd: Union[List[str], str],
variables: Dict[str, Any] = None,
creates: str = "",
requires: str = "",
) -> None:
self.cmd = cmd
if variables is not None:
self.variables = variables
else:
self.variables = {}
# variables in cmd are a subset of those passed in
if self.get_variables() > set(self.variables.keys()):
logger.debug("Command Keys: %s", self.get_variables())
logger.debug("Variables Keys: %s", set(self.variables.keys()))
# Find missing variables
missing_vars = self.get_variables() - set(self.variables.keys())
raise ValueError(f"The following variables have no value: {missing_vars}")
self._creates = creates
self._requires = requires
@property
def creates(self) -> str:
return self._creates.format(**self.variables)
@property
def requires(self) -> str:
return self._requires.format(**self.variables)
@property
def cmd(self) -> List[str]:
return [self._format_string(cmd) for cmd in self._cmd]
@cmd.setter
def cmd(self, value) -> None:
if isinstance(value, str):
self._cmd = [value]
else:
self._cmd = list(value)
def _format_string(self, string: str) -> str:
return string.format(
creates=self.creates, requires=self.requires, **self.variables
)
def __iter__(self):
yield from self.cmd
def __str__(self) -> str:
return " && ".join(self.cmd).strip()
def __eq__(self, other) -> bool:
if isinstance(other, type(self)):
return self.cmd == other.cmd
return False
def __hash__(self):
return hash(tuple(self.cmd))
|
malramsay64/experi
|
src/experi/commands.py
|
Job.as_bash_array
|
python
|
def as_bash_array(self) -> str:
return_string = "( \\\n"
for command in self:
return_string += '"' + str(command) + '" \\\n'
return_string += ")"
return return_string
|
Return a representation as a bash array.
This creates a string formatted as a bash array containing all the commands in the job.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/commands.py#L140-L150
| null |
class Job:
"""A task to perform within a simulation."""
commands: List[Command]
shell: str = "bash"
scheduler_options: Optional[Dict[str, Any]] = None
use_dependencies: bool = False
directory: Optional[Path] = None
def __init__(
self, commands, scheduler_options=None, directory=None, use_dependencies=False
) -> None:
if use_dependencies and directory is None:
raise ValueError("Directory must be set when overwrite is False.")
self.commands = commands
self.scheduler_options = scheduler_options
self.directory = directory
self.use_dependencies = use_dependencies
def __iter__(self):
if self.use_dependencies and self.directory is None:
raise ValueError("Directory must be set when overwrite is False.")
for command in self.commands:
if self.use_dependencies and (self.directory / command.creates).is_file():
# This file already exists, we don't need to create it again
continue
yield command
def __len__(self) -> int:
return len(list(self.__iter__()))
|
malramsay64/experi
|
src/experi/run.py
|
combine_dictionaries
|
python
|
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
return dict(ChainMap(*dicts))
|
Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L39-L46
| null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
)
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
if stop and not start:
return np.arange(stop)
return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
assert parent is not None
if isinstance(variables, (int, float)):
yield [{parent: i} for i in np.arange(variables)]
elif isinstance(variables, dict):
if variables.get("stop"):
yield [{parent: i} for i in arange(**variables)]
else:
raise ValueError(f"Stop is a required keyword for the arange iterator.")
else:
raise ValueError(
f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def variable_matrix(
variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
"""Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input.
"""
_iters: Dict[str, Callable] = {"product": product, "zip": zip}
_special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
"zip": iterator_zip,
"product": iterator_product,
"arange": iterator_arange,
"chain": iterator_chain,
"append": iterator_chain,
"cycle": iterator_cycle,
"repeat": iterator_cycle,
}
if isinstance(variables, dict):
key_vars: List[List[Dict[str, YamlValue]]] = []
# Handling of specialised iterators
for key, function in _special_keys.items():
if variables.get(key):
item = variables[key]
assert item is not None
for val in function(item, parent):
key_vars.append(val)
del variables[key]
for key, value in variables.items():
key_vars.append(list(variable_matrix(value, key, iterator)))
logger.debug("key vars: %s", key_vars)
# Iterate through all possible products generating a dictionary
for i in _iters[iterator](*key_vars):
logger.debug("dicts: %s", i)
yield combine_dictionaries(i)
# Iterate through a list of values
elif isinstance(variables, list):
for item in variables:
yield from variable_matrix(item, parent, iterator)
# Stopping condition -> we have either a single value from a list
# or a value had only one item
else:
assert parent is not None
yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
"""Remove duplicate entries in a list retaining order."""
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)]
def process_jobs(
jobs: List[Dict],
matrix: VarMatrix,
scheduler_options: Dict[str, Any] = None,
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
assert jobs is not None
logger.debug("Found %d jobs in file", len(jobs))
for job in jobs:
command = job.get("command")
assert command is not None
yield Job(
process_command(command, matrix),
scheduler_options,
directory,
use_dependencies,
)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
"""Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
"""
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure
def process_structure(
structure: Dict[str, Any],
scheduler: str = "shell",
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
input_variables = structure.get("variables")
if input_variables is None:
raise KeyError('The key "variables" was not found in the input file.')
assert isinstance(input_variables, Dict)
# create variable matrix
variables = list(variable_matrix(input_variables))
assert variables
# Check for scheduler options
scheduler_options: Dict[str, YamlValue] = {}
if structure.get("scheduler"):
new_options = structure.get("scheduler")
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
if structure.get(scheduler):
new_options = structure.get(scheduler)
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
assert isinstance(scheduler_options, dict)
if structure.get("name"):
name = structure.get("name")
assert isinstance(name, str)
# set the name attribute in scheduler to global name if no name defined
scheduler_options.setdefault("name", name)
jobs_dict = structure.get("jobs")
if jobs_dict is None:
input_command = structure.get("command")
if isinstance(input_command, list):
jobs_dict = [{"command": cmd} for cmd in input_command]
else:
jobs_dict = [{"command": input_command}]
yield from process_jobs(
jobs_dict, variables, scheduler_options, directory, use_dependencies
)
def run_jobs(
jobs: Iterator[Job],
scheduler: str = "shell",
directory=Path.cwd(),
dry_run: bool = False,
) -> None:
if scheduler == "shell":
run_bash_jobs(jobs, directory, dry_run=dry_run)
elif scheduler in ["pbs", "slurm"]:
run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
else:
raise ValueError(
f"Scheduler '{scheduler}'was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
)
def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
"""Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
"""
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return
def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
"""Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively the aditional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
"""
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break
def determine_scheduler(
scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
"""Determine the scheduler to use to run the jobs."""
# Scheduler value from command line has first priority
if scheduler is not None:
if scheduler in ["shell", "pbs", "slurm"]:
return scheduler
raise ValueError(
"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
)
# Next priority goes to the experiment.yml file
if experiment_definition.get("pbs"):
return "pbs"
if experiment_definition.get("slurm"):
return "slurm"
if experiment_definition.get("shell"):
return "shell"
# Final priority goes to the auto-discovery
if shutil.which("pbs") is not None:
return "pbs"
if shutil.which("slurm") is not None:
return "slurm"
# Default if nothing else is found goes to shell
return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
# This function provides an API to access experi's functionality from within
# python scripts, as an alternative to the command-line interface
# Process and run commands
input_file = Path(input_file)
structure = read_file(input_file)
scheduler = determine_scheduler(scheduler, structure)
jobs = process_structure(
structure, scheduler, Path(input_file.parent), use_dependencies
)
run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Command-line entry point; a thin wrapper delegating to :func:`launch`."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
iterator_zip
|
python
|
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip"))
|
Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L49-L66
|
[
"def variable_matrix(\n variables: VarType, parent: str = None, iterator: str = \"product\"\n) -> Iterable[Dict[str, YamlValue]]:\n \"\"\"Process the variables into a list of the appropriate combinations.\n\n This function performs recursive processing of the input variables, creating an\n iterator which has all the combinations of variables specified in the input.\n\n \"\"\"\n _iters: Dict[str, Callable] = {\"product\": product, \"zip\": zip}\n _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {\n \"zip\": iterator_zip,\n \"product\": iterator_product,\n \"arange\": iterator_arange,\n \"chain\": iterator_chain,\n \"append\": iterator_chain,\n \"cycle\": iterator_cycle,\n \"repeat\": iterator_cycle,\n }\n\n if isinstance(variables, dict):\n key_vars: List[List[Dict[str, YamlValue]]] = []\n\n # Handling of specialised iterators\n for key, function in _special_keys.items():\n if variables.get(key):\n item = variables[key]\n assert item is not None\n for val in function(item, parent):\n key_vars.append(val)\n\n del variables[key]\n\n for key, value in variables.items():\n key_vars.append(list(variable_matrix(value, key, iterator)))\n\n logger.debug(\"key vars: %s\", key_vars)\n\n # Iterate through all possible products generating a dictionary\n for i in _iters[iterator](*key_vars):\n logger.debug(\"dicts: %s\", i)\n yield combine_dictionaries(i)\n\n # Iterate through a list of values\n elif isinstance(variables, list):\n for item in variables:\n yield from variable_matrix(item, parent, iterator)\n\n # Stopping condition -> we have either a single value from a list\n # or a value had only one item\n else:\n assert parent is not None\n yield {parent: variables}\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
# NOTE(review): forcing DEBUG at import time overrides the host application's
# logging configuration — confirm this is intentional for a library module.
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]  # anything accepted where a filesystem path is needed
YamlValue = Union[str, int, float]  # scalar values that may appear in the YAML input
CommandInput = Union[str, Dict[str, YamlValue]]  # a command string or its mapping form
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]  # raw "variables" entry
VarMatrix = List[Dict[str, YamlValue]]  # expanded list of variable combinations
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Flatten a sequence of dictionaries into a single dictionary.

    On duplicate keys the dictionary appearing earliest in *dicts* wins,
    matching the lookup order of ``collections.ChainMap``.
    """
    merged: Dict[str, Any] = {}
    # Apply later dicts first so earlier ones overwrite them on collision.
    for mapping in reversed(dicts):
        merged.update(mapping)
    return merged
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Expand a set of variables with the product iterator.

    Explicit spelling of the default behaviour: all possible combinations of
    the variables are generated via the python itertools.product iterator.

    Args:
        variables: The variables object; must be a mapping.
        parent: Unused

    Raises:
        ValueError: When *variables* is a list instead of a mapping.
    """
    logger.debug("Yielding from product iterator")
    if not isinstance(variables, list):
        yield list(variable_matrix(variables, parent, "product"))
        return
    raise ValueError(
        f"Product only takes mappings of values, got {variables} of type {type(variables)}"
    )
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Successively append each element of an array to a single list of values.

    This takes a list of values and puts all the values generated for each element in
    the list into a single list of values. It uses the :func:`itertools.chain` function to
    achieve this. This function is particularly useful for specifying multiple types of
    simulations with different parameters.

    Args:
        variables: The variables object; must be a list.
        parent: Unused

    Raises:
        ValueError: When *variables* is not a list.
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Create a single list containing all the values: each element is expanded
    # with the product iterator and the results are flattened into one matrix.
    yield list(
        chain.from_iterable(
            variable_matrix(item, parent, "product") for item in variables
        )
    )
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Keyword-friendly wrapper around :func:`numpy.arange`.

    ``numpy.arange`` does not accept ``start=None``, so missing values are
    normalised to their numeric defaults before delegating.

    Args:
        start: First value of the range; defaults to 0.
        stop: End of the range (exclusive); required by the callers.
        step: Spacing between values; defaults to 1.
        dtype: Optional dtype forwarded to numpy.

    Returns:
        The generated :class:`numpy.ndarray`.
    """
    # BUG FIX: the old short-circuit ``if stop and not start`` returned
    # ``np.arange(stop)`` and silently discarded any ``step``/``dtype``
    # the caller supplied (e.g. ``arange(stop=10, step=2)``).
    if start is None:
        start = 0
    if step is None:
        step = 1
    return np.arange(start, stop, step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: Either a bare number (used as ``stop``) or a mapping of
            keyword arguments for :func:`numpy.arange`; ``stop`` is required.
        parent: The variable for which the values are being generated.

    Yields:
        A list of dictionaries mapping the parent to each generated value.

    Raises:
        ValueError: When *variables* is a mapping without ``stop``, or is
            neither a number nor a mapping.
    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        # A bare number is shorthand for ``stop``.
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # BUG FIX: membership test rather than truthiness, so ``stop: 0``
        # (a legal, empty range) is not misreported as a missing keyword.
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: A mapping containing a ``times`` count plus the variable
            definitions to repeat.
        parent: The variable for which the values are being generated.

    Yields:
        The expanded variable matrix repeated ``times`` times.

    Raises:
        ValueError: When *variables* is not a mapping or lacks ``times``.
    """
    if isinstance(variables, dict):
        # BUG FIX: membership test instead of truthiness (``times: 0`` was
        # misreported as missing).
        if "times" in variables:
            # BUG FIX: work on a copy so the caller's mapping is not mutated;
            # the old code ``del variables["times"]`` changed the input in place.
            remaining = dict(variables)
            times = int(remaining.pop("times"))
            yield list(variable_matrix(remaining, parent, "product")) * times
        else:
            raise ValueError("times is a required keyword for the repeat iterator.")
    else:
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.

    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.

    Args:
        variables: A mapping, list or scalar from the experiment definition.
        parent: The key a scalar value is recorded under; ``None`` at the top level.
        iterator: How sibling value lists of a mapping are merged, either
            "product" (all combinations) or "zip" (aligned nth elements).
    """
    # How sibling per-key value lists are merged.
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    # Reserved keys dispatching to specialised expansion helpers; note that
    # "chain"/"append" and "cycle"/"repeat" are aliases of the same helpers.
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        for key, function in _special_keys.items():
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                # NOTE(review): this mutates the caller's mapping in place, so
                # expanding the same dict twice gives different results —
                # consider operating on a copy instead.
                del variables[key]
        # Every remaining key is expanded recursively into its own value list.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Return the elements of *my_list* with duplicates dropped, order kept."""
    if sys.version_info < (3, 6):
        # Pre-3.6 fallback: track what has already been emitted in a side set.
        seen: set = set()
        unique = []
        for element in my_list:
            if element not in seen:
                seen.add(element)
                unique.append(element)
        return unique
    # dicts keep insertion order on CPython 3.6 (documented behaviour from
    # 3.7), so the keys of a fromkeys() dict are the ordered unique elements.
    return list(dict.fromkeys(my_list))
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Optional[Dict[str, Any]] = None,
    directory: Optional[Path] = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Convert the raw job mappings from the input file into :class:`Job` objects.

    Args:
        jobs: The list of job definitions; each entry must carry a "command" key.
        matrix: The expanded variable matrix applied to every command.
        scheduler_options: Options forwarded to each :class:`Job`.
        directory: Directory the jobs run in.
        use_dependencies: Forwarded to :class:`Job`.

    Yields:
        One :class:`Job` per entry of *jobs*.
    """
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job in jobs:
        command = job.get("command")
        # Every job definition must carry a command.
        assert command is not None
        yield Job(
            process_command(command, matrix),
            scheduler_options,
            directory,
            use_dependencies,
        )
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Generate all combinations of commands given a variable matrix.

    Processes the commands to be sequences of strings.

    Args:
        command: Either the command itself (a string or list of strings) or a
            mapping with a "command"/"cmd" entry plus optional "creates" and
            "requires" values.
        matrix: The expanded variable combinations substituted into the command.

    Returns:
        The de-duplicated list of :class:`Command` objects, one per unique
        variable combination.
    """
    assert command is not None
    if isinstance(command, (str, list)):
        # Bare commands: the str and list forms were previously two identical
        # branches; one Command per variable combination.
        command_list = [Command(command, variables=variables) for variables in matrix]
    else:
        # Mapping form: "command" takes precedence over the "cmd" shorthand
        # (single lookup instead of the old double .get("command") call).
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        command_list = [
            Command(cmd, variables, creates, requires) for variables in matrix
        ]
    # Identical commands (e.g. a variable absent from the format string)
    # collapse to a single entry while keeping order.
    return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read and parse yaml file.

    Args:
        filename: Path of the experiment definition to load.

    Returns:
        The parsed document as returned by :func:`yaml.safe_load` —
        typically a mapping describing the experiment structure.
    """
    logger.debug("Input file: %s", filename)
    with open(filename, "r") as stream:
        structure = yaml.safe_load(stream)
    return structure
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Optional[Path] = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Turn a parsed experiment document into a stream of :class:`Job` objects.

    Args:
        structure: The parsed experiment file; must contain "variables" and
            either "jobs" or "command".
        scheduler: The scheduler the jobs will be run with; its section of the
            file contributes scheduler options.
        directory: Directory the jobs run in.
        use_dependencies: Forwarded to job construction.

    Raises:
        KeyError: When the "variables" key is missing from *structure*.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)
    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables
    # Check for scheduler options: the generic "scheduler" section is applied
    # first, then the scheduler-specific section so it can override.
    scheduler_options: Dict[str, YamlValue] = {}
    if structure.get("scheduler"):
        new_options = structure.get("scheduler")
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    if structure.get(scheduler):
        new_options = structure.get(scheduler)
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    assert isinstance(scheduler_options, dict)
    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)
    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # Fall back to a top-level "command"; a list of commands becomes a
        # sequence of single-command jobs.
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]
    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
    jobs: Iterator[Job],
    scheduler: str = "shell",
    directory=None,
    dry_run: bool = False,
) -> None:
    """Dispatch the jobs to the requested execution backend.

    Args:
        jobs: The jobs to run.
        scheduler: One of "shell", "pbs" or "slurm".
        directory: Directory to run in; defaults to the current working
            directory at call time.
        dry_run: Show the commands instead of executing/submitting them.

    Raises:
        ValueError: When *scheduler* is not one of the supported values.
    """
    # BUG FIX: the old default ``directory=Path.cwd()`` was evaluated once at
    # import time, freezing whatever directory the module was imported from.
    if directory is None:
        directory = Path.cwd()
    if scheduler == "shell":
        run_bash_jobs(jobs, directory, dry_run=dry_run)
    elif scheduler in ["pbs", "slurm"]:
        run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
    else:
        # BUG FIX: added the missing space after the quoted scheduler name.
        raise ValueError(
            f"Scheduler '{scheduler}' was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
        )
def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.

    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.

    Args:
        jobs: The jobs whose commands are executed.
        directory: Working directory for the subprocesses.
            NOTE(review): this default is evaluated once at import time —
            confirm callers always pass a directory.
        dry_run: Print each command instead of running it.

    Raises:
        ProcessLookupError: When a job's shell executable cannot be found.
    """
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    # A non-zero exit aborts the remaining variable
                    # combinations of this command.
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            # Stop the whole run once any command group has failed.
            if failed:
                logger.error("A command failed, not continuing further.")
                return
def run_scheduler_jobs(
    scheduler: str,
    jobs: Iterator[Job],
    directory: PathLike = Path.cwd(),
    basename: str = "experi",
    dry_run: bool = False,
) -> None:
    """Submit a series of commands to a batch scheduler.

    This takes a list of strings which are the contents of the pbs files, writes the
    files to disk and submits the job to the scheduler. Files which match the pattern of
    the resulting files <basename>_<index>.pbs are deleted before writing the new files.

    To ensure that commands run consecutively the aditional requirement to the run
    script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
    of the experiment to be conducted in a single script.

    Note: Having this function submit jobs requires that the submission command
    (`qsub` for pbs, `sbatch` for slurm) exists, implying that a job scheduler
    is installed.

    Args:
        scheduler: "pbs" or "slurm".
        jobs: The jobs to write out and submit.
        directory: Where the scheduler files are written.
            NOTE(review): this default is evaluated once at import time.
        basename: Prefix of the generated scheduler files.
        dry_run: Print the submission commands instead of running them.

    Raises:
        ValueError: When *scheduler* is neither "pbs" nor "slurm".
    """
    submit_job = True
    logger.debug("Creating commands in %s files.", scheduler)
    # Check scheduler submit command exists
    if scheduler == "pbs":
        submit_executable = "qsub"
    elif scheduler == "slurm":
        submit_executable = "sbatch"
    else:
        raise ValueError("scheduler can only take values ['pbs', 'slurm']")
    if shutil.which(submit_executable) is None:
        # BUG FIX: the implicit string concatenation lacked a separating
        # space, logging "...is not found.Skipping job submission...".
        logger.warning(
            "The `%s` command is not found. "
            "Skipping job submission and just generating files",
            submit_executable,
        )
        submit_job = False
    # Ensure directory is a Path
    directory = Path(directory)
    # remove existing files
    for fname in directory.glob(basename + f"*.{scheduler}"):
        print("Removing {}".format(fname))
        os.remove(str(fname))
    # Write new files and generate commands
    prev_jobids: List[str] = []
    for index, job in enumerate(jobs):
        # Generate scheduler file
        content = create_scheduler_file(scheduler, job)
        logger.debug("File contents:\n%s", content)
        # Write file to disk
        fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
        with fname.open("w") as dst:
            dst.write(content)
        if submit_job or dry_run:
            # Construct command
            submit_cmd = [submit_executable]
            if prev_jobids:
                # Continue to append all previous jobs to submit_cmd so subsequent jobs die along
                # with the first.
                afterok = f"afterok:{':'.join(prev_jobids)}"
                if scheduler == "pbs":
                    submit_cmd += ["-W", f"depend={afterok}"]
                elif scheduler == "slurm":
                    submit_cmd += ["--dependency", afterok]
            # actually run the command
            logger.info(str(submit_cmd))
            try:
                if dry_run:
                    print(f"{submit_cmd} {fname.name}")
                    prev_jobids.append("dry_run")
                else:
                    cmd_res = subprocess.check_output(
                        submit_cmd + [fname.name], cwd=str(directory)
                    )
                    prev_jobids.append(cmd_res.decode().strip())
            except subprocess.CalledProcessError:
                logger.error("Submitting job to the queue failed.")
                break
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Determine the scheduler to use to run the jobs.

    Priority order: the explicit command-line value, then the presence of a
    scheduler section in the experiment file, then auto-discovery of the
    scheduler's submission executable on PATH, and finally the shell.

    Args:
        scheduler: Value from the command line, or None when not given.
        experiment_definition: The parsed experiment file.

    Returns:
        One of "shell", "pbs" or "slurm".

    Raises:
        ValueError: When *scheduler* is given but not a supported value.
    """
    # Scheduler value from command line has first priority
    if scheduler is not None:
        if scheduler in ["shell", "pbs", "slurm"]:
            return scheduler
        raise ValueError(
            "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
        )
    # Next priority goes to the experiment.yml file
    if experiment_definition.get("pbs"):
        return "pbs"
    if experiment_definition.get("slurm"):
        return "slurm"
    if experiment_definition.get("shell"):
        return "shell"
    # Final priority goes to the auto-discovery.
    # BUG FIX: probe for the actual submission executables (qsub/sbatch, the
    # same commands run_scheduler_jobs invokes); there is no binary named
    # "pbs" or "slurm", so the old checks could never succeed.
    if shutil.which("qsub") is not None:
        return "pbs"
    if shutil.which("sbatch") is not None:
        return "slurm"
    # Default if nothing else is found goes to shell
    return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
    input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
    """Read an experiment file, expand its jobs and run them.

    This function provides an API to access experi's functionality from within
    python scripts, as an alternative to the command-line interface.

    Args:
        input_file: Path to the YAML experiment definition; jobs run relative
            to the directory containing this file.
        use_dependencies: Use the dependencies specified in the command to
            reduce the processing (forwarded to job construction).
        dry_run: Show the commands that would be run without executing them.
        scheduler: "shell", "pbs" or "slurm"; auto-detected when None.
    """
    # Process and run commands
    input_file = Path(input_file)
    structure = read_file(input_file)
    scheduler = determine_scheduler(scheduler, structure)
    jobs = process_structure(
        structure, scheduler, Path(input_file.parent), use_dependencies
    )
    run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Command-line entry point; a thin wrapper delegating to :func:`launch`."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
iterator_product
|
python
|
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product"))
|
Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L69-L87
|
[
"def variable_matrix(\n variables: VarType, parent: str = None, iterator: str = \"product\"\n) -> Iterable[Dict[str, YamlValue]]:\n \"\"\"Process the variables into a list of the appropriate combinations.\n\n This function performs recursive processing of the input variables, creating an\n iterator which has all the combinations of variables specified in the input.\n\n \"\"\"\n _iters: Dict[str, Callable] = {\"product\": product, \"zip\": zip}\n _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {\n \"zip\": iterator_zip,\n \"product\": iterator_product,\n \"arange\": iterator_arange,\n \"chain\": iterator_chain,\n \"append\": iterator_chain,\n \"cycle\": iterator_cycle,\n \"repeat\": iterator_cycle,\n }\n\n if isinstance(variables, dict):\n key_vars: List[List[Dict[str, YamlValue]]] = []\n\n # Handling of specialised iterators\n for key, function in _special_keys.items():\n if variables.get(key):\n item = variables[key]\n assert item is not None\n for val in function(item, parent):\n key_vars.append(val)\n\n del variables[key]\n\n for key, value in variables.items():\n key_vars.append(list(variable_matrix(value, key, iterator)))\n\n logger.debug(\"key vars: %s\", key_vars)\n\n # Iterate through all possible products generating a dictionary\n for i in _iters[iterator](*key_vars):\n logger.debug(\"dicts: %s\", i)\n yield combine_dictionaries(i)\n\n # Iterate through a list of values\n elif isinstance(variables, list):\n for item in variables:\n yield from variable_matrix(item, parent, iterator)\n\n # Stopping condition -> we have either a single value from a list\n # or a value had only one item\n else:\n assert parent is not None\n yield {parent: variables}\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
# NOTE(review): forcing DEBUG at import time overrides the host application's
# logging configuration — confirm this is intentional for a library module.
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]  # anything accepted where a filesystem path is needed
YamlValue = Union[str, int, float]  # scalar values that may appear in the YAML input
CommandInput = Union[str, Dict[str, YamlValue]]  # a command string or its mapping form
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]  # raw "variables" entry
VarMatrix = List[Dict[str, YamlValue]]  # expanded list of variable combinations
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Flatten a sequence of dictionaries into a single dictionary.

    On duplicate keys the dictionary appearing earliest in *dicts* wins,
    matching the lookup order of ``collections.ChainMap``.
    """
    merged: Dict[str, Any] = {}
    # Apply later dicts first so earlier ones overwrite them on collision.
    for mapping in reversed(dicts):
        merged.update(mapping)
    return merged
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Expand a set of variables with the zip iterator.

    Uses the python zip iterator so the nth values of the variable lists are
    aligned with each other instead of forming every combination.

    Args:
        variables: The variables object; a list yields one matrix per element.
        parent: Unused
    """
    logger.debug("Yielding from zip iterator")
    # Normalise to a list so the list and single-mapping cases share one loop.
    entries = variables if isinstance(variables, list) else [variables]
    for entry in entries:
        yield list(variable_matrix(entry, parent, "zip"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Successively append each element of an array to a single list of values.

    This takes a list of values and puts all the values generated for each element in
    the list into a single list of values. It uses the :func:`itertools.chain` function to
    achieve this. This function is particularly useful for specifying multiple types of
    simulations with different parameters.

    Args:
        variables: The variables object; must be a list.
        parent: Unused

    Raises:
        ValueError: When *variables* is not a list.
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Create a single list containing all the values: each element is expanded
    # with the product iterator and the results are flattened into one matrix.
    yield list(
        chain.from_iterable(
            variable_matrix(item, parent, "product") for item in variables
        )
    )
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Keyword-friendly wrapper around :func:`numpy.arange`.

    ``numpy.arange`` does not accept ``start=None``, so missing values are
    normalised to their numeric defaults before delegating.

    Args:
        start: First value of the range; defaults to 0.
        stop: End of the range (exclusive); required by the callers.
        step: Spacing between values; defaults to 1.
        dtype: Optional dtype forwarded to numpy.

    Returns:
        The generated :class:`numpy.ndarray`.
    """
    # BUG FIX: the old short-circuit ``if stop and not start`` returned
    # ``np.arange(stop)`` and silently discarded any ``step``/``dtype``
    # the caller supplied (e.g. ``arange(stop=10, step=2)``).
    if start is None:
        start = 0
    if step is None:
        step = 1
    return np.arange(start, stop, step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: Either a bare number (used as ``stop``) or a mapping of
            keyword arguments for :func:`numpy.arange`; ``stop`` is required.
        parent: The variable for which the values are being generated.

    Yields:
        A list of dictionaries mapping the parent to each generated value.

    Raises:
        ValueError: When *variables* is a mapping without ``stop``, or is
            neither a number nor a mapping.
    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        # A bare number is shorthand for ``stop``.
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # BUG FIX: membership test rather than truthiness, so ``stop: 0``
        # (a legal, empty range) is not misreported as a missing keyword.
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: A mapping containing a ``times`` count plus the variable
            definitions to repeat.
        parent: The variable for which the values are being generated.

    Yields:
        The expanded variable matrix repeated ``times`` times.

    Raises:
        ValueError: When *variables* is not a mapping or lacks ``times``.
    """
    if isinstance(variables, dict):
        # BUG FIX: membership test instead of truthiness (``times: 0`` was
        # misreported as missing).
        if "times" in variables:
            # BUG FIX: work on a copy so the caller's mapping is not mutated;
            # the old code ``del variables["times"]`` changed the input in place.
            remaining = dict(variables)
            times = int(remaining.pop("times"))
            yield list(variable_matrix(remaining, parent, "product")) * times
        else:
            raise ValueError("times is a required keyword for the repeat iterator.")
    else:
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.

    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.

    Args:
        variables: A mapping, list or scalar from the experiment definition.
        parent: The key a scalar value is recorded under; ``None`` at the top level.
        iterator: How sibling value lists of a mapping are merged, either
            "product" (all combinations) or "zip" (aligned nth elements).
    """
    # How sibling per-key value lists are merged.
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    # Reserved keys dispatching to specialised expansion helpers; note that
    # "chain"/"append" and "cycle"/"repeat" are aliases of the same helpers.
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        for key, function in _special_keys.items():
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                # NOTE(review): this mutates the caller's mapping in place, so
                # expanding the same dict twice gives different results —
                # consider operating on a copy instead.
                del variables[key]
        # Every remaining key is expanded recursively into its own value list.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Return the elements of *my_list* with duplicates dropped, order kept."""
    if sys.version_info < (3, 6):
        # Pre-3.6 fallback: track what has already been emitted in a side set.
        seen: set = set()
        unique = []
        for element in my_list:
            if element not in seen:
                seen.add(element)
                unique.append(element)
        return unique
    # dicts keep insertion order on CPython 3.6 (documented behaviour from
    # 3.7), so the keys of a fromkeys() dict are the ordered unique elements.
    return list(dict.fromkeys(my_list))
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Optional[Dict[str, Any]] = None,
    directory: Optional[Path] = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Convert the raw job mappings from the input file into :class:`Job` objects.

    Args:
        jobs: The list of job definitions; each entry must carry a "command" key.
        matrix: The expanded variable matrix applied to every command.
        scheduler_options: Options forwarded to each :class:`Job`.
        directory: Directory the jobs run in.
        use_dependencies: Forwarded to :class:`Job`.

    Yields:
        One :class:`Job` per entry of *jobs*.
    """
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job in jobs:
        command = job.get("command")
        # Every job definition must carry a command.
        assert command is not None
        yield Job(
            process_command(command, matrix),
            scheduler_options,
            directory,
            use_dependencies,
        )
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Generate all combinations of commands given a variable matrix.

    Processes the commands to be sequences of strings.

    Args:
        command: Either the command itself (a string or list of strings) or a
            mapping with a "command"/"cmd" entry plus optional "creates" and
            "requires" values.
        matrix: The expanded variable combinations substituted into the command.

    Returns:
        The de-duplicated list of :class:`Command` objects, one per unique
        variable combination.
    """
    assert command is not None
    if isinstance(command, (str, list)):
        # Bare commands: the str and list forms were previously two identical
        # branches; one Command per variable combination.
        command_list = [Command(command, variables=variables) for variables in matrix]
    else:
        # Mapping form: "command" takes precedence over the "cmd" shorthand
        # (single lookup instead of the old double .get("command") call).
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        command_list = [
            Command(cmd, variables, creates, requires) for variables in matrix
        ]
    # Identical commands (e.g. a variable absent from the format string)
    # collapse to a single entry while keeping order.
    return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read and parse yaml file.

    Args:
        filename: Path of the experiment definition to load.

    Returns:
        The parsed document as returned by :func:`yaml.safe_load` —
        typically a mapping describing the experiment structure.
    """
    logger.debug("Input file: %s", filename)
    with open(filename, "r") as stream:
        structure = yaml.safe_load(stream)
    return structure
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Optional[Path] = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Turn a parsed experiment document into a stream of :class:`Job` objects.

    Args:
        structure: The parsed experiment file; must contain "variables" and
            either "jobs" or "command".
        scheduler: The scheduler the jobs will be run with; its section of the
            file contributes scheduler options.
        directory: Directory the jobs run in.
        use_dependencies: Forwarded to job construction.

    Raises:
        KeyError: When the "variables" key is missing from *structure*.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)
    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables
    # Check for scheduler options: the generic "scheduler" section is applied
    # first, then the scheduler-specific section so it can override.
    scheduler_options: Dict[str, YamlValue] = {}
    if structure.get("scheduler"):
        new_options = structure.get("scheduler")
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    if structure.get(scheduler):
        new_options = structure.get(scheduler)
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    assert isinstance(scheduler_options, dict)
    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)
    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # Fall back to a top-level "command"; a list of commands becomes a
        # sequence of single-command jobs.
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]
    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
    jobs: Iterator[Job],
    scheduler: str = "shell",
    directory=None,
    dry_run: bool = False,
) -> None:
    """Dispatch the jobs to the requested execution backend.

    Args:
        jobs: The jobs to run.
        scheduler: One of "shell", "pbs" or "slurm".
        directory: Directory to run in; defaults to the current working
            directory at call time.
        dry_run: Show the commands instead of executing/submitting them.

    Raises:
        ValueError: When *scheduler* is not one of the supported values.
    """
    # BUG FIX: the old default ``directory=Path.cwd()`` was evaluated once at
    # import time, freezing whatever directory the module was imported from.
    if directory is None:
        directory = Path.cwd()
    if scheduler == "shell":
        run_bash_jobs(jobs, directory, dry_run=dry_run)
    elif scheduler in ["pbs", "slurm"]:
        run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
    else:
        # BUG FIX: added the missing space after the quoted scheduler name.
        raise ValueError(
            f"Scheduler '{scheduler}' was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
        )
def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.

    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.

    Args:
        jobs: The jobs whose commands are executed.
        directory: Working directory for the subprocesses.
            NOTE(review): this default is evaluated once at import time —
            confirm callers always pass a directory.
        dry_run: Print each command instead of running it.

    Raises:
        ProcessLookupError: When a job's shell executable cannot be found.
    """
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    # A non-zero exit aborts the remaining variable
                    # combinations of this command.
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            # Stop the whole run once any command group has failed.
            if failed:
                logger.error("A command failed, not continuing further.")
                return
def run_scheduler_jobs(
    scheduler: str,
    jobs: Iterator[Job],
    directory: PathLike = Path.cwd(),
    basename: str = "experi",
    dry_run: bool = False,
) -> None:
    """Submit a series of commands to a batch scheduler.
    This takes a list of strings which are the contents of the pbs files, writes the
    files to disk and submits the job to the scheduler. Files which match the pattern of
    the resulting files <basename>_<index>.pbs are deleted before writing the new files.
    To ensure that commands run consecutively the aditional requirement to the run
    script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
    of the experiment to be conducted in a single script.
    Note: Having this function submit jobs requires that the command `qsub` exists,
    implying that a job scheduler is installed.
    """
    submit_job = True
    logger.debug("Creating commands in %s files.", scheduler)
    # Check scheduler submit command exists
    if scheduler == "pbs":
        submit_executable = "qsub"
    elif scheduler == "slurm":
        submit_executable = "sbatch"
    else:
        raise ValueError("scheduler can only take values ['pbs', 'slurm']")
    if shutil.which(submit_executable) is None:
        # Fall back to only generating the files when no scheduler is
        # installed (useful for inspecting the generated scripts).
        # NOTE(review): the two format-string halves concatenate without a
        # space ("not found.Skipping") — cosmetic log defect.
        logger.warning(
            "The `%s` command is not found."
            "Skipping job submission and just generating files",
            submit_executable,
        )
        submit_job = False
    # Ensure directory is a Path
    directory = Path(directory)
    # remove existing files
    for fname in directory.glob(basename + f"*.{scheduler}"):
        print("Removing {}".format(fname))
        os.remove(str(fname))
    # Write new files and generate commands
    prev_jobids: List[str] = []
    for index, job in enumerate(jobs):
        # Generate scheduler file
        content = create_scheduler_file(scheduler, job)
        logger.debug("File contents:\n%s", content)
        # Write file to disk
        fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
        with fname.open("w") as dst:
            dst.write(content)
        if submit_job or dry_run:
            # Construct command
            submit_cmd = [submit_executable]
            if prev_jobids:
                # Continue to append all previous jobs to submit_cmd so subsequent jobs die along
                # with the first.
                afterok = f"afterok:{':'.join(prev_jobids)}"
                if scheduler == "pbs":
                    submit_cmd += ["-W", f"depend={afterok}"]
                elif scheduler == "slurm":
                    submit_cmd += ["--dependency", afterok]
            # actually run the command
            logger.info(str(submit_cmd))
            try:
                if dry_run:
                    print(f"{submit_cmd} {fname.name}")
                    prev_jobids.append("dry_run")
                else:
                    # The scheduler prints the new job id on stdout; record
                    # it so the next job can depend on it.
                    cmd_res = subprocess.check_output(
                        submit_cmd + [fname.name], cwd=str(directory)
                    )
                    prev_jobids.append(cmd_res.decode().strip())
            except subprocess.CalledProcessError:
                # Stop submitting further jobs once one submission fails.
                logger.error("Submitting job to the queue failed.")
                break
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Determine the scheduler to use to run the jobs.

    Resolution order:
        1. the *scheduler* argument (typically from the command line),
        2. a "pbs", "slurm" or "shell" key in the experiment definition,
        3. auto-discovery of a scheduler submission executable on PATH,
        4. fallback to "shell".

    Raises:
        ValueError: When *scheduler* is given but not a supported value.

    """
    # Scheduler value from command line has first priority
    if scheduler is not None:
        if scheduler in ["shell", "pbs", "slurm"]:
            return scheduler
        raise ValueError(
            "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
        )
    # Next priority goes to the experiment.yml file
    if experiment_definition.get("pbs"):
        return "pbs"
    if experiment_definition.get("slurm"):
        return "slurm"
    if experiment_definition.get("shell"):
        return "shell"
    # Final priority goes to auto-discovery. Probe for each scheduler's
    # actual submission executable (the same ones run_scheduler_jobs
    # invokes); neither distribution installs a binary named "pbs" or
    # "slurm", so the original checks could never succeed.
    if shutil.which("qsub") is not None:
        return "pbs"
    if shutil.which("sbatch") is not None:
        return "slurm"
    # Default if nothing else is found goes to shell
    return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
    input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
    """Python API entry point mirroring the command-line interface.

    Reads the experiment definition, resolves which scheduler to use, then
    builds and runs the jobs from the directory containing *input_file*.
    """
    experiment_path = Path(input_file)
    definition = read_file(experiment_path)
    chosen_scheduler = determine_scheduler(scheduler, definition)
    job_iter = process_structure(
        definition, chosen_scheduler, Path(experiment_path.parent), use_dependencies
    )
    run_jobs(job_iter, chosen_scheduler, experiment_path.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
    will be run from the directory in which the file exists, not the directory the
    script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Command-line entry point; forwards the parsed options to launch()."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
iterator_chain
|
python
|
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
)
|
This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L90-L114
| null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge a list of dictionaries into a single dictionary.

    On key collisions the value from the earliest dictionary in the list
    wins, matching :class:`collections.ChainMap` lookup order.
    """
    merged: Dict[str, Any] = {}
    # Apply last-to-first so earlier dictionaries overwrite later ones.
    for mapping in reversed(dicts):
        merged.update(mapping)
    return merged
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the zip operator to a set of variables.

    This uses the python zip iterator to combine multiple lists of variables
    such that the nth variable in each list is aligned.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from zip iterator")
    # A list zips each item independently; any other value is zipped whole.
    entries = variables if isinstance(variables, list) else [variables]
    for entry in entries:
        yield list(variable_matrix(entry, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the product operator to a set of variables.
    This uses the python itertools.product iterator to combine multiple variables
    such that all possible combinations are generated. This is the default iterator
    however this is a method of manually specifying the option.
    Args:
        variables: The variables object
        parent: Unused
    Raises:
        ValueError: When *variables* is a list; a product is only defined
            over a mapping (or scalar) of values.
    """
    logger.debug("Yielding from product iterator")
    if isinstance(variables, list):
        raise ValueError(
            f"Product only takes mappings of values, got {variables} of type {type(variables)}"
        )
    yield list(variable_matrix(variables, parent, "product"))
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Wrapper around :func:`numpy.arange` accepting keyword-only ranges.

    Unlike ``np.arange`` this accepts ``start=None`` (treated as 0) and
    ``step=None`` (treated as 1) so it can be called directly with the
    keyword mapping parsed from the yaml input.
    """
    # Fixed: the original short-circuit `np.arange(stop)` when start was
    # falsy silently dropped any step/dtype supplied alongside it (and
    # also misfired for an explicit start=0). Normalise defaults instead.
    if start is None:
        start = 0
    if step is None:
        step = 1
    return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: The input variables for the creation of the range.
            Either a single number (used as the ``stop`` value) or a dict
            of keyword arguments for :func:`arange`, which must include
            ``stop``.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: When *variables* is neither a number nor a dict, or is
            a dict without a ``stop`` key.

    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # Fixed: membership test instead of truthiness — `stop: 0` is a
        # valid (empty) range and must not be reported as missing.
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            # (Also dropped a stray f-prefix on this placeholder-free string.)
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: Mapping of variables to cycle; must contain a ``times``
            key giving the number of repetitions.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: When *variables* is not a dict or lacks a truthy
            ``times`` key.

    """
    if isinstance(variables, dict):
        if variables.get("times"):
            # Fixed: operate on a shallow copy so the caller's mapping
            # keeps its "times" key; the original deleted it in place,
            # breaking any re-processing of the same structure.
            remainder = dict(variables)
            times = int(remainder.pop("times"))
            yield list(variable_matrix(remainder, parent, "product")) * times
        else:
            # (Also dropped a stray f-prefix on this placeholder-free string.)
            raise ValueError("times is a required keyword for the repeat iterator.")
    else:
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.
    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.
    """
    # Combinators applied across the plain (non-special) keys of a mapping.
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    # Keys with dedicated expansion semantics; several entries are aliases
    # for the same handler.
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        for key, function in _special_keys.items():
            # NOTE(review): truthiness test — a falsy value (0, "", [])
            # under a special key is silently ignored; confirm intended.
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                # NOTE(review): this mutates the caller's mapping; a second
                # pass over the same structure would not see this key.
                del variables[key]
        # Remaining keys are expanded recursively with the current combinator.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Remove duplicate entries in a list retaining order."""
    if sys.version_info >= (3, 6):
        # dict preserves insertion order on CPython 3.6+ (documented from
        # 3.7), giving an O(n) order-preserving de-duplication.
        return list(dict.fromkeys(my_list))
    # Fallback for older interpreters: remember what has been seen so far.
    observed = set()
    result = []
    for element in my_list:
        if element not in observed:
            observed.add(element)
            result.append(element)
    return result
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Dict[str, Any] = None,
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Yield a Job for each job mapping, expanded over the variable matrix."""
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job_spec in jobs:
        # Every job mapping must carry a "command" entry.
        job_command = job_spec.get("command")
        assert job_command is not None
        expanded_commands = process_command(job_command, matrix)
        yield Job(expanded_commands, scheduler_options, directory, use_dependencies)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Generate all combinations of commands given a variable matrix.

    Processes the commands to be sequences of strings.
    """
    assert command is not None
    if isinstance(command, (str, list)):
        # A bare string or list carries no creates/requires information.
        expanded = [Command(command, variables=variables) for variables in matrix]
    else:
        # A mapping may spell the command under either "command" or "cmd".
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        expanded = [
            Command(cmd, variables, creates, requires) for variables in matrix
        ]
    return uniqueify(expanded)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read and parse yaml file."""
    logger.debug("Input file: %s", filename)
    # Parse with the safe loader; the file controls the whole experiment
    # so arbitrary object construction is never needed.
    with open(filename, "r") as stream:
        return yaml.safe_load(stream)
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Convert a parsed experiment definition into an iterator of jobs.

    Expands the "variables" section into the full variable matrix, merges
    scheduler options from the generic "scheduler" key and the
    scheduler-specific key (the latter winning), then yields a Job for each
    entry under "jobs" — or jobs synthesised from a top-level "command"
    when no "jobs" section exists.

    Raises:
        KeyError: When the definition has no "variables" section.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)
    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables
    # Check for scheduler options
    scheduler_options: Dict[str, YamlValue] = {}
    if structure.get("scheduler"):
        new_options = structure.get("scheduler")
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    # Scheduler-specific options (e.g. a "pbs:" section) override generic ones.
    if structure.get(scheduler):
        new_options = structure.get(scheduler)
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    assert isinstance(scheduler_options, dict)
    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)
    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # Fall back to a top-level "command": a list becomes one job per
        # command, anything else a single job.
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]
    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
jobs: Iterator[Job],
scheduler: str = "shell",
directory=Path.cwd(),
dry_run: bool = False,
) -> None:
if scheduler == "shell":
run_bash_jobs(jobs, directory, dry_run=dry_run)
elif scheduler in ["pbs", "slurm"]:
run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
else:
raise ValueError(
f"Scheduler '{scheduler}'was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
)
def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
"""Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
"""
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return
def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
"""Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively the aditional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
"""
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break
def determine_scheduler(
scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
"""Determine the scheduler to use to run the jobs."""
# Scheduler value from command line has first priority
if scheduler is not None:
if scheduler in ["shell", "pbs", "slurm"]:
return scheduler
raise ValueError(
"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
)
# Next priority goes to the experiment.yml file
if experiment_definition.get("pbs"):
return "pbs"
if experiment_definition.get("slurm"):
return "slurm"
if experiment_definition.get("shell"):
return "shell"
# Final priority goes to the auto-discovery
if shutil.which("pbs") is not None:
return "pbs"
if shutil.which("slurm") is not None:
return "slurm"
# Default if nothing else is found goes to shell
return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
# This function provides an API to access experi's functionality from within
# python scripts, as an alternative to the command-line interface
# Process and run commands
input_file = Path(input_file)
structure = read_file(input_file)
scheduler = determine_scheduler(scheduler, structure)
jobs = process_structure(
structure, scheduler, Path(input_file.parent), use_dependencies
)
run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
"-f",
"--input-file",
type=click.Path(exists=True, dir_okay=False),
default="experiment.yml",
help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
"-s",
"--scheduler",
type=click.Choice(["shell", "pbs", "slurm"]),
default=None,
help="The scheduler with which to run the jobs.",
)
@click.option(
"--use-dependencies",
default=False,
is_flag=True,
help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
"--dry-run",
is_flag=True,
default=False,
help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
"-v",
"--verbose",
callback=_set_verbosity,
expose_value=False,
count=True,
help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
iterator_arange
|
python
|
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
assert parent is not None
if isinstance(variables, (int, float)):
yield [{parent: i} for i in np.arange(variables)]
elif isinstance(variables, dict):
if variables.get("stop"):
yield [{parent: i} for i in arange(**variables)]
else:
raise ValueError(f"Stop is a required keyword for the arange iterator.")
else:
raise ValueError(
f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
)
|
Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L123-L146
|
[
"def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:\n if stop and not start:\n return np.arange(stop)\n return np.arange(start=start, stop=stop, step=step, dtype=dtype)\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
"""
return dict(ChainMap(*dicts))
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
)
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
if stop and not start:
return np.arange(stop)
return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def variable_matrix(
variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
"""Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input.
"""
_iters: Dict[str, Callable] = {"product": product, "zip": zip}
_special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
"zip": iterator_zip,
"product": iterator_product,
"arange": iterator_arange,
"chain": iterator_chain,
"append": iterator_chain,
"cycle": iterator_cycle,
"repeat": iterator_cycle,
}
if isinstance(variables, dict):
key_vars: List[List[Dict[str, YamlValue]]] = []
# Handling of specialised iterators
for key, function in _special_keys.items():
if variables.get(key):
item = variables[key]
assert item is not None
for val in function(item, parent):
key_vars.append(val)
del variables[key]
for key, value in variables.items():
key_vars.append(list(variable_matrix(value, key, iterator)))
logger.debug("key vars: %s", key_vars)
# Iterate through all possible products generating a dictionary
for i in _iters[iterator](*key_vars):
logger.debug("dicts: %s", i)
yield combine_dictionaries(i)
# Iterate through a list of values
elif isinstance(variables, list):
for item in variables:
yield from variable_matrix(item, parent, iterator)
# Stopping condition -> we have either a single value from a list
# or a value had only one item
else:
assert parent is not None
yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
"""Remove duplicate entries in a list retaining order."""
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)]
def process_jobs(
jobs: List[Dict],
matrix: VarMatrix,
scheduler_options: Dict[str, Any] = None,
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
assert jobs is not None
logger.debug("Found %d jobs in file", len(jobs))
for job in jobs:
command = job.get("command")
assert command is not None
yield Job(
process_command(command, matrix),
scheduler_options,
directory,
use_dependencies,
)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
"""Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
"""
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure
def process_structure(
structure: Dict[str, Any],
scheduler: str = "shell",
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
input_variables = structure.get("variables")
if input_variables is None:
raise KeyError('The key "variables" was not found in the input file.')
assert isinstance(input_variables, Dict)
# create variable matrix
variables = list(variable_matrix(input_variables))
assert variables
# Check for scheduler options
scheduler_options: Dict[str, YamlValue] = {}
if structure.get("scheduler"):
new_options = structure.get("scheduler")
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
if structure.get(scheduler):
new_options = structure.get(scheduler)
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
assert isinstance(scheduler_options, dict)
if structure.get("name"):
name = structure.get("name")
assert isinstance(name, str)
# set the name attribute in scheduler to global name if no name defined
scheduler_options.setdefault("name", name)
jobs_dict = structure.get("jobs")
if jobs_dict is None:
input_command = structure.get("command")
if isinstance(input_command, list):
jobs_dict = [{"command": cmd} for cmd in input_command]
else:
jobs_dict = [{"command": input_command}]
yield from process_jobs(
jobs_dict, variables, scheduler_options, directory, use_dependencies
)
def run_jobs(
jobs: Iterator[Job],
scheduler: str = "shell",
directory=Path.cwd(),
dry_run: bool = False,
) -> None:
if scheduler == "shell":
run_bash_jobs(jobs, directory, dry_run=dry_run)
elif scheduler in ["pbs", "slurm"]:
run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
else:
raise ValueError(
f"Scheduler '{scheduler}'was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
)
def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
"""Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
"""
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return
def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
"""Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively the aditional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
"""
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break
def determine_scheduler(
scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
"""Determine the scheduler to use to run the jobs."""
# Scheduler value from command line has first priority
if scheduler is not None:
if scheduler in ["shell", "pbs", "slurm"]:
return scheduler
raise ValueError(
"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
)
# Next priority goes to the experiment.yml file
if experiment_definition.get("pbs"):
return "pbs"
if experiment_definition.get("slurm"):
return "slurm"
if experiment_definition.get("shell"):
return "shell"
# Final priority goes to the auto-discovery
if shutil.which("pbs") is not None:
return "pbs"
if shutil.which("slurm") is not None:
return "slurm"
# Default if nothing else is found goes to shell
return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
# This function provides an API to access experi's functionality from within
# python scripts, as an alternative to the command-line interface
# Process and run commands
input_file = Path(input_file)
structure = read_file(input_file)
scheduler = determine_scheduler(scheduler, structure)
jobs = process_structure(
structure, scheduler, Path(input_file.parent), use_dependencies
)
run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
"-f",
"--input-file",
type=click.Path(exists=True, dir_okay=False),
default="experiment.yml",
help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
"-s",
"--scheduler",
type=click.Choice(["shell", "pbs", "slurm"]),
default=None,
help="The scheduler with which to run the jobs.",
)
@click.option(
"--use-dependencies",
default=False,
is_flag=True,
help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
"--dry-run",
is_flag=True,
default=False,
help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
"-v",
"--verbose",
callback=_set_verbosity,
expose_value=False,
count=True,
help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
iterator_cycle
|
python
|
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
)
|
Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L149-L171
|
[
"def variable_matrix(\n variables: VarType, parent: str = None, iterator: str = \"product\"\n) -> Iterable[Dict[str, YamlValue]]:\n \"\"\"Process the variables into a list of the appropriate combinations.\n\n This function performs recursive processing of the input variables, creating an\n iterator which has all the combinations of variables specified in the input.\n\n \"\"\"\n _iters: Dict[str, Callable] = {\"product\": product, \"zip\": zip}\n _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {\n \"zip\": iterator_zip,\n \"product\": iterator_product,\n \"arange\": iterator_arange,\n \"chain\": iterator_chain,\n \"append\": iterator_chain,\n \"cycle\": iterator_cycle,\n \"repeat\": iterator_cycle,\n }\n\n if isinstance(variables, dict):\n key_vars: List[List[Dict[str, YamlValue]]] = []\n\n # Handling of specialised iterators\n for key, function in _special_keys.items():\n if variables.get(key):\n item = variables[key]\n assert item is not None\n for val in function(item, parent):\n key_vars.append(val)\n\n del variables[key]\n\n for key, value in variables.items():\n key_vars.append(list(variable_matrix(value, key, iterator)))\n\n logger.debug(\"key vars: %s\", key_vars)\n\n # Iterate through all possible products generating a dictionary\n for i in _iters[iterator](*key_vars):\n logger.debug(\"dicts: %s\", i)\n yield combine_dictionaries(i)\n\n # Iterate through a list of values\n elif isinstance(variables, list):\n for item in variables:\n yield from variable_matrix(item, parent, iterator)\n\n # Stopping condition -> we have either a single value from a list\n # or a value had only one item\n else:\n assert parent is not None\n yield {parent: variables}\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Collapse a list of dictionaries into a single dictionary.

    Earlier entries in the list take priority on key collisions, matching
    the lookup semantics of ``collections.ChainMap``.
    """
    merged: Dict[str, Any] = {}
    # Updating in reverse order lets earlier dicts overwrite later ones,
    # which reproduces ChainMap's first-mapping-wins behaviour.
    for mapping in reversed(dicts):
        merged.update(mapping)
    return merged
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the zip operator to a set of variables.

    This uses the python zip iterator to combine multiple lists of variables such that
    the nth variable in each list is aligned.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from zip iterator")
    # A non-list input expands to a single aligned group; a list yields one
    # group per element.
    if not isinstance(variables, list):
        yield list(variable_matrix(variables, parent, "zip"))
        return
    for element in variables:
        yield list(variable_matrix(element, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the product operator to a set of variables.

    This uses the python itertools.product iterator to combine multiple variables
    such that all possible combinations are generated. This is the default iterator
    however this is a method of manually specifying the option.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from product iterator")
    # Only mapping (or scalar) inputs are valid for the product operator.
    if not isinstance(variables, list):
        yield list(variable_matrix(variables, parent, "product"))
        return
    raise ValueError(
        f"Product only takes mappings of values, got {variables} of type {type(variables)}"
    )
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """This successively appends each element of an array to a single list of values.

    This takes a list of values and puts all the values generated for each element in
    the list into a single list of values. This function is particularly useful for
    specifying multiple types of simulations with different parameters.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Flatten the expansion of every element into one combined list of values.
    combined: VarMatrix = []
    for element in variables:
        combined.extend(variable_matrix(element, parent, "product"))
    yield combined
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Wrapper around :func:`numpy.arange` that tolerates missing keywords.

    Unlike ``np.arange`` this accepts ``start``/``step`` being ``None`` (they
    default to 0 and 1), so it can be called directly with a keyword mapping
    parsed from YAML, e.g. ``arange(**{"stop": 10, "step": 2})``.

    Args:
        start: First value of the range; defaults to 0.
        stop: End of the range (exclusive).
        step: Spacing between values; defaults to 1.
        dtype: Optional dtype forwarded to :func:`numpy.arange`.

    Returns: The generated numpy array.
    """
    # BUG FIX: the previous implementation returned np.arange(stop) whenever
    # start was missing, silently discarding any supplied step or dtype.
    if start is None:
        start = 0
    if step is None:
        step = 1
    return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: Either a bare number (treated as ``stop``) or a mapping of
            keyword arguments (``start``/``stop``/``step``/``dtype``) for
            :func:`arange`. ``stop`` is mandatory in the mapping form.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: If a mapping is missing "stop", or the input is neither
            a number nor a mapping.
    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # BUG FIX: test for key presence rather than truthiness so that
        # stop=0 (a legitimate, empty range) is accepted.
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.

    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.

    Args:
        variables: A mapping, list or scalar value describing the variables.
        parent: The variable name the generated values belong to; must be
            set once recursion reaches a scalar value.
        iterator: How sibling variable lists are combined — "product"
            (itertools.product, the default) or "zip" (lockstep).

    Yields: Dictionaries mapping each variable name to one concrete value.
    """
    # Dispatch tables: combination strategies and the specialised operator
    # keywords ("zip", "arange", "cycle", ...) handled before plain keys.
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        # NOTE(review): `variables.get(key)` skips special keys with falsy
        # values, and `del variables[key]` mutates the caller's dict in
        # place — confirm callers do not reuse the mapping.
        for key, function in _special_keys.items():
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                del variables[key]
        # Remaining (non-special) keys recurse with the key as the parent.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Remove duplicate entries in a list retaining order."""
    if sys.version_info < (3, 6):
        # Old interpreters: track what has already been emitted.
        seen: set = set()
        unique = []
        for item in my_list:
            if item not in seen:
                seen.add(item)
                unique.append(item)
        return unique
    # dict preserves insertion order on 3.6 (implementation detail) and on
    # 3.7+ (documented), so fromkeys gives order-preserving de-duplication.
    return list(dict.fromkeys(my_list))
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Dict[str, Any] = None,
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Turn each job mapping into a :class:`Job` with its expanded commands.

    Args:
        jobs: A list of mappings, each holding a "command" entry.
        matrix: The expanded variable matrix used to instantiate commands.
        scheduler_options: Options forwarded to each Job.
        directory: Directory forwarded to each Job.
        use_dependencies: Dependency flag forwarded to each Job.

    Yields: One Job per entry in *jobs*.
    """
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job_spec in jobs:
        raw_command = job_spec.get("command")
        assert raw_command is not None
        expanded = process_command(raw_command, matrix)
        yield Job(expanded, scheduler_options, directory, use_dependencies)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Generate all combinations of commands given a variable matrix.

    Processes the commands to be sequences of strings.
    """
    assert command is not None
    if isinstance(command, (str, list)):
        # A bare string or list *is* the command, with no extra options;
        # both forms instantiate Command identically.
        expanded = [Command(command, variables=variables) for variables in matrix]
    else:
        # Mapping form: "command" (preferred) or "cmd" names the command,
        # with optional "creates"/"requires" file dependencies.
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        expanded = [
            Command(cmd, variables, creates, requires) for variables in matrix
        ]
    return uniqueify(expanded)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read and parse yaml file."""
    logger.debug("Input file: %s", filename)
    # safe_load avoids executing arbitrary tags from the (user-supplied) file.
    with open(filename, "r") as src:
        return yaml.safe_load(src)
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Expand a parsed experiment file into a stream of Job objects.

    Args:
        structure: The parsed contents of the experiment yaml file.
        scheduler: Scheduler name whose options section in the file
            ("pbs"/"slurm"/"shell") is merged on top of the generic
            "scheduler" section.
        directory: Directory the jobs should run in (passed through).
        use_dependencies: Passed through to enable dependency checks.

    Raises:
        KeyError: If the mandatory "variables" key is missing.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)
    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables
    # Check for scheduler options
    # Generic "scheduler" options first, then the scheduler-specific section
    # (e.g. "pbs") overrides them key by key.
    scheduler_options: Dict[str, YamlValue] = {}
    if structure.get("scheduler"):
        new_options = structure.get("scheduler")
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    if structure.get(scheduler):
        new_options = structure.get(scheduler)
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    assert isinstance(scheduler_options, dict)
    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)
    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # No explicit "jobs" section: synthesise one job per top-level
        # command (or a single job for a scalar "command" value).
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]
    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
    jobs: Iterator[Job],
    scheduler: str = "shell",
    directory=None,
    dry_run: bool = False,
) -> None:
    """Dispatch *jobs* to the requested scheduler backend.

    Args:
        jobs: The jobs to run.
        scheduler: One of "shell", "pbs" or "slurm".
        directory: Directory to run the jobs from; defaults to the current
            working directory at call time.
        dry_run: When True, only show what would be run.

    Raises:
        ValueError: If *scheduler* is not a recognised backend.
    """
    # BUG FIX: the previous `directory=Path.cwd()` default was evaluated
    # once at import time rather than reflecting the caller's cwd.
    if directory is None:
        directory = Path.cwd()
    if scheduler == "shell":
        run_bash_jobs(jobs, directory, dry_run=dry_run)
    elif scheduler in ["pbs", "slurm"]:
        run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
    else:
        # BUG FIX: the original message was missing the space after the
        # quoted scheduler name ("...'was not recognised").
        raise ValueError(
            f"Scheduler '{scheduler}' was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
        )
def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = None, dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.

    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.

    Args:
        jobs: The jobs whose commands should be executed.
        directory: Working directory for the commands; defaults to the
            current working directory at call time.
        dry_run: When True, print the commands instead of running them.

    Raises:
        ProcessLookupError: If a job's shell executable cannot be found.
    """
    # BUG FIX: the previous `directory=Path.cwd()` default was evaluated
    # once at import time rather than at call time.
    if directory is None:
        directory = Path.cwd()
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            if failed:
                # BUG FIX: previously only the innermost loop was exited, so
                # later commands of the same job still ran after a failure,
                # contrary to the documented behaviour above.
                break
        if failed:
            logger.error("A command failed, not continuing further.")
            return
def run_scheduler_jobs(
    scheduler: str,
    jobs: Iterator[Job],
    directory: PathLike = Path.cwd(),
    basename: str = "experi",
    dry_run: bool = False,
) -> None:
    """Submit a series of commands to a batch scheduler.

    This takes a list of strings which are the contents of the pbs files, writes the
    files to disk and submits the job to the scheduler. Files which match the pattern of
    the resulting files <basename>_<index>.pbs are deleted before writing the new files.

    To ensure that commands run consecutively the aditional requirement to the run
    script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
    of the experiment to be conducted in a single script.

    Note: Having this function submit jobs requires that the command `qsub` exists,
    implying that a job scheduler is installed.

    Args:
        scheduler: Either "pbs" or "slurm"; any other value raises ValueError.
        jobs: The jobs to convert into scheduler batch files.
        directory: Where batch files are written and jobs submitted from.
            NOTE(review): this default is evaluated once at import time.
        basename: Prefix for the generated batch file names.
        dry_run: When True, print submission commands instead of running them.
    """
    submit_job = True
    logger.debug("Creating commands in %s files.", scheduler)
    # Check scheduler submit command exists
    if scheduler == "pbs":
        submit_executable = "qsub"
    elif scheduler == "slurm":
        submit_executable = "sbatch"
    else:
        raise ValueError("scheduler can only take values ['pbs', 'slurm']")
    # Missing submit binary downgrades to file generation only.
    # NOTE(review): the two adjacent literals below concatenate with no
    # space ("...not found.Skipping...") — confirm intended wording.
    if shutil.which(submit_executable) is None:
        logger.warning(
            "The `%s` command is not found."
            "Skipping job submission and just generating files",
            submit_executable,
        )
        submit_job = False
    # Ensure directory is a Path
    directory = Path(directory)
    # remove existing files
    for fname in directory.glob(basename + f"*.{scheduler}"):
        print("Removing {}".format(fname))
        os.remove(str(fname))
    # Write new files and generate commands
    prev_jobids: List[str] = []
    for index, job in enumerate(jobs):
        # Generate scheduler file
        content = create_scheduler_file(scheduler, job)
        logger.debug("File contents:\n%s", content)
        # Write file to disk
        fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
        with fname.open("w") as dst:
            dst.write(content)
        if submit_job or dry_run:
            # Construct command
            submit_cmd = [submit_executable]
            if prev_jobids:
                # Continue to append all previous jobs to submit_cmd so subsequent jobs die along
                # with the first.
                afterok = f"afterok:{':'.join(prev_jobids)}"
                if scheduler == "pbs":
                    submit_cmd += ["-W", f"depend={afterok}"]
                elif scheduler == "slurm":
                    submit_cmd += ["--dependency", afterok]
            # actually run the command
            logger.info(str(submit_cmd))
            try:
                if dry_run:
                    # NOTE(review): this prints the Python list repr of
                    # submit_cmd, not a shell-ready string — confirm whether
                    # " ".join(submit_cmd) was intended.
                    print(f"{submit_cmd} {fname.name}")
                    prev_jobids.append("dry_run")
                else:
                    cmd_res = subprocess.check_output(
                        submit_cmd + [fname.name], cwd=str(directory)
                    )
                    # The scheduler's stdout is the job id used for chaining.
                    prev_jobids.append(cmd_res.decode().strip())
            except subprocess.CalledProcessError:
                logger.error("Submitting job to the queue failed.")
                break
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Determine the scheduler to use to run the jobs."""
    # 1. An explicit command-line choice always wins (but must be valid).
    if scheduler is not None:
        if scheduler not in ["shell", "pbs", "slurm"]:
            raise ValueError(
                "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
            )
        return scheduler
    # 2. Otherwise honour whichever scheduler section the experiment defines,
    #    checked in priority order.
    for candidate in ("pbs", "slurm", "shell"):
        if experiment_definition.get(candidate):
            return candidate
    # 3. Then probe the environment for an installed scheduler binary.
    for candidate in ("pbs", "slurm"):
        if shutil.which(candidate) is not None:
            return candidate
    # 4. Default if nothing else is found goes to shell.
    return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
    input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
    """Read an experiment file, expand it into jobs and run them.

    Args:
        input_file: Path to the experiment yaml file; jobs run from its
            parent directory.
        use_dependencies: Skip commands whose outputs already exist.
        dry_run: Show what would run without executing anything.
        scheduler: Explicit scheduler choice; auto-detected when None.
    """
    # This function provides an API to access experi's functionality from within
    # python scripts, as an alternative to the command-line interface
    # Process and run commands
    input_file = Path(input_file)
    structure = read_file(input_file)
    scheduler = determine_scheduler(scheduler, structure)
    jobs = process_structure(
        structure, scheduler, Path(input_file.parent), use_dependencies
    )
    run_jobs(jobs, scheduler, input_file.parent, dry_run)
# Command-line entry point; the options mirror the parameters of `launch`.
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
    will be run from the directory in which the file exists, not the directory the
    script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Run the experiment described by the input file (thin wrapper over `launch`)."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
variable_matrix
|
python
|
def variable_matrix(
variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
_iters: Dict[str, Callable] = {"product": product, "zip": zip}
_special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
"zip": iterator_zip,
"product": iterator_product,
"arange": iterator_arange,
"chain": iterator_chain,
"append": iterator_chain,
"cycle": iterator_cycle,
"repeat": iterator_cycle,
}
if isinstance(variables, dict):
key_vars: List[List[Dict[str, YamlValue]]] = []
# Handling of specialised iterators
for key, function in _special_keys.items():
if variables.get(key):
item = variables[key]
assert item is not None
for val in function(item, parent):
key_vars.append(val)
del variables[key]
for key, value in variables.items():
key_vars.append(list(variable_matrix(value, key, iterator)))
logger.debug("key vars: %s", key_vars)
# Iterate through all possible products generating a dictionary
for i in _iters[iterator](*key_vars):
logger.debug("dicts: %s", i)
yield combine_dictionaries(i)
# Iterate through a list of values
elif isinstance(variables, list):
for item in variables:
yield from variable_matrix(item, parent, iterator)
# Stopping condition -> we have either a single value from a list
# or a value had only one item
else:
assert parent is not None
yield {parent: variables}
|
Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L174-L226
|
[
"def variable_matrix(\n variables: VarType, parent: str = None, iterator: str = \"product\"\n) -> Iterable[Dict[str, YamlValue]]:\n \"\"\"Process the variables into a list of the appropriate combinations.\n\n This function performs recursive processing of the input variables, creating an\n iterator which has all the combinations of variables specified in the input.\n\n \"\"\"\n _iters: Dict[str, Callable] = {\"product\": product, \"zip\": zip}\n _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {\n \"zip\": iterator_zip,\n \"product\": iterator_product,\n \"arange\": iterator_arange,\n \"chain\": iterator_chain,\n \"append\": iterator_chain,\n \"cycle\": iterator_cycle,\n \"repeat\": iterator_cycle,\n }\n\n if isinstance(variables, dict):\n key_vars: List[List[Dict[str, YamlValue]]] = []\n\n # Handling of specialised iterators\n for key, function in _special_keys.items():\n if variables.get(key):\n item = variables[key]\n assert item is not None\n for val in function(item, parent):\n key_vars.append(val)\n\n del variables[key]\n\n for key, value in variables.items():\n key_vars.append(list(variable_matrix(value, key, iterator)))\n\n logger.debug(\"key vars: %s\", key_vars)\n\n # Iterate through all possible products generating a dictionary\n for i in _iters[iterator](*key_vars):\n logger.debug(\"dicts: %s\", i)\n yield combine_dictionaries(i)\n\n # Iterate through a list of values\n elif isinstance(variables, list):\n for item in variables:\n yield from variable_matrix(item, parent, iterator)\n\n # Stopping condition -> we have either a single value from a list\n # or a value had only one item\n else:\n assert parent is not None\n yield {parent: variables}\n",
"def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:\n \"\"\"Merge a list of dictionaries into a single dictionary.\n\n Where there are collisions the first value in the list will be set\n as this function is using ChainMap to combine the dicts.\n\n \"\"\"\n return dict(ChainMap(*dicts))\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
"""
return dict(ChainMap(*dicts))
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
)
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
if stop and not start:
return np.arange(stop)
return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
assert parent is not None
if isinstance(variables, (int, float)):
yield [{parent: i} for i in np.arange(variables)]
elif isinstance(variables, dict):
if variables.get("stop"):
yield [{parent: i} for i in arange(**variables)]
else:
raise ValueError(f"Stop is a required keyword for the arange iterator.")
else:
raise ValueError(
f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def uniqueify(my_list: Any) -> List[Any]:
"""Remove duplicate entries in a list retaining order."""
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)]
def process_jobs(
jobs: List[Dict],
matrix: VarMatrix,
scheduler_options: Dict[str, Any] = None,
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
assert jobs is not None
logger.debug("Found %d jobs in file", len(jobs))
for job in jobs:
command = job.get("command")
assert command is not None
yield Job(
process_command(command, matrix),
scheduler_options,
directory,
use_dependencies,
)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
"""Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
"""
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure
def process_structure(
structure: Dict[str, Any],
scheduler: str = "shell",
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
input_variables = structure.get("variables")
if input_variables is None:
raise KeyError('The key "variables" was not found in the input file.')
assert isinstance(input_variables, Dict)
# create variable matrix
variables = list(variable_matrix(input_variables))
assert variables
# Check for scheduler options
scheduler_options: Dict[str, YamlValue] = {}
if structure.get("scheduler"):
new_options = structure.get("scheduler")
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
if structure.get(scheduler):
new_options = structure.get(scheduler)
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
assert isinstance(scheduler_options, dict)
if structure.get("name"):
name = structure.get("name")
assert isinstance(name, str)
# set the name attribute in scheduler to global name if no name defined
scheduler_options.setdefault("name", name)
jobs_dict = structure.get("jobs")
if jobs_dict is None:
input_command = structure.get("command")
if isinstance(input_command, list):
jobs_dict = [{"command": cmd} for cmd in input_command]
else:
jobs_dict = [{"command": input_command}]
yield from process_jobs(
jobs_dict, variables, scheduler_options, directory, use_dependencies
)
def run_jobs(
jobs: Iterator[Job],
scheduler: str = "shell",
directory=Path.cwd(),
dry_run: bool = False,
) -> None:
if scheduler == "shell":
run_bash_jobs(jobs, directory, dry_run=dry_run)
elif scheduler in ["pbs", "slurm"]:
run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
else:
raise ValueError(
f"Scheduler '{scheduler}'was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
)
def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
"""Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
"""
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return
def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
"""Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively the aditional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
"""
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Determine the scheduler to use to run the jobs.

    Priority: explicit command-line value, then keys in the experiment
    definition, then auto-discovery of scheduler executables, then "shell".

    Raises:
        ValueError: If an explicit *scheduler* is not a recognised value.
    """
    # Scheduler value from command line has first priority
    if scheduler is not None:
        if scheduler in ["shell", "pbs", "slurm"]:
            return scheduler
        raise ValueError(
            "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
        )
    # Next priority goes to the experiment.yml file
    if experiment_definition.get("pbs"):
        return "pbs"
    if experiment_definition.get("slurm"):
        return "slurm"
    if experiment_definition.get("shell"):
        return "shell"
    # Final priority goes to auto-discovery. Probe for the actual submission
    # executables (the same ones run_scheduler_jobs invokes); there is no
    # binary named "pbs" or "slurm", which is what the previous code probed.
    if shutil.which("qsub") is not None:
        return "pbs"
    if shutil.which("sbatch") is not None:
        return "slurm"
    # Default if nothing else is found goes to shell
    return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
# This function provides an API to access experi's functionality from within
# python scripts, as an alternative to the command-line interface
# Process and run commands
input_file = Path(input_file)
structure = read_file(input_file)
scheduler = determine_scheduler(scheduler, structure)
jobs = process_structure(
structure, scheduler, Path(input_file.parent), use_dependencies
)
run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
"-f",
"--input-file",
type=click.Path(exists=True, dir_okay=False),
default="experiment.yml",
help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
"-s",
"--scheduler",
type=click.Choice(["shell", "pbs", "slurm"]),
default=None,
help="The scheduler with which to run the jobs.",
)
@click.option(
"--use-dependencies",
default=False,
is_flag=True,
help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
"--dry-run",
is_flag=True,
default=False,
help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
"-v",
"--verbose",
callback=_set_verbosity,
expose_value=False,
count=True,
help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
uniqueify
|
python
|
def uniqueify(my_list: Any) -> List[Any]:
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)]
|
Remove duplicate entries in a list retaining order.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L229-L238
| null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
# Module-level logger; level is set here, handlers are configured by the CLI.
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")

# Type definitions
# A filesystem path in either string or Path form.
PathLike = Union[str, Path]
# Scalar values that can appear in the YAML experiment definition.
YamlValue = Union[str, int, float]
# A command is either a bare string or a mapping with command metadata.
CommandInput = Union[str, Dict[str, YamlValue]]
# A variable specification: a scalar, a list of scalars, or a nested mapping.
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
# The expanded matrix: one dict of variable assignments per combination.
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge a list of dictionaries into a single dictionary.

    Where there are collisions the first value in the list will be set
    (first-wins semantics, as with ChainMap lookup).
    """
    merged: Dict[str, Any] = {}
    # Apply the mappings back-to-front so that earlier entries in the list
    # overwrite later ones, reproducing first-wins lookup behaviour.
    for mapping in reversed(dicts):
        merged.update(mapping)
    return merged
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the zip operator to a set of variables.

    This uses the python zip iterator to combine multiple lists of variables such that
    the nth variable in each list is aligned.

    Args:
        variables: The variables object
        parent: Unused

    """
    logger.debug("Yielding from zip iterator")
    if not isinstance(variables, list):
        # A single mapping produces exactly one variable matrix.
        yield list(variable_matrix(variables, parent, "zip"))
    else:
        # Each element of the list becomes its own matrix.
        for element in variables:
            yield list(variable_matrix(element, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the product operator to a set of variables.

    This uses the python itertools.product iterator to combine multiple variables
    such that all possible combinations are generated. This is the default iterator
    however this is a method of manually specifying the option.

    Args:
        variables: The variables object
        parent: Unused

    """
    logger.debug("Yielding from product iterator")
    if isinstance(variables, list):
        # Lists are not meaningful here; the product is taken over a mapping.
        message = f"Product only takes mappings of values, got {variables} of type {type(variables)}"
        raise ValueError(message)
    yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """This successively appends each element of an array to a single list of values.

    This takes a list of values and puts all the values generated for each element in
    the list into a single list of values. This function is particularly useful for
    specifying multiple types of simulations with different parameters.

    Args:
        variables: The variables object
        parent: Unused

    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Flatten the matrices generated for every element into one list.
    combined: VarMatrix = []
    for element in variables:
        combined.extend(variable_matrix(element, parent, "product"))
    yield combined
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Wrap :func:`numpy.arange` for keyword-style specification of a range.

    Fixes two defects of the previous version: ``step`` and ``dtype`` were
    silently dropped when only ``stop`` was supplied, and the truthiness test
    misrouted ``stop=0``.

    Args:
        start: First value of the range (defaults to 0).
        stop: End of the range, exclusive.
        step: Spacing between values (defaults to 1).
        dtype: Optional dtype of the returned array.

    Returns: The array of generated values.
    """
    if start is None:
        start = 0
    if step is None:
        step = 1
    # Pass positionally; np.arange keyword handling varies across numpy versions.
    return np.arange(start, stop, step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: The input variables for the creation of the range
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        # A bare number is shorthand for ``stop``.
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # Membership test rather than truthiness so an explicit ``stop: 0``
        # is not mistaken for a missing key.
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: The input variables for the creation of the range
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: If *variables* is not a dict or lacks a ``times`` key.
    """
    if not isinstance(variables, dict):
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
    # Membership test so ``times: 0`` is honoured rather than rejected.
    if "times" not in variables:
        raise ValueError("times is a required keyword for the repeat iterator.")
    # Work on a shallow copy so the caller's dictionary is not mutated
    # (the previous version ``del``eted "times" from the caller's data).
    remaining = dict(variables)
    times = int(remaining.pop("times"))
    yield list(variable_matrix(remaining, parent, "product")) * times
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.

    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.

    Args:
        variables: The variable specification (scalar, list, or mapping).
        parent: Name of the variable currently being expanded.
        iterator: How sibling variables are combined ("product" or "zip").
    """
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    # Keys with special expansion semantics, mapped to their handlers.
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        # Work on a shallow copy so special keys can be removed without
        # mutating the caller's data structure (previous version deleted
        # keys from the input dict in place).
        variables = dict(variables)
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        for key, function in _special_keys.items():
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                del variables[key]
        # Remaining keys are plain variables, expanded recursively.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Dict[str, Any] = None,
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Convert each job definition into a :class:`Job` instance.

    Each entry in *jobs* must provide a ``command`` key; the command is
    expanded over every combination in the variable matrix.
    """
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job_spec in jobs:
        job_command = job_spec.get("command")
        assert job_command is not None
        yield Job(
            process_command(job_command, matrix),
            scheduler_options,
            directory,
            use_dependencies,
        )
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Generate all combinations of commands given a variable matrix.

    Processes the commands to be sequences of strings.

    """
    assert command is not None
    if isinstance(command, (str, list)):
        # Bare strings and lists of strings carry no extra metadata.
        expanded = [Command(command, variables=variables) for variables in matrix]
    else:
        # Mapping form: accept "command" with "cmd" as the fallback spelling.
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        expanded = [
            Command(cmd, variables, creates, requires) for variables in matrix
        ]
    return uniqueify(expanded)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read and parse yaml file."""
    logger.debug("Input file: %s", filename)
    with open(filename, "r") as source:
        return yaml.safe_load(source)
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Turn a parsed experiment definition into a stream of jobs.

    Expands the "variables" section into a variable matrix, collects
    scheduler options (generic "scheduler" key first, scheduler-specific
    key overriding), then yields one :class:`Job` per job definition.

    Raises:
        KeyError: If the definition has no "variables" section.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)

    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables

    # Check for scheduler options: generic options first, then the
    # scheduler-specific section so it takes precedence.
    scheduler_options: Dict[str, YamlValue] = {}
    for options_key in ("scheduler", scheduler):
        if structure.get(options_key):
            extra_options = structure.get(options_key)
            assert extra_options is not None
            assert isinstance(extra_options, dict)
            scheduler_options.update(extra_options)
    assert isinstance(scheduler_options, dict)

    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)

    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # No explicit "jobs" section: build one from the top-level command(s).
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]

    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
    jobs: Iterator[Job],
    scheduler: str = "shell",
    directory=Path.cwd(),
    dry_run: bool = False,
) -> None:
    """Dispatch *jobs* to the runner matching *scheduler*.

    Args:
        jobs: The jobs to be run.
        scheduler: One of "shell", "pbs" or "slurm".
        directory: Directory in which to run the jobs.
            NOTE(review): this default is evaluated once at import time,
            capturing the interpreter's cwd at that moment — confirm intended.
        dry_run: When True only print the commands that would be run.

    Raises:
        ValueError: If *scheduler* is not a recognised value.
    """
    if scheduler == "shell":
        run_bash_jobs(jobs, directory, dry_run=dry_run)
    elif scheduler in ["pbs", "slurm"]:
        run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
    else:
        # Fixed: the message previously read "...'<name>'was not recognised"
        # (missing space after the quoted scheduler name).
        raise ValueError(
            f"Scheduler '{scheduler}' was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
        )
def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.

    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.

    """
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    # Only show what would run; do not execute anything.
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    if result.returncode != 0:
                        # Abandon the remaining variable combinations of
                        # this command on the first non-zero exit code.
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            # NOTE(review): indentation reconstructed from a whitespace-mangled
            # source — the failure check is assumed to sit inside the command
            # loop so no later command of this job runs after a failure;
            # confirm against the upstream repository.
            if failed:
                logger.error("A command failed, not continuing further.")
                return
def run_scheduler_jobs(
    scheduler: str,
    jobs: Iterator[Job],
    directory: PathLike = Path.cwd(),
    basename: str = "experi",
    dry_run: bool = False,
) -> None:
    """Submit a series of commands to a batch scheduler.

    This takes a list of strings which are the contents of the pbs files, writes the
    files to disk and submits the job to the scheduler. Files which match the pattern of
    the resulting files <basename>_<index>.pbs are deleted before writing the new files.

    To ensure that commands run consecutively the aditional requirement to the run
    script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
    of the experiment to be conducted in a single script.

    Note: Having this function submit jobs requires that the command `qsub` exists,
    implying that a job scheduler is installed.

    Raises:
        ValueError: If *scheduler* is not "pbs" or "slurm".
    """
    submit_job = True
    logger.debug("Creating commands in %s files.", scheduler)
    # Check scheduler submit command exists
    if scheduler == "pbs":
        submit_executable = "qsub"
    elif scheduler == "slurm":
        submit_executable = "sbatch"
    else:
        raise ValueError("scheduler can only take values ['pbs', 'slurm']")
    if shutil.which(submit_executable) is None:
        # Fixed: the two adjacent string literals previously concatenated
        # with no separator ("...not found.Skipping job submission...").
        logger.warning(
            "The `%s` command is not found. "
            "Skipping job submission and just generating files",
            submit_executable,
        )
        submit_job = False

    # Ensure directory is a Path
    directory = Path(directory)
    # remove existing files
    for fname in directory.glob(basename + f"*.{scheduler}"):
        print("Removing {}".format(fname))
        os.remove(str(fname))

    # Write new files and generate commands
    prev_jobids: List[str] = []
    for index, job in enumerate(jobs):
        # Generate scheduler file
        content = create_scheduler_file(scheduler, job)
        logger.debug("File contents:\n%s", content)
        # Write file to disk
        fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
        with fname.open("w") as dst:
            dst.write(content)
        if submit_job or dry_run:
            # Construct command
            submit_cmd = [submit_executable]
            if prev_jobids:
                # Continue to append all previous jobs to submit_cmd so subsequent jobs die along
                # with the first.
                afterok = f"afterok:{':'.join(prev_jobids)}"
                if scheduler == "pbs":
                    submit_cmd += ["-W", f"depend={afterok}"]
                elif scheduler == "slurm":
                    submit_cmd += ["--dependency", afterok]
            # actually run the command
            logger.info(str(submit_cmd))
            try:
                if dry_run:
                    print(f"{submit_cmd} {fname.name}")
                    prev_jobids.append("dry_run")
                else:
                    cmd_res = subprocess.check_output(
                        submit_cmd + [fname.name], cwd=str(directory)
                    )
                    prev_jobids.append(cmd_res.decode().strip())
            except subprocess.CalledProcessError:
                # Stop submitting further jobs once one submission fails.
                logger.error("Submitting job to the queue failed.")
                break
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Determine the scheduler to use to run the jobs.

    Priority: explicit command-line value, then keys in the experiment
    definition, then auto-discovery of scheduler executables, then "shell".

    Raises:
        ValueError: If an explicit *scheduler* is not a recognised value.
    """
    # Scheduler value from command line has first priority
    if scheduler is not None:
        if scheduler in ["shell", "pbs", "slurm"]:
            return scheduler
        raise ValueError(
            "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
        )
    # Next priority goes to the experiment.yml file
    if experiment_definition.get("pbs"):
        return "pbs"
    if experiment_definition.get("slurm"):
        return "slurm"
    if experiment_definition.get("shell"):
        return "shell"
    # Final priority goes to auto-discovery. Probe for the actual submission
    # executables (the same ones run_scheduler_jobs invokes); there is no
    # binary named "pbs" or "slurm", which is what the previous code probed.
    if shutil.which("qsub") is not None:
        return "pbs"
    if shutil.which("sbatch") is not None:
        return "slurm"
    # Default if nothing else is found goes to shell
    return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
    input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
    """Run an experiment from a definition file.

    This function provides an API to access experi's functionality from
    within python scripts, as an alternative to the command-line interface.
    """
    # Process and run commands, working relative to the definition file.
    definition_path = Path(input_file)
    experiment = read_file(definition_path)
    chosen_scheduler = determine_scheduler(scheduler, experiment)
    job_stream = process_structure(
        experiment, chosen_scheduler, Path(definition_path.parent), use_dependencies
    )
    run_jobs(job_stream, chosen_scheduler, definition_path.parent, dry_run)
# Command-line entry point; all options are forwarded unchanged to launch().
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
    will be run from the directory in which the file exists, not the directory the
    script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Run an experiment from the command line; delegates to :func:`launch`."""
    # NOTE(review): leading whitespace inside the multi-line help string was
    # reconstructed from a whitespace-mangled source — confirm against upstream.
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
process_command
|
python
|
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list)
|
Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L263-L286
|
[
"def uniqueify(my_list: Any) -> List[Any]:\n \"\"\"Remove duplicate entries in a list retaining order.\"\"\"\n if sys.version_info >= (3, 6):\n # An implementation specific detail of py3.6 is the retention of order\n # within a dictionary. In py3.7 this becomes the documented behaviour.\n return list(dict.fromkeys(my_list))\n\n # Slower method of order preserving unique list in older python versions\n seen = set()\n return [x for x in my_list if x not in seen and not seen.add(x)]\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
"""
return dict(ChainMap(*dicts))
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """This successively appends each element of an array to a single list of values.

    This takes a list of values and puts all the values generated for each element in
    the list into a single list of values. This function is particularly useful for
    specifying multiple types of simulations with different parameters.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Flatten the values generated for every element into one combined list.
    combined: List[Dict[str, YamlValue]] = []
    for item in variables:
        combined.extend(variable_matrix(item, parent, "product"))
    yield combined
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Thin keyword-friendly wrapper around :func:`numpy.arange`.

    ``numpy.arange`` cannot accept ``stop=None`` alongside a ``start`` value,
    so missing keywords are normalised to the numpy defaults before
    delegating.  Unlike the previous implementation, ``step`` and ``dtype``
    are honoured even when ``start`` is omitted or zero.
    """
    if start is None:
        start = 0
    if step is None:
        step = 1
    return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: The input variables for the creation of the range; either
            a bare number (shorthand for ``stop``) or a mapping of
            :func:`numpy.arange` keywords.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: When ``variables`` is neither a number nor a mapping, or
            when a mapping is missing the required ``stop`` key.
    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        # A bare number is shorthand for the ``stop`` value.
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # Membership test rather than a truthiness check so that an explicit
        # ``stop: 0`` is still accepted (it simply yields an empty range).
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: Mapping with a required ``times`` key; the remaining keys
            are expanded with the product iterator and the result repeated.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: When ``variables`` is not a mapping or ``times`` is
            missing.
    """
    if isinstance(variables, dict):
        # Membership test instead of truthiness, and operate on a copy so the
        # caller's mapping is no longer mutated by removing "times".
        if "times" in variables:
            options = dict(variables)
            repetitions = int(options.pop("times"))
            yield list(variable_matrix(options, parent, "product")) * repetitions
        else:
            raise ValueError("times is a required keyword for the repeat iterator.")
    else:
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.

    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.

    Args:
        variables: A mapping, list or scalar describing the variables.
        parent: Name of the variable the current value belongs to; None at
            the top level of the recursion.
        iterator: How sibling variable lists are combined: "product" for all
            combinations or "zip" for element-wise alignment.
    """
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    # Keys with dedicated expansion behaviour; several entries are aliases.
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        for key, function in _special_keys.items():
            # NOTE(review): truthiness test -- a falsy value under a special
            # key (e.g. 0 or an empty list) is silently ignored; confirm.
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                # NOTE(review): mutates the caller's mapping -- confirm
                # callers do not reuse the input dictionary afterwards.
                del variables[key]
        # Remaining keys are plain variables, expanded recursively.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Remove duplicate entries in a list retaining order."""
    if sys.version_info >= (3, 6):
        # Dictionaries keep insertion order from python 3.6 onwards
        # (guaranteed from 3.7), so the keys double as an ordered set.
        return list(dict.fromkeys(my_list))
    # Older interpreters: track previously seen elements explicitly.
    seen = set()
    ordered = []
    for element in my_list:
        if element not in seen:
            seen.add(element)
            ordered.append(element)
    return ordered
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Dict[str, Any] = None,
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Convert the parsed job definitions into :class:`Job` instances.

    Args:
        jobs: Job mappings parsed from the input file; each entry must
            contain a "command" key.
        matrix: The variable combinations substituted into each command.
        scheduler_options: Scheduler options forwarded to each Job.
        directory: Working directory forwarded to each Job.
        use_dependencies: Forwarded to each Job.

    Yields:
        Job: One job per entry in ``jobs``.
    """
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job in jobs:
        command = job.get("command")
        # A job without a command cannot be expanded into anything runnable.
        assert command is not None
        yield Job(
            process_command(command, matrix),
            scheduler_options,
            directory,
            use_dependencies,
        )
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read and parse yaml file."""
    logger.debug("Input file: %s", filename)
    # safe_load avoids executing arbitrary constructors from the input file.
    with open(filename, "r") as src:
        return yaml.safe_load(src)
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Turn a parsed experiment file into a stream of Job instances.

    Args:
        structure: Mapping parsed from the experiment YAML file. A
            "variables" key is required; "scheduler", a scheduler-specific
            section, "name" and either "jobs" or "command" are optional.
        scheduler: Name of the scheduler whose option section is merged in.
        directory: Working directory forwarded to every Job.
        use_dependencies: Forwarded to every Job.

    Raises:
        KeyError: When the "variables" key is missing.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)
    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables
    # Check for scheduler options
    scheduler_options: Dict[str, YamlValue] = {}
    # NOTE(review): truthiness checks below skip empty sections, which is
    # harmless since an empty mapping contributes no options.
    if structure.get("scheduler"):
        new_options = structure.get("scheduler")
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    # The scheduler-specific section (e.g. "pbs") overrides the generic one.
    if structure.get(scheduler):
        new_options = structure.get(scheduler)
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    assert isinstance(scheduler_options, dict)
    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)
    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # Fall back to a top-level "command"; a list means one job per entry.
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]
    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
    jobs: Iterator[Job],
    scheduler: str = "shell",
    directory=Path.cwd(),
    dry_run: bool = False,
) -> None:
    """Dispatch jobs to the requested execution backend.

    Args:
        jobs: The jobs to run.
        scheduler: One of "shell", "pbs" or "slurm".
        directory: Directory in which the jobs are executed. Note the
            default is the working directory at import time, since defaults
            are evaluated once at function definition.
        dry_run: When True only print the commands instead of running them.

    Raises:
        ValueError: When ``scheduler`` is not a recognised value.
    """
    if scheduler == "shell":
        run_bash_jobs(jobs, directory, dry_run=dry_run)
    elif scheduler in ["pbs", "slurm"]:
        run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
    else:
        # Space after the quoted scheduler name was previously missing.
        raise ValueError(
            f"Scheduler '{scheduler}' was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
        )
def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.

    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.

    Args:
        jobs: The jobs whose commands are executed.
        directory: Working directory for the commands.
        dry_run: When True only print each command instead of running it.

    Raises:
        ProcessLookupError: When a job's shell is not installed.
    """
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            if failed:
                # Previously only the inner variable loop was exited, so the
                # job's remaining commands still ran after a failure; stop
                # the whole job as the docstring promises.
                break
        if failed:
            logger.error("A command failed, not continuing further.")
            return
def run_scheduler_jobs(
    scheduler: str,
    jobs: Iterator[Job],
    directory: PathLike = Path.cwd(),
    basename: str = "experi",
    dry_run: bool = False,
) -> None:
    """Submit a series of commands to a batch scheduler.

    This takes a list of strings which are the contents of the pbs files, writes the
    files to disk and submits the job to the scheduler. Files which match the pattern of
    the resulting files <basename>_<index>.pbs are deleted before writing the new files.

    To ensure that commands run consecutively the additional requirement to the run
    script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
    of the experiment to be conducted in a single script.

    Note: Having this function submit jobs requires that the submission command
    (`qsub` for pbs, `sbatch` for slurm) exists, implying that a job scheduler
    is installed.

    Raises:
        ValueError: When ``scheduler`` is neither "pbs" nor "slurm".
    """
    submit_job = True
    logger.debug("Creating commands in %s files.", scheduler)
    # Check scheduler submit command exists
    if scheduler == "pbs":
        submit_executable = "qsub"
    elif scheduler == "slurm":
        submit_executable = "sbatch"
    else:
        raise ValueError("scheduler can only take values ['pbs', 'slurm']")
    if shutil.which(submit_executable) is None:
        # NOTE(review): implicit string concatenation yields
        # "...is not found.Skipping..." with no separating space.
        logger.warning(
            "The `%s` command is not found."
            "Skipping job submission and just generating files",
            submit_executable,
        )
        submit_job = False
    # Ensure directory is a Path
    directory = Path(directory)
    # remove existing files
    for fname in directory.glob(basename + f"*.{scheduler}"):
        print("Removing {}".format(fname))
        os.remove(str(fname))
    # Write new files and generate commands
    prev_jobids: List[str] = []
    for index, job in enumerate(jobs):
        # Generate scheduler file
        content = create_scheduler_file(scheduler, job)
        logger.debug("File contents:\n%s", content)
        # Write file to disk
        fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
        with fname.open("w") as dst:
            dst.write(content)
        if submit_job or dry_run:
            # Construct command
            submit_cmd = [submit_executable]
            if prev_jobids:
                # Continue to append all previous jobs to submit_cmd so subsequent jobs die along
                # with the first.
                afterok = f"afterok:{':'.join(prev_jobids)}"
                if scheduler == "pbs":
                    submit_cmd += ["-W", f"depend={afterok}"]
                elif scheduler == "slurm":
                    submit_cmd += ["--dependency", afterok]
            # actually run the command
            logger.info(str(submit_cmd))
            try:
                if dry_run:
                    # NOTE(review): prints the list repr of submit_cmd, e.g.
                    # "['qsub'] experi_00.pbs" -- confirm intended output.
                    print(f"{submit_cmd} {fname.name}")
                    prev_jobids.append("dry_run")
                else:
                    # check_output raises CalledProcessError on non-zero exit,
                    # aborting submission of the remaining dependent jobs.
                    cmd_res = subprocess.check_output(
                        submit_cmd + [fname.name], cwd=str(directory)
                    )
                    prev_jobids.append(cmd_res.decode().strip())
            except subprocess.CalledProcessError:
                logger.error("Submitting job to the queue failed.")
                break
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Determine the scheduler to use to run the jobs.

    Priority order: explicit command line value, then sections present in
    the experiment file, then auto-discovery of installed schedulers,
    finally falling back to the shell.

    Raises:
        ValueError: When an explicit scheduler value is not recognised.
    """
    # Scheduler value from command line has first priority
    if scheduler is not None:
        if scheduler in ["shell", "pbs", "slurm"]:
            return scheduler
        raise ValueError(
            "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
        )
    # Next priority goes to the experiment.yml file
    if experiment_definition.get("pbs"):
        return "pbs"
    if experiment_definition.get("slurm"):
        return "slurm"
    if experiment_definition.get("shell"):
        return "shell"
    # Final priority goes to the auto-discovery. Probe for the submission
    # executables the schedulers actually install -- there is no `pbs` or
    # `slurm` binary (run_scheduler_jobs likewise uses qsub/sbatch), so the
    # previous which("pbs")/which("slurm") checks could never succeed.
    if shutil.which("qsub") is not None:
        return "pbs"
    if shutil.which("sbatch") is not None:
        return "slurm"
    # Default if nothing else is found goes to shell
    return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
    input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
    """Read an experiment file, build the jobs and run them.

    This function provides an API to access experi's functionality from within
    python scripts, as an alternative to the command-line interface.

    Args:
        input_file: Path of the experiment YAML file; the jobs run from the
            directory containing this file.
        use_dependencies: Use creates/requires dependencies to reduce work.
        dry_run: Only print the commands rather than executing them.
        scheduler: Explicit scheduler choice; when None it is auto-detected.
    """
    # Process and run commands
    input_file = Path(input_file)
    structure = read_file(input_file)
    scheduler = determine_scheduler(scheduler, structure)
    jobs = process_structure(
        structure, scheduler, Path(input_file.parent), use_dependencies
    )
    run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
    will be run from the directory in which the file exists, not the directory the
    script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Command line entry point: parse the options and delegate to :func:`launch`."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
read_file
|
python
|
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure
|
Read and parse yaml file.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L289-L295
| null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge a list of dictionaries into a single dictionary.

    Where there are collisions the first value in the list takes priority,
    matching the semantics of ``collections.ChainMap``.
    """
    merged: Dict[str, Any] = {}
    # Apply the dicts back-to-front so earlier entries overwrite later ones,
    # giving the first dictionary in the list the highest priority.
    for entry in reversed(dicts):
        merged.update(entry)
    return merged
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the zip operator to a set of variables.

    This uses the python zip iterator to combine multiple lists of variables such that
    the nth variable in each list is aligned.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from zip iterator")
    # A bare mapping is treated as a single-element list of variable groups.
    groups = variables if isinstance(variables, list) else [variables]
    for group in groups:
        yield list(variable_matrix(group, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Apply the product operator to a set of variables.

    This uses the python itertools.product iterator to combine multiple variables
    such that all possible combinations are generated. This is the default iterator
    however this is a method of manually specifying the option.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from product iterator")
    # Lists are not meaningful here; the product operates over a mapping.
    if isinstance(variables, list):
        message = (
            f"Product only takes mappings of values, "
            f"got {variables} of type {type(variables)}"
        )
        raise ValueError(message)
    yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """This successively appends each element of an array to a single list of values.

    This takes a list of values and puts all the values generated for each element in
    the list into a single list of values. This function is particularly useful for
    specifying multiple types of simulations with different parameters.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Flatten the values generated for every element into one combined list.
    combined: List[Dict[str, YamlValue]] = []
    for item in variables:
        combined.extend(variable_matrix(item, parent, "product"))
    yield combined
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Thin keyword-friendly wrapper around :func:`numpy.arange`.

    ``numpy.arange`` cannot accept ``stop=None`` alongside a ``start`` value,
    so missing keywords are normalised to the numpy defaults before
    delegating.  Unlike the previous implementation, ``step`` and ``dtype``
    are honoured even when ``start`` is omitted or zero.
    """
    if start is None:
        start = 0
    if step is None:
        step = 1
    return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: The input variables for the creation of the range; either
            a bare number (shorthand for ``stop``) or a mapping of
            :func:`numpy.arange` keywords.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: When ``variables`` is neither a number nor a mapping, or
            when a mapping is missing the required ``stop`` key.
    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        # A bare number is shorthand for the ``stop`` value.
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # Membership test rather than a truthiness check so that an explicit
        # ``stop: 0`` is still accepted (it simply yields an empty range).
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: Mapping with a required ``times`` key; the remaining keys
            are expanded with the product iterator and the result repeated.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.

    Raises:
        ValueError: When ``variables`` is not a mapping or ``times`` is
            missing.
    """
    if isinstance(variables, dict):
        # Membership test instead of truthiness, and operate on a copy so the
        # caller's mapping is no longer mutated by removing "times".
        if "times" in variables:
            options = dict(variables)
            repetitions = int(options.pop("times"))
            yield list(variable_matrix(options, parent, "product")) * repetitions
        else:
            raise ValueError("times is a required keyword for the repeat iterator.")
    else:
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.

    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.

    Args:
        variables: A mapping, list or scalar describing the variables.
        parent: Name of the variable the current value belongs to; None at
            the top level of the recursion.
        iterator: How sibling variable lists are combined: "product" for all
            combinations or "zip" for element-wise alignment.
    """
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    # Keys with dedicated expansion behaviour; several entries are aliases.
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        for key, function in _special_keys.items():
            # NOTE(review): truthiness test -- a falsy value under a special
            # key (e.g. 0 or an empty list) is silently ignored; confirm.
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                # NOTE(review): mutates the caller's mapping -- confirm
                # callers do not reuse the input dictionary afterwards.
                del variables[key]
        # Remaining keys are plain variables, expanded recursively.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Remove duplicate entries in a list retaining order."""
    if sys.version_info >= (3, 6):
        # Dictionaries keep insertion order from python 3.6 onwards
        # (guaranteed from 3.7), so the keys double as an ordered set.
        return list(dict.fromkeys(my_list))
    # Older interpreters: track previously seen elements explicitly.
    seen = set()
    ordered = []
    for element in my_list:
        if element not in seen:
            seen.add(element)
            ordered.append(element)
    return ordered
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Dict[str, Any] = None,
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Convert the parsed job definitions into :class:`Job` instances.

    Args:
        jobs: Job mappings parsed from the input file; each entry must
            contain a "command" key.
        matrix: The variable combinations substituted into each command.
        scheduler_options: Scheduler options forwarded to each Job.
        directory: Working directory forwarded to each Job.
        use_dependencies: Forwarded to each Job.

    Yields:
        Job: One job per entry in ``jobs``.
    """
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job in jobs:
        command = job.get("command")
        # A job without a command cannot be expanded into anything runnable.
        assert command is not None
        yield Job(
            process_command(command, matrix),
            scheduler_options,
            directory,
            use_dependencies,
        )
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Generate all combinations of commands given a variable matrix.

    Processes the commands to be sequences of strings.

    Args:
        command: Either the command itself (a string or list of strings) or
            a mapping with a "command"/"cmd" entry and optional "creates"
            and "requires" entries.
        matrix: The variable combinations substituted into the command.

    Returns: The de-duplicated list of Command instances, order preserved.
    """
    assert command is not None
    if isinstance(command, (str, list)):
        # Bare commands carry no creates/requires metadata; the previous
        # separate str and list branches were identical.
        command_list = [Command(command, variables=variables) for variables in matrix]
    else:
        # Accept either the "command" key or the shorthand "cmd" key,
        # looking each up only once.
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        command_list = [
            Command(cmd, variables, creates, requires) for variables in matrix
        ]
    return uniqueify(command_list)
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Turn a parsed experiment file into a stream of Job instances.

    Args:
        structure: Mapping parsed from the experiment YAML file. A
            "variables" key is required; "scheduler", a scheduler-specific
            section, "name" and either "jobs" or "command" are optional.
        scheduler: Name of the scheduler whose option section is merged in.
        directory: Working directory forwarded to every Job.
        use_dependencies: Forwarded to every Job.

    Raises:
        KeyError: When the "variables" key is missing.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)
    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables
    # Check for scheduler options
    scheduler_options: Dict[str, YamlValue] = {}
    # NOTE(review): truthiness checks below skip empty sections, which is
    # harmless since an empty mapping contributes no options.
    if structure.get("scheduler"):
        new_options = structure.get("scheduler")
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    # The scheduler-specific section (e.g. "pbs") overrides the generic one.
    if structure.get(scheduler):
        new_options = structure.get(scheduler)
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    assert isinstance(scheduler_options, dict)
    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)
    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # Fall back to a top-level "command"; a list means one job per entry.
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]
    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
    jobs: Iterator[Job],
    scheduler: str = "shell",
    directory=Path.cwd(),
    dry_run: bool = False,
) -> None:
    """Dispatch jobs to the requested execution backend.

    Args:
        jobs: The jobs to run.
        scheduler: One of "shell", "pbs" or "slurm".
        directory: Directory in which the jobs are executed. Note the
            default is the working directory at import time, since defaults
            are evaluated once at function definition.
        dry_run: When True only print the commands instead of running them.

    Raises:
        ValueError: When ``scheduler`` is not a recognised value.
    """
    if scheduler == "shell":
        run_bash_jobs(jobs, directory, dry_run=dry_run)
    elif scheduler in ["pbs", "slurm"]:
        run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
    else:
        # Space after the quoted scheduler name was previously missing.
        raise ValueError(
            f"Scheduler '{scheduler}' was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
        )
def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.

    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.

    Args:
        jobs: The jobs whose commands are executed.
        directory: Working directory for the commands.
        dry_run: When True only print each command instead of running it.

    Raises:
        ProcessLookupError: When a job's shell is not installed.
    """
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            if failed:
                # Previously only the inner variable loop was exited, so the
                # job's remaining commands still ran after a failure; stop
                # the whole job as the docstring promises.
                break
        if failed:
            logger.error("A command failed, not continuing further.")
            return
def run_scheduler_jobs(
    scheduler: str,
    jobs: Iterator[Job],
    directory: PathLike = Path.cwd(),
    basename: str = "experi",
    dry_run: bool = False,
) -> None:
    """Submit a series of commands to a batch scheduler.

    This takes a list of strings which are the contents of the pbs files, writes the
    files to disk and submits the job to the scheduler. Files which match the pattern of
    the resulting files <basename>_<index>.pbs are deleted before writing the new files.

    To ensure that commands run consecutively the additional requirement to the run
    script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
    of the experiment to be conducted in a single script.

    Note: Having this function submit jobs requires that the submission command
    (`qsub` for pbs, `sbatch` for slurm) exists, implying that a job scheduler
    is installed.

    Raises:
        ValueError: When ``scheduler`` is neither "pbs" nor "slurm".
    """
    submit_job = True
    logger.debug("Creating commands in %s files.", scheduler)
    # Check scheduler submit command exists
    if scheduler == "pbs":
        submit_executable = "qsub"
    elif scheduler == "slurm":
        submit_executable = "sbatch"
    else:
        raise ValueError("scheduler can only take values ['pbs', 'slurm']")
    if shutil.which(submit_executable) is None:
        # NOTE(review): implicit string concatenation yields
        # "...is not found.Skipping..." with no separating space.
        logger.warning(
            "The `%s` command is not found."
            "Skipping job submission and just generating files",
            submit_executable,
        )
        submit_job = False
    # Ensure directory is a Path
    directory = Path(directory)
    # remove existing files
    for fname in directory.glob(basename + f"*.{scheduler}"):
        print("Removing {}".format(fname))
        os.remove(str(fname))
    # Write new files and generate commands
    prev_jobids: List[str] = []
    for index, job in enumerate(jobs):
        # Generate scheduler file
        content = create_scheduler_file(scheduler, job)
        logger.debug("File contents:\n%s", content)
        # Write file to disk
        fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
        with fname.open("w") as dst:
            dst.write(content)
        if submit_job or dry_run:
            # Construct command
            submit_cmd = [submit_executable]
            if prev_jobids:
                # Continue to append all previous jobs to submit_cmd so subsequent jobs die along
                # with the first.
                afterok = f"afterok:{':'.join(prev_jobids)}"
                if scheduler == "pbs":
                    submit_cmd += ["-W", f"depend={afterok}"]
                elif scheduler == "slurm":
                    submit_cmd += ["--dependency", afterok]
            # actually run the command
            logger.info(str(submit_cmd))
            try:
                if dry_run:
                    # NOTE(review): prints the list repr of submit_cmd, e.g.
                    # "['qsub'] experi_00.pbs" -- confirm intended output.
                    print(f"{submit_cmd} {fname.name}")
                    prev_jobids.append("dry_run")
                else:
                    # check_output raises CalledProcessError on non-zero exit,
                    # aborting submission of the remaining dependent jobs.
                    cmd_res = subprocess.check_output(
                        submit_cmd + [fname.name], cwd=str(directory)
                    )
                    prev_jobids.append(cmd_res.decode().strip())
            except subprocess.CalledProcessError:
                logger.error("Submitting job to the queue failed.")
                break
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Determine the scheduler to use to run the jobs.

    Priority order: explicit command line value, then sections present in
    the experiment file, then auto-discovery of installed schedulers,
    finally falling back to the shell.

    Raises:
        ValueError: When an explicit scheduler value is not recognised.
    """
    # Scheduler value from command line has first priority
    if scheduler is not None:
        if scheduler in ["shell", "pbs", "slurm"]:
            return scheduler
        raise ValueError(
            "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
        )
    # Next priority goes to the experiment.yml file
    if experiment_definition.get("pbs"):
        return "pbs"
    if experiment_definition.get("slurm"):
        return "slurm"
    if experiment_definition.get("shell"):
        return "shell"
    # Final priority goes to the auto-discovery. Probe for the submission
    # executables the schedulers actually install -- there is no `pbs` or
    # `slurm` binary (run_scheduler_jobs likewise uses qsub/sbatch), so the
    # previous which("pbs")/which("slurm") checks could never succeed.
    if shutil.which("qsub") is not None:
        return "pbs"
    if shutil.which("sbatch") is not None:
        return "slurm"
    # Default if nothing else is found goes to shell
    return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
    input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
    """Read an experiment file, build the jobs and run them.

    This function provides an API to access experi's functionality from within
    python scripts, as an alternative to the command-line interface.

    Args:
        input_file: Path of the experiment YAML file; the jobs run from the
            directory containing this file.
        use_dependencies: Use creates/requires dependencies to reduce work.
        dry_run: Only print the commands rather than executing them.
        scheduler: Explicit scheduler choice; when None it is auto-detected.
    """
    # Process and run commands
    input_file = Path(input_file)
    structure = read_file(input_file)
    scheduler = determine_scheduler(scheduler, structure)
    jobs = process_structure(
        structure, scheduler, Path(input_file.parent), use_dependencies
    )
    run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
    will be run from the directory in which the file exists, not the directory the
    script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Command line entry point: parse the options and delegate to :func:`launch`."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
run_bash_jobs
|
python
|
def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return
|
Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L361-L395
| null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
"""
return dict(ChainMap(*dicts))
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
)
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
if stop and not start:
return np.arange(stop)
return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
assert parent is not None
if isinstance(variables, (int, float)):
yield [{parent: i} for i in np.arange(variables)]
elif isinstance(variables, dict):
if variables.get("stop"):
yield [{parent: i} for i in arange(**variables)]
else:
raise ValueError(f"Stop is a required keyword for the arange iterator.")
else:
raise ValueError(
f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def variable_matrix(
variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
"""Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input.
"""
_iters: Dict[str, Callable] = {"product": product, "zip": zip}
_special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
"zip": iterator_zip,
"product": iterator_product,
"arange": iterator_arange,
"chain": iterator_chain,
"append": iterator_chain,
"cycle": iterator_cycle,
"repeat": iterator_cycle,
}
if isinstance(variables, dict):
key_vars: List[List[Dict[str, YamlValue]]] = []
# Handling of specialised iterators
for key, function in _special_keys.items():
if variables.get(key):
item = variables[key]
assert item is not None
for val in function(item, parent):
key_vars.append(val)
del variables[key]
for key, value in variables.items():
key_vars.append(list(variable_matrix(value, key, iterator)))
logger.debug("key vars: %s", key_vars)
# Iterate through all possible products generating a dictionary
for i in _iters[iterator](*key_vars):
logger.debug("dicts: %s", i)
yield combine_dictionaries(i)
# Iterate through a list of values
elif isinstance(variables, list):
for item in variables:
yield from variable_matrix(item, parent, iterator)
# Stopping condition -> we have either a single value from a list
# or a value had only one item
else:
assert parent is not None
yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
"""Remove duplicate entries in a list retaining order."""
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)]
def process_jobs(
jobs: List[Dict],
matrix: VarMatrix,
scheduler_options: Dict[str, Any] = None,
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
assert jobs is not None
logger.debug("Found %d jobs in file", len(jobs))
for job in jobs:
command = job.get("command")
assert command is not None
yield Job(
process_command(command, matrix),
scheduler_options,
directory,
use_dependencies,
)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
"""Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
"""
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure
def process_structure(
structure: Dict[str, Any],
scheduler: str = "shell",
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
input_variables = structure.get("variables")
if input_variables is None:
raise KeyError('The key "variables" was not found in the input file.')
assert isinstance(input_variables, Dict)
# create variable matrix
variables = list(variable_matrix(input_variables))
assert variables
# Check for scheduler options
scheduler_options: Dict[str, YamlValue] = {}
if structure.get("scheduler"):
new_options = structure.get("scheduler")
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
if structure.get(scheduler):
new_options = structure.get(scheduler)
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
assert isinstance(scheduler_options, dict)
if structure.get("name"):
name = structure.get("name")
assert isinstance(name, str)
# set the name attribute in scheduler to global name if no name defined
scheduler_options.setdefault("name", name)
jobs_dict = structure.get("jobs")
if jobs_dict is None:
input_command = structure.get("command")
if isinstance(input_command, list):
jobs_dict = [{"command": cmd} for cmd in input_command]
else:
jobs_dict = [{"command": input_command}]
yield from process_jobs(
jobs_dict, variables, scheduler_options, directory, use_dependencies
)
def run_jobs(
jobs: Iterator[Job],
scheduler: str = "shell",
directory=Path.cwd(),
dry_run: bool = False,
) -> None:
if scheduler == "shell":
run_bash_jobs(jobs, directory, dry_run=dry_run)
elif scheduler in ["pbs", "slurm"]:
run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
else:
raise ValueError(
f"Scheduler '{scheduler}'was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
)
def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
"""Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively the aditional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
"""
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break
def determine_scheduler(
scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
"""Determine the scheduler to use to run the jobs."""
# Scheduler value from command line has first priority
if scheduler is not None:
if scheduler in ["shell", "pbs", "slurm"]:
return scheduler
raise ValueError(
"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
)
# Next priority goes to the experiment.yml file
if experiment_definition.get("pbs"):
return "pbs"
if experiment_definition.get("slurm"):
return "slurm"
if experiment_definition.get("shell"):
return "shell"
# Final priority goes to the auto-discovery
if shutil.which("pbs") is not None:
return "pbs"
if shutil.which("slurm") is not None:
return "slurm"
# Default if nothing else is found goes to shell
return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
# This function provides an API to access experi's functionality from within
# python scripts, as an alternative to the command-line interface
# Process and run commands
input_file = Path(input_file)
structure = read_file(input_file)
scheduler = determine_scheduler(scheduler, structure)
jobs = process_structure(
structure, scheduler, Path(input_file.parent), use_dependencies
)
run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
"-f",
"--input-file",
type=click.Path(exists=True, dir_okay=False),
default="experiment.yml",
help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
"-s",
"--scheduler",
type=click.Choice(["shell", "pbs", "slurm"]),
default=None,
help="The scheduler with which to run the jobs.",
)
@click.option(
"--use-dependencies",
default=False,
is_flag=True,
help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
"--dry-run",
is_flag=True,
default=False,
help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
"-v",
"--verbose",
callback=_set_verbosity,
expose_value=False,
count=True,
help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
run_scheduler_jobs
|
python
|
def run_scheduler_jobs(
scheduler: str,
jobs: Iterator[Job],
directory: PathLike = Path.cwd(),
basename: str = "experi",
dry_run: bool = False,
) -> None:
submit_job = True
logger.debug("Creating commands in %s files.", scheduler)
# Check scheduler submit command exists
if scheduler == "pbs":
submit_executable = "qsub"
elif scheduler == "slurm":
submit_executable = "sbatch"
else:
raise ValueError("scheduler can only take values ['pbs', 'slurm']")
if shutil.which(submit_executable) is None:
logger.warning(
"The `%s` command is not found."
"Skipping job submission and just generating files",
submit_executable,
)
submit_job = False
# Ensure directory is a Path
directory = Path(directory)
# remove existing files
for fname in directory.glob(basename + f"*.{scheduler}"):
print("Removing {}".format(fname))
os.remove(str(fname))
# Write new files and generate commands
prev_jobids: List[str] = []
for index, job in enumerate(jobs):
# Generate scheduler file
content = create_scheduler_file(scheduler, job)
logger.debug("File contents:\n%s", content)
# Write file to disk
fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
with fname.open("w") as dst:
dst.write(content)
if submit_job or dry_run:
# Construct command
submit_cmd = [submit_executable]
if prev_jobids:
# Continue to append all previous jobs to submit_cmd so subsequent jobs die along
# with the first.
afterok = f"afterok:{':'.join(prev_jobids)}"
if scheduler == "pbs":
submit_cmd += ["-W", f"depend={afterok}"]
elif scheduler == "slurm":
submit_cmd += ["--dependency", afterok]
# actually run the command
logger.info(str(submit_cmd))
try:
if dry_run:
print(f"{submit_cmd} {fname.name}")
prev_jobids.append("dry_run")
else:
cmd_res = subprocess.check_output(
submit_cmd + [fname.name], cwd=str(directory)
)
prev_jobids.append(cmd_res.decode().strip())
except subprocess.CalledProcessError:
logger.error("Submitting job to the queue failed.")
break
|
Submit a series of commands to a batch scheduler.
This takes a list of strings which are the contents of the pbs files, writes the
files to disk and submits the job to the scheduler. Files which match the pattern of
the resulting files <basename>_<index>.pbs are deleted before writing the new files.
To ensure that commands run consecutively the aditional requirement to the run
script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
of the experiment to be conducted in a single script.
Note: Having this function submit jobs requires that the command `qsub` exists,
implying that a job scheduler is installed.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L398-L483
|
[
"def create_scheduler_file(scheduler: str, job: Job) -> str:\n \"\"\"Substitute values into a template scheduler file.\"\"\"\n logger.debug(\"Create Scheduler File Function\")\n\n if job.scheduler_options is None:\n scheduler_options: Dict[str, Any] = {}\n else:\n scheduler_options = deepcopy(job.scheduler_options)\n try:\n setup_string = parse_setup(scheduler_options[\"setup\"])\n del scheduler_options[\"setup\"]\n except KeyError:\n setup_string = \"\"\n # Create header\n header_string = create_header_string(scheduler, **scheduler_options)\n header_string += get_array_string(scheduler, len(job))\n\n if scheduler.upper() == \"SLURM\":\n workdir = r\"$SLURM_SUBMIT_DIR\"\n array_index = r\"$SLURM_ARRAY_TASK_ID\"\n elif scheduler.upper() == \"PBS\":\n workdir = r\"$PBS_O_WORKDIR\"\n array_index = r\"$PBS_ARRAY_INDEX\"\n\n return header_string + SCHEDULER_TEMPLATE.format(\n workdir=workdir,\n command_list=job.as_bash_array(),\n setup=setup_string,\n array_index=array_index,\n )\n"
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Merge a list of dictionaries into a single dictionary.
Where there are collisions the first value in the list will be set
as this function is using ChainMap to combine the dicts.
"""
return dict(ChainMap(*dicts))
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the zip operator to a set of variables.
This uses the python zip iterator to combine multiple lists of variables such that
the nth variable in each list is aligned.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from zip iterator")
if isinstance(variables, list):
for item in variables:
yield list(variable_matrix(item, parent, "zip"))
else:
yield list(variable_matrix(variables, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""Apply the product operator to a set of variables.
This uses the python itertools.product iterator to combine multiple variables
such that all possible combinations are generated. This is the default iterator
however this is a method of manually specifying the option.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from product iterator")
if isinstance(variables, list):
raise ValueError(
f"Product only takes mappings of values, got {variables} of type {type(variables)}"
)
yield list(variable_matrix(variables, parent, "product"))
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
"""This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
"""
logger.debug("Yielding from append iterator")
if not isinstance(variables, list):
raise ValueError(
f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
)
# Create a single list containing all the values
yield list(
chain.from_iterable(
variable_matrix(item, parent, "product") for item in variables
)
)
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
if stop and not start:
return np.arange(stop)
return np.arange(start=start, stop=stop, step=step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Create a list of values using the :func:`numpy.arange` function.
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
assert parent is not None
if isinstance(variables, (int, float)):
yield [{parent: i} for i in np.arange(variables)]
elif isinstance(variables, dict):
if variables.get("stop"):
yield [{parent: i} for i in arange(**variables)]
else:
raise ValueError(f"Stop is a required keyword for the arange iterator.")
else:
raise ValueError(
f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
"""Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
"""
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
)
def variable_matrix(
variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
"""Process the variables into a list of the appropriate combinations.
This function performs recursive processing of the input variables, creating an
iterator which has all the combinations of variables specified in the input.
"""
_iters: Dict[str, Callable] = {"product": product, "zip": zip}
_special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
"zip": iterator_zip,
"product": iterator_product,
"arange": iterator_arange,
"chain": iterator_chain,
"append": iterator_chain,
"cycle": iterator_cycle,
"repeat": iterator_cycle,
}
if isinstance(variables, dict):
key_vars: List[List[Dict[str, YamlValue]]] = []
# Handling of specialised iterators
for key, function in _special_keys.items():
if variables.get(key):
item = variables[key]
assert item is not None
for val in function(item, parent):
key_vars.append(val)
del variables[key]
for key, value in variables.items():
key_vars.append(list(variable_matrix(value, key, iterator)))
logger.debug("key vars: %s", key_vars)
# Iterate through all possible products generating a dictionary
for i in _iters[iterator](*key_vars):
logger.debug("dicts: %s", i)
yield combine_dictionaries(i)
# Iterate through a list of values
elif isinstance(variables, list):
for item in variables:
yield from variable_matrix(item, parent, iterator)
# Stopping condition -> we have either a single value from a list
# or a value had only one item
else:
assert parent is not None
yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
"""Remove duplicate entries in a list retaining order."""
if sys.version_info >= (3, 6):
# An implementation specific detail of py3.6 is the retention of order
# within a dictionary. In py3.7 this becomes the documented behaviour.
return list(dict.fromkeys(my_list))
# Slower method of order preserving unique list in older python versions
seen = set()
return [x for x in my_list if x not in seen and not seen.add(x)]
def process_jobs(
jobs: List[Dict],
matrix: VarMatrix,
scheduler_options: Dict[str, Any] = None,
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
assert jobs is not None
logger.debug("Found %d jobs in file", len(jobs))
for job in jobs:
command = job.get("command")
assert command is not None
yield Job(
process_command(command, matrix),
scheduler_options,
directory,
use_dependencies,
)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
"""Generate all combinations of commands given a variable matrix.
Processes the commands to be sequences of strings.
"""
assert command is not None
if isinstance(command, str):
command_list = [Command(command, variables=variables) for variables in matrix]
elif isinstance(command, list):
command_list = [Command(command, variables=variables) for variables in matrix]
else:
if command.get("command") is not None:
cmd = command.get("command")
else:
cmd = command.get("cmd")
creates = str(command.get("creates", ""))
requires = str(command.get("requires", ""))
assert isinstance(cmd, (list, str))
command_list = [
Command(cmd, variables, creates, requires) for variables in matrix
]
return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
"""Read and parse yaml file."""
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure
def process_structure(
structure: Dict[str, Any],
scheduler: str = "shell",
directory: Path = None,
use_dependencies: bool = False,
) -> Iterator[Job]:
input_variables = structure.get("variables")
if input_variables is None:
raise KeyError('The key "variables" was not found in the input file.')
assert isinstance(input_variables, Dict)
# create variable matrix
variables = list(variable_matrix(input_variables))
assert variables
# Check for scheduler options
scheduler_options: Dict[str, YamlValue] = {}
if structure.get("scheduler"):
new_options = structure.get("scheduler")
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
if structure.get(scheduler):
new_options = structure.get(scheduler)
assert new_options is not None
assert isinstance(new_options, dict)
scheduler_options.update(new_options)
assert isinstance(scheduler_options, dict)
if structure.get("name"):
name = structure.get("name")
assert isinstance(name, str)
# set the name attribute in scheduler to global name if no name defined
scheduler_options.setdefault("name", name)
jobs_dict = structure.get("jobs")
if jobs_dict is None:
input_command = structure.get("command")
if isinstance(input_command, list):
jobs_dict = [{"command": cmd} for cmd in input_command]
else:
jobs_dict = [{"command": input_command}]
yield from process_jobs(
jobs_dict, variables, scheduler_options, directory, use_dependencies
)
def run_jobs(
jobs: Iterator[Job],
scheduler: str = "shell",
directory=Path.cwd(),
dry_run: bool = False,
) -> None:
if scheduler == "shell":
run_bash_jobs(jobs, directory, dry_run=dry_run)
elif scheduler in ["pbs", "slurm"]:
run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
else:
raise ValueError(
f"Scheduler '{scheduler}'was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
)
def run_bash_jobs(
jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
"""Submit commands to the bash shell.
This function runs the commands iteratively but handles errors in the
same way as with the pbs_commands function. A command will run for all
combinations of variables in the variable matrix, however if any one of
those commands fails then the next command will not run.
"""
logger.debug("Running commands in bash shell")
# iterate through command groups
for job in jobs:
# Check shell exists
if shutil.which(job.shell) is None:
raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
failed = False
for command in job:
for cmd in command:
logger.info(cmd)
if dry_run:
print(f"{job.shell} -c '{cmd}'")
else:
result = subprocess.run(
[job.shell, "-c", f"{cmd}"], cwd=str(directory)
)
if result.returncode != 0:
failed = True
logger.error("Command failed: %s", command)
break
if failed:
logger.error("A command failed, not continuing further.")
return
def determine_scheduler(
scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
"""Determine the scheduler to use to run the jobs."""
# Scheduler value from command line has first priority
if scheduler is not None:
if scheduler in ["shell", "pbs", "slurm"]:
return scheduler
raise ValueError(
"Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
)
# Next priority goes to the experiment.yml file
if experiment_definition.get("pbs"):
return "pbs"
if experiment_definition.get("slurm"):
return "slurm"
if experiment_definition.get("shell"):
return "shell"
# Final priority goes to the auto-discovery
if shutil.which("pbs") is not None:
return "pbs"
if shutil.which("slurm") is not None:
return "slurm"
# Default if nothing else is found goes to shell
return "shell"
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
# This function provides an API to access experi's functionality from within
# python scripts, as an alternative to the command-line interface
# Process and run commands
input_file = Path(input_file)
structure = read_file(input_file)
scheduler = determine_scheduler(scheduler, structure)
jobs = process_structure(
structure, scheduler, Path(input_file.parent), use_dependencies
)
run_jobs(jobs, scheduler, input_file.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Command-line entry point; all work is delegated to ``launch``."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
malramsay64/experi
|
src/experi/run.py
|
determine_scheduler
|
python
|
def determine_scheduler(
    scheduler: Optional[str], experiment_definition: Dict[str, YamlValue]
) -> str:
    """Resolve which scheduler the jobs should be run with.

    Priority order: an explicit *scheduler* argument, then scheduler
    sections present in the experiment definition, then executables
    discoverable on PATH, and finally the "shell" fallback.
    """
    known_schedulers = ("shell", "pbs", "slurm")
    # 1. An explicit choice wins, but must be one of the known values.
    if scheduler is not None:
        if scheduler not in known_schedulers:
            raise ValueError(
                "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']"
            )
        return scheduler
    # 2. Honour whichever scheduler section the experiment file defines,
    #    checking in the documented priority order.
    for candidate in ("pbs", "slurm", "shell"):
        if experiment_definition.get(candidate):
            return candidate
    # 3. Auto-discover a scheduler executable on PATH.
    for candidate in ("pbs", "slurm"):
        if shutil.which(candidate) is not None:
            return candidate
    # 4. Nothing found: run commands directly in the shell.
    return "shell"
|
Determine the scheduler to use to run the jobs.
|
train
|
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L486-L514
| null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Run an experiment varying a number of variables."""
import logging
import os
import shutil
import subprocess
import sys
from collections import ChainMap
from itertools import chain, product, repeat
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union
import click
import numpy as np
import yaml
from .commands import Command, Job
from .scheduler import create_scheduler_file
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
# Type definitions
PathLike = Union[str, Path]
YamlValue = Union[str, int, float]
CommandInput = Union[str, Dict[str, YamlValue]]
VarType = Union[YamlValue, List[YamlValue], Dict[str, YamlValue]]
VarMatrix = List[Dict[str, YamlValue]]
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Collapse *dicts* into a single dictionary.

    On key collisions the earliest dictionary in the list wins, matching
    ``ChainMap`` lookup semantics.
    """
    merged: Dict[str, Any] = {}
    # Apply in reverse so earlier dictionaries overwrite later ones.
    for mapping in reversed(dicts):
        merged.update(mapping)
    return merged
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Expand *variables* with the zip iterator.

    A list input yields one matrix per element; a mapping input yields a
    single matrix.  Alignment of the nth values is performed by
    ``variable_matrix`` with the "zip" iterator.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from zip iterator")
    # Normalise to a list so both input shapes share one code path.
    entries = variables if isinstance(variables, list) else [variables]
    for entry in entries:
        yield list(variable_matrix(entry, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Expand *variables* with the product iterator.

    Thin wrapper that rejects list input, then delegates to
    ``variable_matrix`` so all combinations of values are generated.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from product iterator")
    if isinstance(variables, list):
        raise ValueError(
            f"Product only takes mappings of values, got {variables} of type {type(variables)}"
        )
    matrix = list(variable_matrix(variables, parent, "product"))
    yield matrix
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Concatenate the expansions of each list element into one matrix.

    Useful for specifying multiple kinds of simulations with different
    parameters: each element is expanded with the product iterator and the
    resulting values are appended into a single list.

    Args:
        variables: The variables object
        parent: Unused
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Accumulate every element's expansion into one combined matrix.
    combined: VarMatrix = []
    for element in variables:
        combined.extend(variable_matrix(element, parent, "product"))
    yield combined
def arange(start=None, stop=None, step=None, dtype=None) -> np.ndarray:
    """Keyword-friendly wrapper around :func:`numpy.arange`.

    Args:
        start: First value of the range; defaults to 0 when omitted.
        stop: End of the range (exclusive).
        step: Spacing between values; defaults to 1 when omitted.
        dtype: Optional dtype forwarded to numpy.

    Returns: The generated :class:`numpy.ndarray`.
    """
    # Compare against None explicitly: the previous truthiness check
    # silently discarded an explicit start of 0 together with any step and
    # dtype arguments, and dropped dtype in the short form as well.
    if start is None:
        return np.arange(stop, dtype=dtype)
    if step is None:
        return np.arange(start, stop, dtype=dtype)
    return np.arange(start, stop, step, dtype=dtype)
def iterator_arange(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Create a list of values using the :func:`numpy.arange` function.

    Args:
        variables: Either a bare number (shorthand for ``stop``) or a
            mapping of start/stop/step/dtype arguments for :func:`arange`.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value.
    """
    assert parent is not None
    if isinstance(variables, (int, float)):
        # A bare number means arange(stop=<number>).
        yield [{parent: i} for i in np.arange(variables)]
    elif isinstance(variables, dict):
        # Membership test rather than truthiness: an explicit stop of 0 is
        # a provided (if degenerate) value, not a missing one.
        if "stop" in variables:
            yield [{parent: i} for i in arange(**variables)]
        else:
            raise ValueError("Stop is a required keyword for the arange iterator.")
    else:
        raise ValueError(
            f"The arange keyword only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Cycle through a list of values a specified number of times.

    Args:
        variables: Mapping containing a ``times`` repeat count alongside
            the variables to expand.
        parent: The variable for which the values are being generated.

    Returns: A list of dictionaries mapping the parent to each value,
        repeated ``times`` times.
    """
    if not isinstance(variables, dict):
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
    if "times" not in variables:
        raise ValueError("times is a required keyword for the repeat iterator.")
    # Work on a copy so the caller's mapping is not mutated; the previous
    # implementation deleted the "times" key from the input in place.
    remaining = dict(variables)
    times = int(remaining.pop("times"))
    yield list(variable_matrix(remaining, parent, "product")) * times
def variable_matrix(
    variables: VarType, parent: str = None, iterator: str = "product"
) -> Iterable[Dict[str, YamlValue]]:
    """Process the variables into a list of the appropriate combinations.

    This function performs recursive processing of the input variables, creating an
    iterator which has all the combinations of variables specified in the input.

    NOTE(review): the special-key handling below deletes keys from the
    caller's mapping (``del variables[key]``), so the input structure is
    mutated as a side effect — confirm callers do not reuse it.
    """
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    # Keys with dedicated expansion rules; "append"/"chain" and
    # "cycle"/"repeat" are aliases of the same handlers.
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # Handling of specialised iterators
        for key, function in _special_keys.items():
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                # Drop the handled key so the generic loop below skips it.
                del variables[key]
        # Every remaining key is expanded recursively.
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Iterate through all possible products generating a dictionary
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    # Iterate through a list of values
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    # Stopping condition -> we have either a single value from a list
    # or a value had only one item
    else:
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Return *my_list* with duplicates removed, preserving first-seen order."""
    if sys.version_info >= (3, 6):
        # dict preserves insertion order on CPython 3.6 (documented from 3.7),
        # so fromkeys gives an ordered de-duplication in one pass.
        return list(dict.fromkeys(my_list))
    # Fallback for older interpreters: track what has been emitted so far.
    seen = set()
    unique = []
    for element in my_list:
        if element not in seen:
            seen.add(element)
            unique.append(element)
    return unique
def process_jobs(
    jobs: List[Dict],
    matrix: VarMatrix,
    scheduler_options: Dict[str, Any] = None,
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Turn parsed job definitions into :class:`Job` instances.

    Every entry in *jobs* must carry a "command" key; the command is
    expanded against *matrix* before being wrapped in a Job.
    """
    assert jobs is not None
    logger.debug("Found %d jobs in file", len(jobs))
    for job_definition in jobs:
        command = job_definition.get("command")
        assert command is not None
        expanded_commands = process_command(command, matrix)
        yield Job(expanded_commands, scheduler_options, directory, use_dependencies)
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Generate all combinations of commands given a variable matrix.

    Processes the commands to be sequences of strings.

    Args:
        command: Either the command itself (string or list) or a mapping
            with "command"/"cmd" plus optional "creates"/"requires" keys.
        matrix: The expanded variable combinations to substitute into the
            command.

    Returns: The de-duplicated list of Command instances, in first-seen order.
    """
    assert command is not None
    # A bare string or list is the command itself; both take the same path.
    if isinstance(command, (str, list)):
        command_list = [Command(command, variables=variables) for variables in matrix]
    else:
        # "command" takes precedence over the "cmd" shorthand.
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        command_list = [
            Command(cmd, variables, creates, requires) for variables in matrix
        ]
    return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Load *filename* and parse its contents as YAML."""
    logger.debug("Input file: %s", filename)
    with open(filename, "r") as source:
        parsed = yaml.safe_load(source)
    return parsed
def process_structure(
    structure: Dict[str, Any],
    scheduler: str = "shell",
    directory: Path = None,
    use_dependencies: bool = False,
) -> Iterator[Job]:
    """Convert a parsed experiment definition into a stream of Jobs.

    Expands the "variables" section into a variable matrix, collects
    scheduler options, then yields one Job per entry of the "jobs" section
    (falling back to a top-level "command" entry).

    Raises:
        KeyError: when the required "variables" section is missing.
    """
    input_variables = structure.get("variables")
    if input_variables is None:
        raise KeyError('The key "variables" was not found in the input file.')
    assert isinstance(input_variables, Dict)
    # create variable matrix
    variables = list(variable_matrix(input_variables))
    assert variables
    # Check for scheduler options
    scheduler_options: Dict[str, YamlValue] = {}
    if structure.get("scheduler"):
        new_options = structure.get("scheduler")
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    # The scheduler-specific section (e.g. "pbs") overrides the generic one.
    if structure.get(scheduler):
        new_options = structure.get(scheduler)
        assert new_options is not None
        assert isinstance(new_options, dict)
        scheduler_options.update(new_options)
    assert isinstance(scheduler_options, dict)
    if structure.get("name"):
        name = structure.get("name")
        assert isinstance(name, str)
        # set the name attribute in scheduler to global name if no name defined
        scheduler_options.setdefault("name", name)
    jobs_dict = structure.get("jobs")
    if jobs_dict is None:
        # Fall back to a top-level "command" entry (string or list of strings).
        input_command = structure.get("command")
        if isinstance(input_command, list):
            jobs_dict = [{"command": cmd} for cmd in input_command]
        else:
            jobs_dict = [{"command": input_command}]
    yield from process_jobs(
        jobs_dict, variables, scheduler_options, directory, use_dependencies
    )
def run_jobs(
    jobs: Iterator[Job],
    scheduler: str = "shell",
    directory=None,
    dry_run: bool = False,
) -> None:
    """Dispatch *jobs* to the requested scheduler backend.

    Args:
        jobs: The jobs to run or submit.
        scheduler: One of "shell", "pbs" or "slurm".
        directory: Working directory for the jobs; defaults to the current
            directory at call time.
        dry_run: Show the commands without executing/submitting them.

    Raises:
        ValueError: for an unrecognised *scheduler* value.
    """
    # Resolve the default here: a ``Path.cwd()`` default in the signature
    # would be evaluated once at import time rather than per call.
    if directory is None:
        directory = Path.cwd()
    if scheduler == "shell":
        run_bash_jobs(jobs, directory, dry_run=dry_run)
    elif scheduler in ["pbs", "slurm"]:
        run_scheduler_jobs(scheduler, jobs, directory, dry_run=dry_run)
    else:
        # Note the space after the quoted name, missing in the original message.
        raise ValueError(
            f"Scheduler '{scheduler}' was not recognised. Possible values are ['shell', 'pbs', 'slurm']"
        )
def run_bash_jobs(
    jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False
) -> None:
    """Submit commands to the bash shell.

    This function runs the commands iteratively but handles errors in the
    same way as with the pbs_commands function. A command will run for all
    combinations of variables in the variable matrix, however if any one of
    those commands fails then the next command will not run.

    NOTE(review): the ``Path.cwd()`` default is evaluated once at import
    time, not per call — confirm this is intended.
    """
    logger.debug("Running commands in bash shell")
    # iterate through command groups
    for job in jobs:
        # Check shell exists
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run(
                        [job.shell, "-c", f"{cmd}"], cwd=str(directory)
                    )
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            # Abort the whole run as soon as any command fails.
            if failed:
                logger.error("A command failed, not continuing further.")
                return
def run_scheduler_jobs(
    scheduler: str,
    jobs: Iterator[Job],
    directory: PathLike = Path.cwd(),
    basename: str = "experi",
    dry_run: bool = False,
) -> None:
    """Submit a series of commands to a batch scheduler.

    This takes a list of strings which are the contents of the pbs files, writes the
    files to disk and submits the job to the scheduler. Files which match the pattern of
    the resulting files <basename>_<index>.pbs are deleted before writing the new files.

    To ensure that commands run consecutively the additional requirement to the run
    script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components
    of the experiment to be conducted in a single script.

    Note: Having this function submit jobs requires that the command `qsub` exists,
    implying that a job scheduler is installed.

    NOTE(review): the ``Path.cwd()`` default is evaluated at import time.
    """
    submit_job = True
    logger.debug("Creating commands in %s files.", scheduler)
    # Check scheduler submit command exists
    if scheduler == "pbs":
        submit_executable = "qsub"
    elif scheduler == "slurm":
        submit_executable = "sbatch"
    else:
        raise ValueError("scheduler can only take values ['pbs', 'slurm']")
    if shutil.which(submit_executable) is None:
        # NOTE(review): the adjacent string literals concatenate without a
        # space between "found." and "Skipping".
        logger.warning(
            "The `%s` command is not found."
            "Skipping job submission and just generating files",
            submit_executable,
        )
        submit_job = False
    # Ensure directory is a Path
    directory = Path(directory)
    # remove existing files
    for fname in directory.glob(basename + f"*.{scheduler}"):
        print("Removing {}".format(fname))
        os.remove(str(fname))
    # Write new files and generate commands
    prev_jobids: List[str] = []
    for index, job in enumerate(jobs):
        # Generate scheduler file
        content = create_scheduler_file(scheduler, job)
        logger.debug("File contents:\n%s", content)
        # Write file to disk
        fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler))
        with fname.open("w") as dst:
            dst.write(content)
        if submit_job or dry_run:
            # Construct command
            submit_cmd = [submit_executable]
            if prev_jobids:
                # Continue to append all previous jobs to submit_cmd so subsequent jobs die along
                # with the first.
                afterok = f"afterok:{':'.join(prev_jobids)}"
                if scheduler == "pbs":
                    submit_cmd += ["-W", f"depend={afterok}"]
                elif scheduler == "slurm":
                    submit_cmd += ["--dependency", afterok]
            # actually run the command
            logger.info(str(submit_cmd))
            try:
                if dry_run:
                    print(f"{submit_cmd} {fname.name}")
                    prev_jobids.append("dry_run")
                else:
                    cmd_res = subprocess.check_output(
                        submit_cmd + [fname.name], cwd=str(directory)
                    )
                    prev_jobids.append(cmd_res.decode().strip())
            except subprocess.CalledProcessError:
                logger.error("Submitting job to the queue failed.")
                break
def _set_verbosity(ctx, param, value):
if value == 1:
logging.basicConfig(level=logging.INFO)
if value == 2:
logging.basicConfig(level=logging.DEBUG)
def launch(
    input_file="experiment.yml", use_dependencies=False, dry_run=False, scheduler=None
) -> None:
    """Run an experiment file from Python code.

    Programmatic alternative to the command-line interface: parse the
    experiment file, resolve the scheduler, expand the jobs and run them.
    """
    input_path = Path(input_file)
    structure = read_file(input_path)
    scheduler = determine_scheduler(scheduler, structure)
    jobs = process_structure(
        structure, scheduler, Path(input_path.parent), use_dependencies
    )
    run_jobs(jobs, scheduler, input_path.parent, dry_run)
@click.command()
@click.version_option()
@click.option(
    "-f",
    "--input-file",
    type=click.Path(exists=True, dir_okay=False),
    default="experiment.yml",
    help="""Path to a YAML file containing experiment data. Note that the experiment
will be run from the directory in which the file exists, not the directory the
script was run from.""",
)
@click.option(
    "-s",
    "--scheduler",
    type=click.Choice(["shell", "pbs", "slurm"]),
    default=None,
    help="The scheduler with which to run the jobs.",
)
@click.option(
    "--use-dependencies",
    default=False,
    is_flag=True,
    help="Use the dependencies specified in the command to reduce the processing",
)
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Don't run commands or submit jobs, just show the commands that would be run.",
)
@click.option(
    "-v",
    "--verbose",
    callback=_set_verbosity,
    expose_value=False,
    count=True,
    help="Increase the verbosity of logging events.",
)
def main(input_file, use_dependencies, dry_run, scheduler) -> None:
    """Command-line entry point; all work is delegated to ``launch``."""
    launch(input_file, use_dependencies, dry_run, scheduler)
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis._get_closest_week
|
python
|
def _get_closest_week(self, metric_date):
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
|
Gets the closest Monday to the date provided.
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L58-L65
| null |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
    """Create the Nydus redis cluster backing this analytics store.

    ``settings["hosts"]`` is a required list of per-host connection dicts;
    ``settings["defaults"]`` optionally overrides the localhost:6379
    connection defaults.  Raises a bare ``Exception`` when no hosts are
    configured.
    """
    nydus_hosts = {}
    hosts = settings.get("hosts", [])
    if not hosts:
        raise Exception("No redis hosts specified")
    # Nydus expects a mapping of index -> host configuration.
    for i, host in enumerate(hosts):
        nydus_hosts[i] = host
    defaults = settings.get(
        "defaults",
        {
            'host': 'localhost',
            'port': 6379,
        })
    # Consistent hashing spreads keys across the configured hosts.
    self._analytics_backend = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': nydus_hosts,
        'defaults': defaults,
    })
    super(Redis, self).__init__(settings, **kwargs)
def _get_daily_metric_key(self, unique_identifier, metric_date):
    """Redis hash key for daily metrics: one hash per user per calendar month."""
    month_bucket = metric_date.strftime("%y-%m")
    return "%s:user:%s:analy:%s" % (self._prefix, unique_identifier, month_bucket)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
    """Redis hash key for weekly (and monthly) metrics: one hash per user per year."""
    year_bucket = metric_date.strftime("%y")
    return "%s:user:%s:analy:%s" % (self._prefix, unique_identifier, year_bucket)
def _get_daily_metric_name(self, metric, metric_date):
    """Hash field for a daily metric: ``<metric>:<yy-mm-dd>``."""
    return ":".join((metric, metric_date.strftime("%y-%m-%d")))
def _get_weekly_metric_name(self, metric, metric_date):
    """Hash field for a weekly metric: ``<metric>:<yy-mm-dd>``.

    Callers pass the Monday of the target week (see ``track_metric``).
    """
    return ":".join((metric, metric_date.strftime("%y-%m-%d")))
def _get_monthly_metric_name(self, metric, metric_date):
    """Hash field for a monthly metric: ``<metric>:<yy-mm>``."""
    return ":".join((metric, metric_date.strftime("%y-%m")))
def _get_daily_date_range(self, metric_date, delta):
    """
    Get the range of months that we need to use as keys to scan redis.

    Returns one date per calendar month spanned by
    ``[metric_date, metric_date + delta]``; only the year and month of
    each returned date are meaningful to the callers.
    """
    dates = [metric_date]
    start_date = metric_date
    end_date = metric_date + delta
    # Walk forward one month at a time until the end month/year is reached.
    while start_date.month < end_date.month or start_date.year < end_date.year:
        days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
        #shift along to the next month as one of the months we will have to see. We don't care that the exact date
        #is the 1st in each subsequent date range as we only care about the year and the month
        start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
        dates.append(start_date)
    return dates
def _get_weekly_date_range(self, metric_date, delta):
    """Return one anchor date per calendar year spanned by the range.

    The first entry is *metric_date* itself; each subsequent year in
    ``[metric_date, metric_date + delta]`` contributes its January 1st.
    Callers only use the year component of these dates.
    """
    end_date = metric_date + delta
    spanning_years = end_date.year - metric_date.year
    return [metric_date] + [
        datetime.date(year=metric_date.year + offset, month=1, day=1)
        for offset in range(1, spanning_years + 1)
    ]
def _parse_and_process_metrics(self, series, list_of_metrics):
    """Merge raw ``hmget`` rows into a single {date-string: count} mapping.

    Args:
        series: Sequence of dates aligned index-for-index with each row.
        list_of_metrics: One list of raw redis values per scanned hash key.

    Returns: ``(set_of_date_strings, merged_counts)``.
        NOTE(review): the returned series is a set, so chronological
        ordering is not preserved — confirm callers do not rely on order.
    """
    formatted_result_list = []
    series = [dt.strftime("%Y-%m-%d") for dt in series]
    for result in list_of_metrics:
        values = {}
        for index, date_string in enumerate(series):
            # Missing hash fields come back as None; treat them as 0.
            values[date_string] = int(result[index]) if result[index] is not None else 0
        formatted_result_list.append(values)
    # Sum the per-date counts across every scanned key.  ``reduce`` here is
    # the Python 2 builtin (this module targets Python 2 — see basestring use).
    merged_values = reduce(
        lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
        formatted_result_list)
    return set(series), merged_values
def _num_weeks(self, start_date, end_date):
    """Number of Monday-aligned weeks covered by the inclusive range.

    NOTE(review): relies on ``self._get_closest_week`` (defined elsewhere
    in this class) and on Python 2 floor division for ``/ 7``.
    """
    closest_monday = self._get_closest_week(start_date)
    return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
    """Number of calendar months touched by the inclusive date range."""
    year_span = end_date.year - start_date.year
    month_span = end_date.month - start_date.month
    return year_span * 12 + month_span + 1
def clear_all(self):
    """
    Deletes all ``sandsnake`` related data from redis.

    .. warning::

        Very expensive and destructive operation. Use with caution.
    """
    keys = self._analytics_backend.keys()
    # keys() returns one key list per cluster node; flatten before filtering.
    for key in itertools.chain(*keys):
        with self._analytics_backend.map() as conn:
            # Only remove keys created by this backend (prefix-scoped).
            if key.startswith(self._prefix):
                conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
    """Increment the overall counter for ``metric``.

    Metrics tracked only this way carry no daily/weekly/monthly breakdown,
    so they cannot be queried by time period.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param inc_amt: The amount to increment the counter by (default 1)
    :return: the backend's ``incr`` result
    """
    counter_key = "%s:analy:%s:count:%s" % (self._prefix, unique_identifier, metric)
    return self._analytics_backend.incr(counter_key, inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
    """
    Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
    lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track. This can be a list or a string.
    :param date: A python date object indicating when this event occurred. Defaults to today.
    :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
    :return: list of per-(identifier, metric) pipelined increment results
    """
    # Normalise both arguments to sequences so single and batch calls share one path.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    if date is None:
        date = datetime.date.today()
    # map() pipelines all increments into one round trip per cluster node.
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            closest_monday = self._get_closest_week(date)
            hash_key_weekly = self._get_weekly_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
                # Note: monthly fields are stored in the same yearly hash as
                # the weekly fields; the overall counter is bumped as well.
                results.append(
                    [
                        conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
                        conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
                        conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
                        conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
                    ]
                )
    return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
    """
    Returns the ``metric`` for ``unique_identifier`` segmented by day
    starting from ``from_date``

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of days to retrieve starting from ``from_date``
    :return: ``(set of date strings, {date string: count})``
    """
    # An explicit connection lets get_metrics() batch several calls into one pipeline.
    conn = kwargs.get("connection", None)
    date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
    # One monthly hash key per calendar month the range spans.
    metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
    #generate a list of days in between the start date and the end date
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
    metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
        metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Returns the ``metric`` for ``unique_identifier`` segmented by week
    starting from ``from_date``

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of weeks to retrieve starting from ``from_date``
    :return: ``(set of date strings, {date string: count})``
    """
    # An explicit connection lets get_metrics() batch several calls into one pipeline.
    conn = kwargs.get("connection", None)
    # Weekly fields are keyed by each week's Monday, so anchor the range there.
    closest_monday_from_date = self._get_closest_week(from_date)
    metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
    date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
    #generate a list of mondays in between the start date and the end date
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
    metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
        metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Returns the ``metric`` for ``unique_identifier`` segmented by month
    starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
    month specified in ``from_date``

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of months to retrieve starting from ``from_date``
    :return: ``(set of date strings, {date string: count})``
    """
    # An explicit connection lets get_metrics() batch several calls into one pipeline.
    conn = kwargs.get("connection", None)
    first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
    # Monthly fields live in the same per-year hashes as weekly fields,
    # hence the weekly (per-year) key range is reused here.
    metric_key_date_range = self._get_weekly_date_range(
        first_of_month, relativedelta(months=limit))
    date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    #generate a list of first_of_month's in between the start date and the end date
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
    metric_func = lambda conn: [conn.hmget(
        self._get_weekly_metric_key(
            unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
    """
    Retrieves multiple metrics as efficiently as possible.

    :param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
    For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
    :param from_date: A python date object
    :param limit: The total number of periods to retrieve starting from ``from_date``
    :param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
    """
    results = []
    #validation of types:
    allowed_types = {
        "day": self.get_metric_by_day,
        "week": self.get_metric_by_week,
        "month": self.get_metric_by_month,
    }
    if group_by.lower() not in allowed_types:
        raise Exception("Allowed values for group_by are day, week or month.")
    group_by_func = allowed_types[group_by.lower()]
    #pass a connection object so we can pipeline as much as possible
    with self._analytics_backend.map() as conn:
        for unique_identifier, metric in metric_identifiers:
            results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
    #we have to merge all the metric results afterwards because we are using a custom context processor
    return [
        self._parse_and_process_metrics(series, list_of_metrics) for
        series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
    and an ``end_date``, to only get metrics within that time range.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Get the specified metrics after this date
    :param end_date: Get the specified metrics before this date
    :return: The count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        # Normalise the range: swap if reversed, then promote bare dates to
        # datetimes (datetime objects have a .date attribute, dates do not).
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # First-of-month anchors falling inside the requested range.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        #We can sorta optimize this by getting most of the data by month
        if len(monthly_metrics_dates) >= 3:
            # NOTE(review): relies on a ``self._get_counts`` helper defined
            # elsewhere in this class (not visible here) — verify its contract.
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short ranges are cheaper to read day by day.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # Key absent: redis returns None, which int() rejects.
            result = 0
    return result
def get_counts(self, metric_identifiers, **kwargs):
    """
    Retrieves multiple overall counters as efficiently as possible.

    :param metric_identifiers: a list of tuples of the form ``(unique_identifier, metric_name)``
        identifying which metrics to retrieve, for example
        ``[('user:1', 'people_invited'), ('user:2', 'people_invited')]``.
    :return: list of integer counts, one per identifier tuple; values that
        cannot be coerced to int are reported as 0.
    """
    def _coerce(value):
        # Mirror get_count's tolerance of missing values.
        try:
            return int(value)
        except TypeError:
            return 0

    return [
        _coerce(self.get_count(uid, metric_name, **kwargs))
        for uid, metric_name in metric_identifiers
    ]
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the sepcified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis._get_daily_date_range
|
python
|
def _get_daily_date_range(self, metric_date, delta):
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
|
Get the range of months that we need to use as keys to scan redis.
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L97-L112
| null |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
"""
Deletes all ``sandsnake`` related data from redis.
.. warning::
Very expensive and destructive operation. Use with causion
"""
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occured. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the sepcified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result
def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the sepcified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis._get_weekly_date_range
|
python
|
def _get_weekly_date_range(self, metric_date, delta):
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
|
Gets the range of years that we need to use as keys to get metrics from redis.
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L114-L127
| null |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
"""
Deletes all ``sandsnake`` related data from redis.
.. warning::
Very expensive and destructive operation. Use with caution
"""
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrieve starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrieve starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the specified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result
def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the specified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.clear_all
|
python
|
def clear_all(self):
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key)
|
Deletes all ``sandsnake`` related data from redis.
.. warning::
Very expensive and destructive operation. Use with caution
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L151-L164
| null |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occured. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of periods (days/weeks/months, per ``group_by``) to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the specified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
# tolerate swapped arguments
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
# plain dates lack a .date() method; upgrade them to midnight datetimes
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
# every 1st-of-month boundary inside the range
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
# short range: just sum the daily counters
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
# all-time counter; get() returns None for a missing key -> TypeError -> 0
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result
def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves multiple metric counts as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:returns: a list of integer counts, one per (identifier, metric) pair
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
# guard against a backend returning None for a missing counter
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the specified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
# normalize scalar arguments to lists so a single code path handles both
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
# adjust the all-time counter by the delta between old and new daily value
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
# recompute the weekly/monthly aggregates from the daily counters
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
# the daily counters are authoritative; rebuild both aggregate levels from them
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
# normalize scalar arguments to lists so a single code path handles both
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
# NOTE(review): metric_key_date_range is computed but never used below
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
# recompute the week total from the authoritative daily counters
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
# normalize scalar arguments to lists so a single code path handles both
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
# NOTE(review): metric_key_date_range is computed but never used below
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
# NOTE(review): relies on a bare ``monthrange`` import; elsewhere calendar.monthrange is used — confirm module imports
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
# monthly totals are deliberately stored in the per-year ("weekly") hash
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
"""
Helper for ``get_count``: fetches whole-month aggregates for the interior of
[start_date, end_date] plus per-day counts for the partial leading and trailing
spans, all on the already pipelined ``conn``.
:returns: (series, raw results) pairs for the monthly, leading-day and trailing-day queries
"""
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.track_count
|
python
|
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
# Bumps only the single all-time counter; no day/week/month hash fields are
# written, so a metric tracked this way cannot be queried by time period.
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
|
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L166-L176
| null |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
    """Initialise the Redis analytics backend.

    :param settings: dict containing a ``hosts`` list (required) and an
        optional ``defaults`` dict, both forwarded to the nydus cluster.
    :raises Exception: if ``settings`` contains no redis hosts.
    """
    hosts = settings.get("hosts", [])
    if not hosts:
        raise Exception("No redis hosts specified")
    # nydus expects the host list keyed by an integer index.
    nydus_hosts = dict(enumerate(hosts))
    defaults = settings.get(
        "defaults",
        {
            'host': 'localhost',
            'port': 6379,
        })
    self._analytics_backend = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': nydus_hosts,
        'defaults': defaults,
    })
    super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
    """Delete every key under this backend's prefix from redis.

    .. warning::
        Very expensive and destructive operation. Use with caution.
    """
    # keys() returns one list of keys per cluster node; walk them all.
    all_keys = self._analytics_backend.keys()
    for key in itertools.chain(*all_keys):
        with self._analytics_backend.map() as conn:
            if key.startswith(self._prefix):
                conn.delete(key)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: a list with one entry per (identifier, metric) pair, each holding the pipelined backend results
"""
# normalize scalar arguments to lists so a single code path handles both
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
# the monthly counter is deliberately stored in the per-year ("weekly")
# hash; the final incr bumps the all-time counter for the metric
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrieve starting from ``from_date``
:param kwargs: optional ``connection`` to reuse an already pipelined backend connection
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of days in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrieve starting from ``from_date``
:param kwargs: optional ``connection`` to reuse an already pipelined backend connection
"""
conn = kwargs.get("connection", None)
# weeks are anchored on the Monday of the starting week
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
:param kwargs: optional ``connection`` to reuse an already pipelined backend connection
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
# monthly counters are stored in the per-year hashes, hence the "weekly" key range
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of periods (days/weeks/months, per ``group_by``) to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the specified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
# tolerate swapped arguments
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
# plain dates lack a .date() method; upgrade them to midnight datetimes
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
# every 1st-of-month boundary inside the range
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
# short range: just sum the daily counters
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
# all-time counter; get() returns None for a missing key -> TypeError -> 0
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result
def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves multiple metric counts as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:returns: a list of integer counts, one per (identifier, metric) pair
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
# guard against a backend returning None for a missing counter
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the specified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
# normalize scalar arguments to lists so a single code path handles both
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
# adjust the all-time counter by the delta between old and new daily value
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
# recompute the weekly/monthly aggregates from the daily counters
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
# the daily counters are authoritative; rebuild both aggregate levels from them
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
# normalize scalar arguments to lists so a single code path handles both
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
# NOTE(review): metric_key_date_range is computed but never used below
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
# recompute the week total from the authoritative daily counters
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.track_metric
|
python
|
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
|
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occured. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L178-L216
|
[
"def _get_closest_week(self, metric_date):\n \"\"\"\n Gets the closest monday to the date provided.\n \"\"\"\n #find the offset to the closest monday\n days_after_monday = metric_date.isoweekday() - 1\n\n return metric_date - datetime.timedelta(days=days_after_monday)\n",
"def _get_daily_metric_key(self, unique_identifier, metric_date):\n \"\"\"\n Redis key for daily metric\n \"\"\"\n return self._prefix + \":\" + \"user:%s:analy:%s\" % (unique_identifier, metric_date.strftime(\"%y-%m\"),)\n",
"def _get_weekly_metric_key(self, unique_identifier, metric_date):\n \"\"\"\n Redis key for weekly metric\n \"\"\"\n return self._prefix + \":\" + \"user:%s:analy:%s\" % (unique_identifier, metric_date.strftime(\"%y\"),)\n",
"def _get_daily_metric_name(self, metric, metric_date):\n \"\"\"\n Hash key for daily metric\n \"\"\"\n return \"%s:%s\" % (metric, metric_date.strftime(\"%y-%m-%d\"),)\n",
"def _get_weekly_metric_name(self, metric, metric_date):\n \"\"\"\n Hash key for weekly metric\n \"\"\"\n return \"%s:%s\" % (metric, metric_date.strftime(\"%y-%m-%d\"),)\n",
"def _get_monthly_metric_name(self, metric, metric_date):\n \"\"\"\n Hash key for monthly metric\n \"\"\"\n return \"%s:%s\" % (metric, metric_date.strftime(\"%y-%m\"),)\n"
] |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
"""
Deletes all ``sandsnake`` related data from redis.
.. warning::
Very expensive and destructive operation. Use with causion
"""
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the sepcified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result
def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the sepcified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.get_metric_by_day
|
python
|
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
|
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrive starting from ``from_date``
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L218-L246
|
[
"def _get_daily_date_range(self, metric_date, delta):\n \"\"\"\n Get the range of months that we need to use as keys to scan redis.\n \"\"\"\n dates = [metric_date]\n start_date = metric_date\n end_date = metric_date + delta\n\n while start_date.month < end_date.month or start_date.year < end_date.year:\n days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]\n #shift along to the next month as one of the months we will have to see. We don't care that the exact date\n #is the 1st in each subsequent date range as we only care about the year and the month\n start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)\n dates.append(start_date)\n\n return dates\n",
"def _parse_and_process_metrics(self, series, list_of_metrics):\n formatted_result_list = []\n series = [dt.strftime(\"%Y-%m-%d\") for dt in series]\n for result in list_of_metrics:\n values = {}\n for index, date_string in enumerate(series):\n values[date_string] = int(result[index]) if result[index] is not None else 0\n formatted_result_list.append(values)\n\n merged_values = reduce(\n lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),\n formatted_result_list)\n\n return set(series), merged_values\n",
"metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \\\n metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]\n"
] |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
    def clear_all(self):
        """
        Deletes all ``sandsnake`` related data from redis.

        .. warning::
           Very expensive and destructive operation. Use with caution.
        """
        # keys() returns one list per cluster node, hence the chain() below.
        keys = self._analytics_backend.keys()
        for key in itertools.chain(*keys):
            # map() opens a pipelined context; one context per key here.
            with self._analytics_backend.map() as conn:
                # only delete keys belonging to this backend's namespace
                if key.startswith(self._prefix):
                    conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
    def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
        """
        Tracks a metric for a specific ``unique_identifier`` for a certain date.

        The redis backend supports lists for both ``unique_identifier`` and
        ``metric``, allowing multiple metrics for multiple identifiers to be
        tracked in one pipelined round trip. Not all backends may support this.

        :param unique_identifier: Unique string (or list/tuple/generator of
            strings) identifying the object(s) this metric is for
        :param metric: A unique name for the metric you want to track. This can be a list or a string.
        :param date: A python date object indicating when this event occurred. Defaults to today.
        :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
        :return: list of backend results, one 4-element list per (uid, metric) pair
        """
        # normalize both arguments to sequences (Python 2 style type checks)
        metric = [metric] if isinstance(metric, basestring) else metric
        unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
        results = []
        if date is None:
            date = datetime.date.today()
        # map() pipelines every command below into one round trip per node
        with self._analytics_backend.map() as conn:
            for uid in unique_identifier:
                hash_key_daily = self._get_daily_metric_key(uid, date)
                closest_monday = self._get_closest_week(date)
                hash_key_weekly = self._get_weekly_metric_key(uid, date)
                for single_metric in metric:
                    daily_metric_name = self._get_daily_metric_name(single_metric, date)
                    weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
                    monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
                    # the per-year hash stores both the weekly and the monthly
                    # fields; the lifetime counter is a plain INCR key
                    results.append(
                        [
                            conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
                            conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
                            conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
                            conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
                        ]
                    )
        return results
    def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
        """
        Returns the ``metric`` for ``unique_identifier`` segmented by week,
        starting from the Monday of the week containing ``from_date``.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param from_date: A python date object
        :param limit: The total number of weeks to retrieve starting from ``from_date``
        :param connection: (keyword only) optional pipelined connection; when
            supplied the caller owns the surrounding ``map()`` context
        :return: tuple of (set of week-start date strings, {week: count})
        """
        conn = kwargs.get("connection", None)
        closest_monday_from_date = self._get_closest_week(from_date)
        # weekly data lives in per-year hashes: one key per spanned year
        metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
        date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
        #generate a list of mondays in between the start date and the end date
        series = list(itertools.islice(date_generator, limit))
        metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
        # one HMGET per yearly hash, fetching every requested week's field
        metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
            metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
        if conn is not None:
            results = metric_func(conn)
        else:
            with self._analytics_backend.map() as conn:
                results = metric_func(conn)
        series, results = self._parse_and_process_metrics(series, results)
        return series, results
    def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
        """
        Returns the ``metric`` for ``unique_identifier`` segmented by month,
        starting from the 1st of the month specified in ``from_date``.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param from_date: A python date object
        :param limit: The total number of months to retrieve starting from ``from_date``
        :param connection: (keyword only) optional pipelined connection; when
            supplied the caller owns the surrounding ``map()`` context
        :return: tuple of (set of month-start date strings, {month: count})
        """
        conn = kwargs.get("connection", None)
        first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
        # monthly fields are stored in the same per-year hash as weekly ones,
        # hence the reuse of the weekly key helpers below
        metric_key_date_range = self._get_weekly_date_range(
            first_of_month, relativedelta(months=limit))
        date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
        #generate a list of first_of_month's in between the start date and the end date
        series = list(itertools.islice(date_generator, limit))
        metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
        metric_func = lambda conn: [conn.hmget(
            self._get_weekly_metric_key(
                unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
        if conn is not None:
            results = metric_func(conn)
        else:
            with self._analytics_backend.map() as conn:
                results = metric_func(conn)
        series, results = self._parse_and_process_metrics(series, results)
        return series, results
    def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
        """
        Retrieves multiple metrics as efficiently as possible.

        :param metric_identifiers: a list of tuples of the form ``(unique_identifier, metric_name)`` identifying which metrics to retrieve.
            For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
        :param from_date: A python date object
        :param limit: The total number of buckets to retrieve starting from ``from_date``
        :param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
        :return: list of (series, merged-counts) tuples, one per identifier pair
        """
        results = []
        #validation of types:
        allowed_types = {
            "day": self.get_metric_by_day,
            "week": self.get_metric_by_week,
            "month": self.get_metric_by_month,
        }
        if group_by.lower() not in allowed_types:
            raise Exception("Allowed values for group_by are day, week or month.")
        group_by_func = allowed_types[group_by.lower()]
        #pass a connection object so we can pipeline as much as possible
        with self._analytics_backend.map() as conn:
            for unique_identifier, metric in metric_identifiers:
                results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
        #we have to merge all the metric results afterwards because we are using a custom context processor
        return [
            self._parse_and_process_metrics(series, list_of_metrics) for
            series, list_of_metrics in results]
    def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
        """
        Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
        and an ``end_date`` to only count metrics within that time range; otherwise
        the lifetime counter is returned.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param start_date: Get the specified metrics after this date
        :param end_date: Get the specified metrics before this date
        :return: The count for the metric, 0 otherwise
        """
        result = None
        if start_date and end_date:
            # normalize: swap reversed bounds, then coerce date -> datetime
            start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
            start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
            end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
            # every 1st-of-month boundary inside the range
            monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
            #We can sorta optimize this by getting most of the data by month
            if len(monthly_metrics_dates) >= 3:
                with self._analytics_backend.map() as conn:
                    monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                        conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
                monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
                starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
                ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
                result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
            else:
                # short range: just sum the daily buckets directly
                diff = end_date - start_date
                metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
                result = sum(metric_results[1].values())
        else:
            # no range given: read the lifetime counter (None -> 0)
            try:
                result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
            except TypeError:
                result = 0
        return result
def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
    def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
        """
        Sets the daily counter for the ``metric`` for ``unique_identifier`` to ``count``.

        Useful for resetting a metric count to 0 or decrementing a metric. Lists
        are accepted for both ``unique_identifier`` and ``metric``; not all
        backends may support this.

        :param unique_identifier: Unique string (or list) identifying the object(s) this metric is for
        :param metric: A unique name (or list of names) for the metric(s) you want to track
        :param date: Sets the specified metrics for this date
        :param count: Sets the specified metrics to the value of count
        :param sync_agg: Boolean used to determine if week and month metrics should be updated
        :param update_counter: Boolean used to determine if the overall counter should be updated
        """
        metric = [metric] if isinstance(metric, basestring) else metric
        unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
        results = []
        with self._analytics_backend.map() as conn:
            for uid in unique_identifier:
                hash_key_daily = self._get_daily_metric_key(uid, date)
                for single_metric in metric:
                    daily_metric_name = self._get_daily_metric_name(single_metric, date)
                    if update_counter: # updates overall counter for metric
                        overall_count = self.get_count(uid, single_metric)
                        # the lifetime counter moves by (new - old) daily value
                        day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
                        self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
                    results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
        if sync_agg:
            # recompute the week/month rollups affected by this day
            self.sync_agg_metric(unique_identifier, metric, date, date)
        return results
    def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
        """
        Recomputes the weekly and monthly aggregate counters from the daily
        counters for every day in ``[start_date, end_date]``.

        Useful after :meth:`set_metric_by_day` (with ``sync_agg=False``) to
        bring the rollups back in line with the daily data. Lists are accepted
        for both ``unique_identifier`` and ``metric``; not all backends may
        support this.

        :param unique_identifier: Unique string (or list) identifying the object(s) this metric is for
        :param metric: A unique name (or list of names) for the metric(s) you want to track
        :param start_date: Date syncing starts
        :param end_date: Date syncing ends
        """
        self.sync_week_metric(unique_identifier, metric, start_date, end_date)
        self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
    def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
        """Issue the pipelined reads backing :meth:`get_count`'s month-optimized path.

        Splits ``[start_date, end_date]`` into whole months (fetched via the
        monthly aggregates) plus partial leading and trailing day ranges, and
        returns the six raw (series, results) pairs for the caller to merge.
        """
        # days before the first 1st-of-month / after the last 1st-of-month
        start_diff = monthly_metrics_dates[0] - start_date
        end_diff = end_date - monthly_metrics_dates[-1]
        monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
        #get the difference from the date to the start date and get all dates in between
        starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
        ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
        return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.get_metric_by_week
|
python
|
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
|
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from ``from_date``
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrieve starting from ``from_date``
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L248-L278
|
[
"def _get_closest_week(self, metric_date):\n \"\"\"\n Gets the closest monday to the date provided.\n \"\"\"\n #find the offset to the closest monday\n days_after_monday = metric_date.isoweekday() - 1\n\n return metric_date - datetime.timedelta(days=days_after_monday)\n",
"def _get_weekly_date_range(self, metric_date, delta):\n \"\"\"\n Gets the range of years that we need to use as keys to get metrics from redis.\n \"\"\"\n dates = [metric_date]\n end_date = metric_date + delta\n #Figure out how many years our metric range spans\n spanning_years = end_date.year - metric_date.year\n for i in range(spanning_years):\n #for the weekly keys, we only care about the year\n dates.append(\n datetime.date(\n year=metric_date.year + (i + 1), month=1, day=1))\n return dates\n",
"def _parse_and_process_metrics(self, series, list_of_metrics):\n formatted_result_list = []\n series = [dt.strftime(\"%Y-%m-%d\") for dt in series]\n for result in list_of_metrics:\n values = {}\n for index, date_string in enumerate(series):\n values[date_string] = int(result[index]) if result[index] is not None else 0\n formatted_result_list.append(values)\n\n merged_values = reduce(\n lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),\n formatted_result_list)\n\n return set(series), merged_values\n",
"metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \\\n metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]\n"
] |
class Redis(BaseAnalyticsBackend):
    def __init__(self, settings, **kwargs):
        """Build a nydus redis cluster from ``settings`` and defer the rest
        to the base analytics backend.

        :param settings: dict with a required ``hosts`` list (nydus host
            dicts) and an optional ``defaults`` dict applied to every host.
        :raises Exception: if ``hosts`` is missing or empty.
        """
        nydus_hosts = {}
        hosts = settings.get("hosts", [])
        if not hosts:
            raise Exception("No redis hosts specified")
        # nydus expects hosts keyed by integer index
        for i, host in enumerate(hosts):
            nydus_hosts[i] = host
        defaults = settings.get(
            "defaults",
            {
                'host': 'localhost',
                'port': 6379,
            })
        # consistent-hashing router shards keys across the configured hosts
        self._analytics_backend = create_cluster({
            'engine': 'nydus.db.backends.redis.Redis',
            'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
            'hosts': nydus_hosts,
            'defaults': defaults,
        })
        super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
    def clear_all(self):
        """
        Deletes all ``sandsnake`` related data from redis.

        .. warning::
           Very expensive and destructive operation. Use with caution.
        """
        # keys() returns one list per cluster node, hence the chain() below.
        keys = self._analytics_backend.keys()
        for key in itertools.chain(*keys):
            # map() opens a pipelined context; one context per key here.
            with self._analytics_backend.map() as conn:
                # only delete keys belonging to this backend's namespace
                if key.startswith(self._prefix):
                    conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
    def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
        """
        Tracks a metric for a specific ``unique_identifier`` for a certain date.

        The redis backend supports lists for both ``unique_identifier`` and
        ``metric``, allowing multiple metrics for multiple identifiers to be
        tracked in one pipelined round trip. Not all backends may support this.

        :param unique_identifier: Unique string (or list/tuple/generator of
            strings) identifying the object(s) this metric is for
        :param metric: A unique name for the metric you want to track. This can be a list or a string.
        :param date: A python date object indicating when this event occurred. Defaults to today.
        :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
        :return: list of backend results, one 4-element list per (uid, metric) pair
        """
        # normalize both arguments to sequences (Python 2 style type checks)
        metric = [metric] if isinstance(metric, basestring) else metric
        unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
        results = []
        if date is None:
            date = datetime.date.today()
        # map() pipelines every command below into one round trip per node
        with self._analytics_backend.map() as conn:
            for uid in unique_identifier:
                hash_key_daily = self._get_daily_metric_key(uid, date)
                closest_monday = self._get_closest_week(date)
                hash_key_weekly = self._get_weekly_metric_key(uid, date)
                for single_metric in metric:
                    daily_metric_name = self._get_daily_metric_name(single_metric, date)
                    weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
                    monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
                    # the per-year hash stores both the weekly and the monthly
                    # fields; the lifetime counter is a plain INCR key
                    results.append(
                        [
                            conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
                            conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
                            conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
                            conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
                        ]
                    )
        return results
    def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
        """
        Returns the ``metric`` for ``unique_identifier`` segmented by day,
        starting from ``from_date``.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param from_date: A python date object
        :param limit: The total number of days to retrieve starting from ``from_date``
        :param connection: (keyword only) optional pipelined connection; when
            supplied the caller owns the surrounding ``map()`` context
        :return: tuple of (set of day strings, {day: count})
        """
        conn = kwargs.get("connection", None)
        date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
        # daily data lives in per-month hashes; one key per spanned month
        metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
        #generate the list of days between the start date and the end date
        series = list(itertools.islice(date_generator, limit))
        metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
        # one HMGET per monthly hash, fetching every requested day's field
        metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
            metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
        if conn is not None:
            results = metric_func(conn)
        else:
            with self._analytics_backend.map() as conn:
                results = metric_func(conn)
        series, results = self._parse_and_process_metrics(series, results)
        return series, results
    def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
        """
        Returns the ``metric`` for ``unique_identifier`` segmented by month,
        starting from the 1st of the month specified in ``from_date``.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param from_date: A python date object
        :param limit: The total number of months to retrieve starting from ``from_date``
        :param connection: (keyword only) optional pipelined connection; when
            supplied the caller owns the surrounding ``map()`` context
        :return: tuple of (set of month-start date strings, {month: count})
        """
        conn = kwargs.get("connection", None)
        first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
        # monthly fields are stored in the same per-year hash as weekly ones,
        # hence the reuse of the weekly key helpers below
        metric_key_date_range = self._get_weekly_date_range(
            first_of_month, relativedelta(months=limit))
        date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
        #generate a list of first_of_month's in between the start date and the end date
        series = list(itertools.islice(date_generator, limit))
        metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
        metric_func = lambda conn: [conn.hmget(
            self._get_weekly_metric_key(
                unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
        if conn is not None:
            results = metric_func(conn)
        else:
            with self._analytics_backend.map() as conn:
                results = metric_func(conn)
        series, results = self._parse_and_process_metrics(series, results)
        return series, results
    def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
        """
        Retrieves multiple metrics as efficiently as possible.

        :param metric_identifiers: a list of tuples of the form ``(unique_identifier, metric_name)`` identifying which metrics to retrieve.
            For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
        :param from_date: A python date object
        :param limit: The total number of buckets to retrieve starting from ``from_date``
        :param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
        :return: list of (series, merged-counts) tuples, one per identifier pair
        """
        results = []
        #validation of types:
        allowed_types = {
            "day": self.get_metric_by_day,
            "week": self.get_metric_by_week,
            "month": self.get_metric_by_month,
        }
        if group_by.lower() not in allowed_types:
            raise Exception("Allowed values for group_by are day, week or month.")
        group_by_func = allowed_types[group_by.lower()]
        #pass a connection object so we can pipeline as much as possible
        with self._analytics_backend.map() as conn:
            for unique_identifier, metric in metric_identifiers:
                results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
        #we have to merge all the metric results afterwards because we are using a custom context processor
        return [
            self._parse_and_process_metrics(series, list_of_metrics) for
            series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Gets the count for the ``metric`` for ``unique_identifier``. You can
    specify a ``start_date`` and an ``end_date`` to only get metrics within
    that time range.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Get the specified metrics after this date
    :param end_date: Get the specified metrics before this date
    :return: The count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        # Normalize the order of the bounds, then coerce plain dates to
        # datetimes so the rrule arithmetic below sees uniform types.
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # Every 1st-of-month that falls inside the range.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        # With whole months in the middle it is cheaper to read monthly
        # aggregates plus daily partials at both ends of the range.
        if len(monthly_metrics_dates) >= 3:
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            # Raw pipeline results can only be parsed after the map() context
            # has executed the queued commands.
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short range: just sum the daily buckets directly.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        # No range given: read the single lifetime counter.
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # A missing key comes back as None; int(None) raises TypeError.
            result = 0
    return result
def get_counts(self, metric_identifiers, **kwargs):
    """
    Retrieve several lifetime counters as efficiently as possible.

    :param metric_identifiers: a list of ``(unique_identifier, metric_name)``
        tuples identifying which metrics to retrieve, e.g.
        ``[('user:1', 'people_invited'), ('user:2', 'comments_posted')]``
    """
    raw_counts = [
        self.get_count(uid, metric_name, **kwargs)
        for uid, metric_name in metric_identifiers]
    parsed_results = []
    for value in raw_counts:
        # Missing counters come back as None; treat them as zero.
        try:
            parsed_results.append(int(value))
        except TypeError:
            parsed_results.append(0)
    return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
    """
    Sets the count for the ``metric`` for ``unique_identifier``.

    You must specify a ``date`` for the ``count`` to be set on. Useful for
    resetting a metric count to 0 or decrementing a metric.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing for the setting of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param date: Sets the specified metrics for this date
    :param count: Sets the specified metrics to value of count
    :param sync_agg: Boolean used to determine if week and month metrics should be updated
    :param update_counter: Boolean used to determine if overall counter should be updated
    """
    # Normalize both arguments to lists so the loops below handle the
    # single-value and multi-value cases uniformly (Python 2 type checks).
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                if update_counter:  # updates overall counter for metric
                    # Adjust the lifetime counter by the delta between the new
                    # count and the day's previous count.
                    # NOTE(review): these reads/writes use the backend directly
                    # (not ``conn``), so they bypass the pipeline — confirm
                    # this interleaving is intended.
                    overall_count = self.get_count(uid, single_metric)
                    day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
                    self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
                results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
    if sync_agg:
        # Recompute the affected week/month counters from the daily data.
        self.sync_agg_metric(unique_identifier, metric, date, date)
    return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recompute the weekly and monthly counters for ``metric`` for
    ``unique_identifier`` from the daily counters over
    [``start_date``, ``end_date``]. Useful after ``set_metric_by_day``.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    # Both aggregates derive from the same daily buckets.
    for resync in (self.sync_week_metric, self.sync_month_metric):
        resync(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recompute the weekly counters for ``metric`` for ``unique_identifier``
    from the daily counters, for every week touched by
    [``start_date``, ``end_date``]. Useful after ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing several metrics for several identifiers to be synced
    in one call. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    # Normalize to lists (Python 2 style type checks).
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    closest_monday_from_date = self._get_closest_week(start_date)
    num_weeks = self._num_weeks(start_date, end_date)
    # Monday of every week in the range. (The previously computed
    # _get_weekly_date_range value was never used and has been removed.)
    weeks_to_update = [closest_monday_from_date + datetime.timedelta(days=7 * i) for i in range(num_weeks)]
    for uid in unique_identifier:
        for single_metric in metric:
            for week in weeks_to_update:
                # A week's counter is the sum of its seven daily buckets.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
                week_counter = sum(series_results.values())
                hash_key_weekly = self._get_weekly_metric_key(uid, week)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recompute the monthly counters for ``metric`` for ``unique_identifier``
    from the daily counters, for every month touched by
    [``start_date``, ``end_date``]. Useful after ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing several metrics for several identifiers to be synced
    in one call. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    # Normalize to lists (Python 2 style type checks).
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    num_months = self._num_months(start_date, end_date)
    first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
    # 1st of every month in the range. (The previously computed
    # _get_weekly_date_range value was never used and has been removed.)
    months_to_update = [first_of_month + relativedelta(months=i) for i in range(num_months)]
    for uid in unique_identifier:
        for single_metric in metric:
            for month in months_to_update:
                # A month's counter is the sum of its daily buckets; use the
                # qualified calendar.monthrange for consistency with
                # _get_daily_date_range.
                days_in_month = calendar.monthrange(month.year, month.month)[1]
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=days_in_month)
                month_counter = sum(series_results.values())
                # Monthly counters live in the same per-year hash as the
                # weekly ones.
                hash_key_monthly = self._get_weekly_metric_key(uid, month)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
    """
    Queue (on the pipelined ``conn``) the three queries that together cover
    [``start_date``, ``end_date``]: whole months in the middle, plus daily
    partials before the first 1st-of-month and after the last one.

    Returns the raw, unparsed (series, results) pairs for each of the three
    segments; callers must run them through ``_parse_and_process_metrics``
    after the pipeline executes.
    """
    # Days between the range start and the first 1st-of-month (may be zero).
    start_diff = monthly_metrics_dates[0] - start_date
    # Days between the last 1st-of-month and the range end.
    end_diff = end_date - monthly_metrics_dates[-1]
    # Whole months between the first and last 1st-of-month.
    monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
    # Leading partial: the days from start_date up to the first 1st-of-month;
    # skipped entirely when start_date is itself a 1st-of-month.
    starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
    # Trailing partial: the last 1st-of-month through end_date, inclusive.
    ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
    return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.get_metric_by_month
|
python
|
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Return ``metric`` for ``unique_identifier`` segmented by month, starting
    from the 1st of the month containing ``from_date``.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of months to retrieve starting from ``from_date``
    """
    pipeline = kwargs.get("connection", None)
    month_start = datetime.date(year=from_date.year, month=from_date.month, day=1)
    # Monthly counters are stored in the same per-year hashes as the weekly
    # ones, so the "weekly" key helpers are reused here.
    key_dates = self._get_weekly_date_range(month_start, relativedelta(months=limit))
    month_gen = (month_start + relativedelta(months=i) for i in itertools.count())
    series = list(itertools.islice(month_gen, limit))
    fields = [self._get_monthly_metric_name(metric, month) for month in series]
    fetch = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, key_date), fields) for key_date in key_dates]
    if pipeline is not None:
        raw = fetch(pipeline)
    else:
        with self._analytics_backend.map() as conn:
            raw = fetch(conn)
    return self._parse_and_process_metrics(series, raw)
|
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L280-L313
|
[
"def _get_weekly_date_range(self, metric_date, delta):\n \"\"\"\n Gets the range of years that we need to use as keys to get metrics from redis.\n \"\"\"\n dates = [metric_date]\n end_date = metric_date + delta\n #Figure out how many years our metric range spans\n spanning_years = end_date.year - metric_date.year\n for i in range(spanning_years):\n #for the weekly keys, we only care about the year\n dates.append(\n datetime.date(\n year=metric_date.year + (i + 1), month=1, day=1))\n return dates\n",
"def _parse_and_process_metrics(self, series, list_of_metrics):\n formatted_result_list = []\n series = [dt.strftime(\"%Y-%m-%d\") for dt in series]\n for result in list_of_metrics:\n values = {}\n for index, date_string in enumerate(series):\n values[date_string] = int(result[index]) if result[index] is not None else 0\n formatted_result_list.append(values)\n\n merged_values = reduce(\n lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),\n formatted_result_list)\n\n return set(series), merged_values\n",
"metric_func = lambda conn: [conn.hmget(\n self._get_weekly_metric_key(\n unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]\n"
] |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
    """
    Build the nydus redis cluster this backend talks to.

    ``settings["hosts"]`` is required (a list of per-host option dicts);
    ``settings["defaults"]`` falls back to localhost:6379.
    """
    hosts = settings.get("hosts", [])
    if not hosts:
        raise Exception("No redis hosts specified")
    # Nydus wants hosts keyed by index.
    nydus_hosts = dict(enumerate(hosts))
    defaults = settings.get(
        "defaults",
        {
            'host': 'localhost',
            'port': 6379,
        })
    self._analytics_backend = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': nydus_hosts,
        'defaults': defaults,
    })
    super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
    """
    Delete every key belonging to this analytics backend (keys carrying
    this backend's prefix) from redis.

    .. warning::
        Very expensive and destructive operation. Use with caution.
    """
    # keys() returns one list per cluster node; flatten before filtering.
    all_keys = self._analytics_backend.keys()
    for key in itertools.chain(*all_keys):
        with self._analytics_backend.map() as conn:
            if key.startswith(self._prefix):
                conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
    """
    Increment the overall counter for ``metric`` without any day/week/month
    breakdown; metrics tracked only this way cannot be queried by period.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param inc_amt: The amount to increment the ``metric`` by
    :return: ``True`` if successful ``False`` otherwise
    """
    counter_key = "%s:analy:%s:count:%s" % (self._prefix, unique_identifier, metric)
    return self._analytics_backend.incr(counter_key, inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
    """
    Tracks a metric for a specific ``unique_identifier`` for a certain date.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing for tracking of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track. This can be a list or a string.
    :param date: A python date object indicating when this event occurred. Defaults to today.
    :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
    :return: ``True`` if successful ``False`` otherwise
    """
    # Normalize to lists so single values and collections share one code
    # path (Python 2 style type checks).
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    if date is None:
        date = datetime.date.today()
    # All increments are queued on one pipelined connection.
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            closest_monday = self._get_closest_week(date)
            # Despite the name, this key is bucketed per year and also holds
            # the monthly counters.
            hash_key_weekly = self._get_weekly_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
                results.append(
                    [
                        # Daily, weekly and monthly buckets plus the lifetime
                        # counter are all bumped together.
                        conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
                        conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
                        conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
                        conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
                    ]
                )
    return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
    """
    Return ``metric`` for ``unique_identifier`` segmented by day starting
    from ``from_date``.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of days to retrieve starting from ``from_date``
    """
    pipeline = kwargs.get("connection", None)
    # One entry per day in the requested window.
    day_gen = (from_date + datetime.timedelta(days=i) for i in itertools.count())
    series = list(itertools.islice(day_gen, limit))
    key_dates = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
    fields = [self._get_daily_metric_name(metric, day) for day in series]
    fetch = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, key_date), fields) for key_date in key_dates]
    if pipeline is not None:
        raw = fetch(pipeline)
    else:
        with self._analytics_backend.map() as conn:
            raw = fetch(conn)
    return self._parse_and_process_metrics(series, raw)
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Return ``metric`` for ``unique_identifier`` segmented by week starting
    from the Monday of the week containing ``from_date``.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of weeks to retrieve starting from ``from_date``
    """
    pipeline = kwargs.get("connection", None)
    week_start = self._get_closest_week(from_date)
    key_dates = self._get_weekly_date_range(week_start, datetime.timedelta(weeks=limit))
    # One Monday per requested week.
    monday_gen = (week_start + datetime.timedelta(days=i) for i in itertools.count(step=7))
    series = list(itertools.islice(monday_gen, limit))
    fields = [self._get_weekly_metric_name(metric, monday) for monday in series]
    fetch = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, key_date), fields) for key_date in key_dates]
    if pipeline is not None:
        raw = fetch(pipeline)
    else:
        with self._analytics_backend.map() as conn:
            raw = fetch(conn)
    return self._parse_and_process_metrics(series, raw)
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
    """
    Retrieve several metrics in one pipelined round trip.

    :param metric_identifiers: list of ``(unique_identifier, metric_name)`` tuples
        identifying which metrics to fetch, e.g.
        ``[('user:1', 'people_invited'), ('user:2', 'comments_posted')]``
    :param from_date: A python date object
    :param limit: The total number of periods to retrieve starting from ``from_date``
    :param group_by: Aggregation granularity: ``day``, ``week`` or ``month``
    """
    dispatch = {
        "day": self.get_metric_by_day,
        "week": self.get_metric_by_week,
        "month": self.get_metric_by_month,
    }
    try:
        fetch = dispatch[group_by.lower()]
    except KeyError:
        raise Exception("Allowed values for group_by are day, week or month.")
    raw = []
    # Share one pipelined connection so every fetch is batched together.
    with self._analytics_backend.map() as conn:
        for uid, metric_name in metric_identifiers:
            raw.append(fetch(uid, metric_name, from_date, limit=limit, connection=conn))
    # Parsing must happen after the pipeline context has executed.
    return [self._parse_and_process_metrics(series, metrics) for series, metrics in raw]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Gets the count for the ``metric`` for ``unique_identifier``. You can
    specify a ``start_date`` and an ``end_date`` to only get metrics within
    that time range.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Get the specified metrics after this date
    :param end_date: Get the specified metrics before this date
    :return: The count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        # Normalize the order of the bounds, then coerce plain dates to
        # datetimes so the rrule arithmetic below sees uniform types.
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # Every 1st-of-month that falls inside the range.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        # With whole months in the middle it is cheaper to read monthly
        # aggregates plus daily partials at both ends of the range.
        if len(monthly_metrics_dates) >= 3:
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            # Raw pipeline results can only be parsed after the map() context
            # has executed the queued commands.
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short range: just sum the daily buckets directly.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        # No range given: read the single lifetime counter.
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # A missing key comes back as None; int(None) raises TypeError.
            result = 0
    return result
def get_counts(self, metric_identifiers, **kwargs):
    """
    Retrieve several lifetime counters as efficiently as possible.

    :param metric_identifiers: a list of ``(unique_identifier, metric_name)``
        tuples identifying which metrics to retrieve, e.g.
        ``[('user:1', 'people_invited'), ('user:2', 'comments_posted')]``
    """
    raw_counts = [
        self.get_count(uid, metric_name, **kwargs)
        for uid, metric_name in metric_identifiers]
    parsed_results = []
    for value in raw_counts:
        # Missing counters come back as None; treat them as zero.
        try:
            parsed_results.append(int(value))
        except TypeError:
            parsed_results.append(0)
    return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
    """
    Sets the count for the ``metric`` for ``unique_identifier``.

    You must specify a ``date`` for the ``count`` to be set on. Useful for
    resetting a metric count to 0 or decrementing a metric.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing for the setting of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param date: Sets the specified metrics for this date
    :param count: Sets the specified metrics to value of count
    :param sync_agg: Boolean used to determine if week and month metrics should be updated
    :param update_counter: Boolean used to determine if overall counter should be updated
    """
    # Normalize both arguments to lists so the loops below handle the
    # single-value and multi-value cases uniformly (Python 2 type checks).
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                if update_counter:  # updates overall counter for metric
                    # Adjust the lifetime counter by the delta between the new
                    # count and the day's previous count.
                    # NOTE(review): these reads/writes use the backend directly
                    # (not ``conn``), so they bypass the pipeline — confirm
                    # this interleaving is intended.
                    overall_count = self.get_count(uid, single_metric)
                    day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
                    self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
                results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
    if sync_agg:
        # Recompute the affected week/month counters from the daily data.
        self.sync_agg_metric(unique_identifier, metric, date, date)
    return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recompute the weekly and monthly counters for ``metric`` for
    ``unique_identifier`` from the daily counters over
    [``start_date``, ``end_date``]. Useful after ``set_metric_by_day``.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    # Both aggregates derive from the same daily buckets.
    for resync in (self.sync_week_metric, self.sync_month_metric):
        resync(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recompute the weekly counters for ``metric`` for ``unique_identifier``
    from the daily counters, for every week touched by
    [``start_date``, ``end_date``]. Useful after ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing several metrics for several identifiers to be synced
    in one call. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    # Normalize to lists (Python 2 style type checks).
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    closest_monday_from_date = self._get_closest_week(start_date)
    num_weeks = self._num_weeks(start_date, end_date)
    # Monday of every week in the range. (The previously computed
    # _get_weekly_date_range value was never used and has been removed.)
    weeks_to_update = [closest_monday_from_date + datetime.timedelta(days=7 * i) for i in range(num_weeks)]
    for uid in unique_identifier:
        for single_metric in metric:
            for week in weeks_to_update:
                # A week's counter is the sum of its seven daily buckets.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
                week_counter = sum(series_results.values())
                hash_key_weekly = self._get_weekly_metric_key(uid, week)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recalculate the monthly counters for the ``metric`` for ``unique_identifier``
    from the daily counters of every month touched by the date range. Useful for
    repairing the month aggregates after using ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and ``metric``
    allowing for the syncing of multiple metrics for multiple unique_identifiers
    efficiently. Not all backends may support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    #normalise both arguments to sequences so single values and lists share one code path
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    num_months = self._num_months(start_date, end_date)
    first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
    month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    #generate a list of first_of_month's in between the start date and the end date
    months_to_update = list(itertools.islice(month_date_generator, num_months))
    for uid in unique_identifier:
        for single_metric in metric:
            for month in months_to_update:
                #a month's counter is the sum of the daily counters for every day in that month
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
                month_counter = sum([value for key, value in series_results.items()])
                #monthly counters live in the same per-year hash as the weekly ones
                hash_key_monthly = self._get_weekly_metric_key(uid, month)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
    """
    Issue the pipelined reads ``get_count`` needs for a date range: whole
    months in the middle plus per-day reads for the partial spans at either
    end. Returns the raw ``(series, results)`` pairs for each of the three
    parts; the caller is expected to run them through
    ``_parse_and_process_metrics`` after the pipeline resolves.
    """
    #days before the first 1st-of-month in range, and days from the last 1st-of-month to the end
    start_diff = monthly_metrics_dates[0] - start_date
    end_diff = end_date - monthly_metrics_dates[-1]
    #bulk of the range is fetched month-by-month for efficiency
    monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
    #get the difference from the date to the start date and get all dates in between
    starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
    ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
    return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.get_metrics
|
python
|
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
    """
    Retrieves multiple metrics as efficiently as possible.

    :param metric_identifiers: a list of ``(unique_identifier, metric_name)`` tuples
        identifying which metrics to retrieve
    :param from_date: A python date object
    :param limit: The total number of periods to retrieve starting from ``from_date``
    :param group_by: The type of aggregation to perform on the metric.
        Choices are: ``day``, ``week`` or ``month``
    :raises Exception: if ``group_by`` is not one of the allowed aggregations
    """
    results = []
    #validation of types:
    allowed_types = {
        "day": self.get_metric_by_day,
        "week": self.get_metric_by_week,
        "month": self.get_metric_by_month,
    }
    if group_by.lower() not in allowed_types:
        raise Exception("Allowed values for group_by are day, week or month.")
    group_by_func = allowed_types[group_by.lower()]
    #pass a connection object so we can pipeline as much as possible
    with self._analytics_backend.map() as conn:
        for unique_identifier, metric in metric_identifiers:
            results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
    #we have to merge all the metric results afterwards because we are using a custom context processor
    return [
        self._parse_and_process_metrics(series, list_of_metrics) for
        series, list_of_metrics in results]
|
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name)` identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L315-L344
| null |
class Redis(BaseAnalyticsBackend):
    """
    Analytics backend backed by a Redis cluster (via nydus).

    Counters are stored in Redis hashes:

    * daily counters live in a per-user, per-month hash
      (see ``_get_daily_metric_key`` / ``_get_daily_metric_name``);
    * weekly and monthly counters share a per-user, per-year hash
      (see ``_get_weekly_metric_key``);
    * an overall running counter per (user, metric) lives in a plain key
      (``analy:<uid>:count:<metric>``).

    All keys are namespaced with ``self._prefix`` (set by the base class).
    """

    def __init__(self, settings, **kwargs):
        """
        Build the nydus Redis cluster from ``settings``.

        :param settings: dict with a required ``hosts`` list and an optional
            ``defaults`` dict applied to every host
        :raises Exception: if no redis hosts are specified
        """
        nydus_hosts = {}
        hosts = settings.get("hosts", [])
        if not hosts:
            raise Exception("No redis hosts specified")
        for i, host in enumerate(hosts):
            nydus_hosts[i] = host
        defaults = settings.get(
            "defaults",
            {
                'host': 'localhost',
                'port': 6379,
            })
        self._analytics_backend = create_cluster({
            'engine': 'nydus.db.backends.redis.Redis',
            'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
            'hosts': nydus_hosts,
            'defaults': defaults,
        })
        super(Redis, self).__init__(settings, **kwargs)

    def _get_closest_week(self, metric_date):
        """
        Gets the closest monday to the date provided.
        """
        #find the offset to the closest monday
        days_after_monday = metric_date.isoweekday() - 1
        return metric_date - datetime.timedelta(days=days_after_monday)

    def _get_daily_metric_key(self, unique_identifier, metric_date):
        """
        Redis key for a daily metric hash (one hash per user per month).
        """
        return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)

    def _get_weekly_metric_key(self, unique_identifier, metric_date):
        """
        Redis key for a weekly metric hash (one hash per user per year).
        Monthly counters are stored in this hash as well.
        """
        return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)

    def _get_daily_metric_name(self, metric, metric_date):
        """
        Hash field name for a daily metric counter.
        """
        return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)

    def _get_weekly_metric_name(self, metric, metric_date):
        """
        Hash field name for a weekly metric counter (keyed by the week's monday).
        """
        return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)

    def _get_monthly_metric_name(self, metric, metric_date):
        """
        Hash field name for a monthly metric counter.
        """
        return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)

    def _get_daily_date_range(self, metric_date, delta):
        """
        Get the range of months that we need to use as keys to scan redis.
        """
        dates = [metric_date]
        start_date = metric_date
        end_date = metric_date + delta
        while start_date.month < end_date.month or start_date.year < end_date.year:
            days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
            #shift along to the next month as one of the months we will have to see. We don't care that the exact date
            #is the 1st in each subsequent date range as we only care about the year and the month
            start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
            dates.append(start_date)
        return dates

    def _get_weekly_date_range(self, metric_date, delta):
        """
        Gets the range of years that we need to use as keys to get metrics from redis.
        """
        dates = [metric_date]
        end_date = metric_date + delta
        #Figure out how many years our metric range spans
        spanning_years = end_date.year - metric_date.year
        for i in range(spanning_years):
            #for the weekly keys, we only care about the year
            dates.append(
                datetime.date(
                    year=metric_date.year + (i + 1), month=1, day=1))
        return dates

    def _parse_and_process_metrics(self, series, list_of_metrics):
        """
        Normalise raw hash-read results into ``(set_of_date_strings,
        {date_string: count})``, summing counts for the same date across all
        result lists. Missing (``None``) values are treated as 0.
        """
        formatted_result_list = []
        series = [dt.strftime("%Y-%m-%d") for dt in series]
        for result in list_of_metrics:
            values = {}
            for index, date_string in enumerate(series):
                values[date_string] = int(result[index]) if result[index] is not None else 0
            formatted_result_list.append(values)
        #merge the per-key dicts, adding counts for dates that appear in several of them
        merged_values = reduce(
            lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
            formatted_result_list)
        return set(series), merged_values

    def _num_weeks(self, start_date, end_date):
        #number of calendar weeks touched, counting from the monday on/before start_date
        closest_monday = self._get_closest_week(start_date)
        return ((end_date - closest_monday).days / 7) + 1

    def _num_months(self, start_date, end_date):
        #number of calendar months touched by the range, inclusive
        return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1

    def clear_all(self):
        """
        Deletes all ``sandsnake`` related data from redis.

        .. warning::

            Very expensive and destructive operation. Use with caution.
        """
        keys = self._analytics_backend.keys()
        for key in itertools.chain(*keys):
            with self._analytics_backend.map() as conn:
                #only delete keys belonging to this backend's namespace
                if key.startswith(self._prefix):
                    conn.delete(key)

    def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
        """
        Tracks a metric just by count. If you track a metric this way, you won't be able
        to query the metric by day, week or month.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
        :return: ``True`` if successful ``False`` otherwise
        """
        return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)

    def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
        """
        Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
        lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
        unique_identifiers efficiently. Not all backends may support this.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track. This can be a list or a string.
        :param date: A python date object indicating when this event occurred. Defaults to today.
        :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
        :return: ``True`` if successful ``False`` otherwise
        """
        #normalise both arguments to sequences so single values and lists share one code path
        metric = [metric] if isinstance(metric, basestring) else metric
        unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
        results = []
        if date is None:
            date = datetime.date.today()
        with self._analytics_backend.map() as conn:
            for uid in unique_identifier:
                hash_key_daily = self._get_daily_metric_key(uid, date)
                closest_monday = self._get_closest_week(date)
                hash_key_weekly = self._get_weekly_metric_key(uid, date)
                for single_metric in metric:
                    daily_metric_name = self._get_daily_metric_name(single_metric, date)
                    weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
                    monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
                    #increment the daily, weekly, monthly and overall counters in one pipeline
                    results.append(
                        [
                            conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
                            conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
                            conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
                            conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
                        ]
                    )
        return results

    def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
        """
        Returns the ``metric`` for ``unique_identifier`` segmented by day
        starting from ``from_date``

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param from_date: A python date object
        :param limit: The total number of days to retrieve starting from ``from_date``
        """
        #an optional pipeline connection may be passed in to batch with other reads
        conn = kwargs.get("connection", None)
        date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
        metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
        #generate a list of mondays in between the start date and the end date
        series = list(itertools.islice(date_generator, limit))
        metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
        metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
            metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
        if conn is not None:
            results = metric_func(conn)
        else:
            with self._analytics_backend.map() as conn:
                results = metric_func(conn)
        series, results = self._parse_and_process_metrics(series, results)
        return series, results

    def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
        """
        Returns the ``metric`` for ``unique_identifier`` segmented by week
        starting from ``from_date``

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param from_date: A python date object
        :param limit: The total number of weeks to retrieve starting from ``from_date``
        """
        #an optional pipeline connection may be passed in to batch with other reads
        conn = kwargs.get("connection", None)
        closest_monday_from_date = self._get_closest_week(from_date)
        metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
        date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
        #generate a list of mondays in between the start date and the end date
        series = list(itertools.islice(date_generator, limit))
        metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
        metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
            metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
        if conn is not None:
            results = metric_func(conn)
        else:
            with self._analytics_backend.map() as conn:
                results = metric_func(conn)
        series, results = self._parse_and_process_metrics(series, results)
        return series, results

    def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
        """
        Returns the ``metric`` for ``unique_identifier`` segmented by month
        starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
        month specified in ``from_date``

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param from_date: A python date object
        :param limit: The total number of months to retrieve starting from ``from_date``
        """
        #an optional pipeline connection may be passed in to batch with other reads
        conn = kwargs.get("connection", None)
        first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
        metric_key_date_range = self._get_weekly_date_range(
            first_of_month, relativedelta(months=limit))
        date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
        #generate a list of first_of_month's in between the start date and the end date
        series = list(itertools.islice(date_generator, limit))
        metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
        #monthly counters live in the same per-year hash as the weekly ones
        metric_func = lambda conn: [conn.hmget(
            self._get_weekly_metric_key(
                unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
        if conn is not None:
            results = metric_func(conn)
        else:
            with self._analytics_backend.map() as conn:
                results = metric_func(conn)
        series, results = self._parse_and_process_metrics(series, results)
        return series, results

    def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
        """
        Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
        and an ``end_date``, to only get metrics within that time range.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param start_date: Get the specified metrics after this date
        :param end_date: Get the specified metrics before this date
        :return: The count for the metric, 0 otherwise
        """
        result = None
        if start_date and end_date:
            #normalise argument order and coerce plain dates to datetimes
            start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
            start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
            end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
            #every 1st-of-month that falls inside the range
            monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
            #We can sorta optimize this by getting most of the data by month
            if len(monthly_metrics_dates) >= 3:
                with self._analytics_backend.map() as conn:
                    monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                        conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
                monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
                starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
                ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
                result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
            else:
                #short range: just sum the daily counters
                diff = end_date - start_date
                metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
                result = sum(metric_results[1].values())
        else:
            #no range given: read the single running counter (TypeError when the key is missing)
            try:
                result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
            except TypeError:
                result = 0
        return result

    def get_counts(self, metric_identifiers, **kwargs):
        """
        Retrieves multiple metrics as efficiently as possible.

        :param metric_identifiers: a list of tuples of the form ``(unique_identifier, metric_name)`` identifying which metrics to retrieve.
            For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
        """
        parsed_results = []
        results = [
            self.get_count(unique_identifier, metric, **kwargs) for
            unique_identifier, metric in metric_identifiers]
        for result in results:
            #coerce missing/non-numeric replies to 0
            try:
                parsed_result = int(result)
            except TypeError:
                parsed_result = 0
            parsed_results.append(parsed_result)
        return parsed_results

    def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
        """
        Sets the count for the ``metric`` for ``unique_identifier``.
        You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
        The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
        multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param date: Sets the specified metrics for this date
        :param count: Sets the specified metrics to value of count
        :param sync_agg: Boolean used to determine if week and month metrics should be updated
        :param update_counter: Boolean used to determine if overall counter should be updated
        """
        #normalise both arguments to sequences so single values and lists share one code path
        metric = [metric] if isinstance(metric, basestring) else metric
        unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
        results = []
        with self._analytics_backend.map() as conn:
            for uid in unique_identifier:
                hash_key_daily = self._get_daily_metric_key(uid, date)
                for single_metric in metric:
                    daily_metric_name = self._get_daily_metric_name(single_metric, date)
                    if update_counter:  # updates overall counter for metric
                        #adjust the running total by the delta between the new and old daily count
                        overall_count = self.get_count(uid, single_metric)
                        day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
                        self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
                    results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
        if sync_agg:
            #recompute the week/month aggregates that contain ``date``
            self.sync_agg_metric(unique_identifier, metric, date, date)
        return results

    def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
        """
        Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
        the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
        The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
        multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param start_date: Date syncing starts
        :param end_date: Date syncing ends
        """
        self.sync_week_metric(unique_identifier, metric, start_date, end_date)
        self.sync_month_metric(unique_identifier, metric, start_date, end_date)

    def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
        """
        Uses the count for each day in the date range to recalculate the counters for the weeks for
        the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
        after using set_metric_by_day.
        The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
        multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param start_date: Date syncing starts
        :param end_date: Date syncing ends
        """
        metric = [metric] if isinstance(metric, basestring) else metric
        unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
        closest_monday_from_date = self._get_closest_week(start_date)
        num_weeks = self._num_weeks(start_date, end_date)
        #NOTE(review): metric_key_date_range is computed but never used below
        metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
        week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
        #generate a list of mondays in between the start date and the end date
        weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
        for uid in unique_identifier:
            for single_metric in metric:
                for week in weeks_to_update:
                    #a week's counter is simply the sum of its seven daily counters
                    _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
                    week_counter = sum([value for key, value in series_results.items()])
                    hash_key_weekly = self._get_weekly_metric_key(uid, week)
                    weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
                    with self._analytics_backend.map() as conn:
                        conn.hset(hash_key_weekly, weekly_metric_name, week_counter)

    def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
        """
        Uses the count for each day in the date range to recalculate the counters for the months for
        the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
        The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
        multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

        :param unique_identifier: Unique string identifying the object this metric is for
        :param metric: A unique name for the metric you want to track
        :param start_date: Date syncing starts
        :param end_date: Date syncing ends
        """
        metric = [metric] if isinstance(metric, basestring) else metric
        unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
        num_months = self._num_months(start_date, end_date)
        first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
        #NOTE(review): metric_key_date_range is computed but never used below
        metric_key_date_range = self._get_weekly_date_range(
            first_of_month, relativedelta(months=num_months))
        month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
        #generate a list of first_of_month's in between the start date and the end date
        months_to_update = list(itertools.islice(month_date_generator, num_months))
        for uid in unique_identifier:
            for single_metric in metric:
                for month in months_to_update:
                    #a month's counter is the sum of the daily counters for every day in that month
                    _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
                    month_counter = sum([value for key, value in series_results.items()])
                    hash_key_monthly = self._get_weekly_metric_key(uid, month)
                    monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                    with self._analytics_backend.map() as conn:
                        conn.hset(hash_key_monthly, monthly_metric_name, month_counter)

    def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
        """
        Issue the pipelined reads ``get_count`` needs for a date range: whole
        months in the middle plus per-day reads for the partial spans at
        either end. Returns the raw ``(series, results)`` pairs for each part.
        """
        start_diff = monthly_metrics_dates[0] - start_date
        end_diff = end_date - monthly_metrics_dates[-1]
        monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
        #get the difference from the date to the start date and get all dates in between
        starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
        ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
        return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.get_count
|
python
|
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
    and an ``end_date``, to only get metrics within that time range.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Get the specified metrics after this date
    :param end_date: Get the specified metrics before this date
    :return: The count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        #normalise argument order and coerce plain dates to datetimes
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        #every 1st-of-month that falls inside the range
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        #We can sorta optimize this by getting most of the data by month
        if len(monthly_metrics_dates) >= 3:
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            #short range: just sum the daily counters
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        #no range given: read the single running counter (TypeError when the key is missing)
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            result = 0
    return result
|
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the specified metrics before this date
:return: The count for the metric, 0 otherwise
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L346-L389
|
[
"def _parse_and_process_metrics(self, series, list_of_metrics):\n formatted_result_list = []\n series = [dt.strftime(\"%Y-%m-%d\") for dt in series]\n for result in list_of_metrics:\n values = {}\n for index, date_string in enumerate(series):\n values[date_string] = int(result[index]) if result[index] is not None else 0\n formatted_result_list.append(values)\n\n merged_values = reduce(\n lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),\n formatted_result_list)\n\n return set(series), merged_values\n",
"def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):\n \"\"\"\n Returns the ``metric`` for ``unique_identifier`` segmented by day\n starting from``from_date``\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param from_date: A python date object\n :param limit: The total number of days to retrive starting from ``from_date``\n \"\"\"\n conn = kwargs.get(\"connection\", None)\n date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())\n metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))\n #generate a list of mondays in between the start date and the end date\n series = list(itertools.islice(date_generator, limit))\n\n metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]\n\n metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \\\n metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]\n\n if conn is not None:\n results = metric_func(conn)\n else:\n with self._analytics_backend.map() as conn:\n results = metric_func(conn)\n series, results = self._parse_and_process_metrics(series, results)\n\n return series, results\n",
"def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):\n start_diff = monthly_metrics_dates[0] - start_date\n end_diff = end_date - monthly_metrics_dates[-1]\n\n monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)\n\n #get the difference from the date to the start date and get all dates in between\n starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)\n ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)\n\n return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results\n"
] |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
# Normalise pipelined hmget results into (dates, {date: total count}).
#
# ``series``: iterable of date objects, one per requested hash field.
# ``list_of_metrics``: one list of raw redis values per scanned key;
# each inner list is positionally aligned with ``series``.
# Returns (set of "YYYY-MM-DD" strings, dict summing counts per date).
#
# NOTE(review): ``reduce`` with no initializer raises TypeError when
# ``list_of_metrics`` is empty -- presumably callers always pass at
# least one result list; confirm before relying on that.
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
# Missing hash fields come back as None -> treat as a count of 0.
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
# Merge the per-key dicts by summing counts for matching dates.
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
    """
    Deletes all ``sandsnake`` related data from redis.

    .. warning::
        Very expensive and destructive operation. Use with caution.
    """
    keys = self._analytics_backend.keys()
    # Open a single pipelined connection instead of one map() context per
    # key, and filter to our prefix before issuing any command.
    with self._analytics_backend.map() as conn:
        for key in itertools.chain(*keys):
            if key.startswith(self._prefix):
                conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
    """
    Track a metric by overall count only. Metrics tracked this way cannot
    be queried by day, week or month.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
    :return: ``True`` if successful ``False`` otherwise
    """
    counter_key = self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric)
    return self._analytics_backend.incr(counter_key, inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
# Normalise both arguments to lists so the loops below handle the
# single and multi cases uniformly (Python 2 basestring/types idioms).
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
# All increments for all (uid, metric) pairs go through one pipelined
# connection; ``results`` holds nydus promises until the block exits.
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
# Note: monthly fields live in the same year-keyed hash as
# the weekly fields (hash_key_weekly).
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from ``from_date``.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrieve starting from ``from_date``
:param connection: (optional kwarg) reuse an already-open pipelined connection
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
# One entry per calendar month the range spans -> one monthly hash each.
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate the list of consecutive day dates covered by ``limit``
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
# One hmget per monthly hash; every hash is asked for all day fields and
# months that lack a field simply return None for it.
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from ``from_date`` (snapped back to the closest Monday).
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrieve starting from ``from_date``
:param connection: (optional kwarg) reuse an already-open pipelined connection
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
# One entry per year-keyed hash the week range spans.
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
:param connection: (optional kwarg) reuse an already-open pipelined connection
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
# Monthly fields are stored in the same year-keyed hashes as the weekly
# fields, hence the reuse of the "weekly" key/date-range helpers here.
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of periods (days/weeks/months) to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
# NOTE(review): inside the map() block the hmget results are deferred
# nydus promises, so the merge is repeated here after the pipeline has
# resolved -- confirm against nydus' promise semantics before refactoring.
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_counts(self, metric_identifiers, **kwargs):
    """
    Retrieves multiple overall metric counts as efficiently as possible.

    :param metric_identifiers: a list of tuples of the form
        ``(unique_identifier, metric_name)`` identifying which metrics to
        retrieve. For example [('user:1', 'people_invited',),
        ('user:2', 'people_invited',), ('user:1', 'comments_posted',),
        ('user:2', 'comments_posted',)]
    """
    parsed_results = []
    for unique_identifier, metric in metric_identifiers:
        raw_count = self.get_count(unique_identifier, metric, **kwargs)
        try:
            parsed_results.append(int(raw_count))
        except TypeError:
            # Counters that were never incremented come back as None.
            parsed_results.append(0)
    return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the specified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
# Normalise both arguments to lists (Python 2 basestring/types idioms).
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
# Adjust the running total by the delta between the new
# count and the day's previous count.
overall_count = self.get_count(uid, single_metric)
# [1] is the merged {date: count} dict; popitem yields its
# single (date_string, count) pair.
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
# NOTE(review): this issues a direct (non-pipelined) set on
# the backend while ``conn`` is still open -- presumably
# intentional so the read-modify-write happens immediately;
# confirm ordering against the pipelined hset below.
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
# Recompute the week/month rollups that include ``date``.
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recompute the weekly and monthly counters from the daily counts in
    [start_date, end_date] for the ``metric`` of ``unique_identifier``.
    Useful after ``set_metric_by_day``. The redis backend supports lists
    for both ``unique_identifier`` and ``metric``; not all backends may
    support this.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    # Weeks first, then months -- matching the original call order.
    for sync in (self.sync_week_metric, self.sync_month_metric):
        sync(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing ends
"""
# Normalise both arguments to lists (Python 2 basestring/types idioms).
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
# NOTE(review): metric_key_date_range is computed but never used below.
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
# Re-sum the 7 daily counts of this week and overwrite the
# weekly hash field with the total.
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing ends
"""
# Normalise both arguments to lists (Python 2 basestring/types idioms).
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
# NOTE(review): metric_key_date_range is computed but never used below.
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
# Re-sum every daily count of this month and overwrite the
# monthly hash field with the total. ``monthrange`` here is the
# bare name (vs calendar.monthrange used elsewhere) --
# presumably imported at the top of the file.
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
# Monthly fields live in the year-keyed "weekly" hash.
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
# Pipelined fetch used by get_count for long date ranges: bulk of the
# range is read as whole months, with the partial leading days (before
# the first full month) and trailing days (after the last month marker)
# read day-by-day. Returns the three (series, results) pairs unresolved;
# the caller parses them after the pipeline resolves.
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
# Empty placeholder when the range starts exactly on a month boundary.
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.get_counts
|
python
|
def get_counts(self, metric_identifiers, **kwargs):
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
|
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L391-L411
| null |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
"""
Deletes all ``sandsnake`` related data from redis.
.. warning::
Very expensive and destructive operation. Use with causion
"""
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occured. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
    and an ``end_date``, to only get metrics within that time range.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Get the specified metrics after this date
    :param end_date: Get the sepcified metrics before this date
    :return: The count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        # Normalize argument order, then coerce plain dates to datetimes
        # (objects with a ``date`` attribute are already datetimes) so
        # they can be fed into dateutil's rrule below.
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # Every 1st-of-month that falls within the requested range.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        #We can sorta optimize this by getting most of the data by month
        if len(monthly_metrics_dates) >= 3:
            # Fetch whole-month rollups for the middle of the range and
            # daily counters only for the ragged edges, on one pipeline.
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short range: just sum daily counters, inclusive of both ends.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        # No range given: read the lifetime counter key directly.
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # Missing key -> int(None) raises TypeError -> treat as zero.
            result = 0
    return result
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
    """
    Sets the count for the ``metric`` for ``unique_identifier``.
    You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.

    The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
    multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param date: Sets the specified metrics for this date
    :param count: Sets the sepcified metrics to value of count
    :param sync_agg: Boolean used to determine if week and month metrics should be updated
    :param update_counter: Boolean used to determine if overall counter should be updated
    """
    # Normalize scalar arguments into lists so one code path handles both.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                if update_counter: # updates overall counter for metric
                    # Adjust the lifetime counter by the delta between the
                    # new value and the currently stored daily value.
                    # NOTE(review): these reads/writes bypass the pipeline
                    # ``conn`` above and use fresh connections — confirm
                    # ordering guarantees against the nydus backend.
                    overall_count = self.get_count(uid, single_metric)
                    day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
                    self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
                results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
    if sync_agg:
        # Rebuild the week/month rollups that contain ``date``.
        self.sync_agg_metric(unique_identifier, metric, date, date)
    return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
    the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.

    The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
    multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing end
    """
    # Both rollup granularities are rebuilt from the daily counters.
    self.sync_week_metric(unique_identifier, metric, start_date, end_date)
    self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Uses the count for each day in the date range to recalculate the counters for the weeks for
    the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
    after using set_metric_by_day.

    The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
    multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing end
    """
    # Normalize scalar arguments into lists so one code path handles both.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    closest_monday_from_date = self._get_closest_week(start_date)
    num_weeks = self._num_weeks(start_date, end_date)
    # NOTE(review): computed but never used below; _get_weekly_date_range
    # has no visible side effects — candidate for removal.
    metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
    week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
    #generate a list of mondays in between the start date and the end date
    weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
    for uid in unique_identifier:
        for single_metric in metric:
            for week in weeks_to_update:
                # Re-derive the weekly rollup as the sum of its 7 daily
                # counters, then overwrite the stored weekly field.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
                week_counter = sum([value for key, value in series_results.items()])
                hash_key_weekly = self._get_weekly_metric_key(uid, week)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Uses the count for each day in the date range to recalculate the counters for the months for
    the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.

    The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
    multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing end
    """
    # Normalize scalar arguments into lists so one code path handles both.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    num_months = self._num_months(start_date, end_date)
    first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
    # NOTE(review): computed but never used below; candidate for removal.
    metric_key_date_range = self._get_weekly_date_range(
        first_of_month, relativedelta(months=num_months))
    month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    #generate a list of first_of_month's in between the start date and the end date
    months_to_update = list(itertools.islice(month_date_generator, num_months))
    for uid in unique_identifier:
        for single_metric in metric:
            for month in months_to_update:
                # Sum every daily counter in the calendar month
                # (monthrange gives the day count) to rebuild the rollup.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
                month_counter = sum([value for key, value in series_results.items()])
                # Monthly fields live in the same per-year hash as weekly
                # fields, hence the "weekly" key builder.
                hash_key_monthly = self._get_weekly_metric_key(uid, month)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
    """
    Issue the reads for a date-range count on the supplied pipeline ``conn``.

    The range is split into: whole months between the first and last
    1st-of-month, leading partial days before the first 1st-of-month, and
    trailing days from the last 1st-of-month through ``end_date``.

    :param conn: pipelined backend connection all queries are issued on.
    :param monthly_metrics_dates: the 1st-of-month dates inside the range.
    :return: three (series, raw_results) pairs — monthly, leading, trailing.
    """
    start_diff = monthly_metrics_dates[0] - start_date
    end_diff = end_date - monthly_metrics_dates[-1]
    # Whole months, excluding the final (possibly partial) month.
    monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
    #get the difference from the date to the start date and get all dates in between
    # Empty placeholders keep the return shape when there is no leading gap.
    starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
    starting_metric_series, starting_metric_results = starting_metric_series, starting_metric_results
    ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
    return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.set_metric_by_day
|
python
|
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
|
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the sepcified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L413-L448
|
[
"def _get_daily_metric_key(self, unique_identifier, metric_date):\n \"\"\"\n Redis key for daily metric\n \"\"\"\n return self._prefix + \":\" + \"user:%s:analy:%s\" % (unique_identifier, metric_date.strftime(\"%y-%m\"),)\n",
"def _get_daily_metric_name(self, metric, metric_date):\n \"\"\"\n Hash key for daily metric\n \"\"\"\n return \"%s:%s\" % (metric, metric_date.strftime(\"%y-%m-%d\"),)\n",
"def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):\n \"\"\"\n Returns the ``metric`` for ``unique_identifier`` segmented by day\n starting from``from_date``\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param from_date: A python date object\n :param limit: The total number of days to retrive starting from ``from_date``\n \"\"\"\n conn = kwargs.get(\"connection\", None)\n date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())\n metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))\n #generate a list of mondays in between the start date and the end date\n series = list(itertools.islice(date_generator, limit))\n\n metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]\n\n metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \\\n metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]\n\n if conn is not None:\n results = metric_func(conn)\n else:\n with self._analytics_backend.map() as conn:\n results = metric_func(conn)\n series, results = self._parse_and_process_metrics(series, results)\n\n return series, results\n",
"def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):\n \"\"\"\n Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``\n and an ``end_date``, to only get metrics within that time range.\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param start_date: Get the specified metrics after this date\n :param end_date: Get the sepcified metrics before this date\n :return: The count for the metric, 0 otherwise\n \"\"\"\n result = None\n if start_date and end_date:\n start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)\n\n start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())\n end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())\n\n monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))\n\n #We can sorta optimize this by getting most of the data by month\n if len(monthly_metrics_dates) >= 3:\n\n with self._analytics_backend.map() as conn:\n monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(\n conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)\n\n monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)\n starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)\n ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)\n\n result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())\n 
else:\n diff = end_date - start_date\n metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)\n result = sum(metric_results[1].values())\n\n else:\n try:\n result = int(self._analytics_backend.get(self._prefix + \":\" + \"analy:%s:count:%s\" % (unique_identifier, metric,)))\n except TypeError:\n result = 0\n\n return result\n",
"def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):\n \"\"\"\n Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for\n the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.\n\n The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of \n multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param start_date: Date syncing starts\n :param end_date: Date syncing end\n \"\"\"\n self.sync_week_metric(unique_identifier, metric, start_date, end_date)\n self.sync_month_metric(unique_identifier, metric, start_date, end_date)\n"
] |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
    """
    Build a nydus redis cluster from ``settings``.

    :param settings: dict with a ``hosts`` list (one config dict per redis
        node) and an optional ``defaults`` dict applied to every host.
    :raises Exception: if no hosts are configured.
    """
    nydus_hosts = {}
    hosts = settings.get("hosts", [])
    if not hosts:
        raise Exception("No redis hosts specified")
    # nydus expects the host map keyed by numeric index.
    for i, host in enumerate(hosts):
        nydus_hosts[i] = host
    defaults = settings.get(
        "defaults",
        {
            'host': 'localhost',
            'port': 6379,
        })
    # Consistent-hashing router spreads keys across the configured nodes.
    self._analytics_backend = create_cluster({
        'engine': 'nydus.db.backends.redis.Redis',
        'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
        'hosts': nydus_hosts,
        'defaults': defaults,
    })
    super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
    """
    Deletes all ``sandsnake`` related data from redis.

    .. warning::

       Very expensive and destructive operation. Use with causion
    """
    # NOTE(review): docstring says "sandsnake" — presumably copied; this
    # actually deletes every key under this backend's ``self._prefix``.
    # keys() fans out across the cluster (one key list per node), so the
    # lists are flattened with chain().
    keys = self._analytics_backend.keys()
    for key in itertools.chain(*keys):
        with self._analytics_backend.map() as conn:
            if key.startswith(self._prefix):
                conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
    """
    Tracks a metric just by count. If you track a metric this way, you won't be able
    to query the metric by day, week or month.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
    :return: ``True`` if successful ``False`` otherwise
    """
    # Only the lifetime counter key is touched — no daily/weekly hashes.
    counter_key = self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric)
    return self._analytics_backend.incr(counter_key, inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
    """
    Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
    lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track. This can be a list or a string.
    :param date: A python date object indicating when this event occured. Defaults to today.
    :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
    :return: ``True`` if successful ``False`` otherwise
    """
    # Normalize scalar arguments into lists so one code path handles both.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    if date is None:
        date = datetime.date.today()
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            # Weekly fields are named by the week's Monday.
            closest_monday = self._get_closest_week(date)
            hash_key_weekly = self._get_weekly_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
                # All four increments ride on the same pipeline.  The
                # monthly field deliberately lives in the same per-year
                # hash as the weekly field (hash_key_weekly).
                results.append(
                    [
                        conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
                        conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
                        conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
                        conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
                    ]
                )
    return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
    """
    Returns the ``metric`` for ``unique_identifier`` segmented by day
    starting from``from_date``

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of days to retrive starting from ``from_date``
    :return: tuple of (set of date strings, {date string: count} dict)
    """
    # Optional pipelined connection lets callers batch several queries.
    conn = kwargs.get("connection", None)
    date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
    # Per-month redis keys the requested day span may be stored under.
    metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
    #generate a list of mondays in between the start date and the end date
    # NOTE(review): comment above looks copied from the weekly variant —
    # this slices consecutive *days*, not Mondays.
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
    # HMGET all requested fields from every candidate month bucket;
    # missing fields come back as None and are zeroed during parsing.
    metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
        metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Returns the ``metric`` for ``unique_identifier`` segmented by week
    starting from``from_date``

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of weeks to retrive starting from ``from_date``
    :return: tuple of (set of date strings, {date string: count} dict)
    """
    # Optional pipelined connection lets callers batch several queries.
    conn = kwargs.get("connection", None)
    # Weeks are keyed by their Monday, so snap the start date back first.
    closest_monday_from_date = self._get_closest_week(from_date)
    # Per-year redis keys the requested week span may be stored under.
    metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
    date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
    #generate a list of mondays in between the start date and the end date
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
    metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
        metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Returns the ``metric`` for ``unique_identifier`` segmented by month
    starting from``from_date``. It will retrieve metrics data starting from the 1st of the
    month specified in ``from_date``

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of months to retrive starting from ``from_date``
    :return: tuple of (set of date strings, {date string: count} dict)
    """
    # Optional pipelined connection lets callers batch several queries.
    conn = kwargs.get("connection", None)
    # Months always start from the 1st, whatever day from_date carries.
    first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
    # Monthly fields live in the per-year hashes, so reuse the weekly
    # (per-year) key-range helper.
    metric_key_date_range = self._get_weekly_date_range(
        first_of_month, relativedelta(months=limit))
    date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    #generate a list of first_of_month's in between the start date and the end date
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
    metric_func = lambda conn: [conn.hmget(
        self._get_weekly_metric_key(
            unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
    """
    Retrieves a multiple metrics as efficiently as possible.

    :param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
        For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
    :param from_date: A python date object
    :param limit: The total number of buckets to retrieve starting from ``from_date``
    :param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
    :raises Exception: if ``group_by`` is not one of the allowed values
    """
    results = []
    #validation of types:
    # Dispatch table: aggregation granularity -> implementing method.
    allowed_types = {
        "day": self.get_metric_by_day,
        "week": self.get_metric_by_week,
        "month": self.get_metric_by_month,
    }
    if group_by.lower() not in allowed_types:
        raise Exception("Allowed values for group_by are day, week or month.")
    group_by_func = allowed_types[group_by.lower()]
    #pass a connection object so we can pipeline as much as possible
    with self._analytics_backend.map() as conn:
        for unique_identifier, metric in metric_identifiers:
            results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
    #we have to merge all the metric results afterwards because we are using a custom context processor
    # NOTE(review): inside ``map()`` the backend presumably returns deferred
    # results, so parsing must be re-run here after the pipeline flushes.
    return [
        self._parse_and_process_metrics(series, list_of_metrics) for
        series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
    and an ``end_date``, to only get metrics within that time range.

    :param unique_identifier: Unique string indetifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Get the specified metrics after this date
    :param end_date: Get the sepcified metrics before this date
    :return: The count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        # Normalize argument order, then coerce plain dates to datetimes
        # (objects with a ``date`` attribute are already datetimes) so
        # they can be fed into dateutil's rrule below.
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # Every 1st-of-month that falls within the requested range.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        #We can sorta optimize this by getting most of the data by month
        if len(monthly_metrics_dates) >= 3:
            # Fetch whole-month rollups for the middle of the range and
            # daily counters only for the ragged edges, on one pipeline.
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short range: just sum daily counters, inclusive of both ends.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        # No range given: read the lifetime counter key directly.
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # Missing key -> int(None) raises TypeError -> treat as zero.
            result = 0
    return result
def get_counts(self, metric_identifiers, **kwargs):
    """
    Retrieves a multiple metrics as efficiently as possible.

    :param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
        For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
    :return: list of integer counts, one per identifier tuple, in order.
    """
    parsed_results = []
    for unique_identifier, metric in metric_identifiers:
        raw_count = self.get_count(unique_identifier, metric, **kwargs)
        # Defensive coercion: an un-intable result counts as zero.
        try:
            parsed_results.append(int(raw_count))
        except TypeError:
            parsed_results.append(0)
    return parsed_results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recompute the weekly and monthly counters for ``metric`` for
    ``unique_identifier`` from the per-day counters in the given range.
    Useful after ``set_metric_by_day`` has rewritten daily values.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing multiple metrics for multiple identifiers to be
    synced efficiently. Not all backends may support this.

    :param unique_identifier: unique string (or list) identifying the object this metric is for
    :param metric: a unique name (or list of names) for the metric you want to track
    :param start_date: date syncing starts
    :param end_date: date syncing ends
    """
    # Weekly first, then monthly — each recomputes its own buckets.
    for sync in (self.sync_week_metric, self.sync_month_metric):
        sync(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recalculate the weekly counters for ``metric`` for ``unique_identifier``
    from the per-day counters in the given date range. Useful for updating
    the counters after using ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing the syncing of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: unique string (or list) identifying the object this metric is for
    :param metric: a unique name (or list of names) for the metric you want to track
    :param start_date: date syncing starts
    :param end_date: date syncing ends
    """
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    closest_monday_from_date = self._get_closest_week(start_date)
    num_weeks = self._num_weeks(start_date, end_date)
    # NOTE: the original also computed _get_weekly_date_range here, but the
    # result was never used — dead work, removed.

    # Mondays of every week overlapping [start_date, end_date].
    week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
    weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
    for uid in unique_identifier:
        for single_metric in metric:
            for week in weeks_to_update:
                # A week's counter is simply the sum of its 7 daily counters.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
                week_counter = sum(series_results.values())

                hash_key_weekly = self._get_weekly_metric_key(uid, week)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recalculate the monthly counters for ``metric`` for ``unique_identifier``
    from the per-day counters in the given date range. Useful for updating
    the counters after using ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing the syncing of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: unique string (or list) identifying the object this metric is for
    :param metric: a unique name (or list of names) for the metric you want to track
    :param start_date: date syncing starts
    :param end_date: date syncing ends
    """
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    num_months = self._num_months(start_date, end_date)
    first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
    # NOTE: the original also computed _get_weekly_date_range here, but the
    # result was never used — dead work, removed.

    # First-of-month for every month overlapping [start_date, end_date].
    month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    months_to_update = list(itertools.islice(month_date_generator, num_months))
    for uid in unique_identifier:
        for single_metric in metric:
            for month in months_to_update:
                # A month's counter is the sum of that month's daily counters.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
                month_counter = sum(series_results.values())

                # Monthly counters live in the same per-year hash as weekly ones.
                hash_key_monthly = self._get_weekly_metric_key(uid, month)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
    """
    Queue (on the pipelined ``conn``) the three partial queries that cover
    [start_date, end_date]: whole months in the middle plus the partial day
    ranges before the first and after the last first-of-month.
    """
    first_month = monthly_metrics_dates[0]
    last_month = monthly_metrics_dates[-1]
    days_before = (first_month - start_date).days
    days_after = (end_date - last_month).days

    monthly = self.get_metric_by_month(
        unique_identifier, metric, first_month,
        limit=len(monthly_metrics_dates) - 1, connection=conn)
    # Leading partial range only exists when start_date precedes the first
    # first-of-month in the range.
    if days_before > 0:
        leading = self.get_metric_by_day(
            unique_identifier, metric, start_date, limit=days_before, connection=conn)
    else:
        leading = ([], [[]],)
    trailing = self.get_metric_by_day(
        unique_identifier, metric, last_month, limit=days_after + 1, connection=conn)

    return monthly[0], monthly[1], leading[0], leading[1], trailing[0], trailing[1]
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.sync_agg_metric
|
python
|
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
|
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L450-L464
|
[
"def sync_week_metric(self, unique_identifier, metric, start_date, end_date):\n \"\"\"\n Uses the count for each day in the date range to recalculate the counters for the weeks for\n the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month\n after using set_metric_by_day.\n\n The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of \n multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param start_date: Date syncing starts\n :param end_date: Date syncing end\n \"\"\"\n metric = [metric] if isinstance(metric, basestring) else metric\n unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier\n closest_monday_from_date = self._get_closest_week(start_date)\n num_weeks = self._num_weeks(start_date, end_date)\n metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))\n\n week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))\n #generate a list of mondays in between the start date and the end date\n weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))\n for uid in unique_identifier:\n for single_metric in metric:\n for week in weeks_to_update:\n _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)\n week_counter = sum([value for key, value in series_results.items()])\n\n hash_key_weekly = self._get_weekly_metric_key(uid, week)\n weekly_metric_name = self._get_weekly_metric_name(single_metric, week)\n with self._analytics_backend.map() as conn:\n conn.hset(hash_key_weekly, weekly_metric_name, week_counter)\n",
"def sync_month_metric(self, unique_identifier, metric, start_date, end_date):\n \"\"\"\n Uses the count for each day in the date range to recalculate the counters for the months for\n the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.\n\n The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of \n multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param start_date: Date syncing starts\n :param end_date: Date syncing end\n \"\"\"\n metric = [metric] if isinstance(metric, basestring) else metric\n unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier\n num_months = self._num_months(start_date, end_date)\n first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)\n metric_key_date_range = self._get_weekly_date_range(\n first_of_month, relativedelta(months=num_months))\n\n month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())\n #generate a list of first_of_month's in between the start date and the end date\n months_to_update = list(itertools.islice(month_date_generator, num_months))\n for uid in unique_identifier:\n for single_metric in metric:\n for month in months_to_update:\n _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])\n month_counter = sum([value for key, value in series_results.items()])\n\n hash_key_monthly = self._get_weekly_metric_key(uid, month)\n monthly_metric_name = self._get_monthly_metric_name(single_metric, month)\n with self._analytics_backend.map() as conn:\n conn.hset(hash_key_monthly, 
monthly_metric_name, month_counter)\n"
] |
class Redis(BaseAnalyticsBackend):
    def __init__(self, settings, **kwargs):
        """
        Build a nydus redis cluster from ``settings`` and hand the rest of
        the configuration to the base backend.

        :param settings: dict with a non-empty ``hosts`` list and an
            optional ``defaults`` dict applied to every node
        :raises Exception: when ``settings`` names no redis hosts
        """
        hosts = settings.get("hosts", [])
        if not hosts:
            raise Exception("No redis hosts specified")

        # nydus wants hosts keyed by integer index, not as a plain list.
        nydus_hosts = dict(enumerate(hosts))
        defaults = settings.get(
            "defaults",
            {
                'host': 'localhost',
                'port': 6379,
            })

        self._analytics_backend = create_cluster({
            'engine': 'nydus.db.backends.redis.Redis',
            'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
            'hosts': nydus_hosts,
            'defaults': defaults,
        })

        super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
    """Number of Monday-anchored weeks touched by [start_date, end_date]."""
    closest_monday = self._get_closest_week(start_date)
    # Floor division keeps the count an int on Python 3 as well; on
    # Python 2 int `/` already floored, so behavior is unchanged.
    return ((end_date - closest_monday).days // 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
    """
    Delete every analytics key owned by this backend (keys carrying our
    prefix) from redis.

    .. warning::
        Very expensive and destructive operation. Use with caution.
    """
    # keys() returns one key list per cluster node; flatten them all.
    for key in itertools.chain.from_iterable(self._analytics_backend.keys()):
        with self._analytics_backend.map() as conn:
            if key.startswith(self._prefix):
                conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
    """
    Track a metric by total count only. Metrics tracked this way cannot be
    queried by day, week or month.

    :param unique_identifier: unique string identifying the object this metric is for
    :param metric: a unique name for the metric you want to track
    :param inc_amt: amount to increment the counter by
    :return: ``True`` if successful, ``False`` otherwise
    """
    counter_key = self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric)
    return self._analytics_backend.incr(counter_key, inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
    """
    Track a metric for one or more ``unique_identifier``s on a given date.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric``, so many counters can be bumped in one pipelined round trip.
    Not all backends may support this.

    :param unique_identifier: unique string, or list of strings
    :param metric: metric name, or list of metric names
    :param date: python date the event occurred on (defaults to today)
    :param inc_amt: amount to increment each counter by
    :return: one list of backend replies per (identifier, metric) pair
    """
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    if date is None:
        date = datetime.date.today()

    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            daily_key = self._get_daily_metric_key(uid, date)
            week_start = self._get_closest_week(date)
            # Weekly and monthly counters both live in the per-year hash.
            yearly_key = self._get_weekly_metric_key(uid, date)
            for single_metric in metric:
                results.append([
                    conn.hincrby(daily_key, self._get_daily_metric_name(single_metric, date), inc_amt),
                    conn.hincrby(yearly_key, self._get_weekly_metric_name(single_metric, week_start), inc_amt),
                    conn.hincrby(yearly_key, self._get_monthly_metric_name(single_metric, date), inc_amt),
                    conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt),
                ])
    return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
    """
    Return ``metric`` for ``unique_identifier`` segmented by day, starting
    at ``from_date``.

    :param unique_identifier: unique string identifying the object this metric is for
    :param metric: a unique name for the metric you want to track
    :param from_date: a python date object
    :param limit: total number of days to retrieve starting at ``from_date``
    :param connection: (kwarg) optional pipelined backend connection to reuse
    :return: ``(set of date strings, {date string: count})``
    """
    conn = kwargs.get("connection", None)

    # One bucket per day; one redis key per month covered by the range.
    day_generator = (from_date + datetime.timedelta(days=offset) for offset in itertools.count())
    series = list(itertools.islice(day_generator, limit))
    key_dates = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))

    field_names = [self._get_daily_metric_name(metric, day) for day in series]

    def fetch(pipeline):
        return [pipeline.hmget(self._get_daily_metric_key(unique_identifier, key_date), field_names)
                for key_date in key_dates]

    if conn is not None:
        raw = fetch(conn)
    else:
        with self._analytics_backend.map() as conn:
            raw = fetch(conn)

    return self._parse_and_process_metrics(series, raw)
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Return ``metric`` for ``unique_identifier`` segmented by week, starting
    with the week containing ``from_date``.

    :param unique_identifier: unique string identifying the object this metric is for
    :param metric: a unique name for the metric you want to track
    :param from_date: a python date object
    :param limit: total number of weeks to retrieve
    :param connection: (kwarg) optional pipelined backend connection to reuse
    :return: ``(set of date strings, {date string: count})``
    """
    conn = kwargs.get("connection", None)

    # One bucket per Monday; one redis key per year covered by the range.
    week_start = self._get_closest_week(from_date)
    monday_generator = (week_start + datetime.timedelta(days=offset) for offset in itertools.count(step=7))
    series = list(itertools.islice(monday_generator, limit))
    key_dates = self._get_weekly_date_range(week_start, datetime.timedelta(weeks=limit))

    field_names = [self._get_weekly_metric_name(metric, monday) for monday in series]

    def fetch(pipeline):
        return [pipeline.hmget(self._get_weekly_metric_key(unique_identifier, key_date), field_names)
                for key_date in key_dates]

    if conn is not None:
        raw = fetch(conn)
    else:
        with self._analytics_backend.map() as conn:
            raw = fetch(conn)

    return self._parse_and_process_metrics(series, raw)
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """
    Return ``metric`` for ``unique_identifier`` segmented by month,
    starting from the 1st of the month given in ``from_date``.

    :param unique_identifier: unique string identifying the object this metric is for
    :param metric: a unique name for the metric you want to track
    :param from_date: a python date object
    :param limit: total number of months to retrieve
    :param connection: (kwarg) optional pipelined backend connection to reuse
    :return: ``(set of date strings, {date string: count})``
    """
    conn = kwargs.get("connection", None)

    # One bucket per first-of-month; monthly counters live in per-year keys.
    first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
    month_generator = (first_of_month + relativedelta(months=offset) for offset in itertools.count())
    series = list(itertools.islice(month_generator, limit))
    key_dates = self._get_weekly_date_range(first_of_month, relativedelta(months=limit))

    field_names = [self._get_monthly_metric_name(metric, month_start) for month_start in series]

    def fetch(pipeline):
        return [pipeline.hmget(self._get_weekly_metric_key(unique_identifier, key_date), field_names)
                for key_date in key_dates]

    if conn is not None:
        raw = fetch(conn)
    else:
        with self._analytics_backend.map() as conn:
            raw = fetch(conn)

    return self._parse_and_process_metrics(series, raw)
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
    """
    Retrieve several metrics as efficiently as possible.

    :param metric_identifiers: list of ``(unique_identifier, metric_name)``
        tuples identifying which metrics to retrieve, e.g.
        ``[('user:1', 'people_invited'), ('user:2', 'comments_posted')]``
    :param from_date: a python date object
    :param limit: number of buckets to retrieve starting at ``from_date``
    :param group_by: aggregation bucket: ``day``, ``week`` or ``month``
    :raises Exception: on an unknown ``group_by`` value
    """
    fetchers = {
        "day": self.get_metric_by_day,
        "week": self.get_metric_by_week,
        "month": self.get_metric_by_month,
    }
    bucket = group_by.lower()
    if bucket not in fetchers:
        raise Exception("Allowed values for group_by are day, week or month.")
    fetch = fetchers[bucket]

    # Share one pipelined connection so every query goes out in one batch.
    pending = []
    with self._analytics_backend.map() as conn:
        for unique_identifier, metric in metric_identifiers:
            pending.append(fetch(unique_identifier, metric, from_date, limit=limit, connection=conn))

    # Results only materialize once the map() block exits, so merge here.
    return [self._parse_and_process_metrics(series, raw) for series, raw in pending]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """
    Get the count for ``metric`` for ``unique_identifier``. When both
    ``start_date`` and ``end_date`` are given, only counts within that
    range are summed; otherwise the single overall counter is returned.

    :param unique_identifier: unique string identifying the object this metric is for
    :param metric: a unique name for the metric you want to track
    :param start_date: get the specified metrics after this date
    :param end_date: get the specified metrics before this date
    :return: the count for the metric, 0 otherwise
    """
    result = None
    if start_date and end_date:
        # Normalize: ensure start <= end, and promote plain dates to
        # datetimes (rrule below requires datetime instances; only
        # datetime has a .date attribute).
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # Every 1st-of-month inside the range; these delimit whole months
        # that can be read from the cheaper monthly counters.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        #We can sorta optimize this by getting most of the data by month
        if len(monthly_metrics_dates) >= 3:
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
                    conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            # NOTE(review): parsing after the map() block exits — the
            # pipelined results presumably only resolve then; confirm
            # against nydus' map() semantics.
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short range: just sum the daily counters directly.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        # No range given: read the single overall counter (None -> 0).
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            result = 0
    return result
def get_counts(self, metric_identifiers, **kwargs):
    """
    Retrieve several metric counts as efficiently as possible.

    :param metric_identifiers: iterable of ``(unique_identifier, metric_name)``
        tuples naming the metrics to fetch, e.g.
        ``[('user:1', 'people_invited'), ('user:2', 'comments_posted')]``
    :return: list of integer counts, one per identifier pair (0 when the
        backend has no value for that pair)
    """
    def _as_int(raw):
        # The backend may hand back None for unknown metrics; treat as zero.
        try:
            return int(raw)
        except TypeError:
            return 0

    raw_counts = [self.get_count(uid, name, **kwargs)
                  for uid, name in metric_identifiers]
    return [_as_int(raw) for raw in raw_counts]
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
    """
    Set (rather than increment) the daily counter for ``metric`` for
    ``unique_identifier`` on ``date``. Useful for resetting a metric
    count to 0 or decrementing a metric.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing for the setting of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: unique string (or list) identifying the object this metric is for
    :param metric: a unique name (or list of names) for the metric you want to track
    :param date: sets the specified metrics for this date
    :param count: sets the specified metrics to the value of count
    :param sync_agg: when True, recompute week/month counters touched by ``date``
    :param update_counter: when True, shift the overall counter by the delta
    """
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                if update_counter:  # updates overall counter for metric
                    # Read the old daily value so the overall counter can be
                    # shifted by the delta (count - old daily value).
                    # NOTE(review): these reads go through the backend
                    # directly, not the pipelined `conn` — presumably so
                    # they resolve immediately; confirm.
                    overall_count = self.get_count(uid, single_metric)
                    day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
                    self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
                results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
    # Recompute the week/month aggregates after the pipelined hsets flush.
    if sync_agg:
        self.sync_agg_metric(unique_identifier, metric, date, date)
    return results
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recalculate the weekly counters for ``metric`` for ``unique_identifier``
    from the per-day counters in the given date range. Useful for updating
    the counters after using ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing the syncing of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: unique string (or list) identifying the object this metric is for
    :param metric: a unique name (or list of names) for the metric you want to track
    :param start_date: date syncing starts
    :param end_date: date syncing ends
    """
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    closest_monday_from_date = self._get_closest_week(start_date)
    num_weeks = self._num_weeks(start_date, end_date)
    # NOTE: the original also computed _get_weekly_date_range here, but the
    # result was never used — dead work, removed.

    # Mondays of every week overlapping [start_date, end_date].
    week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
    weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
    for uid in unique_identifier:
        for single_metric in metric:
            for week in weeks_to_update:
                # A week's counter is simply the sum of its 7 daily counters.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
                week_counter = sum(series_results.values())

                hash_key_weekly = self._get_weekly_metric_key(uid, week)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recalculate the monthly counters for ``metric`` for ``unique_identifier``
    from the per-day counters in the given date range. Useful for updating
    the counters after using ``set_metric_by_day``.

    The redis backend supports lists for both ``unique_identifier`` and
    ``metric`` allowing the syncing of multiple metrics for multiple
    unique_identifiers efficiently. Not all backends may support this.

    :param unique_identifier: unique string (or list) identifying the object this metric is for
    :param metric: a unique name (or list of names) for the metric you want to track
    :param start_date: date syncing starts
    :param end_date: date syncing ends
    """
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    num_months = self._num_months(start_date, end_date)
    first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
    # NOTE: the original also computed _get_weekly_date_range here, but the
    # result was never used — dead work, removed.

    # First-of-month for every month overlapping [start_date, end_date].
    month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    months_to_update = list(itertools.islice(month_date_generator, num_months))
    for uid in unique_identifier:
        for single_metric in metric:
            for month in months_to_update:
                # A month's counter is the sum of that month's daily counters.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
                month_counter = sum(series_results.values())

                # Monthly counters live in the same per-year hash as weekly ones.
                hash_key_monthly = self._get_weekly_metric_key(uid, month)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
    """
    Queue (on the pipelined ``conn``) the three partial queries that cover
    [start_date, end_date]: whole months in the middle plus the partial day
    ranges before the first and after the last first-of-month.
    """
    first_month = monthly_metrics_dates[0]
    last_month = monthly_metrics_dates[-1]
    days_before = (first_month - start_date).days
    days_after = (end_date - last_month).days

    monthly = self.get_metric_by_month(
        unique_identifier, metric, first_month,
        limit=len(monthly_metrics_dates) - 1, connection=conn)
    # Leading partial range only exists when start_date precedes the first
    # first-of-month in the range.
    if days_before > 0:
        leading = self.get_metric_by_day(
            unique_identifier, metric, start_date, limit=days_before, connection=conn)
    else:
        leading = ([], [[]],)
    trailing = self.get_metric_by_day(
        unique_identifier, metric, last_month, limit=days_after + 1, connection=conn)

    return monthly[0], monthly[1], leading[0], leading[1], trailing[0], trailing[1]
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.sync_week_metric
|
python
|
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
|
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L466-L498
|
[
"def _get_closest_week(self, metric_date):\n \"\"\"\n Gets the closest monday to the date provided.\n \"\"\"\n #find the offset to the closest monday\n days_after_monday = metric_date.isoweekday() - 1\n\n return metric_date - datetime.timedelta(days=days_after_monday)\n",
"def _get_weekly_metric_key(self, unique_identifier, metric_date):\n \"\"\"\n Redis key for weekly metric\n \"\"\"\n return self._prefix + \":\" + \"user:%s:analy:%s\" % (unique_identifier, metric_date.strftime(\"%y\"),)\n",
"def _get_weekly_metric_name(self, metric, metric_date):\n \"\"\"\n Hash key for weekly metric\n \"\"\"\n return \"%s:%s\" % (metric, metric_date.strftime(\"%y-%m-%d\"),)\n",
"def _get_weekly_date_range(self, metric_date, delta):\n \"\"\"\n Gets the range of years that we need to use as keys to get metrics from redis.\n \"\"\"\n dates = [metric_date]\n end_date = metric_date + delta\n #Figure out how many years our metric range spans\n spanning_years = end_date.year - metric_date.year\n for i in range(spanning_years):\n #for the weekly keys, we only care about the year\n dates.append(\n datetime.date(\n year=metric_date.year + (i + 1), month=1, day=1))\n return dates\n",
"def _num_weeks(self, start_date, end_date):\n closest_monday = self._get_closest_week(start_date)\n return ((end_date - closest_monday).days / 7) + 1\n",
"def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):\n \"\"\"\n Returns the ``metric`` for ``unique_identifier`` segmented by day\n starting from``from_date``\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param from_date: A python date object\n :param limit: The total number of days to retrive starting from ``from_date``\n \"\"\"\n conn = kwargs.get(\"connection\", None)\n date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())\n metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))\n #generate a list of mondays in between the start date and the end date\n series = list(itertools.islice(date_generator, limit))\n\n metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]\n\n metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \\\n metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]\n\n if conn is not None:\n results = metric_func(conn)\n else:\n with self._analytics_backend.map() as conn:\n results = metric_func(conn)\n series, results = self._parse_and_process_metrics(series, results)\n\n return series, results\n"
] |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
"""
Hash key for monthly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m"),)
def _get_daily_date_range(self, metric_date, delta):
"""
Get the range of months that we need to use as keys to scan redis.
"""
dates = [metric_date]
start_date = metric_date
end_date = metric_date + delta
while start_date.month < end_date.month or start_date.year < end_date.year:
days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
#shift along to the next month as one of the months we will have to see. We don't care that the exact date
#is the 1st in each subsequent date range as we only care about the year and the month
start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1)
dates.append(start_date)
return dates
def _get_weekly_date_range(self, metric_date, delta):
"""
Gets the range of years that we need to use as keys to get metrics from redis.
"""
dates = [metric_date]
end_date = metric_date + delta
#Figure out how many years our metric range spans
spanning_years = end_date.year - metric_date.year
for i in range(spanning_years):
#for the weekly keys, we only care about the year
dates.append(
datetime.date(
year=metric_date.year + (i + 1), month=1, day=1))
return dates
def _parse_and_process_metrics(self, series, list_of_metrics):
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
closest_monday = self._get_closest_week(start_date)
return ((end_date - closest_monday).days / 7) + 1
def _num_months(self, start_date, end_date):
return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1
def clear_all(self):
"""
Deletes all ``sandsnake`` related data from redis.
.. warning::
Very expensive and destructive operation. Use with causion
"""
keys = self._analytics_backend.keys()
for key in itertools.chain(*keys):
with self._analytics_backend.map() as conn:
if key.startswith(self._prefix):
conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
"""
Tracks a metric just by count. If you track a metric this way, you won't be able
to query the metric by day, week or month.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occured. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
closest_monday_from_date = self._get_closest_week(from_date)
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
"""
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of months to retrive starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the sepcified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
try:
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result
def get_counts(self, metric_identifiers, **kwargs):
"""
Retrieves a multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
"""
parsed_results = []
results = [
self.get_count(unique_identifier, metric, **kwargs) for
unique_identifier, metric in metric_identifiers]
for result in results:
try:
parsed_result = int(result)
except TypeError:
parsed_result = 0
parsed_results.append(parsed_result)
return parsed_results
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the sepcified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
self.sync_week_metric(unique_identifier, metric, start_date, end_date)
self.sync_month_metric(unique_identifier, metric, start_date, end_date)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/backends/redis.py
|
Redis.sync_month_metric
|
python
|
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
num_months = self._num_months(start_date, end_date)
first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=num_months))
month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
months_to_update = list(itertools.islice(month_date_generator, num_months))
for uid in unique_identifier:
for single_metric in metric:
for month in months_to_update:
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
month_counter = sum([value for key, value in series_results.items()])
hash_key_monthly = self._get_weekly_metric_key(uid, month)
monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
|
Uses the count for each day in the date range to recalculate the counters for the months for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string indetifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing end
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L500-L532
|
[
"def _get_weekly_metric_key(self, unique_identifier, metric_date):\n \"\"\"\n Redis key for weekly metric\n \"\"\"\n return self._prefix + \":\" + \"user:%s:analy:%s\" % (unique_identifier, metric_date.strftime(\"%y\"),)\n",
"def _get_monthly_metric_name(self, metric, metric_date):\n \"\"\"\n Hash key for monthly metric\n \"\"\"\n return \"%s:%s\" % (metric, metric_date.strftime(\"%y-%m\"),)\n",
"def _get_weekly_date_range(self, metric_date, delta):\n \"\"\"\n Gets the range of years that we need to use as keys to get metrics from redis.\n \"\"\"\n dates = [metric_date]\n end_date = metric_date + delta\n #Figure out how many years our metric range spans\n spanning_years = end_date.year - metric_date.year\n for i in range(spanning_years):\n #for the weekly keys, we only care about the year\n dates.append(\n datetime.date(\n year=metric_date.year + (i + 1), month=1, day=1))\n return dates\n",
"def _num_months(self, start_date, end_date):\n return ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) + 1\n",
"def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):\n \"\"\"\n Returns the ``metric`` for ``unique_identifier`` segmented by day\n starting from``from_date``\n\n :param unique_identifier: Unique string indetifying the object this metric is for\n :param metric: A unique name for the metric you want to track\n :param from_date: A python date object\n :param limit: The total number of days to retrive starting from ``from_date``\n \"\"\"\n conn = kwargs.get(\"connection\", None)\n date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())\n metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))\n #generate a list of mondays in between the start date and the end date\n series = list(itertools.islice(date_generator, limit))\n\n metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]\n\n metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \\\n metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]\n\n if conn is not None:\n results = metric_func(conn)\n else:\n with self._analytics_backend.map() as conn:\n results = metric_func(conn)\n series, results = self._parse_and_process_metrics(series, results)\n\n return series, results\n"
] |
class Redis(BaseAnalyticsBackend):
def __init__(self, settings, **kwargs):
nydus_hosts = {}
hosts = settings.get("hosts", [])
if not hosts:
raise Exception("No redis hosts specified")
for i, host in enumerate(hosts):
nydus_hosts[i] = host
defaults = settings.get(
"defaults",
{
'host': 'localhost',
'port': 6379,
})
self._analytics_backend = create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.keyvalue.ConsistentHashingRouter',
'hosts': nydus_hosts,
'defaults': defaults,
})
super(Redis, self).__init__(settings, **kwargs)
def _get_closest_week(self, metric_date):
"""
Gets the closest monday to the date provided.
"""
#find the offset to the closest monday
days_after_monday = metric_date.isoweekday() - 1
return metric_date - datetime.timedelta(days=days_after_monday)
def _get_daily_metric_key(self, unique_identifier, metric_date):
"""
Redis key for daily metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y-%m"),)
def _get_weekly_metric_key(self, unique_identifier, metric_date):
"""
Redis key for weekly metric
"""
return self._prefix + ":" + "user:%s:analy:%s" % (unique_identifier, metric_date.strftime("%y"),)
def _get_daily_metric_name(self, metric, metric_date):
"""
Hash key for daily metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_weekly_metric_name(self, metric, metric_date):
"""
Hash key for weekly metric
"""
return "%s:%s" % (metric, metric_date.strftime("%y-%m-%d"),)
def _get_monthly_metric_name(self, metric, metric_date):
    """Hash field name for ``metric`` for one calendar month (``metric:yy-mm``)."""
    month_token = metric_date.strftime("%y-%m")
    return "%s:%s" % (metric, month_token)
def _get_daily_date_range(self, metric_date, delta):
    """Return one date per calendar month spanned by [metric_date, metric_date + delta].

    Only the year/month of each returned date matters; callers use them to
    build the per-month hash keys that must be scanned.
    """
    cursor = metric_date
    end_date = metric_date + delta
    dates = [cursor]
    while cursor.year < end_date.year or cursor.month < end_date.month:
        days_in_month = calendar.monthrange(cursor.year, cursor.month)[1]
        # Jump to the 1st of the following month; the exact day is irrelevant
        # since only year and month feed into the hash key.
        cursor = cursor + datetime.timedelta(days=days_in_month - cursor.day + 1)
        dates.append(cursor)
    return dates
def _get_weekly_date_range(self, metric_date, delta):
    """Return one date per calendar year spanned by [metric_date, metric_date + delta].

    The first entry is ``metric_date`` itself; each additional year is
    represented by its January 1st (only the year feeds into the hash key).
    """
    end_date = metric_date + delta
    extra_years = end_date.year - metric_date.year
    return [metric_date] + [
        datetime.date(year=metric_date.year + offset, month=1, day=1)
        for offset in range(1, extra_years + 1)
    ]
def _parse_and_process_metrics(self, series, list_of_metrics):
# Convert raw ``hmget`` results into a (date strings, merged counts) pair.
# ``series``: list of date objects, one per requested data point.
# ``list_of_metrics``: one list of raw values per scanned hash, each
# parallel to ``series``; missing fields come back as None and count as 0.
# Returns (set of "YYYY-MM-DD" strings, {date string: summed count}).
formatted_result_list = []
series = [dt.strftime("%Y-%m-%d") for dt in series]
for result in list_of_metrics:
values = {}
for index, date_string in enumerate(series):
values[date_string] = int(result[index]) if result[index] is not None else 0
formatted_result_list.append(values)
# Sum per-date counts across every scanned hash.  NOTE(review): ``reduce``
# raises TypeError when ``list_of_metrics`` is empty — callers always pass
# at least one result list.
merged_values = reduce(
lambda a, b: dict((n, a.get(n, 0) + b.get(n, 0)) for n in set(a) | set(b)),
formatted_result_list)
return set(series), merged_values
def _num_weeks(self, start_date, end_date):
    """Number of week buckets touched by [start_date, end_date]."""
    # Count from the Monday of start_date's week (integer floor division).
    week_start = self._get_closest_week(start_date)
    return (end_date - week_start).days / 7 + 1
def _num_months(self, start_date, end_date):
    """Number of calendar-month buckets touched by [start_date, end_date], inclusive."""
    year_span = end_date.year - start_date.year
    return year_span * 12 + (end_date.month - start_date.month) + 1
def clear_all(self):
    """
    Delete every key in this backend's namespace from redis.

    .. warning::
        Very expensive and destructive operation. Use with caution.
    """
    keys = self._analytics_backend.keys()
    # Fix: open one pipelined connection for all deletes instead of a fresh
    # map() context per key, and only issue deletes for keys in our prefix.
    with self._analytics_backend.map() as conn:
        for key in itertools.chain(*keys):
            if key.startswith(self._prefix):
                conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
    """
    Increment the overall counter for ``metric`` only.  A metric tracked this
    way has no daily/weekly/monthly breakdown, so it cannot be queried by
    time period.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param inc_amt: The amount to increment the counter by
    :return: the backend's ``incr`` result
    """
    counter_key = "analy:%s:count:%s" % (unique_identifier, metric)
    return self._analytics_backend.incr(self._prefix + ":" + counter_key, inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for. May also be a list/tuple/generator.
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occured. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: nested list of pipelined command results, one 4-item list per (uid, metric) pair
"""
# Normalize both arguments to iterables so scalars and lists share one code path.
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
# One pipelined connection covers the whole (uid x metric) fan-out.
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
# Weekly and monthly counters intentionally share the per-year
# hash key; the overall counter is a plain string key.
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by day
starting from ``from_date``.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of days to retrieve starting from ``from_date``
:return: (set of "YYYY-MM-DD" strings, {date string: count})
"""
# An explicit connection lets get_metrics() batch many calls in one pipeline.
conn = kwargs.get("connection", None)
date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
# One entry per calendar month spanned; each month is one hash key to scan.
metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
#generate a list of days in between the start date and the end date
series = list(itertools.islice(date_generator, limit))

metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]

# hmget every requested day's field from every spanned month's hash.
metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]

if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)

return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by week
starting from ``from_date``.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of weeks to retrieve starting from ``from_date``
:return: (set of "YYYY-MM-DD" strings, one per week's Monday, {date string: count})
"""
# An explicit connection lets get_metrics() batch many calls in one pipeline.
conn = kwargs.get("connection", None)
# Weeks are keyed by their Monday; snap the start date back to one.
closest_monday_from_date = self._get_closest_week(from_date)
# One entry per calendar year spanned; each year is one hash key to scan.
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \
metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
"""
Returns the ``metric`` for ``unique_identifier`` segmented by month
starting from ``from_date``. It will retrieve metrics data starting from the 1st of the
month specified in ``from_date``.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param from_date: A python date object
:param limit: The total number of months to retrieve starting from ``from_date``
:return: (set of "YYYY-MM-DD" strings, one per month's 1st, {date string: count})
"""
# An explicit connection lets get_metrics() batch many calls in one pipeline.
conn = kwargs.get("connection", None)
first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
# Monthly counters live in the same per-year hashes as weekly ones, so the
# per-year key-range helper is reused here.
metric_key_date_range = self._get_weekly_date_range(
first_of_month, relativedelta(months=limit))
date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
#generate a list of first_of_month's in between the start date and the end date
series = list(itertools.islice(date_generator, limit))
metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
metric_func = lambda conn: [conn.hmget(
self._get_weekly_metric_key(
unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
if conn is not None:
results = metric_func(conn)
else:
with self._analytics_backend.map() as conn:
results = metric_func(conn)
series, results = self._parse_and_process_metrics(series, results)
return series, results
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs):
"""
Retrieves multiple metrics as efficiently as possible.
:param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name`) identifying which metrics to retrieve.
For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)]
:param from_date: A python date object
:param limit: The total number of periods to retrieve starting from ``from_date``
:param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
:return: one (series, {date string: count}) pair per entry in ``metric_identifiers``
"""
results = []
#validation of types:
allowed_types = {
"day": self.get_metric_by_day,
"week": self.get_metric_by_week,
"month": self.get_metric_by_month,
}
if group_by.lower() not in allowed_types:
raise Exception("Allowed values for group_by are day, week or month.")
group_by_func = allowed_types[group_by.lower()]
#pass a connection object so we can pipeline as much as possible
with self._analytics_backend.map() as conn:
for unique_identifier, metric in metric_identifiers:
results.append(group_by_func(unique_identifier, metric, from_date, limit=limit, connection=conn))
#we have to merge all the metric results afterwards because we are using a custom context processor
# NOTE(review): results gathered inside map() are pipelined promises, so
# they are (re)parsed here only after the pipeline has executed.
return [
self._parse_and_process_metrics(series, list_of_metrics) for
series, list_of_metrics in results]
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
"""
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date``
and an ``end_date``, to only get metrics within that time range.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Get the specified metrics after this date
:param end_date: Get the specified metrics before this date
:return: The count for the metric, 0 otherwise
"""
result = None
if start_date and end_date:
# Normalize argument order and coerce plain dates to datetimes so that
# the rrule arithmetic below works uniformly.
start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
# Every 1st-of-month that falls inside the range.
monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
#We can sorta optimize this by getting most of the data by month
if len(monthly_metrics_dates) >= 3:
# Long range: cover the bulk with monthly counters and patch the
# partial months at either end with daily counters.
with self._analytics_backend.map() as conn:
monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(
conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
else:
# Short range: just sum the daily counters directly.
diff = end_date - start_date
metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
result = sum(metric_results[1].values())
else:
# No range given: fall back to the overall counter key; a missing key
# makes int(None) raise TypeError, which maps to 0.
try:
result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
except TypeError:
result = 0
return result
def get_counts(self, metric_identifiers, **kwargs):
    """
    Retrieve several overall counters at once.

    :param metric_identifiers: a list of (unique_identifier, metric_name) tuples,
        e.g. [('user:1', 'people_invited',), ('user:2', 'comments_posted',)]
    :return: list of integer counts in the same order; unparsable values become 0
    """
    counts = []
    for unique_identifier, metric in metric_identifiers:
        raw = self.get_count(unique_identifier, metric, **kwargs)
        try:
            counts.append(int(raw))
        except TypeError:
            counts.append(0)
    return counts
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
"""
Sets the count for the ``metric`` for ``unique_identifier``.
You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param date: Sets the specified metrics for this date
:param count: Sets the specified metrics to value of count
:param sync_agg: Boolean used to determine if week and month metrics should be updated
:param update_counter: Boolean used to determine if overall counter should be updated
"""
# Normalize both arguments to iterables so scalars and lists share one code path.
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
if update_counter: # updates overall counter for metric
# Adjust the overall counter by the delta between the old and
# new daily value so the total stays consistent.
overall_count = self.get_count(uid, single_metric)
day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
if sync_agg:
# Recompute the weekly/monthly aggregates that include ``date``.
self.sync_agg_metric(unique_identifier, metric, date, date)
return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
    """
    Recalculate the weekly and monthly counters for ``metric`` from the
    stored daily counts over [start_date, end_date].  Useful after
    set_metric_by_day has changed daily values.  Both ``unique_identifier``
    and ``metric`` may be lists, as with the other redis-backed operations.

    :param unique_identifier: Unique string identifying the object this metric is for
    :param metric: A unique name for the metric you want to track
    :param start_date: Date syncing starts
    :param end_date: Date syncing ends
    """
    for resync in (self.sync_week_metric, self.sync_month_metric):
        resync(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
"""
Uses the count for each day in the date range to recalculate the counters for the weeks for
the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month
after using set_metric_by_day.
The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of
multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track
:param start_date: Date syncing starts
:param end_date: Date syncing ends
"""
# Normalize both arguments to iterables so scalars and lists share one code path.
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
closest_monday_from_date = self._get_closest_week(start_date)
num_weeks = self._num_weeks(start_date, end_date)
# NOTE(review): metric_key_date_range is computed but never used below.
metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks))
week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
#generate a list of mondays in between the start date and the end date
weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
for uid in unique_identifier:
for single_metric in metric:
for week in weeks_to_update:
# A week's counter is the sum of its seven daily counters.
_, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
week_counter = sum([value for key, value in series_results.items()])
hash_key_weekly = self._get_weekly_metric_key(uid, week)
weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
with self._analytics_backend.map() as conn:
conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def _get_counts(self, conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date):
# Issue the pipelined queries backing get_count() for long date ranges:
# whole months in the middle plus daily patches for the partial months at
# either end.  Returns the three raw (series, results) pairs; the caller
# parses them after the pipeline has executed.
start_diff = monthly_metrics_dates[0] - start_date
end_diff = end_date - monthly_metrics_dates[-1]
monthly_metric_series, monthly_metric_results = self.get_metric_by_month(unique_identifier, metric, monthly_metrics_dates[0], limit=len(monthly_metrics_dates) - 1, connection=conn)
#get the difference from the date to the start date and get all dates in between
starting_metric_series, starting_metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=start_diff.days, connection=conn) if start_diff.days > 0 else ([], [[]],)
ending_metric_series, ending_metric_results = self.get_metric_by_day(unique_identifier, metric, monthly_metrics_dates[-1], limit=end_diff.days + 1, connection=conn)
return monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results
|
numan/py-analytics
|
analytics/__init__.py
|
create_analytic_backend
|
python
|
def create_analytic_backend(settings):
    """
    Build an analytics backend instance from a settings dictionary.

    :param settings: dict with a 'backend' entry (dotted import path or a
        backend class) and an optional 'settings' dict passed to the backend.
    :returns: A backend object implementing the analytics api
    :raises KeyError: if no backend is specified
    """
    backend = settings.get('backend')
    if not backend:
        raise KeyError('backend')
    if isinstance(backend, basestring):
        # Dotted-path strings are resolved to the actual class.
        backend = import_string(backend)
    return backend(settings.get("settings", {}))
|
Creates a new Analytics backend from the settings
:param settings: Dictionary of settings for the analytics backend
:returns: A backend object implementing the analytics api
>>>
>>> analytics = create_analytic_backend({
>>> 'backend': 'analytics.backends.redis.Redis',
>>> 'settings': {
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> 'db': 0,
>>> },
>>> 'hosts': [{'db': 0}, {'db': 1}, {'host': 'redis.example.org'}]
>>> },
>>> })
|
train
|
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/__init__.py#L27-L55
|
[
"def import_string(import_name, silent=False):\n \"\"\"Imports an object based on a string. If *silent* is True the return\n value will be None if the import fails.\n\n Simplified version of the function with same name from `Werkzeug`_.\n\n :param import_name:\n The dotted name for the object to import.\n :param silent:\n If True, import errors are ignored and None is returned instead.\n :returns:\n The imported object.\n \"\"\"\n import_name = str(import_name)\n try:\n if '.' in import_name:\n module, obj = import_name.rsplit('.', 1)\n return getattr(__import__(module, None, None, [obj]), obj)\n else:\n return __import__(import_name)\n except (ImportError, AttributeError):\n if not silent:\n raise\n"
] |
"""
Copyright 2012 Numan Sachwani <numan@7Geese.com>
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from analytics.utils import import_string
try:
VERSION = __import__('pkg_resources') \
.get_distribution('analytics').version
except Exception, e:
VERSION = 'unknown'
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
_runshell
|
python
|
def _runshell(cmd, exception):
    """Run ``cmd``; raise BridgeException(exception) on a nonzero exit status.

    Returns the completed Popen object so callers may read its stdout.
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if proc.wait() != 0:
        raise BridgeException(exception)
    return proc
|
Run a shell command. If it fails, raise a proper exception.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L145-L150
| null |
import subprocess
from distutils import spawn
brctlexe = spawn.find_executable("brctl")
ipexe = spawn.find_executable("ip")
class BridgeException(Exception):
# Raised when a brctl/ip shell command exits with a nonzero status.
pass
class Bridge(object):
# Thin wrapper around a single Linux bridge device; every operation shells
# out to ``brctl`` via _runshell.
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representation of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge. """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn hairpin mode on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
# Drops the first 7 whitespace-separated tokens of `brctl show <name>`
# output (the header row), leaving [id, stp, interfaces...] indexed by
# the getters below.  TODO(review): confirm the header token count
# against the installed brctl's exact wording.
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return True if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
class BridgeController(object):
    """Create, delete and look up Linux bridges via brctl/ip."""

    def addbr(self, name):
        """ Create a bridge and set the device up. """
        _runshell([brctlexe, 'addbr', name],
            "Could not create bridge %s." % name)
        _runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
            "Could not set link up for %s." % name)
        return Bridge(name)

    def delbr(self, name):
        """ Set the device down and delete the bridge. """
        self.getbr(name)  # Raises if the bridge does not exist.
        _runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
            "Could not set link down for %s." % name)
        _runshell([brctlexe, 'delbr', name],
            "Could not delete bridge %s." % name)

    def showall(self):
        """ Return a list of all available bridges. """
        output = _runshell([brctlexe, 'show'],
            "Could not show bridges.").stdout.read()
        rows = [line.split() for line in output.splitlines()[1:]]
        # Interface-only continuation rows have a single column; skip them.
        return [Bridge(row[0]) for row in rows if len(row) != 1]

    def getbr(self, name):
        """ Return a bridge object."""
        for br in self.showall():
            if br.name == name:
                return br
        raise BridgeException("Bridge does not exist.")
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.addif
|
python
|
def addif(self, iname):
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
|
Add an interface to the bridge
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L24-L27
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.delif
|
python
|
def delif(self, iname):
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
|
Delete an interface from the bridge.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L29-L32
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.hairpin
|
python
|
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
|
Turn harpin on/off on a port.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L34-L39
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.stp
|
python
|
def stp(self, val=True):
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
|
Turn STP protocol on/off.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L41-L46
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.setageing
|
python
|
def setageing(self, time):
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
|
Set bridge ageing time.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L48-L51
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.setbridgeprio
|
python
|
def setbridgeprio(self, prio):
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
|
Set bridge priority value.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L53-L56
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.setfd
|
python
|
def setfd(self, time):
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
|
Set bridge forward delay time value.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L58-L61
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.sethello
|
python
|
def sethello(self, time):
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
|
Set bridge hello time value.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L63-L66
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.setmaxage
|
python
|
def setmaxage(self, time):
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
|
Set bridge max message age time.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L68-L71
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.setpathcost
|
python
|
def setpathcost(self, port, cost):
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
|
Set port path cost value for STP protocol.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L73-L76
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge.setportprio
|
python
|
def setportprio(self, port, prio):
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
|
Set port priority value.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L78-L81
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
Bridge._show
|
python
|
def _show(self):
""" Return a list of unsorted bridge details. """
p = _runshell([brctlexe, 'show', self.name],
"Could not show %s." % self.name)
return p.stdout.read().split()[7:]
|
Return a list of unsorted bridge details.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L83-L87
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class Bridge(object):
def __init__(self, name):
""" Initialize a bridge object. """
self.name = name
def __str__(self):
""" Return a string of the bridge name. """
return self.name
def __repr__(self):
""" Return a representaion of a bridge object. """
return "<Bridge: %s>" % self.name
def addif(self, iname):
""" Add an interface to the bridge """
_runshell([brctlexe, 'addif', self.name, iname],
"Could not add interface %s to %s." % (iname, self.name))
def delif(self, iname):
""" Delete an interface from the bridge. """
_runshell([brctlexe, 'delif', self.name, iname],
"Could not delete interface %s from %s." % (iname, self.name))
def hairpin(self, port, val=True):
""" Turn harpin on/off on a port. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'hairpin', self.name, port, state],
"Could not set hairpin in port %s in %s." % (port, self.name))
def stp(self, val=True):
""" Turn STP protocol on/off. """
if val: state = 'on'
else: state = 'off'
_runshell([brctlexe, 'stp', self.name, state],
"Could not set stp on %s." % self.name)
def setageing(self, time):
""" Set bridge ageing time. """
_runshell([brctlexe, 'setageing', self.name, str(time)],
"Could not set ageing time in %s." % self.name)
def setbridgeprio(self, prio):
""" Set bridge priority value. """
_runshell([brctlexe, 'setbridgeprio', self.name, str(prio)],
"Could not set bridge priority in %s." % self.name)
def setfd(self, time):
""" Set bridge forward delay time value. """
_runshell([brctlexe, 'setfd', self.name, str(time)],
"Could not set forward delay in %s." % self.name)
def sethello(self, time):
""" Set bridge hello time value. """
_runshell([brctlexe, 'sethello', self.name, str(time)],
"Could not set hello time in %s." % self.name)
def setmaxage(self, time):
""" Set bridge max message age time. """
_runshell([brctlexe, 'setmaxage', self.name, str(time)],
"Could not set max message age in %s." % self.name)
def setpathcost(self, port, cost):
""" Set port path cost value for STP protocol. """
_runshell([brctlexe, 'setpathcost', self.name, port, str(cost)],
"Could not set path cost in port %s in %s." % (port, self.name))
def setportprio(self, port, prio):
""" Set port priority value. """
_runshell([brctlexe, 'setportprio', self.name, port, str(prio)],
"Could not set priority in port %s in %s." % (port, self.name))
def getid(self):
""" Return the bridge id value. """
return self._show()[1]
def getifs(self):
""" Return a list of bridge interfaces. """
return self._show()[3:]
def getstp(self):
""" Return if STP protocol is enabled. """
return self._show()[2] == 'yes'
def showmacs(self):
""" Return a list of mac addresses. """
raise NotImplementedError()
def showstp(self):
""" Return STP information. """
raise NotImplementedError()
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
BridgeController.addbr
|
python
|
def addbr(self, name):
_runshell([brctlexe, 'addbr', name],
"Could not create bridge %s." % name)
_runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
"Could not set link up for %s." % name)
return Bridge(name)
|
Create a bridge and set the device up.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L112-L118
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class BridgeController(object):
def delbr(self, name):
""" Set the device down and delete the bridge. """
self.getbr(name) # Check if exists
_runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
"Could not set link down for %s." % name)
_runshell([brctlexe, 'delbr', name],
"Could not delete bridge %s." % name)
def showall(self):
""" Return a list of all available bridges. """
p = _runshell([brctlexe, 'show'],
"Could not show bridges.")
wlist = map(str.split, p.stdout.read().splitlines()[1:])
brwlist = filter(lambda x: len(x) != 1, wlist)
brlist = map(lambda x: x[0], brwlist)
return map(Bridge, brlist)
def getbr(self, name):
""" Return a bridge object."""
for br in self.showall():
if br.name == name:
return br
raise BridgeException("Bridge does not exist.")
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
BridgeController.delbr
|
python
|
def delbr(self, name):
self.getbr(name) # Check if exists
_runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
"Could not set link down for %s." % name)
_runshell([brctlexe, 'delbr', name],
"Could not delete bridge %s." % name)
|
Set the device down and delete the bridge.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L120-L126
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n",
"def getbr(self, name):\n \"\"\" Return a bridge object.\"\"\"\n for br in self.showall():\n if br.name == name:\n return br\n raise BridgeException(\"Bridge does not exist.\")\n"
] |
class BridgeController(object):
def addbr(self, name):
""" Create a bridge and set the device up. """
_runshell([brctlexe, 'addbr', name],
"Could not create bridge %s." % name)
_runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
"Could not set link up for %s." % name)
return Bridge(name)
def showall(self):
""" Return a list of all available bridges. """
p = _runshell([brctlexe, 'show'],
"Could not show bridges.")
wlist = map(str.split, p.stdout.read().splitlines()[1:])
brwlist = filter(lambda x: len(x) != 1, wlist)
brlist = map(lambda x: x[0], brwlist)
return map(Bridge, brlist)
def getbr(self, name):
""" Return a bridge object."""
for br in self.showall():
if br.name == name:
return br
raise BridgeException("Bridge does not exist.")
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
BridgeController.showall
|
python
|
def showall(self):
p = _runshell([brctlexe, 'show'],
"Could not show bridges.")
wlist = map(str.split, p.stdout.read().splitlines()[1:])
brwlist = filter(lambda x: len(x) != 1, wlist)
brlist = map(lambda x: x[0], brwlist)
return map(Bridge, brlist)
|
Return a list of all available bridges.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L128-L135
|
[
"def _runshell(cmd, exception):\n \"\"\" Run a shell command. if fails, raise a proper exception. \"\"\"\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise BridgeException(exception)\n return p\n"
] |
class BridgeController(object):
def addbr(self, name):
""" Create a bridge and set the device up. """
_runshell([brctlexe, 'addbr', name],
"Could not create bridge %s." % name)
_runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
"Could not set link up for %s." % name)
return Bridge(name)
def delbr(self, name):
""" Set the device down and delete the bridge. """
self.getbr(name) # Check if exists
_runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
"Could not set link down for %s." % name)
_runshell([brctlexe, 'delbr', name],
"Could not delete bridge %s." % name)
def getbr(self, name):
""" Return a bridge object."""
for br in self.showall():
if br.name == name:
return br
raise BridgeException("Bridge does not exist.")
|
udragon/pybrctl
|
pybrctl/pybrctl.py
|
BridgeController.getbr
|
python
|
def getbr(self, name):
for br in self.showall():
if br.name == name:
return br
raise BridgeException("Bridge does not exist.")
|
Return a bridge object.
|
train
|
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L137-L142
|
[
"def showall(self):\n \"\"\" Return a list of all available bridges. \"\"\"\n p = _runshell([brctlexe, 'show'],\n \"Could not show bridges.\")\n wlist = map(str.split, p.stdout.read().splitlines()[1:])\n brwlist = filter(lambda x: len(x) != 1, wlist)\n brlist = map(lambda x: x[0], brwlist)\n return map(Bridge, brlist)\n"
] |
class BridgeController(object):
def addbr(self, name):
""" Create a bridge and set the device up. """
_runshell([brctlexe, 'addbr', name],
"Could not create bridge %s." % name)
_runshell([ipexe, 'link', 'set', 'dev', name, 'up'],
"Could not set link up for %s." % name)
return Bridge(name)
def delbr(self, name):
""" Set the device down and delete the bridge. """
self.getbr(name) # Check if exists
_runshell([ipexe, 'link', 'set', 'dev', name, 'down'],
"Could not set link down for %s." % name)
_runshell([brctlexe, 'delbr', name],
"Could not delete bridge %s." % name)
def showall(self):
""" Return a list of all available bridges. """
p = _runshell([brctlexe, 'show'],
"Could not show bridges.")
wlist = map(str.split, p.stdout.read().splitlines()[1:])
brwlist = filter(lambda x: len(x) != 1, wlist)
brlist = map(lambda x: x[0], brwlist)
return map(Bridge, brlist)
|
9b/frisbee
|
frisbee/modules/bing.py
|
Module._format
|
python
|
def _format(self):
self.log.debug("Formatting URLs to request")
items = list()
for i in range(0, self.limit, 10):
query = '"%s" %s' % (self.domain, self.modifier)
url = self.host + "/search?q=" + query + "&first=" + str(i)
items.append(url)
self.log.debug("URLs were generated")
return items
|
Format search queries to perform in bulk.
Build up the URLs to call for the search engine. These will be ran
through a bulk processor and returned to a detailer.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L30-L43
| null |
class Module(Base):
"""Custom search module."""
def __init__(self, domain=None, modifier=None, engine="bing", greedy=False,
fuzzy=False, limit=500):
"""Setup the primary client instance."""
super(Base, self).__init__()
self.name = "Bing"
self.host = "https://www.bing.com"
self.domain = domain
self.modifier = modifier
self.limit = limit
self.greedy = greedy
self.fuzzy = fuzzy
self.results = list()
self.data = list()
self._start_time = None
self._end_time = None
self._duration = None
def _process(self, responses):
"""Process search engine results for detailed analysis.
Search engine result pages (SERPs) come back with each request and will
need to be extracted in order to crawl the actual hits.
"""
self.log.debug("Processing search results")
items = list()
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
except:
continue
else:
listings = soup.findAll('li', {'class': 'b_algo'})
items.extend([l.find('a')['href'] for l in listings])
self.log.debug("Search result URLs were extracted")
return items
def _fetch(self, urls):
"""Perform bulk collection of data and return the content.
Gathering responses is handled by the base class and uses futures to
speed up the processing. Response data is saved inside a local variable
to be used later in extraction.
"""
responses = self._request_bulk(urls)
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
text = soup.get_text()
except Exception:
text = response.text
self.data.append(text) # Opportunistic findings
return responses
def _extract(self):
"""Extract email addresses from results.
Text content from all crawled pages are ran through a simple email
extractor. Data is cleaned prior to running pattern expressions.
"""
self.log.debug("Extracting emails from text content")
for item in self.data:
emails = extract_emails(item, self.domain, self.fuzzy)
self.results.extend(emails)
self.log.debug("Email extraction completed")
return list(set(self.results))
def search(self):
"""Run the full search process.
Simple public method to abstract the steps needed to produce a full
search using the engine.
"""
requests = self._format()
serps = self._fetch(requests)
urls = self._process(serps)
details = self._fetch(urls)
emails = self._extract()
return {'emails': emails, 'processed': len(self.data)}
|
9b/frisbee
|
frisbee/modules/bing.py
|
Module._process
|
python
|
def _process(self, responses):
self.log.debug("Processing search results")
items = list()
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
except:
continue
else:
listings = soup.findAll('li', {'class': 'b_algo'})
items.extend([l.find('a')['href'] for l in listings])
self.log.debug("Search result URLs were extracted")
return items
|
Process search engine results for detailed analysis.
Search engine result pages (SERPs) come back with each request and will
need to be extracted in order to crawl the actual hits.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L45-L63
| null |
class Module(Base):
"""Custom search module."""
def __init__(self, domain=None, modifier=None, engine="bing", greedy=False,
fuzzy=False, limit=500):
"""Setup the primary client instance."""
super(Base, self).__init__()
self.name = "Bing"
self.host = "https://www.bing.com"
self.domain = domain
self.modifier = modifier
self.limit = limit
self.greedy = greedy
self.fuzzy = fuzzy
self.results = list()
self.data = list()
self._start_time = None
self._end_time = None
self._duration = None
def _format(self):
"""Format search queries to perform in bulk.
Build up the URLs to call for the search engine. These will be ran
through a bulk processor and returned to a detailer.
"""
self.log.debug("Formatting URLs to request")
items = list()
for i in range(0, self.limit, 10):
query = '"%s" %s' % (self.domain, self.modifier)
url = self.host + "/search?q=" + query + "&first=" + str(i)
items.append(url)
self.log.debug("URLs were generated")
return items
def _fetch(self, urls):
"""Perform bulk collection of data and return the content.
Gathering responses is handled by the base class and uses futures to
speed up the processing. Response data is saved inside a local variable
to be used later in extraction.
"""
responses = self._request_bulk(urls)
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
text = soup.get_text()
except Exception:
text = response.text
self.data.append(text) # Opportunistic findings
return responses
def _extract(self):
"""Extract email addresses from results.
Text content from all crawled pages are ran through a simple email
extractor. Data is cleaned prior to running pattern expressions.
"""
self.log.debug("Extracting emails from text content")
for item in self.data:
emails = extract_emails(item, self.domain, self.fuzzy)
self.results.extend(emails)
self.log.debug("Email extraction completed")
return list(set(self.results))
def search(self):
"""Run the full search process.
Simple public method to abstract the steps needed to produce a full
search using the engine.
"""
requests = self._format()
serps = self._fetch(requests)
urls = self._process(serps)
details = self._fetch(urls)
emails = self._extract()
return {'emails': emails, 'processed': len(self.data)}
|
9b/frisbee
|
frisbee/modules/bing.py
|
Module._fetch
|
python
|
def _fetch(self, urls):
responses = self._request_bulk(urls)
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
text = soup.get_text()
except Exception:
text = response.text
self.data.append(text) # Opportunistic findings
return responses
|
Perform bulk collection of data and return the content.
Gathering responses is handled by the base class and uses futures to
speed up the processing. Response data is saved inside a local variable
to be used later in extraction.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L65-L81
|
[
"def _request_bulk(self, urls: List[str]) -> List:\n \"\"\"Batch the requests going out.\"\"\"\n if not urls:\n raise Exception(\"No results were found\")\n session: FuturesSession = FuturesSession(max_workers=len(urls))\n self.log.info(\"Bulk requesting: %d\" % len(urls))\n futures = [session.get(u, headers=gen_headers(), timeout=3) for u in urls]\n done, incomplete = wait(futures)\n results: List = list()\n for response in done:\n try:\n results.append(response.result())\n except Exception as err:\n self.log.warn(\"Failed result: %s\" % err)\n return results\n"
] |
class Module(Base):
"""Custom search module."""
def __init__(self, domain=None, modifier=None, engine="bing", greedy=False,
fuzzy=False, limit=500):
"""Setup the primary client instance."""
super(Base, self).__init__()
self.name = "Bing"
self.host = "https://www.bing.com"
self.domain = domain
self.modifier = modifier
self.limit = limit
self.greedy = greedy
self.fuzzy = fuzzy
self.results = list()
self.data = list()
self._start_time = None
self._end_time = None
self._duration = None
def _format(self):
"""Format search queries to perform in bulk.
Build up the URLs to call for the search engine. These will be ran
through a bulk processor and returned to a detailer.
"""
self.log.debug("Formatting URLs to request")
items = list()
for i in range(0, self.limit, 10):
query = '"%s" %s' % (self.domain, self.modifier)
url = self.host + "/search?q=" + query + "&first=" + str(i)
items.append(url)
self.log.debug("URLs were generated")
return items
def _process(self, responses):
"""Process search engine results for detailed analysis.
Search engine result pages (SERPs) come back with each request and will
need to be extracted in order to crawl the actual hits.
"""
self.log.debug("Processing search results")
items = list()
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
except:
continue
else:
listings = soup.findAll('li', {'class': 'b_algo'})
items.extend([l.find('a')['href'] for l in listings])
self.log.debug("Search result URLs were extracted")
return items
def _extract(self):
"""Extract email addresses from results.
Text content from all crawled pages are ran through a simple email
extractor. Data is cleaned prior to running pattern expressions.
"""
self.log.debug("Extracting emails from text content")
for item in self.data:
emails = extract_emails(item, self.domain, self.fuzzy)
self.results.extend(emails)
self.log.debug("Email extraction completed")
return list(set(self.results))
def search(self):
"""Run the full search process.
Simple public method to abstract the steps needed to produce a full
search using the engine.
"""
requests = self._format()
serps = self._fetch(requests)
urls = self._process(serps)
details = self._fetch(urls)
emails = self._extract()
return {'emails': emails, 'processed': len(self.data)}
|
9b/frisbee
|
frisbee/modules/bing.py
|
Module._extract
|
python
|
def _extract(self):
self.log.debug("Extracting emails from text content")
for item in self.data:
emails = extract_emails(item, self.domain, self.fuzzy)
self.results.extend(emails)
self.log.debug("Email extraction completed")
return list(set(self.results))
|
Extract email addresses from results.
Text content from all crawled pages are ran through a simple email
extractor. Data is cleaned prior to running pattern expressions.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L83-L94
|
[
"def extract_emails(results: str, domain: str, fuzzy: bool) -> List[str]:\n \"\"\"Grab email addresses from raw text data.\"\"\"\n pattern: Pattern = re.compile(r'([\\w.-]+@[\\w.-]+)')\n hits: List[str] = pattern.findall(results)\n if fuzzy:\n seed = domain.split('.')[0]\n emails: List[str] = [x.lower() for x in hits if x.split('@')[1].__contains__(seed)]\n else:\n emails: List[str] = [x.lower() for x in hits if x.endswith(domain)]\n return list(set(emails))\n"
] |
class Module(Base):
"""Custom search module."""
def __init__(self, domain=None, modifier=None, engine="bing", greedy=False,
fuzzy=False, limit=500):
"""Setup the primary client instance."""
super(Base, self).__init__()
self.name = "Bing"
self.host = "https://www.bing.com"
self.domain = domain
self.modifier = modifier
self.limit = limit
self.greedy = greedy
self.fuzzy = fuzzy
self.results = list()
self.data = list()
self._start_time = None
self._end_time = None
self._duration = None
def _format(self):
"""Format search queries to perform in bulk.
Build up the URLs to call for the search engine. These will be ran
through a bulk processor and returned to a detailer.
"""
self.log.debug("Formatting URLs to request")
items = list()
for i in range(0, self.limit, 10):
query = '"%s" %s' % (self.domain, self.modifier)
url = self.host + "/search?q=" + query + "&first=" + str(i)
items.append(url)
self.log.debug("URLs were generated")
return items
def _process(self, responses):
"""Process search engine results for detailed analysis.
Search engine result pages (SERPs) come back with each request and will
need to be extracted in order to crawl the actual hits.
"""
self.log.debug("Processing search results")
items = list()
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
except:
continue
else:
listings = soup.findAll('li', {'class': 'b_algo'})
items.extend([l.find('a')['href'] for l in listings])
self.log.debug("Search result URLs were extracted")
return items
def _fetch(self, urls):
"""Perform bulk collection of data and return the content.
Gathering responses is handled by the base class and uses futures to
speed up the processing. Response data is saved inside a local variable
to be used later in extraction.
"""
responses = self._request_bulk(urls)
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
text = soup.get_text()
except Exception:
text = response.text
self.data.append(text) # Opportunistic findings
return responses
def search(self):
"""Run the full search process.
Simple public method to abstract the steps needed to produce a full
search using the engine.
"""
requests = self._format()
serps = self._fetch(requests)
urls = self._process(serps)
details = self._fetch(urls)
emails = self._extract()
return {'emails': emails, 'processed': len(self.data)}
|
9b/frisbee
|
frisbee/modules/bing.py
|
Module.search
|
python
|
def search(self):
requests = self._format()
serps = self._fetch(requests)
urls = self._process(serps)
details = self._fetch(urls)
emails = self._extract()
return {'emails': emails, 'processed': len(self.data)}
|
Run the full search process.
Simple public method to abstract the steps needed to produce a full
search using the engine.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/bing.py#L96-L107
|
[
"def _format(self):\n \"\"\"Format search queries to perform in bulk.\n\n Build up the URLs to call for the search engine. These will be ran\n through a bulk processor and returned to a detailer.\n \"\"\"\n self.log.debug(\"Formatting URLs to request\")\n items = list()\n for i in range(0, self.limit, 10):\n query = '\"%s\" %s' % (self.domain, self.modifier)\n url = self.host + \"/search?q=\" + query + \"&first=\" + str(i)\n items.append(url)\n self.log.debug(\"URLs were generated\")\n return items\n",
"def _process(self, responses):\n \"\"\"Process search engine results for detailed analysis.\n\n Search engine result pages (SERPs) come back with each request and will\n need to be extracted in order to crawl the actual hits.\n \"\"\"\n self.log.debug(\"Processing search results\")\n items = list()\n for response in responses:\n try:\n soup = BeautifulSoup(response.content, 'html.parser',\n from_encoding=\"iso-8859-1\")\n except:\n continue\n else:\n listings = soup.findAll('li', {'class': 'b_algo'})\n items.extend([l.find('a')['href'] for l in listings])\n self.log.debug(\"Search result URLs were extracted\")\n return items\n",
"def _fetch(self, urls):\n \"\"\"Perform bulk collection of data and return the content.\n\n Gathering responses is handled by the base class and uses futures to\n speed up the processing. Response data is saved inside a local variable\n to be used later in extraction.\n \"\"\"\n responses = self._request_bulk(urls)\n for response in responses:\n try:\n soup = BeautifulSoup(response.content, 'html.parser',\n from_encoding=\"iso-8859-1\")\n text = soup.get_text()\n except Exception:\n text = response.text\n self.data.append(text) # Opportunistic findings\n return responses\n",
"def _extract(self):\n \"\"\"Extract email addresses from results.\n\n Text content from all crawled pages are ran through a simple email\n extractor. Data is cleaned prior to running pattern expressions.\n \"\"\"\n self.log.debug(\"Extracting emails from text content\")\n for item in self.data:\n emails = extract_emails(item, self.domain, self.fuzzy)\n self.results.extend(emails)\n self.log.debug(\"Email extraction completed\")\n return list(set(self.results))\n"
] |
class Module(Base):
"""Custom search module."""
def __init__(self, domain=None, modifier=None, engine="bing", greedy=False,
fuzzy=False, limit=500):
"""Setup the primary client instance."""
super(Base, self).__init__()
self.name = "Bing"
self.host = "https://www.bing.com"
self.domain = domain
self.modifier = modifier
self.limit = limit
self.greedy = greedy
self.fuzzy = fuzzy
self.results = list()
self.data = list()
self._start_time = None
self._end_time = None
self._duration = None
def _format(self):
"""Format search queries to perform in bulk.
Build up the URLs to call for the search engine. These will be ran
through a bulk processor and returned to a detailer.
"""
self.log.debug("Formatting URLs to request")
items = list()
for i in range(0, self.limit, 10):
query = '"%s" %s' % (self.domain, self.modifier)
url = self.host + "/search?q=" + query + "&first=" + str(i)
items.append(url)
self.log.debug("URLs were generated")
return items
def _process(self, responses):
"""Process search engine results for detailed analysis.
Search engine result pages (SERPs) come back with each request and will
need to be extracted in order to crawl the actual hits.
"""
self.log.debug("Processing search results")
items = list()
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
except:
continue
else:
listings = soup.findAll('li', {'class': 'b_algo'})
items.extend([l.find('a')['href'] for l in listings])
self.log.debug("Search result URLs were extracted")
return items
def _fetch(self, urls):
"""Perform bulk collection of data and return the content.
Gathering responses is handled by the base class and uses futures to
speed up the processing. Response data is saved inside a local variable
to be used later in extraction.
"""
responses = self._request_bulk(urls)
for response in responses:
try:
soup = BeautifulSoup(response.content, 'html.parser',
from_encoding="iso-8859-1")
text = soup.get_text()
except Exception:
text = response.text
self.data.append(text) # Opportunistic findings
return responses
def _extract(self):
"""Extract email addresses from results.
Text content from all crawled pages are ran through a simple email
extractor. Data is cleaned prior to running pattern expressions.
"""
self.log.debug("Extracting emails from text content")
for item in self.data:
emails = extract_emails(item, self.domain, self.fuzzy)
self.results.extend(emails)
self.log.debug("Email extraction completed")
return list(set(self.results))
|
9b/frisbee
|
frisbee/utils.py
|
gen_logger
|
python
|
def gen_logger(name: str, log_level: int=logging.INFO) -> logging.Logger:
logger = logging.getLogger(name)
logger.setLevel(log_level)
shandler: logging.StreamHandler = logging.StreamHandler(sys.stdout)
fmt: str = '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s'
shandler.setFormatter(logging.Formatter(fmt))
logger.addHandler(shandler)
return logger
|
Create a logger to be used between processes.
:returns: Logging instance.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/utils.py#L13-L25
| null |
#!/usr/bin/env python
import datetime
import logging
import os
import random
import re
import sys
from typing import Dict
from typing import List
from typing import Pattern
def gen_headers() -> Dict[str, str]:
"""Generate a header pairing."""
ua_list: List[str] = ['Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36']
headers: Dict[str, str] = {'User-Agent': ua_list[random.randint(0, len(ua_list) - 1)]}
return headers
def str_datetime(stamp: datetime.datetime) -> str:
"""Convert datetime to str format."""
return stamp.strftime("%Y-%m-%d %H:%M:%S")
def now_time() -> datetime.datetime:
"""Get the current time."""
return datetime.datetime.now()
def extract_emails(results: str, domain: str, fuzzy: bool) -> List[str]:
"""Grab email addresses from raw text data."""
pattern: Pattern = re.compile(r'([\w.-]+@[\w.-]+)')
hits: List[str] = pattern.findall(results)
if fuzzy:
seed = domain.split('.')[0]
emails: List[str] = [x.lower() for x in hits if x.split('@')[1].__contains__(seed)]
else:
emails: List[str] = [x.lower() for x in hits if x.endswith(domain)]
return list(set(emails))
|
9b/frisbee
|
frisbee/utils.py
|
gen_headers
|
python
|
def gen_headers() -> Dict[str, str]:
ua_list: List[str] = ['Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36']
headers: Dict[str, str] = {'User-Agent': ua_list[random.randint(0, len(ua_list) - 1)]}
return headers
|
Generate a header pairing.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/utils.py#L28-L32
| null |
#!/usr/bin/env python
import datetime
import logging
import os
import random
import re
import sys
from typing import Dict
from typing import List
from typing import Pattern
def gen_logger(name: str, log_level: int=logging.INFO) -> logging.Logger:
"""Create a logger to be used between processes.
:returns: Logging instance.
"""
logger = logging.getLogger(name)
logger.setLevel(log_level)
shandler: logging.StreamHandler = logging.StreamHandler(sys.stdout)
fmt: str = '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s'
shandler.setFormatter(logging.Formatter(fmt))
logger.addHandler(shandler)
return logger
def str_datetime(stamp: datetime.datetime) -> str:
"""Convert datetime to str format."""
return stamp.strftime("%Y-%m-%d %H:%M:%S")
def now_time() -> datetime.datetime:
"""Get the current time."""
return datetime.datetime.now()
def extract_emails(results: str, domain: str, fuzzy: bool) -> List[str]:
"""Grab email addresses from raw text data."""
pattern: Pattern = re.compile(r'([\w.-]+@[\w.-]+)')
hits: List[str] = pattern.findall(results)
if fuzzy:
seed = domain.split('.')[0]
emails: List[str] = [x.lower() for x in hits if x.split('@')[1].__contains__(seed)]
else:
emails: List[str] = [x.lower() for x in hits if x.endswith(domain)]
return list(set(emails))
|
9b/frisbee
|
frisbee/utils.py
|
extract_emails
|
python
|
def extract_emails(results: str, domain: str, fuzzy: bool) -> List[str]:
pattern: Pattern = re.compile(r'([\w.-]+@[\w.-]+)')
hits: List[str] = pattern.findall(results)
if fuzzy:
seed = domain.split('.')[0]
emails: List[str] = [x.lower() for x in hits if x.split('@')[1].__contains__(seed)]
else:
emails: List[str] = [x.lower() for x in hits if x.endswith(domain)]
return list(set(emails))
|
Grab email addresses from raw text data.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/utils.py#L45-L54
| null |
#!/usr/bin/env python
import datetime
import logging
import os
import random
import re
import sys
from typing import Dict
from typing import List
from typing import Pattern
def gen_logger(name: str, log_level: int=logging.INFO) -> logging.Logger:
"""Create a logger to be used between processes.
:returns: Logging instance.
"""
logger = logging.getLogger(name)
logger.setLevel(log_level)
shandler: logging.StreamHandler = logging.StreamHandler(sys.stdout)
fmt: str = '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s'
shandler.setFormatter(logging.Formatter(fmt))
logger.addHandler(shandler)
return logger
def gen_headers() -> Dict[str, str]:
"""Generate a header pairing."""
ua_list: List[str] = ['Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36']
headers: Dict[str, str] = {'User-Agent': ua_list[random.randint(0, len(ua_list) - 1)]}
return headers
def str_datetime(stamp: datetime.datetime) -> str:
    """Render a datetime as a "YYYY-MM-DD HH:MM:SS" string."""
    # format() delegates to datetime.__format__, i.e. strftime.
    return format(stamp, "%Y-%m-%d %H:%M:%S")
def now_time() -> datetime.datetime:
    """Return the current local time as a naive datetime."""
    current: datetime.datetime = datetime.datetime.now()
    return current
|
9b/frisbee
|
frisbee/__init__.py
|
Frisbee._reset
|
python
|
def _reset(self) -> None:
    """Reset per-search state so the instance can run another search."""
    # A fresh random project name keeps output from separate runs apart.
    self.project: str = namesgenerator.get_random_name()
    self._processed: List = []
    self.results: List = []
|
Reset some of the state in the class for multi-searches.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L48-L52
| null |
class Frisbee:
"""Class to interact with the core code."""
NAME: ClassVar[str] = "Frisbee"
PROCESSES: ClassVar[int] = 100
MODULE_PATH: ClassVar[str] = 'frisbee.modules'
def __init__(self, project: str = namesgenerator.get_random_name(),
log_level: int = logging.INFO, save: bool = False):
"""Creation."""
self.project: str = project
self._log: logging.Logger = gen_logger(self.NAME, log_level)
self.output: bool = save
self.folder: str = os.getcwd()
self._config_bootstrap()
self._unfullfilled: Queue = Queue()
self._fulfilled: Queue = Queue()
self._processes: List = list()
self._processed: List = list()
self.results: List = list()
self.saved: List = list()
def _config_bootstrap(self) -> None:
"""Handle the basic setup of the tool prior to user control.
Bootstrap will load all the available modules for searching and set
them up for use by this main class.
"""
if self.output:
self.folder: str = os.getcwd() + "/" + self.project
os.mkdir(self.folder)
def _dyn_loader(self, module: str, kwargs: str):
"""Dynamically load a specific module instance."""
package_directory: str = os.path.dirname(os.path.abspath(__file__))
modules: str = package_directory + "/modules"
module = module + ".py"
if module not in os.listdir(modules):
raise Exception("Module %s is not valid" % module)
module_name: str = module[:-3]
import_path: str = "%s.%s" % (self.MODULE_PATH, module_name)
imported = import_module(import_path)
obj = getattr(imported, 'Module')
return obj(**kwargs)
def _job_handler(self) -> bool:
"""Process the work items."""
while True:
try:
task = self._unfullfilled.get_nowait()
except queue.Empty:
break
else:
self._log.debug("Job: %s" % str(task))
engine = self._dyn_loader(task['engine'], task)
task['start_time'] = now_time()
results = engine.search()
task['end_time'] = now_time()
duration: str = str((task['end_time'] - task['start_time']).seconds)
task['duration'] = duration
task.update({'results': results})
self._fulfilled.put(task)
return True
def _save(self) -> None:
"""Save output to a directory."""
self._log.info("Saving results to '%s'" % self.folder)
path: str = self.folder + "/"
for job in self.results:
if job['domain'] in self.saved:
continue
job['start_time'] = str_datetime(job['start_time'])
job['end_time'] = str_datetime(job['end_time'])
jid: int = random.randint(100000, 999999)
filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
handle.write(json.dumps(job, indent=4))
handle.close()
filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
for email in job['results']['emails']:
handle.write(email + "\n")
handle.close()
self.saved.append(job['domain'])
def search(self, jobs: List[Dict[str, str]]) -> None:
"""Perform searches based on job orders."""
if not isinstance(jobs, list):
raise Exception("Jobs must be of type list.")
self._log.info("Project: %s" % self.project)
self._log.info("Processing jobs: %d", len(jobs))
for _, job in enumerate(jobs):
self._unfullfilled.put(job)
for _ in range(self.PROCESSES):
proc: Process = Process(target=self._job_handler)
self._processes.append(proc)
proc.start()
for proc in self._processes:
proc.join()
while not self._fulfilled.empty():
output: Dict = self._fulfilled.get()
output.update({'project': self.project})
self._processed.append(output['domain'])
self.results.append(output)
if output['greedy']:
bonus_jobs: List = list()
observed: List = list()
for item in output['results']['emails']:
found: str = item.split('@')[1]
if found in self._processed or found in observed:
continue
observed.append(found)
base: Dict = dict()
base['limit'] = output['limit']
base['modifier'] = output['modifier']
base['engine'] = output['engine']
base['greedy'] = False
base['domain'] = found
bonus_jobs.append(base)
if len(bonus_jobs) > 0:
self.search(bonus_jobs)
self._log.info("All jobs processed")
if self.output:
self._save()
def get_results(self) -> List:
"""Return results from the search."""
return self.results
|
9b/frisbee
|
frisbee/__init__.py
|
Frisbee._config_bootstrap
|
python
|
def _config_bootstrap(self) -> None:
if self.output:
self.folder: str = os.getcwd() + "/" + self.project
os.mkdir(self.folder)
|
Handle the basic setup of the tool prior to user control.
Bootstrap will load all the available modules for searching and set
them up for use by this main class.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L54-L62
| null |
class Frisbee:
"""Class to interact with the core code."""
NAME: ClassVar[str] = "Frisbee"
PROCESSES: ClassVar[int] = 100
MODULE_PATH: ClassVar[str] = 'frisbee.modules'
def __init__(self, project: str = namesgenerator.get_random_name(),
log_level: int = logging.INFO, save: bool = False):
"""Creation."""
self.project: str = project
self._log: logging.Logger = gen_logger(self.NAME, log_level)
self.output: bool = save
self.folder: str = os.getcwd()
self._config_bootstrap()
self._unfullfilled: Queue = Queue()
self._fulfilled: Queue = Queue()
self._processes: List = list()
self._processed: List = list()
self.results: List = list()
self.saved: List = list()
def _reset(self) -> None:
"""Reset some of the state in the class for multi-searches."""
self.project: str = namesgenerator.get_random_name()
self._processed: List = list()
self.results: List = list()
def _dyn_loader(self, module: str, kwargs: str):
"""Dynamically load a specific module instance."""
package_directory: str = os.path.dirname(os.path.abspath(__file__))
modules: str = package_directory + "/modules"
module = module + ".py"
if module not in os.listdir(modules):
raise Exception("Module %s is not valid" % module)
module_name: str = module[:-3]
import_path: str = "%s.%s" % (self.MODULE_PATH, module_name)
imported = import_module(import_path)
obj = getattr(imported, 'Module')
return obj(**kwargs)
def _job_handler(self) -> bool:
"""Process the work items."""
while True:
try:
task = self._unfullfilled.get_nowait()
except queue.Empty:
break
else:
self._log.debug("Job: %s" % str(task))
engine = self._dyn_loader(task['engine'], task)
task['start_time'] = now_time()
results = engine.search()
task['end_time'] = now_time()
duration: str = str((task['end_time'] - task['start_time']).seconds)
task['duration'] = duration
task.update({'results': results})
self._fulfilled.put(task)
return True
def _save(self) -> None:
"""Save output to a directory."""
self._log.info("Saving results to '%s'" % self.folder)
path: str = self.folder + "/"
for job in self.results:
if job['domain'] in self.saved:
continue
job['start_time'] = str_datetime(job['start_time'])
job['end_time'] = str_datetime(job['end_time'])
jid: int = random.randint(100000, 999999)
filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
handle.write(json.dumps(job, indent=4))
handle.close()
filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
for email in job['results']['emails']:
handle.write(email + "\n")
handle.close()
self.saved.append(job['domain'])
def search(self, jobs: List[Dict[str, str]]) -> None:
"""Perform searches based on job orders."""
if not isinstance(jobs, list):
raise Exception("Jobs must be of type list.")
self._log.info("Project: %s" % self.project)
self._log.info("Processing jobs: %d", len(jobs))
for _, job in enumerate(jobs):
self._unfullfilled.put(job)
for _ in range(self.PROCESSES):
proc: Process = Process(target=self._job_handler)
self._processes.append(proc)
proc.start()
for proc in self._processes:
proc.join()
while not self._fulfilled.empty():
output: Dict = self._fulfilled.get()
output.update({'project': self.project})
self._processed.append(output['domain'])
self.results.append(output)
if output['greedy']:
bonus_jobs: List = list()
observed: List = list()
for item in output['results']['emails']:
found: str = item.split('@')[1]
if found in self._processed or found in observed:
continue
observed.append(found)
base: Dict = dict()
base['limit'] = output['limit']
base['modifier'] = output['modifier']
base['engine'] = output['engine']
base['greedy'] = False
base['domain'] = found
bonus_jobs.append(base)
if len(bonus_jobs) > 0:
self.search(bonus_jobs)
self._log.info("All jobs processed")
if self.output:
self._save()
def get_results(self) -> List:
"""Return results from the search."""
return self.results
|
9b/frisbee
|
frisbee/__init__.py
|
Frisbee._dyn_loader
|
python
|
def _dyn_loader(self, module: str, kwargs: str):
    """Dynamically load and instantiate a search module by name.

    Looks for ``<module>.py`` inside the package's ``modules`` directory,
    imports it and returns an instance of its ``Module`` class built from
    ``kwargs``. Raises a generic ``Exception`` for unknown module names.
    """
    base_dir: str = os.path.dirname(os.path.abspath(__file__))
    module_dir: str = base_dir + "/modules"
    module = module + ".py"
    if module not in os.listdir(module_dir):
        raise Exception("Module %s is not valid" % module)
    stem: str = module[:-3]
    dotted: str = "%s.%s" % (self.MODULE_PATH, stem)
    loaded = import_module(dotted)
    factory = getattr(loaded, 'Module')
    return factory(**kwargs)
|
Dynamically load a specific module instance.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L64-L75
| null |
class Frisbee:
"""Class to interact with the core code."""
NAME: ClassVar[str] = "Frisbee"
PROCESSES: ClassVar[int] = 100
MODULE_PATH: ClassVar[str] = 'frisbee.modules'
def __init__(self, project: str = namesgenerator.get_random_name(),
log_level: int = logging.INFO, save: bool = False):
"""Creation."""
self.project: str = project
self._log: logging.Logger = gen_logger(self.NAME, log_level)
self.output: bool = save
self.folder: str = os.getcwd()
self._config_bootstrap()
self._unfullfilled: Queue = Queue()
self._fulfilled: Queue = Queue()
self._processes: List = list()
self._processed: List = list()
self.results: List = list()
self.saved: List = list()
def _reset(self) -> None:
"""Reset some of the state in the class for multi-searches."""
self.project: str = namesgenerator.get_random_name()
self._processed: List = list()
self.results: List = list()
def _config_bootstrap(self) -> None:
"""Handle the basic setup of the tool prior to user control.
Bootstrap will load all the available modules for searching and set
them up for use by this main class.
"""
if self.output:
self.folder: str = os.getcwd() + "/" + self.project
os.mkdir(self.folder)
def _job_handler(self) -> bool:
"""Process the work items."""
while True:
try:
task = self._unfullfilled.get_nowait()
except queue.Empty:
break
else:
self._log.debug("Job: %s" % str(task))
engine = self._dyn_loader(task['engine'], task)
task['start_time'] = now_time()
results = engine.search()
task['end_time'] = now_time()
duration: str = str((task['end_time'] - task['start_time']).seconds)
task['duration'] = duration
task.update({'results': results})
self._fulfilled.put(task)
return True
def _save(self) -> None:
"""Save output to a directory."""
self._log.info("Saving results to '%s'" % self.folder)
path: str = self.folder + "/"
for job in self.results:
if job['domain'] in self.saved:
continue
job['start_time'] = str_datetime(job['start_time'])
job['end_time'] = str_datetime(job['end_time'])
jid: int = random.randint(100000, 999999)
filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
handle.write(json.dumps(job, indent=4))
handle.close()
filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
for email in job['results']['emails']:
handle.write(email + "\n")
handle.close()
self.saved.append(job['domain'])
def search(self, jobs: List[Dict[str, str]]) -> None:
"""Perform searches based on job orders."""
if not isinstance(jobs, list):
raise Exception("Jobs must be of type list.")
self._log.info("Project: %s" % self.project)
self._log.info("Processing jobs: %d", len(jobs))
for _, job in enumerate(jobs):
self._unfullfilled.put(job)
for _ in range(self.PROCESSES):
proc: Process = Process(target=self._job_handler)
self._processes.append(proc)
proc.start()
for proc in self._processes:
proc.join()
while not self._fulfilled.empty():
output: Dict = self._fulfilled.get()
output.update({'project': self.project})
self._processed.append(output['domain'])
self.results.append(output)
if output['greedy']:
bonus_jobs: List = list()
observed: List = list()
for item in output['results']['emails']:
found: str = item.split('@')[1]
if found in self._processed or found in observed:
continue
observed.append(found)
base: Dict = dict()
base['limit'] = output['limit']
base['modifier'] = output['modifier']
base['engine'] = output['engine']
base['greedy'] = False
base['domain'] = found
bonus_jobs.append(base)
if len(bonus_jobs) > 0:
self.search(bonus_jobs)
self._log.info("All jobs processed")
if self.output:
self._save()
def get_results(self) -> List:
"""Return results from the search."""
return self.results
|
9b/frisbee
|
frisbee/__init__.py
|
Frisbee._job_handler
|
python
|
def _job_handler(self) -> bool:
while True:
try:
task = self._unfullfilled.get_nowait()
except queue.Empty:
break
else:
self._log.debug("Job: %s" % str(task))
engine = self._dyn_loader(task['engine'], task)
task['start_time'] = now_time()
results = engine.search()
task['end_time'] = now_time()
duration: str = str((task['end_time'] - task['start_time']).seconds)
task['duration'] = duration
task.update({'results': results})
self._fulfilled.put(task)
return True
|
Process the work items.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L77-L94
| null |
class Frisbee:
"""Class to interact with the core code."""
NAME: ClassVar[str] = "Frisbee"
PROCESSES: ClassVar[int] = 100
MODULE_PATH: ClassVar[str] = 'frisbee.modules'
def __init__(self, project: str = namesgenerator.get_random_name(),
log_level: int = logging.INFO, save: bool = False):
"""Creation."""
self.project: str = project
self._log: logging.Logger = gen_logger(self.NAME, log_level)
self.output: bool = save
self.folder: str = os.getcwd()
self._config_bootstrap()
self._unfullfilled: Queue = Queue()
self._fulfilled: Queue = Queue()
self._processes: List = list()
self._processed: List = list()
self.results: List = list()
self.saved: List = list()
def _reset(self) -> None:
"""Reset some of the state in the class for multi-searches."""
self.project: str = namesgenerator.get_random_name()
self._processed: List = list()
self.results: List = list()
def _config_bootstrap(self) -> None:
"""Handle the basic setup of the tool prior to user control.
Bootstrap will load all the available modules for searching and set
them up for use by this main class.
"""
if self.output:
self.folder: str = os.getcwd() + "/" + self.project
os.mkdir(self.folder)
def _dyn_loader(self, module: str, kwargs: str):
"""Dynamically load a specific module instance."""
package_directory: str = os.path.dirname(os.path.abspath(__file__))
modules: str = package_directory + "/modules"
module = module + ".py"
if module not in os.listdir(modules):
raise Exception("Module %s is not valid" % module)
module_name: str = module[:-3]
import_path: str = "%s.%s" % (self.MODULE_PATH, module_name)
imported = import_module(import_path)
obj = getattr(imported, 'Module')
return obj(**kwargs)
def _save(self) -> None:
"""Save output to a directory."""
self._log.info("Saving results to '%s'" % self.folder)
path: str = self.folder + "/"
for job in self.results:
if job['domain'] in self.saved:
continue
job['start_time'] = str_datetime(job['start_time'])
job['end_time'] = str_datetime(job['end_time'])
jid: int = random.randint(100000, 999999)
filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
handle.write(json.dumps(job, indent=4))
handle.close()
filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
for email in job['results']['emails']:
handle.write(email + "\n")
handle.close()
self.saved.append(job['domain'])
def search(self, jobs: List[Dict[str, str]]) -> None:
"""Perform searches based on job orders."""
if not isinstance(jobs, list):
raise Exception("Jobs must be of type list.")
self._log.info("Project: %s" % self.project)
self._log.info("Processing jobs: %d", len(jobs))
for _, job in enumerate(jobs):
self._unfullfilled.put(job)
for _ in range(self.PROCESSES):
proc: Process = Process(target=self._job_handler)
self._processes.append(proc)
proc.start()
for proc in self._processes:
proc.join()
while not self._fulfilled.empty():
output: Dict = self._fulfilled.get()
output.update({'project': self.project})
self._processed.append(output['domain'])
self.results.append(output)
if output['greedy']:
bonus_jobs: List = list()
observed: List = list()
for item in output['results']['emails']:
found: str = item.split('@')[1]
if found in self._processed or found in observed:
continue
observed.append(found)
base: Dict = dict()
base['limit'] = output['limit']
base['modifier'] = output['modifier']
base['engine'] = output['engine']
base['greedy'] = False
base['domain'] = found
bonus_jobs.append(base)
if len(bonus_jobs) > 0:
self.search(bonus_jobs)
self._log.info("All jobs processed")
if self.output:
self._save()
def get_results(self) -> List:
"""Return results from the search."""
return self.results
|
9b/frisbee
|
frisbee/__init__.py
|
Frisbee._save
|
python
|
def _save(self) -> None:
self._log.info("Saving results to '%s'" % self.folder)
path: str = self.folder + "/"
for job in self.results:
if job['domain'] in self.saved:
continue
job['start_time'] = str_datetime(job['start_time'])
job['end_time'] = str_datetime(job['end_time'])
jid: int = random.randint(100000, 999999)
filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
handle.write(json.dumps(job, indent=4))
handle.close()
filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
for email in job['results']['emails']:
handle.write(email + "\n")
handle.close()
self.saved.append(job['domain'])
|
Save output to a directory.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L96-L116
| null |
class Frisbee:
"""Class to interact with the core code."""
NAME: ClassVar[str] = "Frisbee"
PROCESSES: ClassVar[int] = 100
MODULE_PATH: ClassVar[str] = 'frisbee.modules'
def __init__(self, project: str = namesgenerator.get_random_name(),
log_level: int = logging.INFO, save: bool = False):
"""Creation."""
self.project: str = project
self._log: logging.Logger = gen_logger(self.NAME, log_level)
self.output: bool = save
self.folder: str = os.getcwd()
self._config_bootstrap()
self._unfullfilled: Queue = Queue()
self._fulfilled: Queue = Queue()
self._processes: List = list()
self._processed: List = list()
self.results: List = list()
self.saved: List = list()
def _reset(self) -> None:
"""Reset some of the state in the class for multi-searches."""
self.project: str = namesgenerator.get_random_name()
self._processed: List = list()
self.results: List = list()
def _config_bootstrap(self) -> None:
"""Handle the basic setup of the tool prior to user control.
Bootstrap will load all the available modules for searching and set
them up for use by this main class.
"""
if self.output:
self.folder: str = os.getcwd() + "/" + self.project
os.mkdir(self.folder)
def _dyn_loader(self, module: str, kwargs: str):
"""Dynamically load a specific module instance."""
package_directory: str = os.path.dirname(os.path.abspath(__file__))
modules: str = package_directory + "/modules"
module = module + ".py"
if module not in os.listdir(modules):
raise Exception("Module %s is not valid" % module)
module_name: str = module[:-3]
import_path: str = "%s.%s" % (self.MODULE_PATH, module_name)
imported = import_module(import_path)
obj = getattr(imported, 'Module')
return obj(**kwargs)
def _job_handler(self) -> bool:
"""Process the work items."""
while True:
try:
task = self._unfullfilled.get_nowait()
except queue.Empty:
break
else:
self._log.debug("Job: %s" % str(task))
engine = self._dyn_loader(task['engine'], task)
task['start_time'] = now_time()
results = engine.search()
task['end_time'] = now_time()
duration: str = str((task['end_time'] - task['start_time']).seconds)
task['duration'] = duration
task.update({'results': results})
self._fulfilled.put(task)
return True
def search(self, jobs: List[Dict[str, str]]) -> None:
"""Perform searches based on job orders."""
if not isinstance(jobs, list):
raise Exception("Jobs must be of type list.")
self._log.info("Project: %s" % self.project)
self._log.info("Processing jobs: %d", len(jobs))
for _, job in enumerate(jobs):
self._unfullfilled.put(job)
for _ in range(self.PROCESSES):
proc: Process = Process(target=self._job_handler)
self._processes.append(proc)
proc.start()
for proc in self._processes:
proc.join()
while not self._fulfilled.empty():
output: Dict = self._fulfilled.get()
output.update({'project': self.project})
self._processed.append(output['domain'])
self.results.append(output)
if output['greedy']:
bonus_jobs: List = list()
observed: List = list()
for item in output['results']['emails']:
found: str = item.split('@')[1]
if found in self._processed or found in observed:
continue
observed.append(found)
base: Dict = dict()
base['limit'] = output['limit']
base['modifier'] = output['modifier']
base['engine'] = output['engine']
base['greedy'] = False
base['domain'] = found
bonus_jobs.append(base)
if len(bonus_jobs) > 0:
self.search(bonus_jobs)
self._log.info("All jobs processed")
if self.output:
self._save()
def get_results(self) -> List:
"""Return results from the search."""
return self.results
|
9b/frisbee
|
frisbee/__init__.py
|
Frisbee.search
|
python
|
def search(self, jobs: List[Dict[str, str]]) -> None:
    """Perform searches based on job orders.

    Jobs are queued and processed by a pool of worker processes; finished
    jobs are collected from the results queue and, for "greedy" jobs, any
    newly seen email domains are fed back into a recursive search pass.

    :param jobs: Job dicts (keys used here: engine, domain, limit,
        modifier, greedy; results are attached by the workers).
    :raises Exception: If ``jobs`` is not a list.
    """
    if not isinstance(jobs, list):
        raise Exception("Jobs must be of type list.")
    self._log.info("Project: %s" % self.project)
    self._log.info("Processing jobs: %d", len(jobs))
    # Queue every job for the worker pool.
    for _, job in enumerate(jobs):
        self._unfullfilled.put(job)
    # Start a fixed-size pool of workers, then wait for all of them.
    for _ in range(self.PROCESSES):
        proc: Process = Process(target=self._job_handler)
        self._processes.append(proc)
        proc.start()
    for proc in self._processes:
        proc.join()
    # Drain completed jobs and record their results.
    while not self._fulfilled.empty():
        output: Dict = self._fulfilled.get()
        output.update({'project': self.project})
        self._processed.append(output['domain'])
        self.results.append(output)
        if output['greedy']:
            # Build follow-up jobs from domains we have not searched yet.
            bonus_jobs: List = list()
            observed: List = list()
            for item in output['results']['emails']:
                found: str = item.split('@')[1]
                if found in self._processed or found in observed:
                    continue
                observed.append(found)
                base: Dict = dict()
                base['limit'] = output['limit']
                base['modifier'] = output['modifier']
                base['engine'] = output['engine']
                # Follow-ups are non-greedy, bounding the recursion depth.
                base['greedy'] = False
                base['domain'] = found
                bonus_jobs.append(base)
            if len(bonus_jobs) > 0:
                self.search(bonus_jobs)
    self._log.info("All jobs processed")
    if self.output:
        self._save()
|
Perform searches based on job orders.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/__init__.py#L118-L162
| null |
class Frisbee:
"""Class to interact with the core code."""
NAME: ClassVar[str] = "Frisbee"
PROCESSES: ClassVar[int] = 100
MODULE_PATH: ClassVar[str] = 'frisbee.modules'
def __init__(self, project: str = namesgenerator.get_random_name(),
log_level: int = logging.INFO, save: bool = False):
"""Creation."""
self.project: str = project
self._log: logging.Logger = gen_logger(self.NAME, log_level)
self.output: bool = save
self.folder: str = os.getcwd()
self._config_bootstrap()
self._unfullfilled: Queue = Queue()
self._fulfilled: Queue = Queue()
self._processes: List = list()
self._processed: List = list()
self.results: List = list()
self.saved: List = list()
def _reset(self) -> None:
"""Reset some of the state in the class for multi-searches."""
self.project: str = namesgenerator.get_random_name()
self._processed: List = list()
self.results: List = list()
def _config_bootstrap(self) -> None:
"""Handle the basic setup of the tool prior to user control.
Bootstrap will load all the available modules for searching and set
them up for use by this main class.
"""
if self.output:
self.folder: str = os.getcwd() + "/" + self.project
os.mkdir(self.folder)
def _dyn_loader(self, module: str, kwargs: str):
"""Dynamically load a specific module instance."""
package_directory: str = os.path.dirname(os.path.abspath(__file__))
modules: str = package_directory + "/modules"
module = module + ".py"
if module not in os.listdir(modules):
raise Exception("Module %s is not valid" % module)
module_name: str = module[:-3]
import_path: str = "%s.%s" % (self.MODULE_PATH, module_name)
imported = import_module(import_path)
obj = getattr(imported, 'Module')
return obj(**kwargs)
def _job_handler(self) -> bool:
"""Process the work items."""
while True:
try:
task = self._unfullfilled.get_nowait()
except queue.Empty:
break
else:
self._log.debug("Job: %s" % str(task))
engine = self._dyn_loader(task['engine'], task)
task['start_time'] = now_time()
results = engine.search()
task['end_time'] = now_time()
duration: str = str((task['end_time'] - task['start_time']).seconds)
task['duration'] = duration
task.update({'results': results})
self._fulfilled.put(task)
return True
def _save(self) -> None:
"""Save output to a directory."""
self._log.info("Saving results to '%s'" % self.folder)
path: str = self.folder + "/"
for job in self.results:
if job['domain'] in self.saved:
continue
job['start_time'] = str_datetime(job['start_time'])
job['end_time'] = str_datetime(job['end_time'])
jid: int = random.randint(100000, 999999)
filename: str = "%s_%s_%d_job.json" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
handle.write(json.dumps(job, indent=4))
handle.close()
filename = "%s_%s_%d_emails.txt" % (self.project, job['domain'], jid)
handle = open(path + filename, 'w')
for email in job['results']['emails']:
handle.write(email + "\n")
handle.close()
self.saved.append(job['domain'])
def get_results(self) -> List:
"""Return results from the search."""
return self.results
|
9b/frisbee
|
frisbee/cli/client.py
|
main
|
python
|
def main():
    """Run the core command-line client.

    Parses the ``search`` sub-command, hands one job to a Frisbee
    instance and prints the collected email results.
    """
    parser = ArgumentParser()
    subs = parser.add_subparsers(dest='cmd')
    setup_parser = subs.add_parser('search')
    setup_parser.add_argument('-e', '--engine', dest='engine', required=True,
                              help='Search engine to use.',
                              choices=['bing'])
    setup_parser.add_argument('-d', '--domain', dest='domain', required=True,
                              help='Email domain to collect upon.', type=str)
    setup_parser.add_argument('-l', '--limit', dest='limit', required=False,
                              help='Limit number of results.', type=int,
                              default=100)
    setup_parser.add_argument('-m', '--modifier', dest='modifier', required=False,
                              help='Search modifier to add to the query.',
                              type=str, default=None)
    setup_parser.add_argument('-s', '--save', dest='to_save', required=False,
                              help='Save results to a file.', default=False,
                              action='store_true')
    setup_parser.add_argument('-g', '--greedy', dest='greedy', required=False,
                              help='Use found results to search more.', default=False,
                              action='store_true')
    setup_parser.add_argument('-f', '--fuzzy', dest='fuzzy', required=False,
                              help='Use keyword instead of domain.', default=False,
                              action='store_true')
    args = parser.parse_args()

    if args.cmd == 'search':
        frisbee = Frisbee(log_level=logging.DEBUG, save=args.to_save)
        jobs = [{'engine': args.engine, 'modifier': args.modifier,
                 'domain': args.domain, 'limit': args.limit,
                 'greedy': args.greedy, 'fuzzy': args.fuzzy}]
        frisbee.search(jobs)
        results = frisbee.get_results()
        for job in results:
            print("-= %s Details =-" % job['project'].upper())
            print("\t[*] Engine: %s" % job['engine'])
            print("\t[*] Domain: %s" % job['domain'])
            # Label previously read "Modifer" (typo in user-facing output).
            print("\t[*] Modifier: %s" % job['modifier'])
            print("\t[*] Limit: %d" % job['limit'])
            print("\t[*] Duration: %s seconds" % job['duration'])
            print("\n-= Email Results=-")
            for email in job['results']['emails']:
                print(email)
            print("")
    # Exit 0 on success; the previous sys.exit(1) reported failure to the
    # shell even after a successful run.
    sys.exit(0)
|
Run the core.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/cli/client.py#L10-L57
|
[
"def search(self, jobs: List[Dict[str, str]]) -> None:\n \"\"\"Perform searches based on job orders.\"\"\"\n if not isinstance(jobs, list):\n raise Exception(\"Jobs must be of type list.\")\n self._log.info(\"Project: %s\" % self.project)\n self._log.info(\"Processing jobs: %d\", len(jobs))\n for _, job in enumerate(jobs):\n self._unfullfilled.put(job)\n\n for _ in range(self.PROCESSES):\n proc: Process = Process(target=self._job_handler)\n self._processes.append(proc)\n proc.start()\n\n for proc in self._processes:\n proc.join()\n\n while not self._fulfilled.empty():\n output: Dict = self._fulfilled.get()\n output.update({'project': self.project})\n self._processed.append(output['domain'])\n self.results.append(output)\n\n if output['greedy']:\n bonus_jobs: List = list()\n observed: List = list()\n for item in output['results']['emails']:\n found: str = item.split('@')[1]\n if found in self._processed or found in observed:\n continue\n observed.append(found)\n base: Dict = dict()\n base['limit'] = output['limit']\n base['modifier'] = output['modifier']\n base['engine'] = output['engine']\n base['greedy'] = False\n base['domain'] = found\n bonus_jobs.append(base)\n\n if len(bonus_jobs) > 0:\n self.search(bonus_jobs)\n\n self._log.info(\"All jobs processed\")\n if self.output:\n self._save()\n",
"def get_results(self) -> List:\n \"\"\"Return results from the search.\"\"\"\n return self.results\n"
] |
#!/usr/bin/env python
"""Conduct searches for email addresses across different modules."""
import logging
import sys
from argparse import ArgumentParser
from frisbee import Frisbee
|
9b/frisbee
|
frisbee/modules/base.py
|
Base.set_log_level
|
python
|
def set_log_level(self, level) -> None:
    """Override the default log level of the class.

    Accepts either a level name ('info', 'debug', 'error') or a numeric
    ``logging`` level such as ``logging.DEBUG``. Unknown names fall back
    to ``logging.INFO``. The previous chain of plain ``if`` statements
    left ``to_set`` unbound for any unrecognized value — including the
    numeric default that ``__init__`` passes — raising UnboundLocalError.
    """
    named = {
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'error': logging.ERROR,
    }
    if isinstance(level, int):
        to_set = level
    else:
        to_set = named.get(level, logging.INFO)
    self.log.setLevel(to_set)
|
Override the default log level of the class.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/base.py#L25-L33
| null |
class Base(object):
"""Base module class to assist in writing new modules."""
name: ClassVar[str] = 'base'
log: ClassVar[logging.Logger] = gen_logger(name, logging.INFO)
limit: ClassVar[int] = 500
def __init__(self, log_level=logging.INFO) -> None:
"""Local variables for the module."""
self.set_log_level(log_level)
def _request_bulk(self, urls: List[str]) -> List:
"""Batch the requests going out."""
if not urls:
raise Exception("No results were found")
session: FuturesSession = FuturesSession(max_workers=len(urls))
self.log.info("Bulk requesting: %d" % len(urls))
futures = [session.get(u, headers=gen_headers(), timeout=3) for u in urls]
done, incomplete = wait(futures)
results: List = list()
for response in done:
try:
results.append(response.result())
except Exception as err:
self.log.warn("Failed result: %s" % err)
return results
def search(self) -> None:
"""Execute search function and hand to processor."""
raise NotImplementedError
def _format(self) -> None:
"""Format search queries to perform in bulk.
Build up the URLs to call for the search engine. These will be ran
through a bulk processor and returned to a detailer.
"""
raise NotImplementedError
def _process(self, responses: List[str]) -> None:
"""Process search engine results for detailed analysis.
Search engine result pages (SERPs) come back with each request and will
need to be extracted in order to crawl the actual hits.
"""
raise NotImplementedError
def _fetch(self, urls: List[str]) -> None:
"""Perform bulk collection of data and return the content.
Gathering responses is handled by the base class and uses futures to
speed up the processing. Response data is saved inside a local variable
to be used later in extraction.
"""
raise NotImplementedError
def _extract(self) -> None:
"""Extract email addresses from results.
Text content from all crawled pages are ran through a simple email
extractor. Data is cleaned prior to running pattern expressions.
"""
raise NotImplementedError
|
9b/frisbee
|
frisbee/modules/base.py
|
Base._request_bulk
|
python
|
def _request_bulk(self, urls: List[str]) -> List:
    """Fetch *urls* concurrently and return the completed responses.

    :param urls: fully-formed URLs to request in parallel.
    :returns: list of response objects for the futures that completed;
        failed futures are logged and skipped.
    :raises Exception: if *urls* is empty -- there is nothing to fetch.
    """
    if not urls:
        raise Exception("No results were found")
    session: FuturesSession = FuturesSession(max_workers=len(urls))
    # Lazy %-style args: the formatting only happens if INFO is enabled.
    self.log.info("Bulk requesting: %d", len(urls))
    futures = [session.get(u, headers=gen_headers(), timeout=3) for u in urls]
    done, _ = wait(futures)  # block until every future settles
    results: List = list()
    for response in done:
        try:
            results.append(response.result())
        except Exception as err:
            # Logger.warn is a deprecated alias; warning is the API.
            self.log.warning("Failed result: %s", err)
    return results
|
Batch the requests going out.
|
train
|
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/modules/base.py#L35-L49
| null |
class Base(object):
"""Base module class to assist in writing new modules."""
name: ClassVar[str] = 'base'
log: ClassVar[logging.Logger] = gen_logger(name, logging.INFO)
limit: ClassVar[int] = 500
def __init__(self, log_level=logging.INFO) -> None:
"""Local variables for the module."""
self.set_log_level(log_level)
def set_log_level(self, level: str) -> None:
"""Override the default log level of the class."""
if level == 'info':
to_set = logging.INFO
if level == 'debug':
to_set = logging.DEBUG
if level == 'error':
to_set = logging.ERROR
self.log.setLevel(to_set)
def search(self) -> None:
"""Execute search function and hand to processor."""
raise NotImplementedError
def _format(self) -> None:
"""Format search queries to perform in bulk.
Build up the URLs to call for the search engine. These will be ran
through a bulk processor and returned to a detailer.
"""
raise NotImplementedError
def _process(self, responses: List[str]) -> None:
"""Process search engine results for detailed analysis.
Search engine result pages (SERPs) come back with each request and will
need to be extracted in order to crawl the actual hits.
"""
raise NotImplementedError
def _fetch(self, urls: List[str]) -> None:
"""Perform bulk collection of data and return the content.
Gathering responses is handled by the base class and uses futures to
speed up the processing. Response data is saved inside a local variable
to be used later in extraction.
"""
raise NotImplementedError
def _extract(self) -> None:
"""Extract email addresses from results.
Text content from all crawled pages are ran through a simple email
extractor. Data is cleaned prior to running pattern expressions.
"""
raise NotImplementedError
|
daethnir/authprogs
|
authprogs/authprogs.py
|
main
|
python
|
def main():  # pylint: disable-msg=R0912,R0915
    """Command-line entry point: parse options and dispatch to AuthProgs.

    Exactly one run mode is expected (--run, --install_key or
    --dump_config); anything else is an optparse error.  All failure
    paths exit via sys.exit so no traceback leaks to the ssh client.
    """
    parser = optparse.OptionParser()
    parser.usage = textwrap.dedent("""\
    %prog {--run|--install_key|--dump_config} [options]
    SSH command authenticator.
    Used to restrict which commands can be run via trusted SSH keys.
    """)
    # Mutually-exclusive run modes.
    group = optparse.OptionGroup(
        parser, 'Run Mode Options',
        'These options determine in which mode the authprogs '
        'program runs.')
    group.add_option(
        '-r', '--run', dest='run', action='store_true',
        help='Act as ssh command authenticator. Use this '
        'when calling from authorized_keys.')
    group.add_option(
        '--dump_config', dest='dump_config',
        action='store_true',
        help='Dump configuration (python format) '
        'to standard out and exit.')
    group.add_option(
        '--install_key', dest='install_key',
        help='Install the named ssh public key file to '
        'authorized_keys.', metavar='FILE')
    parser.add_option_group(group)
    group = optparse.OptionGroup(parser, 'Other Options')
    group.add_option(
        '--keyname', dest='keyname',
        help='Name for this key, used when matching '
        'config blocks.')
    group.add_option(
        '--configfile', dest='configfile',
        help='Path to authprogs configuration file. '
        'Defaults to ~/.ssh/authprogs.yaml',
        metavar='FILE')
    group.add_option(
        '--configdir', dest='configdir',
        help='Path to authprogs configuration directory. '
        'Defaults to ~/.ssh/authprogs.d',
        metavar='DIR')
    group.add_option('--logfile', dest='logfile',
                     help='Write logging info to this file. '
                     'Defaults to no logging.',
                     metavar='FILE')
    group.add_option('--debug', dest='debug', action='store_true',
                     help='Write additional debugging information '
                     'to --logfile')
    group.add_option('--authorized_keys', dest='authorized_keys',
                     default=os.path.expanduser('~/.ssh/authorized_keys'),
                     help='Location of authorized_keys file for '
                     '--install_key. Defaults to ~/.ssh/authorized_keys',
                     metavar='FILE')
    parser.add_option_group(group)
    opts, args = parser.parse_args()
    if args:
        sys.exit('authprogs does not accept commandline arguments.')
    # Fall back to the default config file/dir only when they exist.
    if not opts.configfile:
        cfg = os.path.expanduser('~/.ssh/authprogs.yaml')
        if os.path.isfile(cfg):
            opts.configfile = cfg
    if not opts.configdir:
        cfg = os.path.expanduser('~/.ssh/authprogs.d')
        if os.path.isdir(cfg):
            opts.configdir = cfg
    if opts.debug and not opts.logfile:
        parser.error('--debug requires use of --logfile')
    ap = None
    try:
        ap = AuthProgs(logfile=opts.logfile,  # pylint: disable-msg=C0103
                       configfile=opts.configfile,
                       configdir=opts.configdir,
                       debug=opts.debug,
                       keyname=opts.keyname)
        if opts.dump_config:
            ap.dump_config()
            sys.exit(0)
        elif opts.install_key:
            try:
                ap.install_key(opts.install_key, opts.authorized_keys)
                sys.stderr.write('Key installed successfully.\n')
                sys.exit(0)
            except InstallError as err:
                sys.stderr.write('Key install failed: %s' % err)
                sys.exit(1)
        elif opts.run:
            ap.exec_command()
            # exec_command always sys.exits itself; reaching here is a bug.
            sys.exit('authprogs command returned - should '
                     'never happen.')
        else:
            parser.error('Not sure what to do. Consider --help')
    except SSHEnvironmentError as err:
        ap.log('SSHEnvironmentError "%s"\n%s\n' % (
            err, traceback.format_exc()))
        sys.exit('authprogs: %s' % err)
    except ConfigError as err:
        ap.log('ConfigError "%s"\n%s\n' % (
            err, traceback.format_exc()))
        sys.exit('authprogs: %s' % err)
    except CommandRejected as err:
        sys.exit('authprogs: %s' % err)
    except Exception as err:
        # Last-resort handler: log (only if ap was constructed) and exit
        # without leaking a traceback to the connecting ssh client.
        if ap:
            ap.log('Unexpected exception: %s\n%s\n' % (
                err, traceback.format_exc()))
        else:
            sys.stderr.write('Unexpected exception: %s\n%s\n' % (
                err, traceback.format_exc()))
        sys.exit('authprogs experienced an unexpected exception.')
|
Main.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L449-L569
|
[
"def log(self, message):\n \"\"\"Log information.\"\"\"\n if self.logfh:\n self.logfh.write(message) # pylint: disable-msg=E1103\n",
"def dump_config(self):\n \"\"\"Pretty print the configuration dict to stdout.\"\"\"\n yaml_content = self.get_merged_config()\n print('YAML Configuration\\n%s\\n' % yaml_content.read())\n try:\n self.load()\n print('Python Configuration\\n%s\\n' % pretty(self.yamldocs))\n except ConfigError:\n sys.stderr.write(\n 'config parse error. try running with --logfile=/dev/tty\\n')\n raise\n",
"def install_key(self, keyfile, authorized_keys):\n \"\"\"Install a key into the authorized_keys file.\"\"\"\n\n # Make the directory containing the authorized_keys\n # file, if it doesn't exist. (Typically ~/.ssh).\n # Ignore errors; we'll fail shortly if we can't\n # create the authkeys file.\n try:\n os.makedirs(os.path.dirname(authorized_keys), 0o700)\n except OSError:\n pass\n\n keydata = open(keyfile).read()\n target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)\n self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))\n",
"def exec_command(self):\n \"\"\"Glean the command to run and exec.\n\n On problems, sys.exit.\n This method should *never* return.\n \"\"\"\n if not self.original_command_string:\n raise SSHEnvironmentError('no SSH command found; '\n 'interactive shell disallowed.')\n\n command_info = {'from': self.get_client_ip(),\n 'keyname': self.keyname,\n 'ssh_original_comand': self.original_command_string,\n 'time': time.time()}\n\n os.environ['AUTHPROGS_KEYNAME'] = self.keyname\n\n retcode = 126\n try:\n match = self.find_match()\n command_info['command'] = match.get('command')\n self.logdebug('find_match returned \"%s\"\\n' % match)\n\n command = match['command']\n retcode = subprocess.call(command)\n command_info['code'] = retcode\n self.log('result: %s\\n' % command_info)\n sys.exit(retcode)\n except (CommandRejected, OSError) as err:\n command_info['exception'] = '%s' % err\n self.log('result: %s\\n' % command_info)\n sys.exit(retcode)\n"
] |
"""authprogs: SSH command authenticator module.
Used to restrict which commands can be run via trusted SSH keys."""
# Copyright (C) 2013 Bri Hatch (daethnir) <bri@ifokr.org>
#
# This file is part of authprogs.
#
# Authprogs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
import StringIO as io
except ImportError:
import io
import optparse
import os
import pprint
import re
import subprocess
import sys
import textwrap
import time
import traceback
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def pretty(thing):
    """Return *thing* formatted for human-readable display."""
    return pprint.PrettyPrinter(indent=4).pformat(thing)
class Error(Exception):
"""authprogs error class."""
pass
class SSHEnvironmentError(Error):
"""Problem with the SSH server-side environment.
These error messages are show directly to users, so be
cautious in what you say.
"""
pass
class ConfigError(Error):
"""Problem with the authprogs configuration."""
pass
class CommandRejected(Error):
"""Client command rejected.
These error messages are show directly to users, so be
cautious in what you say.
"""
pass
class InstallError(Error):
"""Problem with the installing an authorized_keys entry."""
pass
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def raise_and_log_error(self, error, message):
"""Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
"""
self.log('raising %s, traceback %s\n' %
(error, traceback.format_exc()))
raise error(message)
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
    """Return True when the rule has no keyname restriction, or when
    this connection's key name is listed in the rule's ``keynames``."""
    wanted = rule.get('keynames')
    if not wanted:
        self.logdebug('no keynames requirement.\n')
        return True
    if not isinstance(wanted, list):
        wanted = [wanted]
    if self.keyname in wanted:
        self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
        return True
    self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
    return False
def check_client_ip(self, rule):
    """Check the rule's optional ``from`` (source-IP) restriction.

    :param rule: parsed rule dict; ``from`` may be a single IP string
        or a list of IP strings.
    :returns: True if the rule has no ``from`` key or the connecting
        client IP is listed; False otherwise.
    """
    if not rule.get('from'):
        self.logdebug('no "from" requirement.\n')
        return True
    allow_from = rule.get('from')
    if not isinstance(allow_from, list):
        allow_from = [allow_from]
    client_ip = self.get_client_ip()
    if client_ip in allow_from:
        self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
        return True
    else:
        # Bug fix: this debug line was missing its trailing newline,
        # breaking the line-per-entry log format used everywhere else.
        self.logdebug('client_ip %s not in %s\n' % (client_ip, allow_from))
        return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
    """Return a dict with the command to run if *rule* matches the
    ssh-original command; return None when it does not match.

    Three match strategies, in priority order: prefix match when the
    rule sets ``allow_trailing_args``, regex match when it sets
    ``pcre_match``, otherwise an exact token-list comparison.
    """
    pattern = rule['command']
    pattern_list = pattern.split()
    self.logdebug('comparing "%s" to "%s"\n' %
                  (pattern_list, self.original_command_list))
    if rule.get('allow_trailing_args'):
        self.logdebug('allow_trailing_args is true - comparing initial '
                      'list.\n')
        # Only the leading tokens must agree; extra args are allowed.
        prefix = self.original_command_list[:len(pattern_list)]
        if prefix == pattern_list:
            self.logdebug('initial list is same\n')
            return {'command': self.original_command_list}
        self.logdebug('initial list is not same\n')
        return None
    if rule.get('pcre_match'):
        if re.search(pattern, self.original_command_string):
            return {'command': self.original_command_list}
        return None
    if pattern_list == self.original_command_list:
        return {'command': pattern_list}
    return None
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
if __name__ == '__main__':
sys.exit('This is a library only.')
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.raise_and_log_error
|
python
|
def raise_and_log_error(self, error, message):
    """Log *error* together with the current traceback, then raise it.

    error: the exception class to raise
    message: the user-facing error message
    """
    detail = traceback.format_exc()
    self.log('raising %s, traceback %s\n' % (error, detail))
    raise error(message)
|
Raise error, including message and original traceback.
error: the error to raise
message: the user-facing error message
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L128-L136
|
[
"def log(self, message):\n \"\"\"Log information.\"\"\"\n if self.logfh:\n self.logfh.write(message) # pylint: disable-msg=E1103\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
"""AuthProgs class"""
def __init__(self, logfile=None, configfile=None,
configdir=None, debug=False, **kwargs):
"""AuthProgs constructor.
kwargs include:
authprogs_binary: path to this binary, when creating
authorized_keys entries.
If not specified, determines from sys.argv[0]
name: the name of this key, for matching in rules.
"""
self.debug = debug
self.logfile = logfile
self.client_ip = None
if logfile:
self.logfh = open(logfile, 'a')
else:
self.logfh = False
if kwargs.get('authprogs_binary'):
self.authprogs_binary = kwargs['authprogs_binary']
else:
self.authprogs_binary = (
os.path.abspath(os.path.abspath(sys.argv[0])))
self.original_command_string = os.environ.get(
'SSH_ORIGINAL_COMMAND', '')
self.original_command_list = self.original_command_string.split()
self.keyname = kwargs.get('keyname')
if not self.keyname:
self.keyname = ''
if ' ' in self.keyname or '\t' in self.keyname:
self.log('FATAL: keyname contains space/tabs\n')
raise Error('--keyname may contain neither spaces nor tabs.')
self.yamldocs = None
self.configfile = configfile
self.configdir = configdir
def get_client_ip(self):
"""Return the client IP from the environment."""
if self.client_ip:
return self.client_ip
try:
client = os.environ.get('SSH_CONNECTION',
os.environ.get('SSH_CLIENT'))
self.client_ip = client.split()[0]
self.logdebug('client_ip: %s\n' % self.client_ip)
return self.client_ip
except:
raise SSHEnvironmentError('cannot identify the ssh client '
'IP address')
def logdebug(self, message):
"""Log debugging information."""
if self.debug:
self.log(message)
def log(self, message):
"""Log information."""
if self.logfh:
self.logfh.write(message) # pylint: disable-msg=E1103
def check_keyname(self, rule):
"""If a key name is specified, verify it is permitted."""
keynames = rule.get('keynames')
if not keynames:
self.logdebug('no keynames requirement.\n')
return True
if not isinstance(keynames, list):
keynames = [keynames]
if self.keyname in keynames:
self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
return True
else:
self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
return False
def check_client_ip(self, rule):
"""If a client IP is specified, verify it is permitted."""
if not rule.get('from'):
self.logdebug('no "from" requirement.\n')
return True
allow_from = rule.get('from')
if not isinstance(allow_from, list):
allow_from = [allow_from]
client_ip = self.get_client_ip()
if client_ip in allow_from:
self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
return True
else:
self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
return False
def get_merged_config(self):
"""Get merged config file.
Returns an open StringIO containing the
merged config file.
"""
if self.yamldocs:
return
loadfiles = []
if self.configfile:
loadfiles.append(self.configfile)
if self.configdir:
# Gets list of all non-dotfile files from configdir.
loadfiles.extend(
[f for f in
[os.path.join(self.configdir, x) for x in
os.listdir(self.configdir)]
if os.path.isfile(f) and
not os.path.basename(f).startswith('.')])
merged_configfile = io.StringIO()
merged_configfile.write('-\n')
for thefile in loadfiles:
self.logdebug('reading in config file %s\n' % thefile)
merged_configfile.write(open(thefile).read())
merged_configfile.write('\n-\n')
merged_configfile.seek(0)
self.logdebug('merged log file: """\n%s\n"""\n' %
merged_configfile.read())
merged_configfile.seek(0)
return merged_configfile
def load(self):
"""Load our config, log and raise on error."""
try:
merged_configfile = self.get_merged_config()
self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
# Strip out the top level 'None's we get from concatenation.
# Functionally not required, but makes dumps cleaner.
self.yamldocs = [x for x in self.yamldocs if x]
self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
self.raise_and_log_error(ConfigError, 'error parsing config.')
def dump_config(self):
"""Pretty print the configuration dict to stdout."""
yaml_content = self.get_merged_config()
print('YAML Configuration\n%s\n' % yaml_content.read())
try:
self.load()
print('Python Configuration\n%s\n' % pretty(self.yamldocs))
except ConfigError:
sys.stderr.write(
'config parse error. try running with --logfile=/dev/tty\n')
raise
def install_key_data(self, keydata, target):
"""Install the key data into the open file."""
target.seek(0)
contents = target.read()
ssh_opts = 'no-port-forwarding'
if keydata in contents:
raise InstallError('key data already in file - refusing '
'to double-install.\n')
command = '%s --run' % self.authprogs_binary
if self.logfile:
command += ' --logfile=%s' % self.logfile
if self.keyname:
command += ' --keyname=%s' % self.keyname
target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
{'command': command,
'keydata': keydata,
'ssh_opts': ssh_opts})
def install_key(self, keyfile, authorized_keys):
"""Install a key into the authorized_keys file."""
# Make the directory containing the authorized_keys
# file, if it doesn't exist. (Typically ~/.ssh).
# Ignore errors; we'll fail shortly if we can't
# create the authkeys file.
try:
os.makedirs(os.path.dirname(authorized_keys), 0o700)
except OSError:
pass
keydata = open(keyfile).read()
target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
"""Handle scp commands."""
orig_list = []
orig_list.extend(self.original_command_list)
binary = orig_list.pop(0)
allowed_binaries = ['scp', '/usr/bin/scp']
if binary not in allowed_binaries:
self.logdebug('skipping scp processing - binary "%s" '
'not in approved list.\n' % binary)
return
filepath = orig_list.pop()
arguments = orig_list
if '-f' in arguments:
if not rule.get('allow_download'):
self.logdebug('scp denied - downloading forbidden.\n')
return
if '-t' in arguments:
if not rule.get('allow_upload'):
self.log('scp denied - uploading forbidden.\n')
return
if '-r' in arguments:
if not rule.get('allow_recursion'):
self.log('scp denied - recursive transfers forbidden.\n')
return
if '-p' in arguments:
if not rule.get('allow_permissions', 'true'):
self.log('scp denied - set/getting permissions '
'forbidden.\n')
return
if rule.get('files'):
files = rule.get('files')
if not isinstance(files, list):
files = [files]
if filepath not in files:
self.log('scp denied - file "%s" - not in approved '
'list %s\n' % (filepath, files))
return
# Allow it!
return {'command': self.original_command_list}
def find_match_command(self, rule):
"""Return a matching (possibly munged) command, if found in rule."""
command_string = rule['command']
command_list = command_string.split()
self.logdebug('comparing "%s" to "%s"\n' %
(command_list, self.original_command_list))
if rule.get('allow_trailing_args'):
self.logdebug('allow_trailing_args is true - comparing initial '
'list.\n')
# Verify the initial arguments are all the same
if (self.original_command_list[:len(command_list)] ==
command_list):
self.logdebug('initial list is same\n')
return {'command': self.original_command_list}
else:
self.logdebug('initial list is not same\n')
elif rule.get('pcre_match'):
if re.search(command_string, self.original_command_string):
return {'command': self.original_command_list}
elif command_list == self.original_command_list:
return {'command': command_list}
def find_match(self):
"""Load the config and find a matching rule.
returns the results of find_match_command, a dict of
the command and (in the future) other metadata.
"""
self.load()
for yamldoc in self.yamldocs:
self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
if not yamldoc:
continue
if not self.check_client_ip(yamldoc):
# Rejected - Client IP does not match
continue
if not self.check_keyname(yamldoc):
# Rejected - keyname does not match
continue
rules = yamldoc.get('allow')
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
rule_type = rule.get('rule_type', 'command')
if rule_type == 'command':
sub = self.find_match_command
elif rule_type == 'scp':
sub = self.find_match_scp
else:
self.log('fatal: no such rule_type "%s"\n' % rule_type)
self.raise_and_log_error(ConfigError,
'error parsing config.')
match = sub(rule)
if match:
return match
# No matches, time to give up.
raise CommandRejected('command "%s" denied.' %
self.original_command_string)
def exec_command(self):
"""Glean the command to run and exec.
On problems, sys.exit.
This method should *never* return.
"""
if not self.original_command_string:
raise SSHEnvironmentError('no SSH command found; '
'interactive shell disallowed.')
command_info = {'from': self.get_client_ip(),
'keyname': self.keyname,
'ssh_original_comand': self.original_command_string,
'time': time.time()}
os.environ['AUTHPROGS_KEYNAME'] = self.keyname
retcode = 126
try:
match = self.find_match()
command_info['command'] = match.get('command')
self.logdebug('find_match returned "%s"\n' % match)
command = match['command']
retcode = subprocess.call(command)
command_info['code'] = retcode
self.log('result: %s\n' % command_info)
sys.exit(retcode)
except (CommandRejected, OSError) as err:
command_info['exception'] = '%s' % err
self.log('result: %s\n' % command_info)
sys.exit(retcode)
|
daethnir/authprogs
|
authprogs/authprogs.py
|
AuthProgs.get_client_ip
|
python
|
def get_client_ip(self):
    """Return (and cache on self) the client IP from the SSH environment.

    Reads SSH_CONNECTION, falling back to SSH_CLIENT; the first
    whitespace-separated field of either is the source address.

    :returns: the client IP string.
    :raises SSHEnvironmentError: when neither variable yields an IP.
    """
    if self.client_ip:
        return self.client_ip
    try:
        client = os.environ.get('SSH_CONNECTION',
                                os.environ.get('SSH_CLIENT'))
        self.client_ip = client.split()[0]
        self.logdebug('client_ip: %s\n' % self.client_ip)
        return self.client_ip
    except (AttributeError, IndexError):
        # Narrowed from a bare "except:": the expected failures are
        # client being None (neither variable set -> AttributeError) or
        # an empty value (IndexError).  A bare except also swallowed
        # SystemExit and KeyboardInterrupt.
        raise SSHEnvironmentError('cannot identify the ssh client '
                                  'IP address')
|
Return the client IP from the environment.
|
train
|
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L138-L152
|
[
"def logdebug(self, message):\n \"\"\"Log debugging information.\"\"\"\n if self.debug:\n self.log(message)\n"
] |
class AuthProgs(object): # pylint: disable-msg=R0902
    """Restrict an SSH forced command to a whitelist of allowed programs.

    Reads SSH_ORIGINAL_COMMAND from the environment, matches it against
    YAML rule documents (from a config file and/or config dir), and either
    runs the approved command or rejects it. Also installs keys into an
    authorized_keys file with a command= forced-command option.
    """
    def __init__(self, logfile=None, configfile=None,
                 configdir=None, debug=False, **kwargs):
        """AuthProgs constructor.

        logfile: optional path, opened in append mode for logging.
        configfile: path to a single YAML rules file.
        configdir: directory whose non-dotfiles are also read as rules.
        debug: when true, logdebug() output is written to the logfile.

        kwargs include:
        authprogs_binary: path to this binary, when creating
        authorized_keys entries.
        If not specified, determines from sys.argv[0]
        keyname: the name of this key, for matching in rules.

        Raises Error if keyname contains spaces or tabs, since it is
        later embedded unquoted in an authorized_keys command= option.
        """
        self.debug = debug
        self.logfile = logfile
        # Cached client IP; filled lazily by get_client_ip().
        self.client_ip = None
        if logfile:
            self.logfh = open(logfile, 'a')
        else:
            # Falsy sentinel: log()/logdebug() become no-ops.
            self.logfh = False
        if kwargs.get('authprogs_binary'):
            self.authprogs_binary = kwargs['authprogs_binary']
        else:
            self.authprogs_binary = (
                os.path.abspath(os.path.abspath(sys.argv[0])))
        # The command the SSH client asked to run, as exported by sshd.
        self.original_command_string = os.environ.get(
            'SSH_ORIGINAL_COMMAND', '')
        self.original_command_list = self.original_command_string.split()
        self.keyname = kwargs.get('keyname')
        if not self.keyname:
            self.keyname = ''
        # Whitespace would corrupt the generated authorized_keys line.
        if ' ' in self.keyname or '\t' in self.keyname:
            self.log('FATAL: keyname contains space/tabs\n')
            raise Error('--keyname may contain neither spaces nor tabs.')
        # Parsed YAML rule documents; populated by load().
        self.yamldocs = None
        self.configfile = configfile
        self.configdir = configdir
    def raise_and_log_error(self, error, message):
        """Raise error, including message and original traceback.

        error: the exception class to raise
        message: the user-facing error message
        """
        self.log('raising %s, traceback %s\n' %
                 (error, traceback.format_exc()))
        raise error(message)
    def logdebug(self, message):
        """Log debugging information (no-op unless self.debug is set)."""
        if self.debug:
            self.log(message)
    def log(self, message):
        """Append a message to the logfile, if one was opened."""
        if self.logfh:
            self.logfh.write(message) # pylint: disable-msg=E1103
    def check_keyname(self, rule):
        """If a key name is specified, verify it is permitted.

        Returns True when the rule has no 'keynames' requirement or when
        self.keyname is among the listed names; False otherwise.
        """
        keynames = rule.get('keynames')
        if not keynames:
            self.logdebug('no keynames requirement.\n')
            return True
        # A single scalar in the YAML is treated as a one-element list.
        if not isinstance(keynames, list):
            keynames = [keynames]
        if self.keyname in keynames:
            self.logdebug('keyname "%s" matches rule.\n' % self.keyname)
            return True
        else:
            self.logdebug('keyname "%s" does not match rule.\n' % self.keyname)
            return False
    def check_client_ip(self, rule):
        """If a client IP is specified, verify it is permitted.

        Returns True when the rule has no 'from' requirement or when the
        client IP (derived from the SSH environment) is in the allowed
        list; False otherwise.
        """
        if not rule.get('from'):
            self.logdebug('no "from" requirement.\n')
            return True
        allow_from = rule.get('from')
        # A single scalar in the YAML is treated as a one-element list.
        if not isinstance(allow_from, list):
            allow_from = [allow_from]
        client_ip = self.get_client_ip()
        if client_ip in allow_from:
            self.logdebug('client_ip %s in %s\n' % (client_ip, allow_from))
            return True
        else:
            self.logdebug('client_ip %s not in %s' % (client_ip, allow_from))
            return False
    def get_merged_config(self):
        """Get merged config file.

        Returns an open StringIO containing the merged config file
        (configfile first, then each non-dotfile in configdir).

        NOTE(review): when self.yamldocs is already populated this
        returns None; load() would then hand None to yaml.load - confirm
        load() is only invoked once per instance.
        """
        if self.yamldocs:
            return
        loadfiles = []
        if self.configfile:
            loadfiles.append(self.configfile)
        if self.configdir:
            # Gets list of all non-dotfile files from configdir.
            loadfiles.extend(
                [f for f in
                 [os.path.join(self.configdir, x) for x in
                  os.listdir(self.configdir)]
                 if os.path.isfile(f) and
                 not os.path.basename(f).startswith('.')])
        merged_configfile = io.StringIO()
        # '-' separator lines keep each concatenated file a distinct
        # top-level YAML list entry.
        merged_configfile.write('-\n')
        for thefile in loadfiles:
            self.logdebug('reading in config file %s\n' % thefile)
            merged_configfile.write(open(thefile).read())
            merged_configfile.write('\n-\n')
        merged_configfile.seek(0)
        self.logdebug('merged log file: """\n%s\n"""\n' %
                      merged_configfile.read())
        # Rewind so the caller can read from the start.
        merged_configfile.seek(0)
        return merged_configfile
    def load(self):
        """Load our config into self.yamldocs, log and raise on error."""
        try:
            merged_configfile = self.get_merged_config()
            # NOTE(review): Loader is defined elsewhere in this module;
            # confirm it is a safe PyYAML loader for untrusted config.
            self.yamldocs = yaml.load(merged_configfile, Loader=Loader)
            # Strip out the top level 'None's we get from concatenation.
            # Functionally not required, but makes dumps cleaner.
            self.yamldocs = [x for x in self.yamldocs if x]
            self.logdebug('parsed_rules:\n%s\n' % pretty(self.yamldocs))
        except (yaml.scanner.ScannerError, yaml.parser.ParserError):
            self.raise_and_log_error(ConfigError, 'error parsing config.')
    def dump_config(self):
        """Pretty print the configuration dict to stdout.

        Prints the raw merged YAML, then the parsed Python structure;
        re-raises ConfigError after a hint on stderr.
        """
        yaml_content = self.get_merged_config()
        print('YAML Configuration\n%s\n' % yaml_content.read())
        try:
            self.load()
            print('Python Configuration\n%s\n' % pretty(self.yamldocs))
        except ConfigError:
            sys.stderr.write(
                'config parse error. try running with --logfile=/dev/tty\n')
            raise
    def install_key_data(self, keydata, target):
        """Install the key data into the open file.

        keydata: the public key text to install.
        target: an open, seekable read/write file object.

        Prefixes the key with a command="... --run" forced-command option
        plus no-port-forwarding. Raises InstallError if the key data is
        already present in the file.
        """
        target.seek(0)
        contents = target.read()
        ssh_opts = 'no-port-forwarding'
        if keydata in contents:
            raise InstallError('key data already in file - refusing '
                               'to double-install.\n')
        command = '%s --run' % self.authprogs_binary
        if self.logfile:
            command += ' --logfile=%s' % self.logfile
        if self.keyname:
            command += ' --keyname=%s' % self.keyname
        target.write('command="%(command)s",%(ssh_opts)s %(keydata)s\n' %
                     {'command': command,
                      'keydata': keydata,
                      'ssh_opts': ssh_opts})
    def install_key(self, keyfile, authorized_keys):
        """Install a key into the authorized_keys file.

        keyfile: path to the public key to install.
        authorized_keys: path to the target authorized_keys file.
        """
        # Make the directory containing the authorized_keys
        # file, if it doesn't exist. (Typically ~/.ssh).
        # Ignore errors; we'll fail shortly if we can't
        # create the authkeys file.
        try:
            os.makedirs(os.path.dirname(authorized_keys), 0o700)
        except OSError:
            pass
        keydata = open(keyfile).read()
        # O_CREAT with 0o600 so a freshly created file is owner-only.
        target_fd = os.open(authorized_keys, os.O_RDWR | os.O_CREAT, 0o600)
        self.install_key_data(keydata, os.fdopen(target_fd, 'w+'))
    def find_match_scp(self, rule): # pylint: disable-msg=R0911,R0912
        """Handle scp commands.

        Returns {'command': ...} when the requested scp invocation
        satisfies the rule; returns None (implicitly) to reject.
        """
        orig_list = []
        orig_list.extend(self.original_command_list)
        binary = orig_list.pop(0)
        allowed_binaries = ['scp', '/usr/bin/scp']
        if binary not in allowed_binaries:
            self.logdebug('skipping scp processing - binary "%s" '
                          'not in approved list.\n' % binary)
            return
        # Server-side scp invocations end with the target path.
        filepath = orig_list.pop()
        arguments = orig_list
        # '-f' (from): the remote side is sending, i.e. a download.
        if '-f' in arguments:
            if not rule.get('allow_download'):
                self.logdebug('scp denied - downloading forbidden.\n')
                return
        # '-t' (to): the remote side is receiving, i.e. an upload.
        if '-t' in arguments:
            if not rule.get('allow_upload'):
                self.log('scp denied - uploading forbidden.\n')
                return
        if '-r' in arguments:
            if not rule.get('allow_recursion'):
                self.log('scp denied - recursive transfers forbidden.\n')
                return
        if '-p' in arguments:
            # Default 'true' is a truthy string, so preserving
            # permissions/times is allowed unless the rule explicitly
            # sets a falsy value.
            if not rule.get('allow_permissions', 'true'):
                self.log('scp denied - set/getting permissions '
                         'forbidden.\n')
                return
        if rule.get('files'):
            files = rule.get('files')
            if not isinstance(files, list):
                files = [files]
            if filepath not in files:
                self.log('scp denied - file "%s" - not in approved '
                         'list %s\n' % (filepath, files))
                return
        # Allow it!
        return {'command': self.original_command_list}
    def find_match_command(self, rule):
        """Return a matching (possibly munged) command, if found in rule.

        Matching modes, checked in order:
          - allow_trailing_args: the rule command must be a prefix of
            the requested command; the full requested command is kept.
          - pcre_match: the rule command is a regex searched against the
            requested command string.
          - otherwise: exact token-for-token match.
        Returns None (implicitly) when nothing matches.
        """
        command_string = rule['command']
        command_list = command_string.split()
        self.logdebug('comparing "%s" to "%s"\n' %
                      (command_list, self.original_command_list))
        if rule.get('allow_trailing_args'):
            self.logdebug('allow_trailing_args is true - comparing initial '
                          'list.\n')
            # Verify the initial arguments are all the same
            if (self.original_command_list[:len(command_list)] ==
                    command_list):
                self.logdebug('initial list is same\n')
                return {'command': self.original_command_list}
            else:
                self.logdebug('initial list is not same\n')
        elif rule.get('pcre_match'):
            if re.search(command_string, self.original_command_string):
                return {'command': self.original_command_list}
        elif command_list == self.original_command_list:
            return {'command': command_list}
    def find_match(self):
        """Load the config and find a matching rule.

        returns the results of find_match_command, a dict of
        the command and (in the future) other metadata.

        Raises CommandRejected when no rule allows the command, and
        ConfigError (via raise_and_log_error) on an unknown rule_type.
        """
        self.load()
        for yamldoc in self.yamldocs:
            self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
            if not yamldoc:
                continue
            if not self.check_client_ip(yamldoc):
                # Rejected - Client IP does not match
                continue
            if not self.check_keyname(yamldoc):
                # Rejected - keyname does not match
                continue
            rules = yamldoc.get('allow')
            if not isinstance(rules, list):
                rules = [rules]
            for rule in rules:
                # Dispatch on rule_type; plain 'command' is the default.
                rule_type = rule.get('rule_type', 'command')
                if rule_type == 'command':
                    sub = self.find_match_command
                elif rule_type == 'scp':
                    sub = self.find_match_scp
                else:
                    self.log('fatal: no such rule_type "%s"\n' % rule_type)
                    self.raise_and_log_error(ConfigError,
                                             'error parsing config.')
                match = sub(rule)
                if match:
                    return match
        # No matches, time to give up.
        raise CommandRejected('command "%s" denied.' %
                              self.original_command_string)
    def exec_command(self):
        """Glean the command to run and exec.

        On problems, sys.exit.
        This method should *never* return.

        Raises SSHEnvironmentError when no SSH_ORIGINAL_COMMAND was set
        (i.e. an interactive shell was requested).
        """
        if not self.original_command_string:
            raise SSHEnvironmentError('no SSH command found; '
                                      'interactive shell disallowed.')
        # Audit record, logged whether the command runs or is rejected.
        # NOTE(review): 'ssh_original_comand' key is misspelled; fixing
        # it would change the log format consumers may parse.
        command_info = {'from': self.get_client_ip(),
                        'keyname': self.keyname,
                        'ssh_original_comand': self.original_command_string,
                        'time': time.time()}
        os.environ['AUTHPROGS_KEYNAME'] = self.keyname
        # Exit status used when we never got a return code from the
        # command (rejected or failed to execute).
        retcode = 126
        try:
            match = self.find_match()
            command_info['command'] = match.get('command')
            self.logdebug('find_match returned "%s"\n' % match)
            command = match['command']
            # Run the approved command as an argument list (no shell).
            retcode = subprocess.call(command)
            command_info['code'] = retcode
            self.log('result: %s\n' % command_info)
            sys.exit(retcode)
        except (CommandRejected, OSError) as err:
            command_info['exception'] = '%s' % err
            self.log('result: %s\n' % command_info)
            sys.exit(retcode)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.