text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField
# storage/JSON field names shared with the SynergyMqTransmission document below
PROCESS_NAME = 'process_name'  # name of the process to handle the schedulables
ENTRY_NAME = 'entry_name'  # name of the schedulable, if applicable
UNIT_OF_WORK_ID = 'unit_of_work_id'  # associated Unit Of Work, if applicable
class SynergyMqTransmission(BaseDocument):
    """ Non-persistent model. Instance of this class presents either:
        - single request from Synergy Scheduler to any worker
        - response/report from the worker to the Synergy Scheduler """
    process_name = StringField(PROCESS_NAME)
    entry_name = StringField(ENTRY_NAME)
    unit_of_work_id = ObjectIdField(UNIT_OF_WORK_ID)

    @BaseDocument.key.getter
    def key(self):
        """composite key: (process_name, entry_name) pair"""
        return self.process_name, self.entry_name

    @key.setter
    def key(self, value):
        if isinstance(value, str):
            # a bare process name: no entry component
            self.process_name = value
            self.entry_name = None
        else:
            # a (process_name, entry_name) sequence
            self.process_name, self.entry_name = value[0], value[1]

    def __str__(self):
        return '{0}::{1}#{2}'.format(self.process_name, self.entry_name, self.unit_of_work_id)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/model/synergy_mq_transmission.py",
"copies": "1",
"size": "1216",
"license": "bsd-3-clause",
"hash": 6944137627305164000,
"line_mean": 34.7647058824,
"line_max": 88,
"alpha_frac": 0.6587171053,
"autogenerated": false,
"ratio": 3.6407185628742513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4799435668174251,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.errors import FieldDoesNotExist, ValidationError
from odm.fields import NestedDocumentField, BaseField
class BaseDocument(object):
    """ Base class for ODM documents.

    Fields are declared as class-level field descriptors. Each instance keeps
    two parallel registries:
    - self._attributes: maps the Python *attribute name* -> field descriptor
    - self._fields:     maps the *storage/JSON field name* -> field descriptor
    Values are read and written exclusively through the descriptors'
    __get__/__set__/__delete__ protocol.
    """
    def __init__(self, **values):
        """
        :param values: list of <Document's attribute name>-<attribute value> pairs
        :raise FieldDoesNotExist: if a keyword does not name a declared field attribute
        """
        self._fields = self._get_fields()
        self._attributes = self._get_attributes()
        self._data = dict()
        for field_name in values.keys():
            if field_name not in self._attributes:
                msg = f"The attribute '{field_name}' is not present in document type '{self.__class__.__name__}'"
                raise FieldDoesNotExist(msg)
            field = self._attributes[field_name]
            # go through the descriptor so the field's own conversion/validation applies
            field.__set__(self, values[field_name])
    def __delattr__(self, name):
        """Handle deletions of fields:
        non-initialized fields are left untouched; nullable fields and fields
        with a default are reset to the default; all others are removed."""
        if name not in self._attributes:
            super(BaseDocument, self).__delattr__(name)
        else:
            field = self._attributes[name]
            if not field.initialized(self):
                # do not delete non-initialized field
                pass
            elif field.null or field.default is not None:
                setattr(self, name, field.default)
            else:
                # field.null is False and field.default is None
                field.__delete__(self)
    def __setattr__(self, name, value):
        # plain object behavior; declared fields intercept assignment via their descriptors
        super(BaseDocument, self).__setattr__(name, value)
    def __iter__(self):
        # iterates over the storage field names
        return iter(self._fields)
    def __getitem__(self, name):
        """ Dictionary-style field getter.
        :param name: name of the field (not the name of the Document's attribute)
        :return: field value if present
        :raise KeyError if the given name is not among known field_names
        """
        if name not in self._fields:
            raise KeyError(name)
        field_obj = self._fields[name]
        return field_obj.__get__(self, self.__class__)
    def __setitem__(self, name, value):
        """ Dictionary-style field setter.
        :param name: name of the field (not the name of the Document's attribute)
        :param value: value to set
        :raise KeyError if the given name is not among known field_names
        """
        if name not in self._fields:
            raise KeyError(name)
        field_obj = self._fields[name]
        return field_obj.__set__(self, value)
    def __delitem__(self, name):
        """ Dictionary-style field deleter.
        :param name: name of the field (not the name of the Document's attribute)
        :raise KeyError if the given name is not among known field_names
        """
        if name not in self._fields:
            raise KeyError(name)
        field_obj = self._fields[name]
        return field_obj.__delete__(self)
    def __contains__(self, name):
        """
        :param name: name of the field (not the name of the Document's attribute)
        :return: True if the field is set, False if the field is None or not known
        """
        try:
            val = self.__getitem__(name)
            return val is not None
        except (KeyError, AttributeError):
            return False
    def __len__(self):
        # _data is populated by the field descriptors; presumably this is the
        # number of initialized field values - confirm against the fields module
        return len(self._data)
    def __repr__(self):
        try:
            u = self.__str__()
        except (UnicodeEncodeError, UnicodeDecodeError):
            u = '[Bad Unicode data]'
        repr_type = str if u is None else type(u)
        return repr_type(f'<{self.__class__.__name__}: {u}>')
    def __str__(self):
        return f'{self.__class__.__name__} object'
    def __eq__(self, other):
        # documents of the same type compare equal when their natural keys match;
        # if key_fields is not implemented, fall back to identity comparison only
        if self is other:
            return True
        if isinstance(other, self.__class__):
            try:
                return self.key == other.key
            except NotImplementedError:
                pass
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # NOTE(review): raises NotImplementedError for documents without key_fields
        return hash(self.key)
    @property
    def key(self):
        """natural key of the document: a single value for a str key_fields,
        a tuple of values for a list/tuple key_fields"""
        if isinstance(self.key_fields(), str):
            return self[self.key_fields()]
        elif isinstance(self.key_fields(), (list, tuple)):
            key_list = list()
            for field_name in self.key_fields():
                key_list.append(self[field_name])
            return tuple(key_list)
        else:
            # NOTE(review): the message omits `str`, which is also a supported type
            raise TypeError('classmethod {0}.key_fields of type {1} is not of supported types: list, tuple'.
                            format(self.__class__.__name__, type(self.key_fields())))
    @key.setter
    def key(self, value):
        if isinstance(self.key_fields(), str):
            self[self.key_fields()] = value
        elif isinstance(self.key_fields(), (list, tuple)):
            for i, field_name in enumerate(self.key_fields()):
                self[field_name] = value[i]
        else:
            raise TypeError('classmethod {0}.key_fields of type {1} is not of supported types: list, tuple'.
                            format(self.__class__.__name__, type(self.key_fields())))
    @classmethod
    def key_fields(cls):
        """subclasses return the storage field name(s) forming the natural key"""
        raise NotImplementedError(f'classmethod {cls.__name__}.key_fields is not implemented')
    @property
    def document(self):
        # convenience alias for the JSON representation
        return self.to_json()
    def validate(self):
        """Ensure that all fields' values are valid and that non-nullable fields are present. """
        for field_name, field_obj in self._fields.items():
            value = field_obj.__get__(self, self.__class__)
            if value is None and field_obj.null is False:
                raise ValidationError(f'Non-nullable field {field_name} is set to None')
            elif value is None and field_obj.null is True:
                # no further validations are possible on NoneType field
                continue
            if isinstance(field_obj, NestedDocumentField):
                value.validate()
            else:
                field_obj.validate(value)
    def to_json(self):
        """Converts given document to JSON dict. Nested documents are serialized
        recursively; fields whose value is None are omitted. """
        json_data = dict()
        for field_name, field_obj in self._fields.items():
            if isinstance(field_obj, NestedDocumentField):
                nested_document = field_obj.__get__(self, self.__class__)
                value = None if nested_document is None else nested_document.to_json()
            elif isinstance(field_obj, BaseField):
                value = field_obj.__get__(self, self.__class__)
                value = field_obj.to_json(value)
            else:
                # ignore fields not derived from BaseField or NestedDocument
                continue
            if value is None:
                # skip fields with None value
                continue
            json_data[field_name] = value
        return json_data
    @classmethod
    def _get_fields(cls):
        # registry keyed by the field's *storage* name (field_obj.name)
        _fields = dict()
        for field_name in dir(cls):
            field_obj = getattr(cls, field_name)
            if isinstance(field_obj, BaseField):
                _fields[field_obj.name] = field_obj
            else:
                continue
        return _fields
    @classmethod
    def _get_attributes(cls):
        # registry keyed by the *Python attribute* name on the class
        _attributes = dict()
        for attribute_name in dir(cls):
            attribute_obj = getattr(cls, attribute_name)
            if isinstance(attribute_obj, BaseField):
                _attributes[attribute_name] = attribute_obj
            else:
                continue
        return _attributes
    @classmethod
    def _get_ordered_field_names(cls):
        # storage names in declaration order, via each field's creation_counter
        _fields = cls._get_fields()
        return [f[1].name for f in sorted(_fields.items(), key=lambda entry: entry[1].creation_counter)]
    @classmethod
    def from_json(cls, json_data):
        """ Converts json data to a new document instance"""
        new_instance = cls()
        for field_name, field_obj in cls._get_fields().items():
            if isinstance(field_obj, NestedDocumentField):
                if field_name in json_data:
                    nested_field = field_obj.__get__(new_instance, new_instance.__class__)
                    if not nested_field:
                        # here, we have to create an instance of the nested document,
                        # since we have a JSON object for it
                        nested_field = field_obj.nested_klass()
                    nested_document = nested_field.from_json(json_data[field_name])
                    field_obj.__set__(new_instance, nested_document)
            elif isinstance(field_obj, BaseField):
                if field_name in json_data:
                    value = field_obj.from_json(json_data[field_name])
                    field_obj.__set__(new_instance, value)
            else:
                continue
        return new_instance
| {
"repo_name": "mushkevych/synergy_odm",
"path": "odm/document.py",
"copies": "1",
"size": "8785",
"license": "bsd-3-clause",
"hash": -7367159053698851000,
"line_mean": 35.6041666667,
"line_max": 113,
"alpha_frac": 0.5556061468,
"autogenerated": false,
"ratio": 4.457128361237951,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5512734508037951,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.errors import ValidationError
from synergy.conf import settings, context
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao, QUERY_GET_FLOW_FREERUNS
from synergy.db.dao.log_recording_dao import LogRecordingDao
from synergy.db.model.unit_of_work import TYPE_MANAGED, TYPE_FREERUN
from synergy.db.model.freerun_process_entry import freerun_context_entry, build_schedulable_name, split_schedulable_name
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request, safe_json_response
from synergy.scheduler.scheduler_constants import STATE_MACHINE_FREERUN
from synergy.system import time_helper
from werkzeug.utils import cached_property
from flow.conf import flows
from flow.core.execution_context import ExecutionContext
from flow.db.dao.flow_dao import FlowDao
from flow.db.dao.step_dao import StepDao
from flow.flow_constants import *
from flow.mx.rest_model_factory import *
# canonical JSON payloads returned by the action methods below
RESPONSE_OK = {'response': 'OK'}
RESPONSE_NOT_OK = {'response': 'Job is not finished'}
def flow_schedulable_name(process_name, flow_name, step_name):
    """ schedulable_name is used as freerun `uow.process_name`
        replicates format of the `FreerunProcessEntry.schedulable_name` with an assumption
        that entry_name is generated by `build_schedulable_name` """
    # nest the (flow, step) pair under the process name: process::flow::step
    return build_schedulable_name(process_name, build_schedulable_name(flow_name, step_name))
def find_flow_step_uow(uow_dao, process_name, flow_name, step_name, timeperiod):
    """ Unless created manually, FreerunProcessEntries for workflow steps are run-time only objects
        i.e. they disappear on Synergy Scheduler restart;
        method tries to fetch UOW associated with key <process_name::workflow_name::step_name + timeperiod>
        :return: the matching unit-of-work record, or None when the lookup fails """
    try:
        schedulable_name = flow_schedulable_name(process_name, flow_name, step_name)
        uow = uow_dao.get_by_params(process_name=schedulable_name, timeperiod=timeperiod, start_id=0, end_id=0)
    except Exception:
        # deliberate best-effort: a missing UOW is an expected condition here.
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate
        uow = None
    return uow
def find_all_flow_uows(uow_dao, process_name, flow_name, timeperiod):
    """ fetches all freerun UOW records for the given flow and timeperiod
        :return: list of matching records; empty list when the query fails """
    schedulable_stem = flow_schedulable_name(process_name, flow_name, '')
    query = QUERY_GET_FLOW_FREERUNS(schedulable_stem, timeperiod)
    try:
        records_list = uow_dao.run_query(query)
    except Exception:
        # deliberate best-effort: narrowed from a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate
        records_list = list()
    return records_list
class FlowRequest(object):
    """ Value object describing a single flow/step execution request
        that is handed to the freerun state machine. """

    def __init__(self, process_name, flow_name, step_name, run_mode, timeperiod, start_timeperiod, end_timeperiod):
        self.process_name = process_name
        self.flow_name = flow_name
        self.step_name = step_name
        self.run_mode = run_mode
        self.timeperiod = timeperiod
        self.start_timeperiod = start_timeperiod
        self.end_timeperiod = end_timeperiod

    @property
    def schedulable_name(self):
        """composite freerun name in <process::flow::step> format"""
        return flow_schedulable_name(self.process_name, self.flow_name, self.step_name)

    @property
    def arguments(self):
        """keyword arguments consumed by the flow worker"""
        args = dict()
        args[ARGUMENT_FLOW_NAME] = self.flow_name
        args[ARGUMENT_STEP_NAME] = self.step_name
        args[ARGUMENT_RUN_MODE] = self.run_mode
        return args
class FlowActionHandler(BaseRequestHandler):
    """ MX request handler exposing UI-driven actions on workflows:
        inspecting flow/step state, changing run modes and submitting freerun UOWs. """
    def __init__(self, request, **values):
        super(FlowActionHandler, self).__init__(request, **values)
        self.flow_dao = FlowDao(self.logger)
        self.step_dao = StepDao(self.logger)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.log_recording_dao = LogRecordingDao(self.logger)
        self.process_name = self.request_arguments.get(ARGUMENT_PROCESS_NAME)
        self.unit_of_work_type = self.request_arguments.get(ARGUMENT_UNIT_OF_WORK_TYPE, TYPE_MANAGED)
        self.flow_name = self.request_arguments.get(ARGUMENT_FLOW_NAME)
        if not self.flow_name and self.process_name:
            # flow name not passed explicitly - derive it from the process context arguments
            process_entry = context.process_context[self.process_name]
            self.flow_name = process_entry.arguments.get(ARGUMENT_FLOW_NAME)
        self.step_name = self.request_arguments.get(ARGUMENT_STEP_NAME)
        self.timeperiod = self.request_arguments.get(ARGUMENT_TIMEPERIOD)
        # request is actionable only when a registered flow name and a timeperiod are present
        self.is_request_valid = True if self.flow_name \
                                        and self.flow_name in flows.flows \
                                        and self.timeperiod \
            else False
        if self.is_request_valid:
            self.flow_name = self.flow_name.strip()
            self.timeperiod = self.timeperiod.strip()
        self.run_mode = self.request_arguments.get(ARGUMENT_RUN_MODE, '')
        self.run_mode = self.run_mode.strip()
    def _get_tree_node(self):
        """ resolves the Timetable tree node for (process_name, timeperiod);
            side effect: normalizes self.timeperiod to the process time qualifier
            :raise UserWarning: if no tree is registered for the process """
        tree = self.scheduler.timetable.get_tree(self.process_name)
        if tree is None:
            raise UserWarning('No Timetable tree is registered for process {0}'.format(self.process_name))
        time_qualifier = context.process_context[self.process_name].time_qualifier
        self.timeperiod = time_helper.cast_to_time_qualifier(time_qualifier, self.timeperiod)
        node = tree.get_node(self.process_name, self.timeperiod)
        return node
    @property
    def job_record(self):
        # job record of the resolved timetable node
        node = self._get_tree_node()
        return node.job_record
    @property
    def managed_uow_record(self):
        """ :return: unit-of-work referenced by the job record, or None if not linked """
        node = self._get_tree_node()
        uow_id = node.job_record.related_unit_of_work
        if not uow_id:
            return None
        return self.uow_dao.get_one(uow_id)
    @property
    def freerun_uow_records(self):
        """ :return: freerun UOW records for this flow that still have an active handler """
        valid_freerun_uow = list()
        records_list = find_all_flow_uows(self.uow_dao, self.process_name, self.flow_name, self.timeperiod)
        if len(records_list) == 0:
            self.logger.warning('MX: no Freerun UOW records found for {0}@{1} ~> {2}.'
                                .format(self.process_name, self.timeperiod, self.flow_name))
            return valid_freerun_uow
        for uow_record in records_list:
            # freerun uow.process_name is a composite in format <process_name::entry_name>
            handler_key = split_schedulable_name(uow_record.process_name)
            if handler_key not in self.scheduler.freerun_handlers:
                # skip UOW records that have no active freerun handler
                continue
            valid_freerun_uow.append(uow_record)
        return valid_freerun_uow
    @property
    def freerun_process_entry(self):
        """ :returns run-time only instance of the FreerunProcessEntry;
            lazily registers a disabled entry in scheduler.freerun_handlers on first access """
        entry_name = build_schedulable_name(self.flow_name, self.step_name)
        handler_key = (self.process_name, entry_name)
        if handler_key not in self.scheduler.freerun_handlers:
            classname = context.process_context[self.process_name].classname
            entry = freerun_context_entry(
                process_name=self.process_name,
                entry_name=entry_name,
                classname=classname,
                token=entry_name,
                trigger_frequency='every {0}'.format(SECONDS_IN_CENTURY),
                is_on=False,
                description='Runtime freerun object to facilitate CUSTOM RUN MODES for workflow'
            )
            # find uow for workflow step if any
            uow = find_flow_step_uow(self.uow_dao, self.process_name, self.flow_name, self.step_name, self.timeperiod)
            if uow:
                entry.related_unit_of_work = uow.db_id
            self.scheduler.freerun_handlers[handler_key] = entry
        return self.scheduler.freerun_handlers[handler_key]
    @property
    def flow_record(self):
        # persisted flow entry for (flow_name, timeperiod)
        return self.flow_dao.get_one([self.flow_name, self.timeperiod])
    @property
    def step_record(self):
        # persisted step entry for (flow_name, step_name, timeperiod)
        return self.step_dao.get_one([self.flow_name, self.step_name, self.timeperiod])
    @property
    def flow_graph_obj(self):
        """ deep copy of the registered flow graph, enriched with persisted
            flow/step entries when they exist (LookupError means none persisted yet) """
        # NOTE(review): `copy` and `Step` are expected to come in via the
        # wildcard imports at the top of this module - confirm
        _flow_graph_obj = copy.deepcopy(flows.flows[self.flow_name])
        _flow_graph_obj.context = ExecutionContext(self.flow_name, self.timeperiod, None, None, settings.settings)
        try:
            flow_entry = self.flow_dao.get_one([self.flow_name, self.timeperiod])
            _flow_graph_obj.context.flow_entry = flow_entry
            _flow_graph_obj.context.start_timeperiod = flow_entry.start_timeperiod
            _flow_graph_obj.context.end_timeperiod = flow_entry.end_timeperiod
            steps = self.step_dao.get_all_by_flow_id(flow_entry.db_id)
            for s in steps:
                assert isinstance(s, Step)
                _flow_graph_obj[s.step_name].step_entry = s
                _flow_graph_obj.yielded.append(s)
        except LookupError:
            pass
        return _flow_graph_obj
    @cached_property
    @valid_action_request
    def flow_details(self):
        # JSON document describing the whole flow graph
        rest_model = create_rest_flow(self.flow_graph_obj)
        return rest_model.document
    @cached_property
    def active_run_mode(self):
        # run mode currently persisted for this (process, flow, timeperiod)
        return self.flow_dao.managed_run_mode(self.process_name, self.flow_name, self.timeperiod)
    @cached_property
    @valid_action_request
    def step_details(self):
        # JSON document describing a single step of the flow graph
        graph_node_obj = self.flow_graph_obj._dict[self.step_name]
        rest_model = create_rest_step(graph_node_obj)
        return rest_model.document
    @valid_action_request
    def set_run_mode(self):
        """
        - set a flag for ProcessEntry.arguments[ARGUMENT_RUN_MODE] = RUN_MODE_RECOVERY
        - trigger standard reprocessing
        """
        if not self.job_record or not self.run_mode:
            return RESPONSE_NOT_OK
        try:
            msg = 'MX: setting RUN MODE for {0}@{1} ~> {2} to {3}' \
                .format(self.process_name, self.timeperiod, self.flow_name, self.run_mode)
            self.scheduler.timetable.add_log_entry(self.process_name, self.timeperiod, msg)
            self.logger.info(msg + ' {')
            local_record = self.flow_record
            local_record.run_mode = self.run_mode
            self.flow_dao.update(local_record)
            return RESPONSE_OK
        except (ValidationError, LookupError):
            return RESPONSE_NOT_OK
        finally:
            self.logger.info('}')
    def perform_freerun_action(self, run_mode):
        """
        - make sure that the job is finished
          i.e. the job is in [STATE_NOOP, STATE_PROCESSED, STATE_SKIPPED]
        - submit a FREERUN UOW for given (process_name::flow_name::step_name, timeperiod)
        :return RESPONSE_OK if the UOW was submitted and RESPONSE_NOT_OK otherwise
        """
        if not self.job_record or not self.job_record.is_finished:
            return RESPONSE_NOT_OK
        uow = self.managed_uow_record
        if not uow:
            # for skipped job that has no UOW associated with it
            return RESPONSE_NOT_OK
        flow_request = FlowRequest(self.process_name, self.flow_name, self.step_name,
                                   run_mode,
                                   self.timeperiod, uow.start_timeperiod, uow.end_timeperiod)
        state_machine = self.scheduler.timetable.state_machines[STATE_MACHINE_FREERUN]
        state_machine.manage_schedulable(self.freerun_process_entry, flow_request)
        return RESPONSE_OK
    @valid_action_request
    def run_one_step(self):
        # execute a single step and stop
        return self.perform_freerun_action(RUN_MODE_RUN_ONE)
    @valid_action_request
    def run_from_step(self):
        """
        - make sure that the job is finished
          i.e. the job is in [STATE_NOOP, STATE_PROCESSED, STATE_SKIPPED]
        - submit a FREERUN UOW for given (process_name::flow_name::step_name, timeperiod)
        :return RESPONSE_OK if the UOW was submitted and RESPONSE_NOT_OK otherwise
        """
        return self.perform_freerun_action(RUN_MODE_RUN_FROM)
    @valid_action_request
    @safe_json_response
    def get_step_log(self):
        """ :return: log document for the step, or a stub response when absent """
        try:
            resp = self.log_recording_dao.get_one(self.step_record.db_id).document
        except (TypeError, LookupError):
            resp = {'response': 'no related step log'}
        return resp
    @valid_action_request
    @safe_json_response
    def get_flow_log(self):
        """ :return: log document for the flow, or a stub response when absent """
        try:
            resp = self.log_recording_dao.get_one(self.flow_record.db_id).document
        except (TypeError, LookupError):
            resp = {'response': 'no related workflow log'}
        return resp
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/mx/flow_action_handler.py",
"copies": "1",
"size": "12292",
"license": "bsd-3-clause",
"hash": 7601558784633987000,
"line_mean": 40.2483221477,
"line_max": 120,
"alpha_frac": 0.6449723397,
"autogenerated": false,
"ratio": 3.5815850815850814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47265574212850814,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.fields import BooleanField, StringField, DictField, ListField, NestedDocumentField
from odm.document import BaseDocument
from synergy.db.model.job import Job
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.model.timetable_tree_entry import TimetableTreeEntry
class RestFreerunSchedulerEntry(FreerunProcessEntry):
    """ FreerunProcessEntry extended with runtime-only fields for the MX REST views. """
    is_alive = BooleanField()
    next_run_in = StringField()
class RestManagedSchedulerEntry(ManagedProcessEntry):
    """ ManagedProcessEntry extended with runtime-only fields for the MX REST views. """
    is_alive = BooleanField()
    next_run_in = StringField()
    next_timeperiod = StringField()
    reprocessing_queue = ListField()
class RestTimetableTree(TimetableTreeEntry):
    """ TimetableTreeEntry extended with tree-topology fields for the MX REST views. """
    dependant_trees = ListField()
    sorted_process_names = ListField()
    processes = DictField()
class RestJob(Job):
    """ Job extended with presentation-only fields for the MX REST views. """
    time_qualifier = StringField()
    number_of_children = StringField()
class RestTimetableTreeNode(BaseDocument):
    """ Recursive REST model: a job node plus its child nodes keyed by timeperiod. """
    node = NestedDocumentField(RestJob, null=True)
    children = DictField()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/rest_model.py",
"copies": "1",
"size": "1089",
"license": "bsd-3-clause",
"hash": -8426756871654416000,
"line_mean": 28.4324324324,
"line_max": 91,
"alpha_frac": 0.7731864096,
"autogenerated": false,
"ratio": 3.6915254237288138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49647118333288137,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.fields import ObjectIdField, DictField, NestedDocumentField
from db.model.raw_data import *
class NestedStat(BaseDocument):
    """ Nested document with per-timeperiod aggregates and per-family breakdown maps. """
    number_of_pageviews = IntegerField(NUMBER_OF_PAGEVIEWS, default=0)
    total_duration = IntegerField(TOTAL_DURATION, default=0)
    number_of_visits = IntegerField(NUMBER_OF_VISITS, default=0)
    os = DictField(FAMILY_OS)
    browsers = DictField(FAMILY_BROWSERS)
    screen_res = DictField(FAMILY_SCREEN_RESOLUTIONS)
    languages = DictField(FAMILY_LANGUAGES)
    countries = DictField(FAMILY_COUNTRIES)
class SiteStatistics(BaseDocument):
    """
    class presents site statistics, such as number of visits per defined period or list of search keywords
    """
    db_id = ObjectIdField('_id', null=True)
    domain_name = StringField(DOMAIN_NAME)
    timeperiod = StringField(TIMEPERIOD)
    stat = NestedDocumentField(FAMILY_STAT, NestedStat)
    # natural key is the (domain_name, timeperiod) pair
    @BaseDocument.key.getter
    def key(self):
        return self.domain_name, self.timeperiod
    @key.setter
    def key(self, value):
        # expects a (domain_name, timeperiod) pair
        self.domain_name = value[0]
        self.timeperiod = value[1]
| {
"repo_name": "eggsandbeer/scheduler",
"path": "db/model/site_statistics.py",
"copies": "1",
"size": "1138",
"license": "bsd-3-clause",
"hash": 9076762321135584000,
"line_mean": 31.5142857143,
"line_max": 106,
"alpha_frac": 0.716168717,
"autogenerated": false,
"ratio": 3.3970149253731345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4613183642373134,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.fields import ObjectIdField, NestedDocumentField, DictField
from db.model.raw_data import *
class NestedUserProfile(BaseDocument):
    """ Nested document describing the visitor: network, platform and locale attributes. """
    ip = StringField(IP)
    os = StringField(OS)
    browser = StringField(BROWSER)
    language = StringField(LANGUAGE)
    country = StringField(COUNTRY)
    screen_x = IntegerField(SCREEN_X)
    screen_y = IntegerField(SCREEN_Y)
    @property
    def screen_res(self):
        """ :return: (screen_x, screen_y) pair """
        return self.screen_x, self.screen_y
    @screen_res.setter
    def screen_res(self, value):
        # expects a (width, height) pair
        self.screen_x = value[0]
        self.screen_y = value[1]
class NestedBrowsingHistory(BaseDocument):
    """ Nested document accumulating browsing statistics over the life of a session. """
    total_duration = IntegerField(TOTAL_DURATION, default=0)
    number_of_pageviews = IntegerField(NUMBER_OF_PAGEVIEWS, default=0)
    number_of_entries = IntegerField(NUMBER_OF_ENTRIES, default=0)
    entries_timestamps = DictField(FAMILY_ENTRIES)
    def set_entry_timestamp(self, entry_id, value):
        """ stores `value` under the stringified `entry_id` key """
        # `basestring` does not exist in Python 3 and raised NameError there;
        # `str` matches the maintained copy of this module
        if not isinstance(entry_id, str):
            entry_id = str(entry_id)
        self.entries_timestamps[entry_id] = value
    def get_entry_timestamp(self, entry_id):
        """ :return: value stored under the stringified `entry_id` key
            :raise KeyError: if no value was recorded for the entry """
        if not isinstance(entry_id, str):
            entry_id = str(entry_id)
        return self.entries_timestamps[entry_id]
class SingleSession(BaseDocument):
    """
    class presents statistics, gathered during the life of the session
    """
    db_id = ObjectIdField('_id', null=True)
    domain_name = StringField(DOMAIN_NAME)
    timeperiod = StringField(TIMEPERIOD)
    session_id = StringField(SESSION_ID)
    user_profile = NestedDocumentField(FAMILY_USER_PROFILE, NestedUserProfile)
    browsing_history = NestedDocumentField(FAMILY_BROWSING_HISTORY, NestedBrowsingHistory)
    # natural key is the (domain_name, timeperiod, session_id) triple
    @BaseDocument.key.getter
    def key(self):
        return self.domain_name, self.timeperiod, self.session_id
    @key.setter
    def key(self, value):
        # expects a (domain_name, timeperiod, session_id) triple
        self.domain_name = value[0]
        self.timeperiod = value[1]
        self.session_id = value[2]
| {
"repo_name": "eggsandbeer/scheduler",
"path": "db/model/single_session.py",
"copies": "1",
"size": "2010",
"license": "bsd-3-clause",
"hash": 2736205072029351400,
"line_mean": 30.9047619048,
"line_max": 90,
"alpha_frac": 0.6890547264,
"autogenerated": false,
"ratio": 3.5957066189624327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47847613453624327,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.fields import ObjectIdField, NestedDocumentField, DictField
from db.model.raw_data import *
class NestedUserProfile(BaseDocument):
    """ Nested document describing the visitor: network, platform and locale attributes. """
    ip = StringField()
    os = StringField()
    browser = StringField()
    language = StringField()
    country = StringField()
    screen_x = IntegerField()
    screen_y = IntegerField()
    @property
    def screen_resolution(self):
        """ :return: (screen_x, screen_y) pair """
        return self.screen_x, self.screen_y
    @screen_resolution.setter
    def screen_resolution(self, value):
        # expects a (width, height) pair
        self.screen_x = value[0]
        self.screen_y = value[1]
class NestedBrowsingHistory(BaseDocument):
    """ Nested document accumulating browsing statistics over the life of a session. """
    total_duration = IntegerField(default=0)
    number_of_pageviews = IntegerField(default=0)
    number_of_entries = IntegerField(default=0)
    entries_timestamps = DictField()
    def set_entry_timestamp(self, entry_id, value):
        """ stores `value` under the stringified `entry_id` key """
        key = entry_id if isinstance(entry_id, str) else str(entry_id)
        self.entries_timestamps[key] = value
    def get_entry_timestamp(self, entry_id):
        """ :return: value recorded under the stringified `entry_id` key
            :raise KeyError: if no value was recorded for the entry """
        key = entry_id if isinstance(entry_id, str) else str(entry_id)
        return self.entries_timestamps[key]
class SingleSession(BaseDocument):
    """
    class presents statistics, gathered during the life of the session
    """
    db_id = ObjectIdField(name='_id', null=True)
    domain_name = StringField(name='domain')
    timeperiod = StringField()
    session_id = StringField()
    user_profile = NestedDocumentField(NestedUserProfile)
    browsing_history = NestedDocumentField(NestedBrowsingHistory)
    @classmethod
    def key_fields(cls):
        # natural key: storage names of the (domain, timeperiod, session_id) fields
        return cls.domain_name.name, cls.timeperiod.name, cls.session_id.name
# storage field names exported for DAO/query construction
SESSION_ID = SingleSession.session_id.name
TIMEPERIOD = SingleSession.timeperiod.name
DOMAIN_NAME = SingleSession.domain_name.name
| {
"repo_name": "mushkevych/scheduler",
"path": "db/model/single_session.py",
"copies": "1",
"size": "1836",
"license": "bsd-3-clause",
"hash": 7309532944823311000,
"line_mean": 28.6129032258,
"line_max": 77,
"alpha_frac": 0.6922657952,
"autogenerated": false,
"ratio": 3.809128630705394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 62
} |
__author__ = 'Bohdan Mushkevych'
from odm.fields import StringField, ObjectIdField, BooleanField, IntegerField
from synergy.db.model.daemon_process_entry import DaemonProcessEntry
from synergy.scheduler.scheduler_constants import BLOCKING_CHILDREN, BLOCKING_DEPENDENCIES, BLOCKING_NORMAL, \
EXCHANGE_MANAGED_WORKER, STATE_MACHINE_DISCRETE
class ManagedProcessEntry(DaemonProcessEntry):
    """ Class presents single configuration entry for scheduler managed (i.e. - non-freerun) processes """
    db_id = ObjectIdField(name='_id', null=True)
    source = StringField(null=True)
    sink = StringField(null=True)
    time_qualifier = StringField()
    time_grouping = IntegerField()
    trigger_frequency = StringField()
    is_on = BooleanField(default=False)
    state_machine_name = StringField()
    blocking_type = StringField(choices=[BLOCKING_CHILDREN, BLOCKING_DEPENDENCIES, BLOCKING_NORMAL])
    @classmethod
    def key_fields(cls):
        # natural key: storage name of the inherited process_name field
        return cls.process_name.name
def managed_context_entry(process_name,
                          classname,
                          token,
                          time_qualifier,
                          trigger_frequency='every 60',
                          state_machine_name=STATE_MACHINE_DISCRETE,
                          is_on=True,
                          exchange=EXCHANGE_MANAGED_WORKER,
                          blocking_type=BLOCKING_NORMAL,
                          present_on_boxes=None,
                          time_grouping=1,
                          arguments=None,
                          queue=None,
                          routing=None,
                          source=None,
                          sink=None,
                          pid_file=None,
                          log_file=None):
    """ forms process context entry;
        None-valued mq/file arguments fall back to <token + time_qualifier> derived defaults """
    _ROUTING_PREFIX = 'routing_'
    _QUEUE_PREFIX = 'queue_'
    if arguments is not None:
        assert isinstance(arguments, dict)
    suffix = token + time_qualifier
    if queue is None:
        queue = _QUEUE_PREFIX + suffix
    if routing is None:
        routing = _ROUTING_PREFIX + suffix
    if log_file is None:
        log_file = suffix + '.log'
    if pid_file is None:
        pid_file = suffix + '.pid'
    if arguments is None:
        arguments = dict()
    return ManagedProcessEntry(
        process_name=process_name,
        trigger_frequency=trigger_frequency,
        state_machine_name=state_machine_name,
        is_on=is_on,
        blocking_type=blocking_type,
        classname=classname,
        token=token,
        source=source,
        sink=sink,
        mq_queue=queue,
        mq_routing_key=routing,
        mq_exchange=exchange,
        present_on_boxes=present_on_boxes,
        arguments=arguments,
        time_qualifier=time_qualifier,
        time_grouping=time_grouping,
        log_filename=log_file,
        pid_filename=pid_file)
PROCESS_NAME = ManagedProcessEntry.process_name.name
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/model/managed_process_entry.py",
"copies": "1",
"size": "2957",
"license": "bsd-3-clause",
"hash": 3488465982388605000,
"line_mean": 38.9594594595,
"line_max": 110,
"alpha_frac": 0.6066959757,
"autogenerated": false,
"ratio": 4.310495626822157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5417191602522157,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.fields import StringField, ObjectIdField, BooleanField
from synergy.db.model.daemon_process_entry import DaemonProcessEntry
from synergy.scheduler.scheduler_constants import BLOCKING_CHILDREN, BLOCKING_DEPENDENCIES, BLOCKING_NORMAL, \
EXCHANGE_MANAGED_WORKER, TYPE_MANAGED
# storage/JSON field names used by the ManagedProcessEntry document below
PROCESS_NAME = 'process_name'
IS_ON = 'is_on'
RUN_ON_ACTIVE_TIMEPERIOD = 'run_on_active_timeperiod'
TRIGGER_FREQUENCY = 'trigger_frequency'
STATE_MACHINE_NAME = 'state_machine_name'
BLOCKING_TYPE = 'blocking_type'
SOURCE = 'source'
SINK = 'sink'
TIME_QUALIFIER = 'time_qualifier'
class ManagedProcessEntry(DaemonProcessEntry):
    """ Class presents single configuration entry for scheduler managed (i.e. - non-freerun) processes. """
    db_id = ObjectIdField('_id', null=True)
    source = StringField(SOURCE)
    sink = StringField(SINK)
    time_qualifier = StringField(TIME_QUALIFIER)
    trigger_frequency = StringField(TRIGGER_FREQUENCY)
    is_on = BooleanField(IS_ON, default=False)
    run_on_active_timeperiod = BooleanField(RUN_ON_ACTIVE_TIMEPERIOD)
    state_machine_name = StringField(STATE_MACHINE_NAME)
    blocking_type = StringField(BLOCKING_TYPE, choices=[BLOCKING_CHILDREN, BLOCKING_DEPENDENCIES, BLOCKING_NORMAL])
    # natural key is the plain process_name string
    @DaemonProcessEntry.key.getter
    def key(self):
        return self.process_name
    @DaemonProcessEntry.key.setter
    def key(self, value):
        self.process_name = value
def managed_context_entry(process_name,
                          classname,
                          token,
                          time_qualifier,
                          trigger_frequency,
                          state_machine_name,
                          is_on=True,
                          exchange=EXCHANGE_MANAGED_WORKER,
                          blocking_type=BLOCKING_NORMAL,
                          present_on_boxes=None,
                          arguments=None,
                          queue=None,
                          routing=None,
                          process_type=TYPE_MANAGED,
                          source=None,
                          sink=None,
                          pid_file=None,
                          log_file=None,
                          run_on_active_timeperiod=False):
    """ forms process context entry;
        None-valued mq/file arguments fall back to <token + time_qualifier> derived defaults """
    _ROUTING_PREFIX = 'routing_'
    _QUEUE_PREFIX = 'queue_'
    suffix = token + time_qualifier
    queue = queue if queue is not None else _QUEUE_PREFIX + suffix
    routing = routing if routing is not None else _ROUTING_PREFIX + suffix
    pid_file = pid_file if pid_file is not None else suffix + '.pid'
    log_file = log_file if log_file is not None else suffix + '.log'
    if arguments is None:
        arguments = dict()
    else:
        assert isinstance(arguments, dict)
    return ManagedProcessEntry(
        process_name=process_name,
        trigger_frequency=trigger_frequency,
        state_machine_name=state_machine_name,
        is_on=is_on,
        blocking_type=blocking_type,
        classname=classname,
        token=token,
        source=source,
        sink=sink,
        mq_queue=queue,
        mq_routing_key=routing,
        mq_exchange=exchange,
        present_on_boxes=present_on_boxes,
        arguments=arguments,
        time_qualifier=time_qualifier,
        process_type=process_type,
        log_filename=log_file,
        pid_filename=pid_file,
        run_on_active_timeperiod=run_on_active_timeperiod)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/model/managed_process_entry.py",
"copies": "1",
"size": "3501",
"license": "bsd-3-clause",
"hash": 4856946029218550000,
"line_mean": 34.7244897959,
"line_max": 115,
"alpha_frac": 0.6046843759,
"autogenerated": false,
"ratio": 4.014908256880734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027437712166776204,
"num_lines": 98
} |
__author__ = 'Bohdan Mushkevych'
from os import path
from azure.storage.blob import BlockBlobService
from flow.core.abstract_filesystem import AbstractFilesystem, splitpath
class AzureBlobFilesystem(AbstractFilesystem):
    """ implementation of Azure Page Blob filesystem
        https://docs.microsoft.com/en-us/azure/storage/blobs/storage-python-how-to-use-blob-storage#download-and-install-azure-storage-sdk-for-python"""

    def __init__(self, logger, context, **kwargs):
        """ :param context: expected to expose settings with *azure_account_name*,
                            *azure_account_key* and the default *azure_bucket* (container) name
            :raises ValueError: if the BlockBlobService could not be instantiated """
        super(AzureBlobFilesystem, self).__init__(logger, context, **kwargs)
        try:
            self.block_blob_service = BlockBlobService(account_name=context.settings['azure_account_name'],
                                                       account_key=context.settings['azure_account_key'])
        except EnvironmentError as e:
            self.logger.error('Azure Credentials are NOT valid. Terminating.', exc_info=True)
            raise ValueError(e)

    def __del__(self):
        pass

    def _azure_bucket(self, bucket_name):
        """ resolve the container name: explicit argument wins, otherwise the configured default """
        if not bucket_name:
            bucket_name = self.context.settings['azure_bucket']
        return bucket_name

    def mkdir(self, uri_path, bucket_name=None, **kwargs):
        """ emulates a folder hierarchy by creating an empty *_$folder$* marker blob
            for every component of *uri_path* """
        def _create_folder_file():
            folder_key = path.join(root, '{0}_$folder$'.format(folder_name))
            if not self.block_blob_service.exists(azure_bucket, folder_key):
                self.block_blob_service.create_blob_from_text(azure_bucket, folder_key, '')

        azure_bucket = self._azure_bucket(bucket_name)
        root = ''
        for folder_name in splitpath(uri_path):
            root = path.join(root, folder_name)
            _create_folder_file()

    def rmdir(self, uri_path, bucket_name=None, **kwargs):
        """ removes every blob under the *uri_path/* prefix """
        azure_bucket = self._azure_bucket(bucket_name)
        for blob in self.block_blob_service.list_blobs(azure_bucket, prefix='{0}/'.format(uri_path)):
            # list_blobs yields Blob instances, while delete_blob expects the blob *name*;
            # previously the Blob object itself was passed in
            self.block_blob_service.delete_blob(azure_bucket, blob.name)

    def rm(self, uri_path, bucket_name=None, **kwargs):
        """ removes the single blob at *uri_path*
            NOTE(review): unlike the S3/GCP siblings this is not recursive - confirm intended """
        azure_bucket = self._azure_bucket(bucket_name)
        self.block_blob_service.delete_blob(azure_bucket, uri_path)

    def cp(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
        """ server-side copy of a single blob """
        azure_bucket_source = self._azure_bucket(bucket_name_source)
        azure_bucket_target = self._azure_bucket(bucket_name_target)
        source_blob_url = self.block_blob_service.make_blob_url(azure_bucket_source, uri_source)
        self.block_blob_service.copy_blob(azure_bucket_target, uri_target, source_blob_url)

    def mv(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
        """ move = copy + delete of the source
            NOTE(review): copy_blob is scheduled server-side and may still be in
            progress when the source is deleted - confirm for large/cross-account blobs """
        self.cp(uri_source, uri_target, bucket_name_source, bucket_name_target, **kwargs)
        self.rm(uri_source, bucket_name_source)

    def copyToLocal(self, uri_source, uri_target, bucket_name_source=None, **kwargs):
        """ downloads a single blob into the local file *uri_target* """
        azure_bucket_source = self._azure_bucket(bucket_name_source)
        with open(uri_target, 'wb') as file_pointer:
            self.block_blob_service.get_blob_to_stream(azure_bucket_source, uri_source, file_pointer)

    def copyFromLocal(self, uri_source, uri_target, bucket_name_target=None, **kwargs):
        """ uploads the local file *uri_source* into the blob *uri_target* """
        azure_bucket_target = self._azure_bucket(bucket_name_target)
        with open(uri_source, 'rb') as file_pointer:
            self.block_blob_service.create_blob_from_stream(azure_bucket_target, uri_target, file_pointer)

    def exists(self, uri_path, bucket_name=None, exact=False, **kwargs):
        """ :param exact: when False, a *_$folder$* marker under *uri_path* also counts as existing """
        azure_bucket = self._azure_bucket(bucket_name)
        is_found = self.block_blob_service.exists(azure_bucket, uri_path)
        if exact is False and is_found is False:
            folder_name = '{0}_$folder$'.format(path.basename(uri_path))
            folder_key = path.join(uri_path, folder_name)
            is_found = self.block_blob_service.exists(azure_bucket, folder_key)
        return is_found
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/azure_blob_filesystem.py",
"copies": "1",
"size": "3941",
"license": "bsd-3-clause",
"hash": -817618536394164900,
"line_mean": 49.5256410256,
"line_max": 148,
"alpha_frac": 0.6617609744,
"autogenerated": false,
"ratio": 3.5665158371040726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9717419692289686,
"avg_score": 0.0021714238428775343,
"num_lines": 78
} |
__author__ = 'Bohdan Mushkevych'
from os import path
from google.cloud import storage
from google.cloud.storage import Bucket, Blob
from flow.core.gcp_credentials import gcp_credentials
from flow.core.abstract_filesystem import AbstractFilesystem, splitpath
class GcpFilesystem(AbstractFilesystem):
    """ implementation of Google Cloud Platform Filesystem """

    def __init__(self, logger, context, **kwargs):
        """ :param context: expected to expose settings with *gcp_project_name*,
                            *gcp_service_account_file* and the default *gcp_bucket* name
            :raises ValueError: if the storage Client could not be instantiated """
        super(GcpFilesystem, self).__init__(logger, context, **kwargs)
        try:
            service_account_file_uri = self.context.settings.get('gcp_service_account_file')
            credentials = gcp_credentials(service_account_file_uri)
            self.gcp_client = storage.Client(project=context.settings['gcp_project_name'], credentials=credentials)
        except EnvironmentError as e:
            self.logger.error('Google Cloud Credentials are NOT valid. Terminating.', exc_info=True)
            raise ValueError(e)

    def __del__(self):
        pass

    def _gcp_bucket(self, bucket_name):
        """ resolve the Bucket handle: explicit name wins, otherwise the configured default """
        if not bucket_name:
            bucket_name = self.context.settings['gcp_bucket']
        gcp_bucket = self.gcp_client.get_bucket(bucket_name)
        return gcp_bucket

    def mkdir(self, uri_path, bucket_name=None, **kwargs):
        """ emulates a folder hierarchy by creating an empty *_$folder$* marker blob
            for every component of *uri_path* """
        def _create_folder_file():
            folder_key = path.join(root, '{0}_$folder$'.format(folder_name))
            blob = Blob(folder_key, gcp_bucket)
            if not blob.exists():
                blob.upload_from_string(data='')

        gcp_bucket = self._gcp_bucket(bucket_name)
        root = ''
        for folder_name in splitpath(uri_path):
            root = path.join(root, folder_name)
            _create_folder_file()

    def rmdir(self, uri_path, bucket_name=None, **kwargs):
        """ removes the blob at *uri_path* itself (if present) and every blob
            under the *uri_path/* prefix """
        gcp_bucket = self._gcp_bucket(bucket_name)

        # also delete the exact blob, matching S3Filesystem.rmdir, which includes the key
        # itself in the deletion set; previously an exact blob at uri_path was never
        # removed, so mv() of a single blob left the source behind
        exact_blob = Blob(uri_path, gcp_bucket)
        if exact_blob.exists():
            exact_blob.delete()

        for nested_blob in gcp_bucket.list_blobs(prefix='{0}/'.format(uri_path)):
            nested_blob.delete()

    def rm(self, uri_path, bucket_name=None, **kwargs):
        """ alias for rmdir: removes the exact blob and any blobs nested under it """
        self.rmdir(uri_path, bucket_name, **kwargs)

    def cp(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
        """ server-side copy of a single blob or of a whole pseudo-folder """
        gcp_bucket_source = self._gcp_bucket(bucket_name_source)
        gcp_bucket_target = self._gcp_bucket(bucket_name_target)

        # an exact blob is copied as-is; otherwise every blob under the prefix is copied
        prefix = uri_source if self.exists(uri_source, exact=True) else '{0}/'.format(uri_source)
        for blob_source in gcp_bucket_source.list_blobs(prefix=prefix):
            key_target = blob_source.name.replace(uri_source, uri_target)
            Blob(key_target, gcp_bucket_target).rewrite(source=blob_source)

    def mv(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
        """ move = copy + delete of the source """
        self.cp(uri_source, uri_target, bucket_name_source, bucket_name_target, **kwargs)
        self.rm(uri_source, bucket_name_source)

    def copyToLocal(self, uri_source, uri_target, bucket_name_source=None, **kwargs):
        """ downloads a single blob into the local file *uri_target* """
        gcp_bucket_source = self._gcp_bucket(bucket_name_source)
        blob = Blob(uri_source, gcp_bucket_source)
        with open(uri_target, 'wb') as file_pointer:
            blob.download_to_file(file_pointer)

    def copyFromLocal(self, uri_source, uri_target, bucket_name_target=None, **kwargs):
        """ uploads the local file *uri_source* into the blob *uri_target* """
        gcp_bucket_target = self._gcp_bucket(bucket_name_target)
        blob = Blob(uri_target, gcp_bucket_target)
        with open(uri_source, 'rb') as file_pointer:
            blob.upload_from_file(file_pointer)

    def exists(self, uri_path, bucket_name=None, exact=False, **kwargs):
        """ :param exact: when False, a *_$folder$* marker under *uri_path* also counts as existing """
        gcp_bucket = self._gcp_bucket(bucket_name)
        is_found = Blob(uri_path, gcp_bucket).exists()
        if exact is False and is_found is False:
            folder_name = '{0}_$folder$'.format(path.basename(uri_path))
            folder_key = path.join(uri_path, folder_name)
            is_found = Blob(folder_key, gcp_bucket).exists()
        return is_found
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/gcp_filesystem.py",
"copies": "1",
"size": "3876",
"license": "bsd-3-clause",
"hash": -174343189499324930,
"line_mean": 44.6,
"line_max": 115,
"alpha_frac": 0.6470588235,
"autogenerated": false,
"ratio": 3.598885793871866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9740462914598477,
"avg_score": 0.0010963405546779406,
"num_lines": 85
} |
__author__ = 'Bohdan Mushkevych'
from os import path
import boto3
import boto3.s3
from boto3.exceptions import Boto3Error
from botocore.exceptions import ClientError
from flow.core.abstract_filesystem import AbstractFilesystem, splitpath
class S3Filesystem(AbstractFilesystem):
    """ implementation of AWS S3 filesystem """

    def __init__(self, logger, context, **kwargs):
        """ :param context: expected to expose settings with *aws_access_key_id*,
                            *aws_secret_access_key* and the default *aws_s3_bucket* name
            :raises ValueError: if the boto3 S3 resource could not be instantiated """
        super(S3Filesystem, self).__init__(logger, context, **kwargs)
        try:
            self.s3_resource = boto3.resource(service_name='s3',
                                              aws_access_key_id=context.settings['aws_access_key_id'],
                                              aws_secret_access_key=context.settings['aws_secret_access_key'])
            self.s3_client = self.s3_resource.meta.client
        except (Boto3Error, ClientError) as e:
            self.logger.error('AWS Credentials are NOT valid. Terminating.', exc_info=True)
            raise ValueError(e)

    def __del__(self):
        pass

    def _s3_bucket(self, bucket_name):
        """ resolve the Bucket resource: explicit name wins, otherwise the configured default """
        if not bucket_name:
            bucket_name = self.context.settings['aws_s3_bucket']
        s3_bucket = self.s3_resource.Bucket(bucket_name)
        return s3_bucket

    def mkdir(self, uri_path, bucket_name=None, **kwargs):
        """ emulates a folder hierarchy by creating an empty *_$folder$* marker key
            for every component of *uri_path* """
        def _create_folder_file():
            folder_key = path.join(root, '{0}_$folder$'.format(folder_name))
            try:
                self.s3_client.head_object(Bucket=s3_bucket.name, Key=folder_key)
            except ClientError:
                # Key not found
                s3_key = s3_bucket.Object(folder_key)
                s3_key.put(Body='')

        s3_bucket = self._s3_bucket(bucket_name)
        root = ''
        for folder_name in splitpath(uri_path):
            root = path.join(root, folder_name)
            _create_folder_file()

    def rmdir(self, uri_path, bucket_name=None, **kwargs):
        """ removes the key *uri_path* itself and every key under the *uri_path/* prefix """
        s3_bucket = self._s3_bucket(bucket_name)
        objects_to_delete = [{'Key': uri_path}]
        for obj in s3_bucket.objects.filter(Prefix='{0}/'.format(uri_path)):
            objects_to_delete.append({'Key': obj.key})

        # the S3 DeleteObjects API rejects requests with more than 1000 keys,
        # so large pseudo-folders must be removed in batches
        _MAX_KEYS_PER_REQUEST = 1000
        for start in range(0, len(objects_to_delete), _MAX_KEYS_PER_REQUEST):
            s3_bucket.delete_objects(
                Delete={
                    'Objects': objects_to_delete[start:start + _MAX_KEYS_PER_REQUEST]
                }
            )

    def rm(self, uri_path, bucket_name=None, **kwargs):
        """ alias for rmdir: removes the exact key and any keys nested under it """
        self.rmdir(uri_path, bucket_name, **kwargs)

    def cp(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
        """ server-side copy of a single key or of a whole pseudo-folder """
        s3_bucket_source = self._s3_bucket(bucket_name_source)
        s3_bucket_target = self._s3_bucket(bucket_name_target)

        # an exact key is copied as-is; otherwise every key under the prefix is copied
        prefix = uri_source if self.exists(uri_source, exact=True) else '{0}/'.format(uri_source)
        for obj in s3_bucket_source.objects.filter(Prefix=prefix):
            # replace the prefix
            new_key = obj.key.replace(uri_source, uri_target)
            new_obj = s3_bucket_target.Object(new_key)
            new_obj.copy({'Bucket': obj.bucket_name, 'Key': obj.key})

    def mv(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
        """ move = copy + delete of the source """
        self.cp(uri_source, uri_target, bucket_name_source, bucket_name_target, **kwargs)
        self.rm(uri_source, bucket_name_source)

    def copyToLocal(self, uri_source, uri_target, bucket_name_source=None, **kwargs):
        """ downloads a single key into the local file *uri_target*
            :raises ValueError: if the download failed """
        s3_bucket_source = self._s3_bucket(bucket_name_source)
        try:
            s3_bucket_source.download_file(uri_source, uri_target)
        except ClientError as e:
            self.logger.error('AWS CopyToLocal Error:.', exc_info=True)
            raise ValueError(e)

    def copyFromLocal(self, uri_source, uri_target, bucket_name_target=None, **kwargs):
        """ uploads the local file *uri_source* into the key *uri_target*
            :raises ValueError: if the upload failed """
        s3_bucket_target = self._s3_bucket(bucket_name_target)
        try:
            s3_bucket_target.upload_file(uri_source, uri_target)
        except ClientError as e:
            self.logger.error('AWS CopyFromLocal Error:.', exc_info=True)
            raise ValueError(e)

    def exists(self, uri_path, bucket_name=None, exact=False, **kwargs):
        """ :param exact: when False, a *_$folder$* marker under *uri_path* also counts as existing """
        s3_bucket = self._s3_bucket(bucket_name)
        if exact:
            keys = [uri_path]
        else:
            folder_name = '{0}_$folder$'.format(path.basename(uri_path))
            folder_key = path.join(uri_path, folder_name)
            keys = [uri_path, folder_key]

        for key in keys:
            try:
                self.s3_client.head_object(Bucket=s3_bucket.name, Key=key)
                return True
            except ClientError:
                pass
        return False
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/s3_filesystem.py",
"copies": "1",
"size": "4543",
"license": "bsd-3-clause",
"hash": 3758360006783231500,
"line_mean": 38.850877193,
"line_max": 110,
"alpha_frac": 0.5971824785,
"autogenerated": false,
"ratio": 3.596991290577989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4694173769077989,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from pymongo import MongoClient, ASCENDING, DESCENDING
from bson.objectid import ObjectId
from synergy.conf import settings
from synergy.db.model.unit_of_work import TIMEPERIOD
# query filter that matches every document in a collection
QUERY_GET_ALL = {}

if 'ds_factory' not in globals():
    # this block defines global variable ds_factory

    def factory():
        # the only way to implement nonlocal closure variables in Python 2.X
        instances = {}

        def get_instance(logger):
            # lazily instantiate (and then reuse) exactly one manager per configured ds_type
            ds_type = settings.settings['ds_type']
            if ds_type not in instances:
                if ds_type == "mongo_db":
                    instances[ds_type] = MongoDbManager(logger)
                elif ds_type == "hbase":
                    instances[ds_type] = HBaseManager(logger)
                else:
                    raise ValueError('Unsupported Data Source type: %s' % ds_type)
            return instances[ds_type]
        return get_instance

    # NOTE(review): `global` at module level is a no-op; kept as-is
    global ds_factory
    ds_factory = factory()
class BaseManager(object):
    """
    BaseManager holds definition of the Data Source and an interface to read, write, delete and update (CRUD)
    models within the DataSource
    """

    def __init__(self, logger):
        super(BaseManager, self).__init__()
        self.logger = logger

    def _not_implemented(self, method_name):
        """ single raiser shared by every abstract method; keeps the message format in one place """
        raise NotImplementedError('method {0} must be implemented by {1}'
                                  .format(method_name, self.__class__.__name__))

    def __str__(self):
        self._not_implemented('__str__')

    def is_alive(self):
        """ :return: True if the database server is available. False otherwise """
        self._not_implemented('is_alive')

    def get(self, table_name, primary_key):
        self._not_implemented('get')

    def filter(self, table_name, query):
        self._not_implemented('filter')

    def update(self, table_name, instance):
        self._not_implemented('update')

    def delete(self, table_name, primary_key):
        self._not_implemented('delete')

    def highest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        self._not_implemented('highest_primary_key')

    def lowest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        self._not_implemented('lowest_primary_key')

    def cursor_for(self,
                   table_name,
                   start_id_obj,
                   end_id_obj,
                   iteration,
                   start_timeperiod,
                   end_timeperiod,
                   bulk_threshold):
        self._not_implemented('cursor_for')
class MongoDbManager(BaseManager):
    """ MongoDB-backed implementation of BaseManager
        NOTE(review): uses pre-3.0 pymongo APIs (Collection.save/remove/insert,
        safe=/spec=/fields= kwargs, Connection.alive) - presumably the installed
        pymongo is pinned to a 2.x release; confirm before upgrading """

    def __init__(self, logger):
        super(MongoDbManager, self).__init__(logger)
        self._db_client = MongoClient(settings.settings['mongodb_host_list'])
        self._db = self._db_client[settings.settings['mongo_db_name']]

    def __del__(self):
        try:
            self._db_client.close()
        except AttributeError:
            # __init__ may have failed before _db_client was assigned
            pass

    def __str__(self):
        return 'MongoDbManager: %s@%s' % (settings.settings['mongodb_host_list'], settings.settings['mongo_db_name'])

    def is_alive(self):
        return self._db_client.alive()

    def connection(self, table_name):
        # exposes the raw pymongo Collection for callers that need more than CRUD
        return self._db[table_name]

    def filter(self, table_name, query):
        """ :return: cursor over all documents matching *query* """
        conn = self._db[table_name]
        return conn.find(query)

    def delete(self, table_name, primary_key):
        conn = self._db[table_name]
        return conn.remove(primary_key, safe=True)

    def get(self, table_name, primary_key):
        """ :return: single document with the given _id
            :raises LookupError: if no document matches *primary_key* """
        query = {'_id': primary_key}
        conn = self._db[table_name]
        db_entry = conn.find_one(query)
        if db_entry is None:
            msg = 'Instance with ID=%s was not found' % str(primary_key)
            self.logger.warn(msg)
            raise LookupError(msg)
        return db_entry

    def insert(self, table_name, instance):
        """ :return: the _id of the inserted document """
        conn = self._db[table_name]
        return conn.insert(instance, safe=True)

    def update(self, table_name, instance):
        # pymongo 2.x save(): upsert by the document's _id
        conn = self._db[table_name]
        conn.save(instance, safe=True)

    def highest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        """ :raises LookupError: if no document falls into the timeperiod range
            NOTE(review): sorts ASCENDING and takes the first element, i.e. returns
            the *smallest* _id in range - verify method name vs. intent against
            lowest_primary_key below, which does the opposite """
        query = {TIMEPERIOD: {'$gte': timeperiod_low, '$lt': timeperiod_high}}
        conn = self._db[table_name]
        asc_search = conn.find(spec=query, fields='_id').sort('_id', ASCENDING).limit(1)
        if asc_search.count() == 0:
            raise LookupError('No messages in timeperiod: %s:%s in collection %s'
                              % (timeperiod_low, timeperiod_high, table_name))
        return asc_search[0]['_id']

    def lowest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        """ NOTE(review): sorts DESCENDING and takes the first element, i.e. returns
            the *largest* _id in range; also no empty-result guard, so an empty range
            raises IndexError rather than LookupError - confirm intended """
        query = {TIMEPERIOD: {'$gte': timeperiod_low, '$lt': timeperiod_high}}
        conn = self._db[table_name]
        dec_search = conn.find(spec=query, fields='_id').sort('_id', DESCENDING).limit(1)
        last_object_id = dec_search[0]['_id']
        return last_object_id

    def cursor_for(self,
                   table_name,
                   start_id_obj,
                   end_id_obj,
                   iteration,
                   start_timeperiod,
                   end_timeperiod,
                   bulk_threshold):
        """ :return: ascending cursor over at most *bulk_threshold* documents within
            the [start_id_obj : end_id_obj] _id range; on iterations after the first
            the start boundary is exclusive, so documents are not re-read """
        if not isinstance(start_id_obj, ObjectId):
            start_id_obj = ObjectId(start_id_obj)
        if not isinstance(end_id_obj, ObjectId):
            end_id_obj = ObjectId(end_id_obj)

        if iteration == 0:
            queue = {'_id': {'$gte': start_id_obj, '$lte': end_id_obj}}
        else:
            queue = {'_id': {'$gt': start_id_obj, '$lte': end_id_obj}}

        if start_timeperiod is not None and end_timeperiod is not None:
            # remove all accident objects that may be in [start_id_obj : end_id_obj] range
            queue[TIMEPERIOD] = {'$gte': start_timeperiod, '$lt': end_timeperiod}

        conn = self._db[table_name]
        return conn.find(queue).sort('_id', ASCENDING).limit(bulk_threshold)
class HBaseManager(BaseManager):
    # placeholder: HBase support is not implemented; every call falls through
    # to BaseManager and raises NotImplementedError
    pass
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/manager/ds_manager.py",
"copies": "1",
"size": "6486",
"license": "bsd-3-clause",
"hash": 767135341522762600,
"line_mean": 36.9298245614,
"line_max": 122,
"alpha_frac": 0.5979031761,
"autogenerated": false,
"ratio": 3.976701410177805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5074604586277804,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from qds_sdk.qubole import Qubole
from qds_sdk.commands import SparkCommand, PigCommand, ShellCommand
from flow.core.abstract_cluster import AbstractCluster
from flow.core.s3_filesystem import S3Filesystem
def read_file_content(file_uri):
    """ reads the whole file at *file_uri* and returns its content as a single string """
    with open(file_uri, mode='r') as source_file:
        return source_file.read()
class QuboleCluster(AbstractCluster):
    """ implementation of the Qubole API """

    def __init__(self, name, context, **kwargs):
        """ :param context: expected to expose settings with *qds_api_token*
                            plus the AWS credentials required by S3Filesystem """
        super(QuboleCluster, self).__init__(name, context, kwargs=kwargs)
        self._filesystem = S3Filesystem(self.logger, context, **kwargs)
        Qubole.configure(api_token=context.settings['qds_api_token'])

    @property
    def filesystem(self):
        return self._filesystem

    def _log_outcome(self, command):
        """ logs id, status, results and execution log of a completed Qubole command;
            extracted to remove the triplet duplicated across every run_* method """
        self.logger.info('command id: {0}; Status: {1}'.format(command.id, command.status))
        self.logger.info('command result: {0}'.format(command.get_results()))
        self.logger.info('command log: {0}'.format(command.get_log()))

    def run_pig_step(self, uri_script, **kwargs):
        """ runs the Pig script read from *uri_script* and logs its outcome """
        pig_cmd = PigCommand.run(script=read_file_content(uri_script), **kwargs)
        self._log_outcome(pig_cmd)

    def run_spark_step(self, uri_script, language, **kwargs):
        """ runs the Spark program read from *uri_script* in the given *language* """
        spark_cmd = SparkCommand.run(program=read_file_content(uri_script), language=language, **kwargs)
        self._log_outcome(spark_cmd)

    def run_hadoop_step(self, uri_script, **kwargs):
        raise NotImplementedError('method run_hadoop_step is not yet supported for the Qubole cluster')

    def run_shell_command(self, uri_script, **kwargs):
        """ runs the shell script read from *uri_script* and logs its outcome """
        shell_cmd = ShellCommand.run(script=read_file_content(uri_script), **kwargs)
        self._log_outcome(shell_cmd)
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/qubole_cluster.py",
"copies": "1",
"size": "2267",
"license": "bsd-3-clause",
"hash": 7083571059562105000,
"line_mean": 41.7735849057,
"line_max": 103,
"alpha_frac": 0.6735774151,
"autogenerated": false,
"ratio": 3.414156626506024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4587734041606024,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from settings import settings
from system.performance_tracker import SimpleTracker
from system.synergy_process import SynergyProcess
from threading import Thread
class AbstractWorker(SynergyProcess):
    """ base class for worker processes: wires up a performance tracker
        and executes the worker logic on a dedicated thread """

    def __init__(self, process_name, process_id=None):
        """:param process_name: id of the process, the worker will be performing """
        super(AbstractWorker, self).__init__(process_name, process_id)
        self._init_performance_tracker(self.logger)
        if settings['under_test']:
            mode_label = 'Testing Mode'
        else:
            mode_label = 'Production Mode'
        self.logger.info('Started {0} in {1}'.format(self.process_name, mode_label))

    def __del__(self):
        # best-effort shutdown of the tracker; never let cleanup raise out of __del__
        try:
            self.performance_tracker.cancel()
        except Exception as e:
            self.logger.error('Exception caught while cancelling the performance_tracker: {0}'.format(str(e)))
        super(AbstractWorker, self).__del__()

    # ********************** abstract methods ****************************
    def _init_performance_tracker(self, logger):
        # children may override this hook to plug in a richer tracker
        self.performance_tracker = SimpleTracker(logger)
        self.performance_tracker.start()

    # ********************** thread-related methods ****************************
    def run(self):
        """ abstract method to be overridden in children classes """
        self.logger.info('Thread started')

    def start(self, *_):
        # detach the worker loop onto its own thread; extra arguments are ignored
        self.main_thread = Thread(target=self.run)
        self.main_thread.start()
| {
"repo_name": "mushkevych/launch.py",
"path": "workers/abstract_worker.py",
"copies": "1",
"size": "1496",
"license": "bsd-3-clause",
"hash": 1576212709420603000,
"line_mean": 38.3684210526,
"line_max": 110,
"alpha_frac": 0.6203208556,
"autogenerated": false,
"ratio": 4.262108262108262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004414734652092753,
"num_lines": 38
} |
__author__ = 'Bohdan Mushkevych'
from subprocess import DEVNULL
import sys
import psutil
from psutil import TimeoutExpired
from launch import get_python, PROJECT_ROOT, PROCESS_STARTER
from synergy.system.utils import remove_pid_file, get_pid_filename
from synergy.conf import settings
def get_process_pid(process_name):
    """ check for process' pid file and returns pid from there """
    try:
        with open(get_pid_filename(process_name), mode='r') as pid_file:
            return int(pid_file.read().strip())
    except IOError:
        # pid file is absent: the process was never started or has already cleaned up
        return None
def kill_process(process_name):
    """ method is called to kill a running process """
    try:
        sys.stdout.write(f'killing: {process_name} {{ \n')
        pid = get_process_pid(process_name)
        if pid is not None and psutil.pid_exists(int(pid)):
            victim = psutil.Process(pid)
            victim.kill()
            # reap the exit status before removing the pid file
            victim.wait()
            remove_pid_file(process_name)
    except Exception as e:
        sys.stderr.write(f'Exception on killing {process_name} : {e} \n')
    finally:
        sys.stdout.write('}')
def start_process(process_name, *args):
    """ spawns a detached process via PROCESS_STARTER, forwarding optional CLI arguments """
    try:
        sys.stdout.write(f'starting: {process_name} {{ \n')
        cmd = [get_python(), PROJECT_ROOT + '/' + PROCESS_STARTER, process_name]
        if args:
            # when present, args is expected to hold a single iterable of extra CLI arguments
            cmd.extend(*args)

        child = psutil.Popen(cmd,
                             close_fds=True,
                             cwd=settings.settings['process_cwd'],
                             stdin=DEVNULL,
                             stdout=DEVNULL,
                             stderr=DEVNULL)
        sys.stdout.write(f'Started {process_name} with pid = {child.pid} \n')
    except Exception as e:
        sys.stderr.write(f'Exception on starting {process_name} : {e} \n')
    finally:
        sys.stdout.write('}')
def poll_process(process_name):
    """ between killing a process and its actual termination lies poorly documented requirement -
        <purging process' io pipes and reading exit status>.
        this can be done either by os.wait() or process.wait()
        :return True if the process is alive and OK and False is the process was terminated """
    try:
        pid = get_process_pid(process_name)
        if pid is None:
            sys.stdout.write(f'PID file was not found. Process {process_name} is likely terminated.\n')
            return False

        # wait() with a tiny timeout: TimeoutExpired is the only "still alive" signal;
        # any return value means the process has already terminated
        p = psutil.Process(pid)
        return_code = p.wait(timeout=0.01)
        if return_code is None:
            # process is already terminated
            # NOTE(review): psutil's wait() presumably returns None for non-child
            # processes - both branches deliberately report termination; confirm
            sys.stdout.write(f'Process {process_name} is terminated \n')
            return False
        else:
            # process is terminated; possibly by OS
            sys.stdout.write(f'Process {process_name} got terminated \n')
            return False
    except TimeoutExpired:
        sys.stdout.write(f'Process {process_name} is alive and OK \n')
        return True
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/process_helper.py",
"copies": "1",
"size": "3055",
"license": "bsd-3-clause",
"hash": -765186694668502100,
"line_mean": 33.7159090909,
"line_max": 103,
"alpha_frac": 0.5993453355,
"autogenerated": false,
"ratio": 3.9267352185089974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5026080554008998,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from subprocess import PIPE
import sys
import psutil
from psutil import TimeoutExpired
from launch import get_python, PROJECT_ROOT, PROCESS_STARTER
from system.process_context import ProcessContext
from settings import settings
def get_process_pid(process_name):
    """ check for process' pid file and returns pid from there
        :return: pid as int, or None if the pid file could not be read """
    try:
        pid_filename = ProcessContext.get_pid_filename(process_name)
        with open(pid_filename, mode='r') as pid_file:
            pid = int(pid_file.read().strip())
    except IOError:
        # pid file is absent: the process was never started or has already cleaned up
        pid = None
    return pid
def kill_process(process_name):
    """ method is called to kill a running process """
    try:
        sys.stdout.write('killing: {0} {{ \n'.format(process_name))
        pid = get_process_pid(process_name)
        if pid is not None and psutil.pid_exists(int(pid)):
            p = psutil.Process(pid)
            p.kill()
            # reap the exit status before removing the pid file
            p.wait()
            ProcessContext.remove_pid_file(process_name)
    except Exception as e:
        sys.stderr.write('Exception on killing {0} : {1} \n'.format(process_name, e))
    finally:
        sys.stdout.write('}')
def start_process(process_name, *args):
    """ spawns a new process via PROCESS_STARTER, forwarding optional CLI arguments """
    try:
        sys.stdout.write('starting: {0} {{ \n'.format(process_name))
        cmd = [get_python(), PROJECT_ROOT + '/' + PROCESS_STARTER, process_name]
        if not args:
            # this blocks triggers when args is either None or an empty list
            pass
        else:
            cmd.extend(*args)

        # NOTE(review): the child's std streams are PIPEd but never read; a verbose
        # child could block once the pipe buffer fills - consider redirecting to
        # devnull instead. TODO confirm
        p = psutil.Popen(cmd,
                         close_fds=True,
                         cwd=settings['process_cwd'],
                         stdin=PIPE,
                         stdout=PIPE,
                         stderr=PIPE)
        sys.stdout.write('Started {0} with pid = {1} \n'.format(process_name, p.pid))
    except Exception as e:
        sys.stderr.write('Exception on starting {0} : {1} \n'.format(process_name, e))
    finally:
        sys.stdout.write('}')
def poll_process(process_name):
    """ between killing a process and its actual termination lies poorly documented requirement -
        <purging process' io pipes and reading exit status>.
        this can be done either by os.wait() or process.wait()
        :return True if the process is alive and OK and False is the process was terminated """
    try:
        pid = get_process_pid(process_name)
        if pid is None:
            sys.stdout.write('PID file was not found. Process {0} is likely terminated.\n'.format(process_name))
            return False

        # wait() with a tiny timeout: TimeoutExpired is the only "still alive" signal;
        # any return value means the process has already terminated
        p = psutil.Process(pid)
        return_code = p.wait(timeout=0.01)
        if return_code is None:
            # process is already terminated
            sys.stdout.write('Process {0} is terminated \n'.format(process_name))
            return False
        else:
            # process is terminated; possibly by OS
            sys.stdout.write('Process {0} got terminated \n'.format(process_name))
            return False
    except TimeoutExpired:
        sys.stdout.write('Process {0} is alive and OK \n'.format(process_name))
        return True
| {
"repo_name": "mushkevych/launch.py",
"path": "system/process_helper.py",
"copies": "1",
"size": "3132",
"license": "bsd-3-clause",
"hash": -6709610304617229000,
"line_mean": 35,
"line_max": 112,
"alpha_frac": 0.6008939974,
"autogenerated": false,
"ratio": 3.9898089171974522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5090702914597452,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from subprocess import PIPE
import sys
import psutil
from psutil import TimeoutExpired
from launch import get_python, PROJECT_ROOT, PROCESS_STARTER
from synergy.system.utils import remove_pid_file, get_pid_filename
from synergy.conf import settings
def get_process_pid(process_name):
    """ check for process' pid file and returns pid from there """
    try:
        with open(get_pid_filename(process_name), mode='r') as pid_file:
            return int(pid_file.read().strip())
    except IOError:
        # pid file is absent: the process was never started or has already cleaned up
        return None
def kill_process(process_name):
    """ method is called to kill a running process """
    try:
        sys.stdout.write('killing: {0} {{ \n'.format(process_name))
        pid = get_process_pid(process_name)
        if pid is not None and psutil.pid_exists(int(pid)):
            p = psutil.Process(pid)
            p.kill()
            # reap the exit status before removing the pid file
            p.wait()
            remove_pid_file(process_name)
    except Exception as e:
        sys.stderr.write('Exception on killing {0} : {1} \n'.format(process_name, str(e)))
    finally:
        sys.stdout.write('}')
def start_process(process_name, *args):
    """ spawns a new process via PROCESS_STARTER, forwarding optional CLI arguments """
    try:
        sys.stdout.write('starting: {0} {{ \n'.format(process_name))
        cmd = [get_python(), PROJECT_ROOT + '/' + PROCESS_STARTER, process_name]
        if not args:
            # this blocks triggers when args is either None or an empty list
            pass
        else:
            cmd.extend(*args)

        # NOTE(review): the child's std streams are PIPEd but never read; a verbose
        # child could block once the pipe buffer fills - consider redirecting to
        # devnull instead. TODO confirm
        p = psutil.Popen(cmd,
                         close_fds=True,
                         cwd=settings.settings['process_cwd'],
                         stdin=PIPE,
                         stdout=PIPE,
                         stderr=PIPE)
        sys.stdout.write('Started {0} with pid = {1} \n'.format(process_name, p.pid))
    except Exception as e:
        sys.stderr.write('Exception on starting {0} : {1} \n'.format(process_name, str(e)))
    finally:
        sys.stdout.write('}')
def poll_process(process_name):
    """ between killing a process and its actual termination lies poorly documented requirement -
        <purging process' io pipes and reading exit status>.
        this can be done either by os.wait() or process.wait()
        :return True if the process is alive and OK and False is the process was terminated """
    try:
        pid = get_process_pid(process_name)
        if pid is None:
            sys.stdout.write('PID file was not found. Process {0} is likely terminated.\n'.format(process_name))
            return False

        # wait() with a tiny timeout: TimeoutExpired is the only "still alive" signal;
        # any return value means the process has already terminated
        p = psutil.Process(pid)
        return_code = p.wait(timeout=0.01)
        if return_code is None:
            # process is already terminated
            sys.stdout.write('Process {0} is terminated \n'.format(process_name))
            return False
        else:
            # process is terminated; possibly by OS
            sys.stdout.write('Process {0} got terminated \n'.format(process_name))
            return False
    except TimeoutExpired:
        sys.stdout.write('Process {0} is alive and OK \n'.format(process_name))
        return True
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/system/process_helper.py",
"copies": "1",
"size": "3143",
"license": "bsd-3-clause",
"hash": 7320228476992307000,
"line_mean": 34.7159090909,
"line_max": 112,
"alpha_frac": 0.598790964,
"autogenerated": false,
"ratio": 3.9336670838548184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011312020765822685,
"num_lines": 88
} |
__author__ = 'Bohdan Mushkevych'
from subprocess import PIPE
import psutil
from synergy.conf import settings
from workers.abstract_cli_worker import AbstractCliWorker
class HadoopAggregatorDriver(AbstractCliWorker):
    """Python process that starts Hadoop map/reduce job, supervises its execution and updates unit_of_work"""

    def __init__(self, process_name):
        super(HadoopAggregatorDriver, self).__init__(process_name)

    def _start_process(self, start_timeperiod, end_timeperiod, arguments):
        """ spawns the Hadoop CLI job and records the handle in self.cli_process """
        try:
            self.logger.info('start: %s {' % self.process_name)
            command_line = [settings.settings['hadoop_command'],
                            'jar', settings.settings['hadoop_jar'],
                            '-D', 'process.name=' + self.process_name,
                            '-D', 'timeperiod.working=' + str(start_timeperiod),
                            '-D', 'timeperiod.next=' + str(end_timeperiod)]
            self.cli_process = psutil.Popen(command_line,
                                            close_fds=True,
                                            cwd=settings.settings['process_cwd'],
                                            stdin=PIPE,
                                            stdout=PIPE,
                                            stderr=PIPE)
            self.logger.info('Started %s with pid = %r' % (self.process_name, self.cli_process.pid))
        except Exception:
            self.logger.error('Exception on starting: %s' % self.process_name, exc_info=True)
        finally:
            self.logger.info('}')
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/hadoop_aggregator_driver.py",
"copies": "1",
"size": "1488",
"license": "bsd-3-clause",
"hash": 3865879909263997000,
"line_mean": 42.7647058824,
"line_max": 109,
"alpha_frac": 0.5517473118,
"autogenerated": false,
"ratio": 4.338192419825073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5389939731625073,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from subprocess import PIPE
import psutil
from synergy.conf import settings
from synergy.conf import context
from workers.abstract_cli_worker import AbstractCliWorker
class PigDriver(AbstractCliWorker):
    """Worker that starts an Apache Pig processing job, supervises its execution
    and updates the associated unit_of_work."""

    # fallback script location, preserved from the original hard-coded value;
    # override via settings key 'pig_script'
    DEFAULT_PIG_SCRIPT = '/home/bmushkevych/git/synergy-pig/script.pig'

    def __init__(self, process_name):
        super(PigDriver, self).__init__(process_name)

    def _start_process(self, start_timeperiod, end_timeperiod, arguments):
        """Spawn the Pig CLI job reading input for the given timeperiod.

        :param start_timeperiod: timeperiod the job reads its input for
        :param end_timeperiod: upper boundary of the processing slice (not passed to the script)
        :param arguments: extra process arguments (unused by this driver)
        """
        try:
            input_file = context.process_context[self.process_name].source

            # the script path used to be hard-coded to a developer's home directory;
            # it is now configurable, with the legacy path as a backward-compatible default
            pig_script = settings.settings.get('pig_script', PigDriver.DEFAULT_PIG_SCRIPT)

            self.logger.info('start: %s {' % self.process_name)
            p = psutil.Popen([settings.settings['bash_shell'],
                              settings.settings['pig_command'],
                              '-f', pig_script,
                              '-p', 'input_file=' + input_file + '/' + start_timeperiod,
                              '-p', 'timeperiod=' + start_timeperiod],
                             close_fds=True,
                             cwd=settings.settings['process_cwd'],
                             stdin=PIPE,
                             stdout=PIPE,
                             stderr=PIPE)
            self.cli_process = p
            self.logger.info('Started %s with pid = %r' % (self.process_name, p.pid))
        except Exception:
            self.logger.error('Exception on starting: %s' % self.process_name, exc_info=True)
        finally:
            self.logger.info('}')
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/pig_driver.py",
"copies": "1",
"size": "1568",
"license": "bsd-3-clause",
"hash": -983428223855973400,
"line_mean": 40.2631578947,
"line_max": 106,
"alpha_frac": 0.5548469388,
"autogenerated": false,
"ratio": 4.237837837837838,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5292684776637838,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from subprocess import PIPE
import psutil
from synergy.system import time_helper
from synergy.system.time_qualifier import QUALIFIER_HOURLY
from synergy.conf import settings
from synergy.conf import context
from workers.abstract_cli_worker import AbstractCliWorker
class SqoopDriver(AbstractCliWorker):
    """Worker that starts a Sqoop import job, supervises its execution and updates unit_of_work"""

    SQOOP_DATE_FORMAT = '%Y-%m-%d %H:%M:%S.000'

    def __init__(self, process_name):
        super(SqoopDriver, self).__init__(process_name)

    def _start_process(self, start_timeperiod, end_timeperiod, arguments):
        """Spawn the Sqoop CLI job covering [start_timeperiod, end_timeperiod)."""
        try:
            # translate synergy timeperiods into Sqoop-compatible timestamps
            slice_start = time_helper.synergy_to_datetime(QUALIFIER_HOURLY, start_timeperiod) \
                .strftime(SqoopDriver.SQOOP_DATE_FORMAT)
            slice_end = time_helper.synergy_to_datetime(QUALIFIER_HOURLY, end_timeperiod) \
                .strftime(SqoopDriver.SQOOP_DATE_FORMAT)
            sink_path = context.process_context[self.process_name].sink

            self.logger.info('start: %s {' % self.process_name)
            command_line = [settings.settings['bash_shell'],
                            settings.settings['sqoop_command'],
                            str(slice_start),
                            str(slice_end),
                            sink_path + '/' + start_timeperiod]
            child = psutil.Popen(command_line,
                                 close_fds=True,
                                 cwd=settings.settings['process_cwd'],
                                 stdin=PIPE,
                                 stdout=PIPE,
                                 stderr=PIPE)
            self.cli_process = child
            self.logger.info('Started %s with pid = %r' % (self.process_name, child.pid))
        except Exception:
            self.logger.error('Exception on starting: %s' % self.process_name, exc_info=True)
        finally:
            self.logger.info('}')
"repo_name": "eggsandbeer/scheduler",
"path": "workers/sqoop_driver.py",
"copies": "1",
"size": "1987",
"license": "bsd-3-clause",
"hash": 1223087017365159000,
"line_mean": 40.3958333333,
"line_max": 93,
"alpha_frac": 0.5918470055,
"autogenerated": false,
"ratio": 3.8657587548638133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49576057603638135,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.conf import context
from synergy.db.dao.base_dao import BaseDao
from flow.flow_constants import COLLECTION_FLOW, ARGUMENT_RUN_MODE
from flow.db.model.flow import Flow, RUN_MODE_NOMINAL
class FlowDao(BaseDao):
    """ Thread-safe Data Access Object for the *flow* table/collection """

    def __init__(self, logger):
        super(FlowDao, self).__init__(logger=logger,
                                      collection_name=COLLECTION_FLOW,
                                      model_class=Flow)

    def managed_run_mode(self, process_name, flow_name, timeperiod):
        """ resolves the effective `run mode` for a managed flow:
            the global default lives in ProcessEntry.arguments['run_mode'],
            while an existing Flow record may override it locally via Flow.run_mode """
        # global default, taken from the process context
        default_mode = context.process_context[process_name] \
            .arguments.get(ARGUMENT_RUN_MODE, RUN_MODE_NOMINAL)

        try:
            # a persisted Flow record, when present, takes precedence over the default
            flow_entry = self.get_one([flow_name, timeperiod])
            return flow_entry.run_mode
        except LookupError:
            # no Flow record exists for the given key - fall back to the default
            return default_mode
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/db/dao/flow_dao.py",
"copies": "1",
"size": "1495",
"license": "bsd-3-clause",
"hash": -694916221861280100,
"line_mean": 40.5277777778,
"line_max": 94,
"alpha_frac": 0.6287625418,
"autogenerated": false,
"ratio": 4.129834254143646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009351953541782044,
"num_lines": 36
} |
__author__ = 'Bohdan Mushkevych'
from synergy.conf import settings, context
from synergy.db.model import unit_of_work
from synergy.db.model.unit_of_work import TYPE_MANAGED
from synergy.workers.abstract_uow_aware_worker import AbstractUowAwareWorker
from flow.core.execution_context import ExecutionContext
from flow.core.execution_engine import ExecutionEngine
from flow.db.model import flow
from flow.db.model.flow import RUN_MODE_NOMINAL, RUN_MODE_RECOVERY
from flow.db.dao.flow_dao import FlowDao
from flow.flow_constants import *
class FlowDriver(AbstractUowAwareWorker):
    """starts Synergy Flow processing job, supervises its execution and updates unit_of_work"""

    def __init__(self, process_name):
        super(FlowDriver, self).__init__(process_name, perform_db_logging=True)
        self.flow_dao = FlowDao(self.logger)

    def _process_uow(self, uow):
        """Run the workflow described by the given unit_of_work.

        :param uow: unit_of_work carrying ARGUMENT_FLOW_NAME and, optionally, run-mode arguments
        :return: tuple (0, resulting unit_of_work state)
        """
        flow_name = uow.arguments[ARGUMENT_FLOW_NAME]
        if uow.unit_of_work_type == TYPE_MANAGED:
            # managed runs resolve the mode via the FlowDao (DB override of the process default)
            run_mode = self.flow_dao.managed_run_mode(self.process_name, flow_name, uow.timeperiod)
        else:
            run_mode = uow.arguments.get(ARGUMENT_RUN_MODE)

        try:
            self.logger.info('starting Flow: {0} {{'.format(flow_name))
            execution_engine = ExecutionEngine(self.logger, flow_name)

            # named exec_context to avoid shadowing the `context` module
            # imported from synergy.conf
            exec_context = ExecutionContext(flow_name, uow.timeperiod, uow.start_timeperiod, uow.end_timeperiod,
                                            settings.settings)

            if run_mode == RUN_MODE_RECOVERY:
                execution_engine.recover(exec_context)
            elif run_mode == RUN_MODE_RUN_ONE:
                step_name = uow.arguments.get(ARGUMENT_STEP_NAME)
                execution_engine.run_one(exec_context, step_name)
            elif run_mode == RUN_MODE_RUN_FROM:
                step_name = uow.arguments.get(ARGUMENT_STEP_NAME)
                execution_engine.run_from(exec_context, step_name)
            elif run_mode == RUN_MODE_NOMINAL:
                execution_engine.run(exec_context)
            else:
                raise ValueError('run mode {0} is unknown to the Synergy Flow'.format(run_mode))

            # map the resulting flow state onto the unit_of_work state
            if exec_context.flow_entry.state == flow.STATE_PROCESSED:
                uow_status = unit_of_work.STATE_PROCESSED
            elif exec_context.flow_entry.state == flow.STATE_NOOP:
                uow_status = unit_of_work.STATE_NOOP
            else:
                uow_status = unit_of_work.STATE_INVALID
        except Exception:
            self.logger.error('Exception on workflow execution: {0}'.format(flow_name), exc_info=True)
            uow_status = unit_of_work.STATE_INVALID
        finally:
            self.logger.info('}')
        return 0, uow_status
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/workers/flow_driver.py",
"copies": "1",
"size": "2706",
"license": "bsd-3-clause",
"hash": -2548279925088372700,
"line_mean": 43.3606557377,
"line_max": 107,
"alpha_frac": 0.6396895787,
"autogenerated": false,
"ratio": 3.74792243767313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.488761201637313,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.db.dao.base_dao import BaseDao
from synergy.db.model.log_recording import LogRecording, PARENT_OBJECT_ID, LOG
from synergy.system.decorator import thread_safe
from synergy.scheduler.scheduler_constants import COLLECTION_LOG_RECORDING
class LogRecordingDao(BaseDao):
    """ Thread-safe Data Access Object for logs stored in log_recording table/collection """

    def __init__(self, logger):
        super(LogRecordingDao, self).__init__(logger=logger,
                                              collection_name=COLLECTION_LOG_RECORDING,
                                              model_class=LogRecording)

    @thread_safe
    def append_log(self, uow_id, msg):
        """Append *msg* to the log document associated with the given unit_of_work id.

        The log document is created on first append (upsert).
        :raises LookupError: if the update neither modified an existing document
                             nor inserted a new one
        """
        collection = self.ds.connection(self.collection_name)
        result = collection.update_one({PARENT_OBJECT_ID: uow_id},
                                       {'$push': {LOG: msg}},
                                       upsert=True)
        # BUGFIX: on a fresh upsert-insert `modified_count` is 0 while `upserted_id` is set;
        # checking `modified_count` alone wrongly raised on the very first append
        if result.modified_count == 0 and result.upserted_id is None:
            raise LookupError(f'Log append failed for {uow_id} in collection {self.collection_name}')
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/dao/log_recording_dao.py",
"copies": "1",
"size": "1104",
"license": "bsd-3-clause",
"hash": -5326603260869012000,
"line_mean": 43.16,
"line_max": 101,
"alpha_frac": 0.6114130435,
"autogenerated": false,
"ratio": 4.2298850574712645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012768098441722732,
"num_lines": 25
} |
__author__ = 'Bohdan Mushkevych'
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.system import time_helper
from synergy.conf import context
from synergy.mx.base_request_handler import valid_action_request
from synergy.mx.abstract_action_handler import AbstractActionHandler
from synergy.mx.tree_node_details import TreeNodeDetails
class ManagedActionHandler(AbstractActionHandler):
    """MX action handler for managed (timetable-driven) processes."""

    def __init__(self, request, **values):
        super(ManagedActionHandler, self).__init__(request, **values)
        self.process_name = self.request_arguments.get('process_name')
        self.timeperiod = self.request_arguments.get('timeperiod')
        self.uow_dao = UnitOfWorkDao(self.logger)
        # both process_name and timeperiod are mandatory for every action
        self.is_request_valid = True if self.process_name and self.timeperiod else False

        if self.is_request_valid:
            self.process_name = self.process_name.strip()
            self.timeperiod = self.timeperiod.strip()

    def _get_tree_node(self):
        """Locate the timetable tree node for (process_name, timeperiod).

        :raises UserWarning: if no timetable tree is registered for the process
        """
        tree = self.scheduler.timetable.get_tree(self.process_name)
        if tree is None:
            raise UserWarning('No Timetable tree is registered for process %s' % self.process_name)

        # normalize the timeperiod to the granularity of the process' time qualifier
        time_qualifier = context.process_context[self.process_name].time_qualifier
        self.timeperiod = time_helper.cast_to_time_qualifier(time_qualifier, self.timeperiod)
        node = tree.get_node(self.process_name, self.timeperiod)
        return node

    def _perform_node_action(self, action_name, node_method_name):
        """Shared flow for node-level actions (REPROCESS/SKIP): logs the request,
        invokes the named node method and returns details of all effected nodes.

        :param action_name: action label used in log/audit messages, e.g. 'REPROCESS'
        :param node_method_name: name of the TreeNode method to invoke, e.g. 'request_reprocess'
        """
        node = self._get_tree_node()

        msg = 'MX: requesting %s for %s in timeperiod %s' % (action_name, self.process_name, self.timeperiod)
        self.scheduler.timetable.add_log_entry(self.process_name, self.timeperiod, msg)
        self.logger.info(msg + ' {')

        effected_nodes = getattr(node, node_method_name)()
        resp = dict()
        for node in effected_nodes:
            resp[node.timeperiod] = TreeNodeDetails.get_details(node)

        self.logger.info('MX }')
        return resp

    @AbstractActionHandler.thread_handler.getter
    def thread_handler(self):
        handler_key = self.process_name
        return self.scheduler.managed_handlers[handler_key]

    @AbstractActionHandler.process_entry.getter
    def process_entry(self):
        return self.thread_handler.process_entry

    @valid_action_request
    def action_reprocess(self):
        return self._perform_node_action('REPROCESS', 'request_reprocess')

    @valid_action_request
    def action_skip(self):
        return self._perform_node_action('SKIP', 'request_skip')

    @valid_action_request
    def action_get_uow(self):
        node = self._get_tree_node()

        uow_id = node.job_record.related_unit_of_work
        if uow_id is None:
            resp = {'response': 'no related unit_of_work'}
        else:
            # stringify every field so the response is JSON-serializable
            resp = self.uow_dao.get_one(uow_id).document
            for key in resp:
                resp[key] = str(resp[key])
        return resp

    @valid_action_request
    def action_get_log(self):
        node = self._get_tree_node()
        return {'log': node.job_record.log}
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/managed_action_handler.py",
"copies": "1",
"size": "3379",
"license": "bsd-3-clause",
"hash": -8984078849518542000,
"line_mean": 36.1318681319,
"line_max": 103,
"alpha_frac": 0.6569991122,
"autogenerated": false,
"ratio": 3.64902807775378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9801281799121733,
"avg_score": 0.0009490781664095824,
"num_lines": 91
} |
__author__ = 'Bohdan Mushkevych'
from synergy.db.error import DuplicateKeyError
from synergy.db.model.job import Job
from synergy.system import time_helper
from synergy.system.time_qualifier import QUALIFIER_HOURLY
from tests.base_fixtures import create_unit_of_work
# fixed hourly timeperiod for tests that need a deterministic value
TEST_PRESET_TIMEPERIOD = '2013010122'
# hourly timeperiod for "now", computed once at import time
TEST_ACTUAL_TIMEPERIOD = time_helper.actual_timeperiod(QUALIFIER_HOURLY)
# one hour before the actual timeperiod
TEST_PAST_TIMEPERIOD = time_helper.increment_timeperiod(QUALIFIER_HOURLY, TEST_ACTUAL_TIMEPERIOD, delta=-1)
# timeperiod after the actual one (default delta; presumably +1 hour - confirm in time_helper)
TEST_FUTURE_TIMEPERIOD = time_helper.increment_timeperiod(QUALIFIER_HOURLY, TEST_ACTUAL_TIMEPERIOD)
def then_raise_uw(*args):
    """stand-in for AbstractStateMachine._insert_uow / insert_and_publish_uow
    that unconditionally fails with a UserWarning"""
    raise UserWarning('Simulated UserWarning Exception')
def mock_insert_uow_return_uow(process_name, timeperiod, start_timeperiod, end_timeperiod, start_id, end_id):
    """stand-in for AbstractStateMachine._insert_uow: returns a freshly built unit_of_work"""
    uow = create_unit_of_work(process_name, start_id, end_id, timeperiod, uow_id='a_uow_id')
    return uow
def mock_insert_uow_raise_dpk(process_name, timeperiod, start_timeperiod, end_timeperiod, start_id, end_id):
    """stand-in for AbstractStateMachine._insert_uow that fails with a DuplicateKeyError"""
    raise DuplicateKeyError(process_name, timeperiod, start_id, end_id, 'Simulated Exception')
def then_raise_dpk(job_record, start_id, end_id):
    """stand-in for AbstractStateMachine.insert_and_publish_uow that fails with a DuplicateKeyError"""
    raise DuplicateKeyError(job_record.process_name, job_record.timeperiod, start_id, end_id, 'Simulated Exception')
def then_return_uow(job_record, start_id, end_id):
    """stand-in for AbstractStateMachine.insert_and_publish_uow:
    returns (unit_of_work, is_duplicate=False)"""
    uow = create_unit_of_work(job_record.process_name, start_id, end_id, job_record.timeperiod, uow_id='a_uow_id')
    return uow, False
def then_return_duplicate_uow(job_record, start_id, end_id):
    """stand-in for AbstractStateMachine.insert_and_publish_uow:
    returns (unit_of_work, is_duplicate=True)"""
    uow = create_unit_of_work(job_record.process_name, start_id, end_id, job_record.timeperiod, uow_id='a_uow_id')
    return uow, True
def get_job_record(state, timeperiod, process_name):
    """builds a Job record in the given state, with a fixed db_id"""
    return Job(process_name=process_name,
               timeperiod=timeperiod,
               state=state,
               db_id='000000000000000123456789')
| {
"repo_name": "mushkevych/scheduler",
"path": "tests/state_machine_testing_utils.py",
"copies": "1",
"size": "2241",
"license": "bsd-3-clause",
"hash": 8051155290546120000,
"line_mean": 42.0961538462,
"line_max": 118,
"alpha_frac": 0.7340473003,
"autogenerated": false,
"ratio": 3.2620087336244543,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4496056033924454,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.db.dao.managed_process_dao import ManagedProcessDao
from synergy.system.event_clock import parse_time_trigger_string
from synergy.scheduler.scheduler_constants import *
def construct_thread_handler(logger, process_entry, call_back):
    """ factory: derives handler key and type from the process_entry
    and wraps them into a ThreadHandler instance """
    if isinstance(process_entry, ManagedProcessEntry):
        # managed entries are keyed by process name alone
        handler_key = process_entry.process_name
        handler_type = TYPE_MANAGED
    elif isinstance(process_entry, FreerunProcessEntry):
        # freerun entries are keyed by (process name, entry name)
        handler_key = (process_entry.process_name, process_entry.entry_name)
        handler_type = TYPE_FREERUN
    else:
        raise ValueError('Scheduler Entry type %s is not known to the system. Skipping it.'
                         % process_entry.__class__.__name__)

    return ThreadHandler(logger, handler_key, process_entry.trigger_frequency,
                         call_back, process_entry, handler_type)
class ThreadHandlerArguments(object):
    """ value object carrying Thread Handler arguments;
    handed to the Timer instance and, from there, to the Scheduler's running function """

    def __init__(self, key, trigger_frequency, process_entry, handler_type):
        self.handler_type = handler_type
        self.process_entry = process_entry
        self.trigger_frequency = trigger_frequency
        self.key = key
class ThreadHandler(object):
    """ ThreadHandler is a thread running within the Synergy Scheduler and triggering Scheduler's fire_XXX logic"""

    def __init__(self, logger, key, trigger_frequency, call_back, process_entry, handler_type):
        """
        :param logger: logger to use
        :param key: handler key - process_name for managed, (process_name, entry_name) for freerun
        :param trigger_frequency: time-trigger string, parsable by parse_time_trigger_string
        :param call_back: callable fired on every trigger
        :param process_entry: associated ManagedProcessEntry or FreerunProcessEntry
        :param handler_type: TYPE_MANAGED or TYPE_FREERUN
        """
        self.logger = logger
        self.key = key
        self.trigger_frequency = trigger_frequency
        self.call_back = call_back
        self.process_entry = process_entry
        self.handler_type = handler_type

        parsed_trigger_frequency, timer_klass = parse_time_trigger_string(trigger_frequency)
        self.timer_instance = timer_klass(parsed_trigger_frequency, call_back, args=[self.callback_args])
        self.is_started = False
        self.is_terminated = False

        self.se_freerun_dao = FreerunProcessDao(self.logger)
        self.se_managed_dao = ManagedProcessDao(self.logger)
        self.logger.info('Created Synergy Scheduler Thread Handler %r~%r' % (key, trigger_frequency))

    def __del__(self):
        # best-effort timer cleanup on garbage collection
        self.timer_instance.cancel()

    @property
    def callback_args(self):
        """current handler state, packaged for the Scheduler's fire_XXX call back"""
        return ThreadHandlerArguments(self.key, self.trigger_frequency, self.process_entry, self.handler_type)

    def _get_dao(self):
        """DAO matching the process_entry type
        :raises ValueError: for unknown process_entry types"""
        if self.is_managed:
            return self.se_managed_dao
        elif self.is_freerun:
            return self.se_freerun_dao
        else:
            raise ValueError('Scheduler Entry type %s is not known to the system. Skipping it.'
                             % self.process_entry.__class__.__name__)

    def activate(self, update_persistent=True):
        """(re)starts the timer and marks the process entry as *on*
        :param update_persistent: when True - persists the process entry state change"""
        if self.timer_instance.is_alive():
            return

        if self.is_terminated:
            # a cancelled timer can not be restarted - construct a fresh one
            parsed_trigger_frequency, timer_klass = parse_time_trigger_string(self.trigger_frequency)
            self.timer_instance = timer_klass(parsed_trigger_frequency, self.call_back, args=[self.callback_args])

        self.process_entry.is_on = True
        if update_persistent:
            self._get_dao().update(self.process_entry)

        self.timer_instance.start()
        self.is_terminated = False
        self.is_started = True

    def deactivate(self, update_persistent=True):
        """cancels the timer and marks the process entry as *off*
        :param update_persistent: when True - persists the process entry state change"""
        self.timer_instance.cancel()
        self.is_terminated = True

        self.process_entry.is_on = False
        if update_persistent:
            self._get_dao().update(self.process_entry)

    def trigger(self):
        """fires the call back immediately, regardless of the schedule"""
        self.timer_instance.trigger()

    def change_interval(self, value, update_persistent=True):
        """applies a new trigger frequency, swapping the timer implementation
        (RepeatTimer vs EventClock) if the new trigger string requires it.

        BUGFIX vs previous revision:
        - the replacement timer was built by re-parsing the *old* self.trigger_frequency,
          so the timer-class swap silently kept the stale trigger
        - deactivate() set process_entry.is_on to False, so the subsequent
          `if self.process_entry.is_on:` never restarted the new timer
        - self.trigger_frequency was left stale when the timer class did not change
        :param value: new time-trigger string
        :param update_persistent: when True - persists the process entry changes"""
        parsed_trigger_frequency, timer_klass = parse_time_trigger_string(value)

        if isinstance(self.timer_instance, timer_klass):
            # same timer type - only the frequency of runs changes
            self.timer_instance.change_interval(parsed_trigger_frequency)
        else:
            # trigger time requires different type of timer - RepeatTimer instead of EventClock or vice versa
            was_on = self.process_entry.is_on

            # 1. deactivate current timer; persistent state is updated once, below
            self.deactivate(update_persistent=False)

            # 2. create a new timer instance with the *new* trigger frequency
            self.timer_instance = timer_klass(parsed_trigger_frequency, self.call_back, args=[self.callback_args])

            # 3. restore the activation state and start the new timer if needed
            self.process_entry.is_on = was_on
            if self.process_entry.is_on:
                self.timer_instance.start()
                self.is_terminated = False
                self.is_started = True

        self.trigger_frequency = value
        self.process_entry.trigger_frequency = value
        if update_persistent:
            self._get_dao().update(self.process_entry)

    def next_run_in(self, utc_now=None):
        """delay until the next scheduled run, as reported by the underlying timer"""
        return self.timer_instance.next_run_in(utc_now)

    @property
    def is_alive(self):
        return self.timer_instance.is_alive()

    @property
    def is_managed(self):
        return isinstance(self.process_entry, ManagedProcessEntry)

    @property
    def is_freerun(self):
        return isinstance(self.process_entry, FreerunProcessEntry)
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/thread_handler.py",
"copies": "1",
"size": "5688",
"license": "bsd-3-clause",
"hash": -5856365553910050000,
"line_mean": 39.3404255319,
"line_max": 115,
"alpha_frac": 0.6698312236,
"autogenerated": false,
"ratio": 4.031183557760453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5201014781360453,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.db.model import job
from synergy.system import time_helper
from synergy.conf import context
class NodesCompositeState(object):
    """ Instance of this structure represents composite state of TreeNodes """

    def __init__(self):
        super(NodesCompositeState, self).__init__()

        # True if all dependent_on Jobs are finished
        self.all_finished = True

        # True if all dependent_on Jobs are successfully processed
        self.all_processed = True

        # True if all dependent_on Jobs are either active or successfully processed
        self.all_healthy = True

        # True if among dependent_on periods are some in STATE_SKIPPED
        self.skipped_present = False

    def enlist(self, tree_node):
        """folds the state of the given tree_node's job_record into this composite state"""
        assert isinstance(tree_node, TreeNode)

        if not tree_node.job_record.is_finished:
            self.all_finished = False
        if not tree_node.job_record.is_processed:
            self.all_processed = False
        # BUGFIX: healthy means "either active or successfully processed";
        # the previous `or` flagged the composite unhealthy whenever a job was not
        # simultaneously active AND processed, contradicting the attribute contract
        if not tree_node.job_record.is_active and not tree_node.job_record.is_processed:
            self.all_healthy = False
        if tree_node.job_record.is_skipped:
            self.skipped_present = True
class TreeNode(object):
    """ A single node of a Timetable tree: binds a process at a given timeperiod
        with its Job record, and links to parent and children nodes """

    def __init__(self, tree, parent, process_name, timeperiod, job_record):
        """
        :param tree: tree instance this node belongs to
        :param parent: parent TreeNode; None for the TREE ROOT
        :param process_name: name of the process this node represents; None for the TREE ROOT
        :param timeperiod: timeperiod this node covers; None for the TREE ROOT
        :param job_record: associated db.model.job record; may be None until requested
        """
        # initializes the data members
        self.children = dict()
        self.tree = tree
        self.parent = parent
        self.process_name = process_name
        self.timeperiod = timeperiod
        self.job_record = job_record
        if parent is None and process_name is None and timeperiod is None and job_record is None:
            # special case - node is TREE ROOT
            self.time_qualifier = None
        else:
            self.time_qualifier = context.process_context[self.process_name].time_qualifier

    def request_reprocess(self):
        """ method marks this and all parents node as such that requires reprocessing
        :return list of nodes that have been effected """
        effected_nodes = []
        if self.parent is None:
            # do not process 'root' - the only node that has None as 'parent'
            return effected_nodes

        for function in self.tree.reprocess_callbacks:
            # function signature: tree_node
            function(self)
        # ancestors are collected first; this node is appended last
        effected_nodes.extend(self.parent.request_reprocess())
        effected_nodes.append(self)
        return effected_nodes

    def request_skip(self):
        """ method marks this node as one to skip
        :return single-element list holding this node """
        for function in self.tree.skip_callbacks:
            # function signature: tree_node
            function(self)
        return [self]

    def request_embryo_job_record(self):
        """ method is requesting outside functionality to create a job record in STATE_EMBRYO for given tree_node """
        for function in self.tree.create_job_record_callbacks:
            # function signature: tree_node
            function(self)

    def is_finalizable(self):
        """method checks whether:
        - all counterpart of this node in dependent_on trees are finished
        - all direct children of the node are finished
        - the node itself is in active state"""

        composite_state = self.dependent_on_composite_state()
        if not composite_state.all_finished:
            return False

        # make sure a job record exists before inspecting this node's state below
        if self.job_record is None:
            self.request_embryo_job_record()

        children_processed = all([child.job_record.is_finished for child in self.children.values()])
        return children_processed and self.job_record.is_active

    def validate(self):
        """method traverse tree and performs following activities:
        * requests a job record in STATE_EMBRYO if no job record is currently assigned to the node
        * requests nodes for reprocessing, if STATE_PROCESSED node relies on unfinalized nodes
        * requests node for skipping if it is daily node and all 24 of its Hourly nodes are in STATE_SKIPPED state"""

        # step 0: request Job record if current one is not set
        if self.job_record is None:
            self.request_embryo_job_record()

        # step 1: define if current node has a younger sibling
        next_timeperiod = time_helper.increment_timeperiod(self.time_qualifier, self.timeperiod)
        has_younger_sibling = next_timeperiod in self.parent.children

        # step 2: define if all children are done and if perhaps they all are in STATE_SKIPPED
        all_children_skipped = True
        all_children_finished = True
        for timeperiod in self.children:
            child = self.children[timeperiod]
            # depth-first: descendants are validated before this node's own verdict
            child.validate()

            if child.job_record.is_active:
                all_children_finished = False
            if not child.job_record.is_skipped:
                all_children_skipped = False

        # step 3: request this node's reprocessing if it is enroute to STATE_PROCESSED
        # while some of its children are still performing processing
        if all_children_finished is False and self.job_record.is_finished:
            self.request_reprocess()

        # step 4: verify if this node should be transferred to STATE_SKIPPED
        # algorithm is following:
        # point a: node must have children
        # point b: existence of a younger sibling means that the tree contains another node of the same level
        # thus - should the tree.build_timeperiod be not None - the children level of this node is fully constructed
        # point c: if all children of this node are in STATE_SKIPPED then we will set this node state to STATE_SKIPPED
        if len(self.children) != 0 \
                and all_children_skipped \
                and self.tree.build_timeperiod is not None \
                and has_younger_sibling is True \
                and not self.job_record.is_skipped:
            self.request_skip()

    def add_log_entry(self, entry):
        """ :db.model.job record holds MAX_NUMBER_OF_LOG_ENTRIES of log entries, that can be accessed by MX
        this method adds a record and removes oldest one if necessary """
        log = self.job_record.log
        if len(log) > job.MAX_NUMBER_OF_LOG_ENTRIES:
            # drop the oldest entry (list tail) to make room
            del log[-1]
        # newest entry goes to the head of the list
        log.insert(0, entry)

    def find_counterpart_in(self, tree_b):
        """ Finds a TreeNode counterpart for this node in tree_b
        :param tree_b: target tree that hosts counterpart to this node
        :return: TreeNode from tree_b that has the same timeperiod as this node, or None if no counterpart ware found
        """
        tree_b_hierarchy_entry = tree_b.process_hierarchy.get_by_qualifier(self.time_qualifier)
        if not tree_b_hierarchy_entry:
            # special case when tree with more levels depends on the tree with smaller amount of levels
            # for example ThreeLevel Financial tree depends on TwoLevel Google Channel
            # in this case - we just verify time-periods that matches in both trees;
            # for levels that have no match, we assume that dependency does not exists
            # for example Financial Monthly has no counterpart in Google Daily Report -
            # so we assume that its not blocked
            node_b = None
        else:
            node_b = tree_b.get_node(tree_b_hierarchy_entry.process_entry.process_name, self.timeperiod)
        return node_b

    def dependent_on_composite_state(self):
        """ method iterates over all nodes that provide dependency to the current node,
        and compile composite state of them all
        :return instance of <NodesCompositeState>
        """
        composite_state = NodesCompositeState()

        for dependent_on in self.tree.dependent_on:
            node_b = self.find_counterpart_in(dependent_on)

            if node_b is None:
                # special case when counterpart tree has no process with corresponding time_qualifier
                # for example Financial Monthly has no counterpart in Third-party Daily Report -
                # so we assume that its not blocked
                continue

            composite_state.enlist(node_b)

        return composite_state
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/tree_node.py",
"copies": "1",
"size": "8161",
"license": "bsd-3-clause",
"hash": -255059765538996260,
"line_mean": 42.4095744681,
"line_max": 118,
"alpha_frac": 0.6473471388,
"autogenerated": false,
"ratio": 4.320275277924828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015250102512513975,
"num_lines": 188
} |
__author__ = 'Bohdan Mushkevych'
from synergy.db.model.unit_of_work import TIMEPERIOD
from workers.abstract_mongo_worker import AbstractMongoWorker
class AbstractHorizontalWorker(AbstractMongoWorker):
    """ base class for workers that consume DB records in chunks/batches,
    as opposed to sequential record-by-record reading """

    def __init__(self, process_name):
        super(AbstractHorizontalWorker, self).__init__(process_name)

    def _process_bulk_array(self, array_of_documents, timeperiod):
        """ hook for subclasses: parse one batch of documents """
        pass

    def _process_not_empty_cursor(self, cursor):
        """ drains the cursor into a batch, hands the batch to _process_bulk_array
        and reports (shall_continue, id of the last consumed document) """
        last_seen_id = None
        proceed = False
        try:
            batch = []
            for document in cursor:
                last_seen_id = document['_id']
                self.performance_ticker.increment()
                batch.append(document)

            # Mongo funny behaviour - cursor may be empty, with cursor.count != 0
            if batch:
                self._process_bulk_array(batch, batch[0][TIMEPERIOD])
                proceed = True
            del batch
        except LookupError as e:
            self.logger.error('Some data is missing. Proceeding to next bulk read : %r' % e)

        return proceed, last_seen_id
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/abstract_horizontal_worker.py",
"copies": "1",
"size": "1490",
"license": "bsd-3-clause",
"hash": 819035481151562100,
"line_mean": 37.2051282051,
"line_max": 92,
"alpha_frac": 0.6281879195,
"autogenerated": false,
"ratio": 4.281609195402299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5409797114902299,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request
class AbstractActionHandler(BaseRequestHandler):
    """ base class for MX action handlers: exposes trigger/interval controls,
    leaving thread_handler and process_entry resolution to subclasses """

    def __init__(self, request, **values):
        super(AbstractActionHandler, self).__init__(request, **values)

    @property
    def thread_handler(self):
        raise NotImplementedError('not implemented yet')

    @property
    def process_entry(self):
        raise NotImplementedError('not implemented yet')

    def action_get_uow(self):
        raise NotImplementedError('not implemented yet')

    def action_get_log(self):
        raise NotImplementedError('not implemented yet')

    @valid_action_request
    def action_change_interval(self):
        """ applies the interval from the request arguments to the thread handler """
        resp = dict()
        new_interval = self.request_arguments['interval']
        if new_interval is not None:
            handler = self.thread_handler
            handler.change_interval(new_interval)
            resp['status'] = 'changed interval for %r to %r' % (handler.key, new_interval)
        return resp

    @valid_action_request
    def action_trigger_now(self):
        """ fires the thread handler immediately """
        self.thread_handler.trigger()
        return self.reply_ok()

    @valid_action_request
    def action_activate_trigger(self):
        """ switches the trigger on """
        self.thread_handler.activate()
        return self.reply_ok()

    @valid_action_request
    def action_deactivate_trigger(self):
        """ switches the trigger off """
        self.thread_handler.deactivate()
        return self.reply_ok()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/abstract_action_handler.py",
"copies": "1",
"size": "1476",
"license": "bsd-3-clause",
"hash": 4938145844538513000,
"line_mean": 29.75,
"line_max": 97,
"alpha_frac": 0.6639566396,
"autogenerated": false,
"ratio": 4.217142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004576830732292917,
"num_lines": 48
} |
__author__ = 'Bohdan Mushkevych'
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request, safe_json_response
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.dao.log_recording_dao import LogRecordingDao
class AbstractActionHandler(BaseRequestHandler):
    """ base class for MX action handlers: provides uow/uow-log retrieval and
    trigger/interval controls, leaving handler resolution to subclasses """

    def __init__(self, request, **values):
        super(AbstractActionHandler, self).__init__(request, **values)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.log_recording_dao = LogRecordingDao(self.logger)

    @property
    def thread_handler(self):
        raise NotImplementedError(
            'property thread_handler must be implemented by {0}'.format(self.__class__.__name__))

    @property
    def process_entry(self):
        raise NotImplementedError(
            'property process_entry must be implemented by {0}'.format(self.__class__.__name__))

    @property
    def uow_id(self):
        raise NotImplementedError(
            'property uow_id must be implemented by {0}'.format(self.__class__.__name__))

    def get_event_log(self):
        raise NotImplementedError(
            'method action_get_event_log must be implemented by {0}'.format(self.__class__.__name__))

    @safe_json_response
    def get_uow(self):
        """ returns the document of the related unit_of_work, if one exists """
        if self.uow_id is None:
            return {'response': 'no related unit_of_work'}
        return self.uow_dao.get_one(self.uow_id).document

    @safe_json_response
    def get_uow_log(self):
        """ returns the log document recorded for the related unit_of_work """
        try:
            return self.log_recording_dao.get_one(self.uow_id).document
        except (TypeError, LookupError):
            return {'response': 'no related uow log'}

    @valid_action_request
    def change_interval(self):
        """ applies the interval from the request arguments to the thread handler """
        resp = dict()
        new_interval = self.request_arguments['interval']
        if new_interval is not None:
            self.thread_handler.change_interval(new_interval)
            msg = 'changed interval for {0} to {1}'.format(self.thread_handler.key, new_interval)
            self.logger.info('MX: {0}'.format(msg))
            resp['status'] = msg
        return resp

    @valid_action_request
    def trigger_now(self):
        """ fires the thread handler immediately """
        self.thread_handler.trigger()
        self.logger.info('MX: triggered thread handler {0}'.format(self.thread_handler.key))
        return self.reply_ok()

    @valid_action_request
    def activate_trigger(self):
        """ switches the thread handler on """
        self.thread_handler.activate()
        self.logger.info('MX: activated thread handler {0}'.format(self.thread_handler.key))
        return self.reply_ok()

    @valid_action_request
    def deactivate_trigger(self):
        """ switches the thread handler off """
        self.thread_handler.deactivate()
        self.logger.info('MX: deactivated thread handler {0}'.format(self.thread_handler.key))
        return self.reply_ok()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/abstract_action_handler.py",
"copies": "1",
"size": "2676",
"license": "bsd-3-clause",
"hash": 5635808558397088000,
"line_mean": 35.6575342466,
"line_max": 114,
"alpha_frac": 0.6517189836,
"autogenerated": false,
"ratio": 3.8448275862068964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.999017030573671,
"avg_score": 0.0012752528140374045,
"num_lines": 73
} |
__author__ = 'Bohdan Mushkevych'
from synergy.scheduler.scheduler_constants import STATE_MACHINE_FREERUN, EXCHANGE_FREERUN_WORKER
from synergy.db.model.daemon_process_entry import DaemonProcessEntry
from odm.fields import StringField, ListField, ObjectIdField, BooleanField
# contains list of last EVENT_LOG_MAX_SIZE job events, such as emission of the UOW
MAX_NUMBER_OF_EVENTS = 128
def split_schedulable_name(name):
return name.split('::', 1)
def build_schedulable_name(prefix, suffix):
return '{0}::{1}'.format(prefix, suffix)
class FreerunProcessEntry(DaemonProcessEntry):
""" Class presents single configuration entry for the freerun process/bash_driver """
db_id = ObjectIdField(name='_id', null=True)
source = StringField(null=True)
sink = StringField(null=True)
trigger_frequency = StringField() # either 'at DoW-HH:MM' or 'every XXX'
is_on = BooleanField(default=False) # defines if the schedulable is active or off
state_machine_name = StringField()
entry_name = StringField() # name of the schedulable
description = StringField() # description of the schedulable
event_log = ListField()
related_unit_of_work = ObjectIdField()
@classmethod
def key_fields(cls):
return cls.process_name.name, cls.entry_name.name
@property
def schedulable_name(self):
return build_schedulable_name(self.process_name, self.entry_name)
def freerun_context_entry(process_name,
entry_name,
classname,
token,
trigger_frequency,
is_on=True,
present_on_boxes=None,
description=None,
arguments=None,
exchange=EXCHANGE_FREERUN_WORKER,
queue=None,
routing=None,
pid_file=None,
log_file=None):
""" forms process context entry """
_ROUTING_PREFIX = 'routing_'
_QUEUE_PREFIX = 'queue_'
_SUFFIX = '_freerun'
if arguments is not None:
assert isinstance(arguments, dict)
process_entry = FreerunProcessEntry(
process_name=process_name,
entry_name=entry_name,
trigger_frequency=trigger_frequency,
state_machine_name=STATE_MACHINE_FREERUN,
is_on=is_on,
classname=classname,
token=token,
present_on_boxes=present_on_boxes,
description=description,
mq_queue=queue if queue is not None else _QUEUE_PREFIX + token + _SUFFIX,
mq_routing_key=routing if routing is not None else _ROUTING_PREFIX + token + _SUFFIX,
mq_exchange=exchange,
arguments=arguments if arguments is not None else dict(),
log_filename=log_file if log_file is not None else token + _SUFFIX + '.log',
pid_filename=pid_file if pid_file is not None else token + _SUFFIX + '.pid')
return process_entry
ENTRY_NAME = FreerunProcessEntry.entry_name.name
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/model/freerun_process_entry.py",
"copies": "1",
"size": "3114",
"license": "bsd-3-clause",
"hash": -3030450525274836500,
"line_mean": 36.0714285714,
"line_max": 96,
"alpha_frac": 0.6191393706,
"autogenerated": false,
"ratio": 3.9820971867007673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5101236557300767,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.scheduler.scheduler_constants import TYPE_FREERUN, STATE_MACHINE_FREERUN
from synergy.db.model.daemon_process_entry import DaemonProcessEntry
from odm.fields import StringField, ListField, ObjectIdField, BooleanField
# field-name constants shared by the FreerunProcessEntry schema below
PROCESS_NAME = 'process_name'            # name of the process to handle the schedulables
ENTRY_NAME = 'entry_name'                # name of the schedulable
DESCRIPTION = 'description'              # description of the schedulable
IS_ON = 'is_on'                          # defines if the schedulable is active or off
TRIGGER_FREQUENCY = 'trigger_frequency'  # either 'at DoW-HH:MM' or 'every XXX'
STATE_MACHINE_NAME = 'state_machine_name'
SOURCE = 'source'
SINK = 'sink'
HISTORIC_LOG = 'historic_log'            # contains list of MAX_NUMBER_OF_LOG_ENTRIES last log messages
MAX_NUMBER_OF_LOG_ENTRIES = 64
RELATED_UNIT_OF_WORK = 'related_unit_of_work'
class FreerunProcessEntry(DaemonProcessEntry):
    """ Class presents single configuration entry for the freerun process/bash_driver """
    # _id of the persisted document; null until saved
    db_id = ObjectIdField('_id', null=True)
    source = StringField(SOURCE)
    sink = StringField(SINK)
    trigger_frequency = StringField(TRIGGER_FREQUENCY)      # either 'at DoW-HH:MM' or 'every XXX'
    is_on = BooleanField(IS_ON, default=False)              # active/off switch for the schedulable
    state_machine_name = StringField(STATE_MACHINE_NAME)
    entry_name = StringField(ENTRY_NAME)
    description = StringField(DESCRIPTION)
    log = ListField(HISTORIC_LOG)                           # last MAX_NUMBER_OF_LOG_ENTRIES log messages
    related_unit_of_work = ObjectIdField(RELATED_UNIT_OF_WORK)

    @DaemonProcessEntry.key.getter
    def key(self):
        """Composite key of this entry: (process_name, entry_name)."""
        return self.process_name, self.entry_name

    @DaemonProcessEntry.key.setter
    def key(self, value):
        """Set the composite key from a (process_name, entry_name) pair."""
        self.process_name = value[0]
        self.entry_name = value[1]

    @property
    def schedulable_name(self):
        """Human-readable 'process::entry' identifier of this schedulable."""
        return '{0}::{1}'.format(self.process_name, self.entry_name)
def freerun_context_entry(process_name,
                          entry_name,
                          classname,
                          token,
                          exchange,
                          trigger_frequency,
                          is_on=True,
                          present_on_boxes=None,
                          description=None,
                          arguments=None,
                          queue=None,
                          routing=None,
                          process_type=TYPE_FREERUN,
                          pid_file=None,
                          log_file=None):
    """ forms process context entry """
    _ROUTING_PREFIX = 'routing_'
    _QUEUE_PREFIX = 'queue_'
    _SUFFIX = '_freerun'

    if arguments is None:
        arguments = dict()
    else:
        assert isinstance(arguments, dict)

    # token-derived defaults are computed inline for every unset MQ/file name
    process_entry = FreerunProcessEntry(
        process_name=process_name,
        entry_name=entry_name,
        trigger_frequency=trigger_frequency,
        time_qualifier=None,
        state_machine_name=STATE_MACHINE_FREERUN,
        is_on=is_on,
        classname=classname,
        token=token,
        present_on_boxes=present_on_boxes,
        description=description,
        mq_queue=queue if queue is not None else _QUEUE_PREFIX + token + _SUFFIX,
        mq_routing_key=routing if routing is not None else _ROUTING_PREFIX + token + _SUFFIX,
        mq_exchange=exchange,
        arguments=arguments,
        process_type=process_type,
        log_filename=log_file if log_file is not None else token + _SUFFIX + '.log',
        pid_filename=pid_file if pid_file is not None else token + _SUFFIX + '.pid')
    return process_entry
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/model/freerun_process_entry.py",
"copies": "1",
"size": "3610",
"license": "bsd-3-clause",
"hash": 7087728179346302000,
"line_mean": 34.7425742574,
"line_max": 102,
"alpha_frac": 0.603601108,
"autogenerated": false,
"ratio": 3.877551020408163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9978425887215532,
"avg_score": 0.0005452482385261101,
"num_lines": 101
} |
__author__ = 'Bohdan Mushkevych'
from synergy.system.event_clock import EventClock
from synergy.system.repeat_timer import RepeatTimer
# preamble tokens recognized by parse_time_trigger_string / emitted by format_time_trigger_string
TRIGGER_PREAMBLE_AT = 'at '
TRIGGER_PREAMBLE_EVERY = 'every '
def parse_time_trigger_string(trigger_frequency):
    """
    :param trigger_frequency: human-readable and editable string in one of two formats:
        - 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
        - 'every NNN'
    :return: tuple (parsed_trigger_frequency, timer_klass)
    :raises ValueError: if the string starts with neither recognized preamble
    """
    # collapse any run of whitespace into a single space
    trigger_frequency = ' '.join(trigger_frequency.split())

    if trigger_frequency.startswith(TRIGGER_PREAMBLE_AT):
        # 'at ...' -> list of Day_of_Week-HH:MM tokens, handled by EventClock
        tail = trigger_frequency[len(TRIGGER_PREAMBLE_AT):]
        return tail.replace(' ', '').replace(',', ' ').split(' '), EventClock

    if trigger_frequency.startswith(TRIGGER_PREAMBLE_EVERY):
        # 'every NNN' -> integer number of seconds, handled by RepeatTimer
        tail = trigger_frequency[len(TRIGGER_PREAMBLE_EVERY):]
        return int(tail), RepeatTimer

    raise ValueError(f'Unknown time trigger format {trigger_frequency}')
def format_time_trigger_string(timer_instance):
    """
    :param timer_instance: either instance of RepeatTimer or EventClock
    :return: human-readable and editable string in one of two formats:
        - 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
        - 'every NNN'
    :raises ValueError: for any other timer type
    """
    if isinstance(timer_instance, RepeatTimer):
        return TRIGGER_PREAMBLE_EVERY + str(timer_instance.interval_new)
    if isinstance(timer_instance, EventClock):
        return TRIGGER_PREAMBLE_AT + ','.join(repr(ts) for ts in timer_instance.timestamps)
    raise ValueError(f'Unknown timer instance type {timer_instance.__class__.__name__}')
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/time_trigger_factory.py",
"copies": "1",
"size": "1959",
"license": "bsd-3-clause",
"hash": 1994034447736779000,
"line_mean": 38.9795918367,
"line_max": 98,
"alpha_frac": 0.6865747831,
"autogenerated": false,
"ratio": 3.6823308270676693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4868905610167669,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.system.event_clock import format_time_trigger_string
from synergy.mx.rest_model import *
def get_next_run_in(thread_handler):
    """Return time-to-next-run as a string with sub-second part stripped, or 'NA' when the handler is not alive."""
    if thread_handler.is_alive:
        remaining = thread_handler.next_run_in()
        return str(remaining).partition('.')[0]
    return 'NA'
def get_next_timeperiod(timetable, process_name):
    """Return the timeperiod of the next job for *process_name*, or 'NA' when it has no timetable tree."""
    tree = timetable.get_tree(process_name)
    if tree is None:
        return 'NA'
    return timetable.get_next_job_record(process_name).timeperiod
def get_dependant_trees(timetable, tree_obj):
    """Return the names of all trees that depend on *tree_obj*."""
    return [tree.tree_name for tree in timetable._find_dependant_trees(tree_obj)]
def get_reprocessing_queue(timetable, process_name):
    """Return the sorted reprocessing timeperiods queued for *process_name* (empty list if none)."""
    per_process = timetable.reprocess.get(process_name)
    if per_process is None:
        return []
    return sorted(per_process)
def create_rest_managed_scheduler_entry(thread_handler, timetable):
    """Assemble a RestManagedSchedulerEntry snapshot for one managed process."""
    process_entry = thread_handler.process_entry
    process_name = process_entry.process_name

    fields = dict(
        is_on=process_entry.is_on,
        is_alive=thread_handler.is_alive,
        process_name=process_name,
        trigger_frequency=format_time_trigger_string(thread_handler.timer_instance),
        next_run_in=get_next_run_in(thread_handler),
        next_timeperiod=get_next_timeperiod(timetable, process_name),
        time_qualifier=process_entry.time_qualifier,
        state_machine_name=process_entry.state_machine_name,
        process_type=process_entry.process_type,
        blocking_type=process_entry.blocking_type,
        run_on_active_timeperiod=process_entry.run_on_active_timeperiod,
        reprocessing_queue=get_reprocessing_queue(timetable, process_name))
    return RestManagedSchedulerEntry(**fields)
def create_rest_freerun_scheduler_entry(thread_handler):
    """Assemble a RestFreerunSchedulerEntry snapshot for one freerun schedulable."""
    process_name, entry_name = thread_handler.key
    entry = thread_handler.process_entry
    return RestFreerunSchedulerEntry(
        is_on=entry.is_on,
        is_alive=thread_handler.is_alive,
        process_name=process_name,
        entry_name=entry_name,
        trigger_frequency=format_time_trigger_string(thread_handler.timer_instance),
        description=entry.description,
        next_run_in=get_next_run_in(thread_handler),
        log=entry.log,
        arguments=entry.arguments)
def create_rest_timetable_tree(timetable, tree_obj):
    """Assemble a RestTimetableTree snapshot for one timetable tree."""
    return RestTimetableTree(
        tree_name=tree_obj.tree_name,
        mx_page=tree_obj.mx_page,
        mx_name=tree_obj.mx_name,
        dependent_on=[t.tree_name for t in tree_obj.dependent_on],
        dependant_trees=get_dependant_trees(timetable, tree_obj),
        sorted_process_names=list(tree_obj.process_hierarchy))
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/rest_model_factory.py",
"copies": "1",
"size": "3015",
"license": "bsd-3-clause",
"hash": -2516784991293635000,
"line_mean": 36.6875,
"line_max": 98,
"alpha_frac": 0.6689883914,
"autogenerated": false,
"ratio": 3.5596221959858325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47286105873858325,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.system.time_qualifier import *
from tests import base_fixtures
from db.model import raw_data
from constants import COLLECTION_SINGLE_SESSION, COLLECTION_SITE_HOURLY
from synergy.db.manager import ds_manager
from synergy.system.data_logging import get_logger
from tests.ut_context import PROCESS_UNIT_TEST
# pylint: disable=C0301
EXPECTED_SITE_HOURLY_00 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 360, 'number_of_visits': 3, 'number_of_pageviews': 11, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 4': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_22'}
EXPECTED_SITE_HOURLY_01 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 411, 'number_of_visits': 3, 'number_of_pageviews': 9, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'IE 3': 1, u'FF 2': 1, u'IE 5': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_19'}
EXPECTED_SITE_HOURLY_02 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 380, 'number_of_visits': 3, 'number_of_pageviews': 9, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 4': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_4'}
EXPECTED_SITE_HOURLY_03 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 336, 'number_of_visits': 3, 'number_of_pageviews': 7, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 7': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_20'}
EXPECTED_SITE_HOURLY_04 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 363, 'number_of_visits': 3, 'number_of_pageviews': 12, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 7': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_14'}
EXPECTED_SITE_HOURLY_05 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 2, u'en_ca': 1}, 'total_duration': 352,
'number_of_visits': 3, 'number_of_pageviews': 7, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1},
'browser': {u'IE 2': 1, u'IE 0': 1, u'FF 2': 1}}, 'timeperiod': '2001030310',
'domain': u'domain_name_3'}
EXPECTED_SITE_HOURLY_06 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 1, u'en_ca': 2}, 'total_duration': 409,
'number_of_visits': 3, 'number_of_pageviews': 7, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2},
'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 1': 1}}, 'timeperiod': '2001030310',
'domain': u'domain_name_6'}
EXPECTED_SITE_HOURLY_07 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 1, u'en_ca': 2}, 'total_duration': 281,
'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2},
'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 1': 1}}, 'timeperiod': '2001030311',
'domain': u'domain_name_18'}
EXPECTED_SITE_HOURLY_08 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 1, u'en_ca': 2}, 'total_duration': 367,
'number_of_visits': 3, 'number_of_pageviews': 8, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2},
'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 7': 1}}, 'timeperiod': '2001030311',
'domain': u'domain_name_26'}
EXPECTED_SITE_HOURLY_09 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 2, u'en_ca': 1}, 'total_duration': 447,
'number_of_visits': 3, 'number_of_pageviews': 7, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1},
'browser': {u'FF 0': 1, u'IE 2': 1, u'IE 0': 1}}, 'timeperiod': '2001030311',
'domain': u'domain_name_21'}
EXPECTED_SITE_HOURLY_10 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 445, 'number_of_visits': 3, 'number_of_pageviews': 8, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 1': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_12'}
EXPECTED_SITE_HOURLY_11 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 281, 'number_of_visits': 3, 'number_of_pageviews': 11, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'FF 0': 1, u'IE 8': 1, u'IE 6': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_5'}
EXPECTED_SITE_HOURLY_12 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 1, u'en_ca': 2}, 'total_duration': 387,
'number_of_visits': 3, 'number_of_pageviews': 12, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2},
'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 7': 1}}, 'timeperiod': '2001030310',
'domain': u'domain_name_8'}
EXPECTED_SITE_HOURLY_13 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 300, 'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'FF 0': 1, u'IE 2': 1, u'IE 0': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_9'}
EXPECTED_SITE_HOURLY_14 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 314, 'number_of_visits': 3, 'number_of_pageviews': 12, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'IE 2': 1, u'IE 0': 1, u'FF 2': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_15'}
EXPECTED_SITE_HOURLY_15 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 65, 'number_of_visits': 3, 'number_of_pageviews': 12, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 4': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_10'}
EXPECTED_SITE_HOURLY_16 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 120, 'number_of_visits': 3, 'number_of_pageviews': 13, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'IE 3': 1, u'FF 2': 1, u'IE 5': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_7'}
EXPECTED_SITE_HOURLY_17 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 479, 'number_of_visits': 3, 'number_of_pageviews': 9, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'IE 2': 1, u'IE 0': 1, u'FF 2': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_27'}
EXPECTED_SITE_HOURLY_18 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 2, u'en_ca': 1}, 'total_duration': 437,
'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1},
'browser': {u'IE 3': 1, u'FF 2': 1, u'IE 5': 1}}, 'timeperiod': '2001030311',
'domain': u'domain_name_31'}
EXPECTED_SITE_HOURLY_19 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 379, 'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 1': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_0'}
EXPECTED_SITE_HOURLY_20 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 2, u'en_ca': 1}, 'total_duration': 348,
'number_of_visits': 3, 'number_of_pageviews': 8, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1},
'browser': {u'FF 0': 1, u'IE 3': 1, u'IE 5': 1}}, 'timeperiod': '2001030310',
'domain': u'domain_name_13'}
EXPECTED_SITE_HOURLY_21 = {
'stat': {'screen_resolution': {'(240, 360)': 1, '(360, 480)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 1},
'total_duration': 220, 'number_of_visits': 2, 'number_of_pageviews': 6, 'country': {u'eu': 1, u'ca': 1},
'os': {u'Windows': 1, u'Linux': 1}, 'browser': {u'FF 0': 1, u'IE 0': 1}}, 'timeperiod': '2001030311',
'domain': u'domain_name_33'}
EXPECTED_SITE_HOURLY_22 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 428, 'number_of_visits': 3, 'number_of_pageviews': 7, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 1': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_30'}
EXPECTED_SITE_HOURLY_23 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 95, 'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 1': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_24'}
EXPECTED_SITE_HOURLY_24 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 2, u'en_ca': 1}, 'total_duration': 181,
'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1},
'browser': {u'FF 2': 1, u'IE 6': 1, u'IE 8': 1}}, 'timeperiod': '2001030310',
'domain': u'domain_name_11'}
EXPECTED_SITE_HOURLY_25 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 1, u'en_ca': 2}, 'total_duration': 311,
'number_of_visits': 3, 'number_of_pageviews': 7, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2},
'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 4': 1}}, 'timeperiod': '2001030310',
'domain': u'domain_name_16'}
EXPECTED_SITE_HOURLY_26 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 282, 'number_of_visits': 3, 'number_of_pageviews': 11, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 7': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_32'}
EXPECTED_SITE_HOURLY_27 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 2, u'en_ca': 1}, 'total_duration': 264,
'number_of_visits': 3, 'number_of_pageviews': 6, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1},
'browser': {u'FF 0': 1, u'IE 3': 1, u'IE 5': 1}}, 'timeperiod': '2001030310',
'domain': u'domain_name_1'}
EXPECTED_SITE_HOURLY_28 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 1, u'en_ca': 2}, 'total_duration': 77, 'number_of_visits': 3,
'number_of_pageviews': 9, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2},
'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 4': 1}}, 'timeperiod': '2001030311',
'domain': u'domain_name_28'}
EXPECTED_SITE_HOURLY_29 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 21, 'number_of_visits': 3, 'number_of_pageviews': 4, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'FF 0': 1, u'IE 8': 1, u'IE 6': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_17'}
EXPECTED_SITE_HOURLY_30 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 420, 'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'FF 0': 1, u'IE 3': 1, u'IE 5': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_25'}
EXPECTED_SITE_HOURLY_31 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 1, u'en_ca': 2},
'total_duration': 181, 'number_of_visits': 3, 'number_of_pageviews': 7, 'country': {u'eu': 1, u'ca': 2},
'os': {u'Windows': 1, u'Linux': 2}, 'browser': {u'FF 0': 1, u'FF 2': 1, u'IE 7': 1}},
'timeperiod': '2001030310', 'domain': u'domain_name_2'}
EXPECTED_SITE_HOURLY_32 = {
'stat': {'screen_resolution': {'(760, 980)': 1, '(240, 360)': 1, '(360, 480)': 1},
'language': {u'ua_uk': 2, u'en_ca': 1}, 'total_duration': 237,
'number_of_visits': 3, 'number_of_pageviews': 10, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1},
'browser': {u'FF 2': 1, u'IE 6': 1, u'IE 8': 1}}, 'timeperiod': '2001030311',
'domain': u'domain_name_23'}
EXPECTED_SITE_HOURLY_33 = {
'stat': {'screen_resolution': {'(760, 980)': 2, '(240, 360)': 1}, 'language': {u'ua_uk': 2, u'en_ca': 1},
'total_duration': 227, 'number_of_visits': 3, 'number_of_pageviews': 9, 'country': {u'eu': 2, u'ca': 1},
'os': {u'Windows': 2, u'Linux': 1}, 'browser': {u'FF 0': 1, u'IE 8': 1, u'IE 6': 1}},
'timeperiod': '2001030311', 'domain': u'domain_name_29'}
# pylint: enable=C0301
def generate_session_composite_key(index, total):
    """Map fixture *index* onto a (domain_name, session timeperiod) pair.

    The first half of the entries (index <= total / 2) fall into hour 10,
    the remainder into hour 11; every three consecutive indices share a domain.
    """
    domain = 'domain_name_%s' % str(index // 3)
    if index <= total / 2:
        return domain, '20010303101010'
    return domain, '20010303111111'
def clean_session_entries():
    """Remove every generated single-session fixture document from the datastore."""
    logger = get_logger(PROCESS_UNIT_TEST)
    ds = ds_manager.ds_factory(logger)
    connection = ds.connection(COLLECTION_SINGLE_SESSION)
    for index in range(base_fixtures.TOTAL_ENTRIES):
        domain, timeperiod = generate_session_composite_key(index, base_fixtures.TOTAL_ENTRIES)
        connection.remove({
            raw_data.DOMAIN_NAME: domain,
            raw_data.TIMEPERIOD: timeperiod,
            raw_data.FAMILY_USER_PROFILE + '.' + raw_data.SESSION_ID: 'session_id_%s' % str(index)})
def generated_session_entries():
    """Create session fixture documents via base_fixtures, keyed by generate_session_composite_key."""
    return base_fixtures.create_session_stats(generate_session_composite_key)
def clean_site_entries():
    """Delegate cleanup of hourly site fixture documents to base_fixtures."""
    return base_fixtures.clean_site_entries(COLLECTION_SITE_HOURLY, QUALIFIER_HOURLY)
def generated_site_entries():
    """Delegate creation of hourly site fixture documents to base_fixtures."""
    return base_fixtures.create_site_stats(COLLECTION_SITE_HOURLY, QUALIFIER_HOURLY)
# fixture-only module: nothing to execute standalone
if __name__ == '__main__':
    pass
| {
"repo_name": "eggsandbeer/scheduler",
"path": "tests/hourly_fixtures.py",
"copies": "1",
"size": "16264",
"license": "bsd-3-clause",
"hash": -2790300917594604000,
"line_mean": 57.0857142857,
"line_max": 118,
"alpha_frac": 0.5251475652,
"autogenerated": false,
"ratio": 2.6030729833546733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36282205485546737,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from synergy.system.time_trigger_factory import format_time_trigger_string
from synergy.mx.rest_model import *
def get_next_run_in(thread_handler):
    """Return time-to-next-run as a string with sub-second part stripped, or 'NA' when the handler is not alive."""
    if not thread_handler.is_alive:
        return 'NA'
    return str(thread_handler.next_run_in()).split('.', 1)[0]
def get_next_timeperiod(timetable, process_name):
    """Return the timeperiod of the next job for *process_name*, or 'NA' when it has no timetable tree."""
    if timetable.get_tree(process_name) is None:
        return 'NA'
    next_job = timetable.get_next_job_record(process_name)
    return next_job.timeperiod
def get_dependant_trees(timetable, tree_obj):
    """Return the names of all trees that depend on *tree_obj*."""
    dependants = timetable._find_dependant_trees(tree_obj)
    return [tree.tree_name for tree in dependants]
def get_reprocessing_queue(gc, process_name):
    """Return the timeperiods queued for reprocessing of *process_name*, in priority order."""
    per_process = gc.reprocess_uows[process_name]
    return [priority_entry.entry.timeperiod for priority_entry in sorted(per_process.queue)]
def create_rest_managed_scheduler_entry(thread_handler, timetable, gc):
    """Assemble a RestManagedSchedulerEntry snapshot for one managed process."""
    entry = thread_handler.process_entry
    process_name = entry.process_name

    fields = dict(
        is_on=entry.is_on,
        is_alive=thread_handler.is_alive,
        process_name=process_name,
        trigger_frequency=format_time_trigger_string(thread_handler.timer_instance),
        next_run_in=get_next_run_in(thread_handler),
        next_timeperiod=get_next_timeperiod(timetable, process_name),
        time_qualifier=entry.time_qualifier,
        time_grouping=entry.time_grouping,
        state_machine_name=entry.state_machine_name,
        blocking_type=entry.blocking_type,
        reprocessing_queue=get_reprocessing_queue(gc, process_name))
    return RestManagedSchedulerEntry(**fields)
def create_rest_freerun_scheduler_entry(thread_handler):
    """Assemble a RestFreerunSchedulerEntry snapshot for one freerun schedulable."""
    process_name, entry_name = thread_handler.key
    entry = thread_handler.process_entry
    return RestFreerunSchedulerEntry(
        is_on=entry.is_on,
        is_alive=thread_handler.is_alive,
        process_name=process_name,
        entry_name=entry_name,
        trigger_frequency=format_time_trigger_string(thread_handler.timer_instance),
        description=entry.description,
        next_run_in=get_next_run_in(thread_handler),
        event_log=entry.event_log,
        arguments=entry.arguments)
def create_rest_timetable_tree(timetable, tree_obj):
    """Assemble a RestTimetableTree snapshot for one timetable tree."""
    dependent_names = [tree.tree_name for tree in tree_obj.dependent_on]
    return RestTimetableTree(
        tree_name=tree_obj.tree_name,
        mx_page=tree_obj.mx_page,
        mx_name=tree_obj.mx_name,
        dependent_on=dependent_names,
        dependant_trees=get_dependant_trees(timetable, tree_obj),
        sorted_process_names=list(tree_obj.process_hierarchy))
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/rest_model_factory.py",
"copies": "1",
"size": "2972",
"license": "bsd-3-clause",
"hash": -6505588013078621000,
"line_mean": 36.6202531646,
"line_max": 98,
"alpha_frac": 0.668909825,
"autogenerated": false,
"ratio": 3.5465393794749405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9711973466895841,
"avg_score": 0.0006951475158199235,
"num_lines": 79
} |
__author__ = 'Bohdan Mushkevych'
from synergy.system.utils import tail_file
from synergy.system.system_logger import get_log_filename
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request
from synergy.scheduler.scheduler_constants import PROCESS_GC
class GcActionHandler(BaseRequestHandler):
    """ Garbage Collector UI action handler.

    Exposes refresh/flush actions over the scheduler's GC plus a log tail.
    A request is considered valid when it carries a non-empty 'process_name'.
    """

    def __init__(self, request, **values):
        super(GcActionHandler, self).__init__(request, **values)
        self.process_name = self.request_arguments.get('process_name')
        # idiomatic truthiness check replaces the redundant `True if ... else False`
        self.is_request_valid = bool(self.process_name)

        if self.is_request_valid:
            self.process_name = self.process_name.strip()

    def refresh(self):
        """Re-validate GC state and re-scan for unit-of-work candidates."""
        self.scheduler.gc.validate()
        self.scheduler.gc.scan_uow_candidates()
        self.logger.info('MX: performed GC Refresh')
        return self.reply_ok()

    def flush_all(self):
        """Flush every queued unit of work, ignoring priorities."""
        self.scheduler.gc.flush(ignore_priority=True)
        self.logger.info('MX: performed GC Flush All')
        return self.reply_ok()

    @valid_action_request
    def flush_one(self):
        """Flush queued units of work for the requested process only."""
        self.scheduler.gc.flush_one(process_name=self.process_name, ignore_priority=True)
        self.logger.info(f'MX: performed GC Flush for {self.process_name}')
        return self.reply_ok()

    def tail_gc_log(self):
        """Return the tail of the GC process log file."""
        fqfn = get_log_filename(PROCESS_GC)
        return tail_file(fqfn)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/gc_action_handler.py",
"copies": "1",
"size": "1427",
"license": "bsd-3-clause",
"hash": 5553363155366483000,
"line_mean": 35.5897435897,
"line_max": 89,
"alpha_frac": 0.681850035,
"autogenerated": false,
"ratio": 3.6126582278481014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9791575465797657,
"avg_score": 0.0005865594100888219,
"num_lines": 39
} |
__author__ = 'Bohdan Mushkevych'
from tests import base_fixtures
from constants import COLLECTION_SITE_MONTHLY
from synergy.system.time_qualifier import QUALIFIER_MONTHLY
# pylint: disable=C0301
EXPECTED_SITE_MONTHLY_00 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 168,
'number_of_visits': 1856, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 120, u'os_2': 120, u'os_1': 120, u'os_0': 120, u'os_4': 120},
'browser': {u'browser_4': 120, u'browser_0': 120, u'browser_1': 120, u'browser_2': 120, u'browser_3': 120}},
'domain': u'domain_name_7', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_01 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 22,
'number_of_visits': 834, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 138, u'os_2': 138, u'os_1': 138, u'os_0': 138, u'os_4': 138},
'browser': {u'browser_4': 138, u'browser_0': 138, u'browser_1': 138, u'browser_2': 138, u'browser_3': 138}},
'domain': u'domain_name_13', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_02 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 119,
'number_of_visits': 1807, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 177, u'os_2': 177, u'os_1': 177, u'os_0': 177, u'os_4': 177},
'browser': {u'browser_4': 177, u'browser_0': 177, u'browser_1': 177, u'browser_2': 177, u'browser_3': 177}},
'domain': u'domain_name_26', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_03 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 241,
'number_of_visits': 1617, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 108, u'os_2': 108, u'os_1': 108, u'os_0': 108, u'os_4': 108},
'browser': {u'browser_4': 108, u'browser_0': 108, u'browser_1': 108, u'browser_2': 108, u'browser_3': 108}},
'domain': u'domain_name_3', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_04 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 165,
'number_of_visits': 887, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 150, u'os_2': 150, u'os_1': 150, u'os_0': 150, u'os_4': 150},
'browser': {u'browser_4': 150, u'browser_0': 150, u'browser_1': 150, u'browser_2': 150, u'browser_3': 150}},
'domain': u'domain_name_17', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_05 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 165,
'number_of_visits': 1812, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 195, u'os_2': 195, u'os_1': 195, u'os_0': 195, u'os_4': 195},
'browser': {u'browser_4': 195, u'browser_0': 195, u'browser_1': 195, u'browser_2': 195, u'browser_3': 195}},
'domain': u'domain_name_32', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_06 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 222,
'number_of_visits': 1247, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 165, u'os_2': 165, u'os_1': 165, u'os_0': 165, u'os_4': 165},
'browser': {u'browser_4': 165, u'browser_0': 165, u'browser_1': 165, u'browser_2': 165, u'browser_3': 165}},
'domain': u'domain_name_22', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_07 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 190,
'number_of_visits': 1312, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 156, u'os_2': 156, u'os_1': 156, u'os_0': 156, u'os_4': 156},
'browser': {u'browser_4': 156, u'browser_0': 156, u'browser_1': 156, u'browser_2': 156, u'browser_3': 156}},
'domain': u'domain_name_19', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_08 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 152,
'number_of_visits': 2234, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 186, u'os_2': 186, u'os_1': 186, u'os_0': 186, u'os_4': 186},
'browser': {u'browser_4': 186, u'browser_0': 186, u'browser_1': 186, u'browser_2': 186, u'browser_3': 186}},
'domain': u'domain_name_29', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_09 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 154,
'number_of_visits': 2550, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 147, u'os_2': 147, u'os_1': 147, u'os_0': 147, u'os_4': 147},
'browser': {u'browser_4': 147, u'browser_0': 147, u'browser_1': 147, u'browser_2': 147, u'browser_3': 147}},
'domain': u'domain_name_16', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_10 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 133,
'number_of_visits': 1896, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 114, u'os_2': 114, u'os_1': 114, u'os_0': 114, u'os_4': 114},
'browser': {u'browser_4': 114, u'browser_0': 114, u'browser_1': 114, u'browser_2': 114, u'browser_3': 114}},
'domain': u'domain_name_5', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_11 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 202,
'number_of_visits': 1446, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 168, u'os_2': 168, u'os_1': 168, u'os_0': 168, u'os_4': 168},
'browser': {u'browser_4': 168, u'browser_0': 168, u'browser_1': 168, u'browser_2': 168, u'browser_3': 168}},
'domain': u'domain_name_23', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_12 = {
'stat': {'screen_resolution': {u'(320, 240)': 12, u'(640, 480)': 20, u'(1024, 960)': 28, u'(1280, 768)': 36},
'language': {u'ua_uk': 28, u'ca_en': 12, u'ca_fr': 20, u'us_en': 36}, 'total_duration': 184,
'number_of_visits': 2102, 'number_of_pageviews': 0, 'country': {u'ca': 12, u'fr': 20, u'uk': 28, u'us': 36},
'os': {u'os_3': 202, u'os_2': 202, u'os_1': 202, u'os_0': 202, u'os_4': 202},
'browser': {u'browser_4': 202, u'browser_0': 202, u'browser_1': 202, u'browser_2': 202, u'browser_3': 202}},
'domain': u'domain_name_1', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_13 = {
'stat': {'screen_resolution': {u'(320, 240)': 12, u'(640, 480)': 20, u'(1024, 960)': 28, u'(1280, 768)': 36},
'language': {u'ua_uk': 28, u'ca_en': 12, u'ca_fr': 20, u'us_en': 36}, 'total_duration': 234,
'number_of_visits': 1634, 'number_of_pageviews': 0, 'country': {u'ca': 12, u'fr': 20, u'uk': 28, u'us': 36},
'os': {u'os_3': 198, u'os_2': 198, u'os_1': 198, u'os_0': 198, u'os_4': 198},
'browser': {u'browser_4': 198, u'browser_0': 198, u'browser_1': 198, u'browser_2': 198, u'browser_3': 198}},
'domain': u'domain_name_0', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_14 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 236,
'number_of_visits': 1722, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 183, u'os_2': 183, u'os_1': 183, u'os_0': 183, u'os_4': 183},
'browser': {u'browser_4': 183, u'browser_0': 183, u'browser_1': 183, u'browser_2': 183, u'browser_3': 183}},
'domain': u'domain_name_28', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_15 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 78,
'number_of_visits': 666, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 129, u'os_2': 129, u'os_1': 129, u'os_0': 129, u'os_4': 129},
'browser': {u'browser_4': 129, u'browser_0': 129, u'browser_1': 129, u'browser_2': 129, u'browser_3': 129}},
'domain': u'domain_name_10', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_16 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 132,
'number_of_visits': 1660, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 174, u'os_2': 174, u'os_1': 174, u'os_0': 174, u'os_4': 174},
'browser': {u'browser_4': 174, u'browser_0': 174, u'browser_1': 174, u'browser_2': 174, u'browser_3': 174}},
'domain': u'domain_name_25', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_17 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 159,
'number_of_visits': 1053, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 141, u'os_2': 141, u'os_1': 141, u'os_0': 141, u'os_4': 141},
'browser': {u'browser_4': 141, u'browser_0': 141, u'browser_1': 141, u'browser_2': 141, u'browser_3': 141}},
'domain': u'domain_name_14', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_18 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 214,
'number_of_visits': 1927, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 162, u'os_2': 162, u'os_1': 162, u'os_0': 162, u'os_4': 162},
'browser': {u'browser_4': 162, u'browser_0': 162, u'browser_1': 162, u'browser_2': 162, u'browser_3': 162}},
'domain': u'domain_name_21', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_19 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 132,
'number_of_visits': 1719, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 192, u'os_2': 192, u'os_1': 192, u'os_0': 192, u'os_4': 192},
'browser': {u'browser_4': 192, u'browser_0': 192, u'browser_1': 192, u'browser_2': 192, u'browser_3': 192}},
'domain': u'domain_name_31', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_20 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 121,
'number_of_visits': 1258, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 117, u'os_2': 117, u'os_1': 117, u'os_0': 117, u'os_4': 117},
'browser': {u'browser_4': 117, u'browser_0': 117, u'browser_1': 117, u'browser_2': 117, u'browser_3': 117}},
'domain': u'domain_name_6', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_21 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 140,
'number_of_visits': 486, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 135, u'os_2': 135, u'os_1': 135, u'os_0': 135, u'os_4': 135},
'browser': {u'browser_4': 135, u'browser_0': 135, u'browser_1': 135, u'browser_2': 135, u'browser_3': 135}},
'domain': u'domain_name_12', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_22 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 158,
'number_of_visits': 1061, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 132, u'os_2': 132, u'os_1': 132, u'os_0': 132, u'os_4': 132},
'browser': {u'browser_4': 132, u'browser_0': 132, u'browser_1': 132, u'browser_2': 132, u'browser_3': 132}},
'domain': u'domain_name_11', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_23 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 111,
'number_of_visits': 2467, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 189, u'os_2': 189, u'os_1': 189, u'os_0': 189, u'os_4': 189},
'browser': {u'browser_4': 189, u'browser_0': 189, u'browser_1': 189, u'browser_2': 189, u'browser_3': 189}},
'domain': u'domain_name_30', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_24 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 81,
'number_of_visits': 1595, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 126, u'os_2': 126, u'os_1': 126, u'os_0': 126, u'os_4': 126},
'browser': {u'browser_4': 126, u'browser_0': 126, u'browser_1': 126, u'browser_2': 126, u'browser_3': 126}},
'domain': u'domain_name_9', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_25 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 17,
'number_of_visits': 1404, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 180, u'os_2': 180, u'os_1': 180, u'os_0': 180, u'os_4': 180},
'browser': {u'browser_4': 180, u'browser_0': 180, u'browser_1': 180, u'browser_2': 180, u'browser_3': 180}},
'domain': u'domain_name_27', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_26 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 174,
'number_of_visits': 1993, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 171, u'os_2': 171, u'os_1': 171, u'os_0': 171, u'os_4': 171},
'browser': {u'browser_4': 171, u'browser_0': 171, u'browser_1': 171, u'browser_2': 171, u'browser_3': 171}},
'domain': u'domain_name_24', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_27 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 222,
'number_of_visits': 1924, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 105, u'os_2': 105, u'os_1': 105, u'os_0': 105, u'os_4': 105},
'browser': {u'browser_4': 105, u'browser_0': 105, u'browser_1': 105, u'browser_2': 105, u'browser_3': 105}},
'domain': u'domain_name_2', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_28 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 211,
'number_of_visits': 1796, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 144, u'os_2': 144, u'os_1': 144, u'os_0': 144, u'os_4': 144},
'browser': {u'browser_4': 144, u'browser_0': 144, u'browser_1': 144, u'browser_2': 144, u'browser_3': 144}},
'domain': u'domain_name_15', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_29 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 126,
'number_of_visits': 1424, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 123, u'os_2': 123, u'os_1': 123, u'os_0': 123, u'os_4': 123},
'browser': {u'browser_4': 123, u'browser_0': 123, u'browser_1': 123, u'browser_2': 123, u'browser_3': 123}},
'domain': u'domain_name_8', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_30 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 259,
'number_of_visits': 1700, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 159, u'os_2': 159, u'os_1': 159, u'os_0': 159, u'os_4': 159},
'browser': {u'browser_4': 159, u'browser_0': 159, u'browser_1': 159, u'browser_2': 159, u'browser_3': 159}},
'domain': u'domain_name_20', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_31 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 101,
'number_of_visits': 792, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 153, u'os_2': 153, u'os_1': 153, u'os_0': 153, u'os_4': 153},
'browser': {u'browser_4': 153, u'browser_0': 153, u'browser_1': 153, u'browser_2': 153, u'browser_3': 153}},
'domain': u'domain_name_18', 'timeperiod': '2001030000'}
EXPECTED_SITE_MONTHLY_32 = {
'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 168,
'number_of_visits': 1307, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
'os': {u'os_3': 111, u'os_2': 111, u'os_1': 111, u'os_0': 111, u'os_4': 111},
'browser': {u'browser_4': 111, u'browser_0': 111, u'browser_1': 111, u'browser_2': 111, u'browser_3': 111}},
'domain': u'domain_name_4', 'timeperiod': '2001030000'}
# pylint: enable=C0301
def generated_site_entries():
    """ generates and persists monthly site-statistics fixtures in COLLECTION_SITE_MONTHLY """
    return base_fixtures.create_site_stats(COLLECTION_SITE_MONTHLY, QUALIFIER_MONTHLY)
def clean_site_entries():
    """ removes previously generated monthly site-statistics fixtures from COLLECTION_SITE_MONTHLY """
    return base_fixtures.clean_site_entries(COLLECTION_SITE_MONTHLY, QUALIFIER_MONTHLY)
if __name__ == '__main__':
pass | {
"repo_name": "eggsandbeer/scheduler",
"path": "tests/monthly_fixtures.py",
"copies": "1",
"size": "20874",
"license": "bsd-3-clause",
"hash": -5844524425949326000,
"line_mean": 81.8373015873,
"line_max": 117,
"alpha_frac": 0.5452716298,
"autogenerated": false,
"ratio": 2.3377757867622355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8331394840645978,
"avg_score": 0.010330515183251381,
"num_lines": 252
} |
__author__ = 'Bohdan Mushkevych'
from threading import Lock
from datetime import datetime, timedelta
from synergy.conf import settings
from synergy.mq.flopsy import PublishersPool
from synergy.system.decorator import thread_safe
from synergy.scheduler.scheduler_constants import QUEUE_UOW_REPORT
from synergy.workers.abstract_mq_worker import AbstractMqWorker
from synergy.db.model import unit_of_work
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.db.dao.managed_process_dao import ManagedProcessDao
LIFE_SUPPORT_HOURS = 48 # number of hours from UOW creation time to keep UOW re-posting to MQ
REPOST_AFTER_HOURS = 1 # number of hours, GC waits for the worker to pick up the UOW from MQ before re-posting
class GarbageCollectorWorker(AbstractMqWorker):
    """ GC is triggered by an empty message from RabbitMQ. It scans for invalid or stalled unit_of_work
        and re-triggers them. GC is vital for the health of the system.
        Deployment with no running GC is considered invalid """

    def __init__(self, process_name):
        super(GarbageCollectorWorker, self).__init__(process_name)
        self.lock = Lock()
        self.publishers = PublishersPool(self.logger)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.managed_dao = ManagedProcessDao(self.logger)
        self.managed_entries = dict()  # process_name -> ManagedProcessEntry

    def __del__(self):
        try:
            self.logger.info('Closing Flopsy Publishers Pool...')
            self.publishers.close()
        except Exception as e:
            self.logger.error('Exception caught while closing Flopsy Publishers Pool: %s' % str(e))
        super(GarbageCollectorWorker, self).__del__()

    @thread_safe
    def _mq_callback(self, message):
        """ method looks for stale or invalid units of work re-runs them if needed"""
        try:
            # refresh the snapshot of managed process entries before the scan
            self._update_managed_entries(self.managed_dao.get_all())

            since = settings.settings['synergy_start_timeperiod']
            for uow in self.uow_dao.get_reprocessing_candidates(since):
                entry = self.managed_entries.get(uow.process_name)
                if entry is None:
                    self.logger.debug('Process %r is not known to the Synergy Scheduler. Skipping its unit_of_work.'
                                      % uow.process_name)
                    continue

                assert isinstance(entry, ManagedProcessEntry)
                if not entry.is_on:
                    self.logger.debug('Process %r is inactive at the Synergy Scheduler. Skipping its unit_of_work.'
                                      % uow.process_name)
                    continue

                self._process_single_document(uow)
        except LookupError as e:
            self.logger.info('Normal behaviour. %r' % e)
        except Exception as e:
            self.logger.error('_mq_callback: %s' % str(e), exc_info=True)
        finally:
            # acknowledge the trigger message whether or not the scan succeeded
            self.consumer.acknowledge(message.delivery_tag)

    def _update_managed_entries(self, managed_entries):
        """ rebuild the local process_name -> ManagedProcessEntry lookup """
        refreshed = dict()
        for entry in managed_entries:
            refreshed[entry.process_name] = entry
        self.managed_entries = refreshed

    def _process_single_document(self, uow):
        """ actually inspects UOW retrieved from the database"""
        needs_repost = uow.is_invalid
        if not needs_repost and (uow.is_in_progress or uow.is_requested):
            # a unit_of_work that has shown no activity for REPOST_AFTER_HOURS is considered stalled
            last_activity = uow.created_at if uow.started_at is None else uow.started_at
            needs_repost = datetime.utcnow() - last_activity > timedelta(hours=REPOST_AFTER_HOURS)

        if not needs_repost:
            return

        mq_request = SynergyMqTransmission(process_name=uow.process_name,
                                           unit_of_work_id=uow.db_id)
        if datetime.utcnow() - uow.created_at < timedelta(hours=LIFE_SUPPORT_HOURS):
            # the record is young enough: mark it requested and put it back on the worker's queue
            uow.state = unit_of_work.STATE_REQUESTED
            uow.number_of_retries += 1
            self.uow_dao.update(uow)

            publisher = self.publishers.get(uow.process_name)
            publisher.publish(mq_request.document)
            publisher.release()

            self.logger.info('UOW marked for re-processing: process %s; timeperiod %s; id %s; attempt %d'
                             % (uow.process_name, uow.timeperiod, uow.db_id, uow.number_of_retries))
            self.performance_ticker.tracker.increment_success()
        else:
            # past life support: cancel the record and report it to the scheduler
            uow.state = unit_of_work.STATE_CANCELED
            self.uow_dao.update(uow)

            publisher = self.publishers.get(QUEUE_UOW_REPORT)
            publisher.publish(mq_request.document)
            publisher.release()

            self.logger.info('UOW transferred to STATE_CANCELED: process %s; timeperiod %s; id %s; attempt %d'
                             % (uow.process_name, uow.timeperiod, uow.db_id, uow.number_of_retries))
# stand-alone launch of the Garbage Collector MQ worker
if __name__ == '__main__':
    from synergy.scheduler.scheduler_constants import PROCESS_GC
    source = GarbageCollectorWorker(PROCESS_GC)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/workers/garbage_collector_worker.py",
"copies": "1",
"size": "5371",
"license": "bsd-3-clause",
"hash": 985293410231705600,
"line_mean": 42.314516129,
"line_max": 116,
"alpha_frac": 0.6263265686,
"autogenerated": false,
"ratio": 3.9609144542772863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5087241022877286,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from threading import Lock
from subprocess import DEVNULL
import psutil
from psutil import TimeoutExpired
from launch import get_python, PROJECT_ROOT, PROCESS_STARTER
from synergy.conf import settings
from synergy.db.dao.box_configuration_dao import BoxConfigurationDao, QUERY_PROCESSES_FOR_BOX_ID
from synergy.supervisor.supervisor_constants import TRIGGER_INTERVAL
from synergy.supervisor.supervisor_configurator import get_box_id
from synergy.system.utils import remove_pid_file
from synergy.system.decorator import thread_safe
from synergy.system.repeat_timer import RepeatTimer
from synergy.system.synergy_process import SynergyProcess
class Supervisor(SynergyProcess):
    """ Babysits one child OS process per Box Configuration entry:
        every TRIGGER_INTERVAL seconds each configured process is
        started, polled or killed, depending on its desired state in the DB """

    def __init__(self, process_name):
        super(Supervisor, self).__init__(process_name)
        self.thread_handlers = dict()  # process_name -> RepeatTimer
        self.lock = Lock()
        self.box_id = get_box_id(self.logger)
        self.bc_dao = BoxConfigurationDao(self.logger)
        self.logger.info(f'Started {self.process_name} with configuration for BOX_ID={self.box_id}')

    def __del__(self):
        self.logger.info('Shutting down Supervisor...')
        # FIX: iterate dict *values* - thread_handlers maps process_name -> RepeatTimer,
        # and it is the timers that must be cancelled; iterating the dict itself yields
        # string keys, which have no cancel() method
        for handler in self.thread_handlers.values():
            handler.cancel()
        self.thread_handlers.clear()
        super(Supervisor, self).__del__()

    # **************** Supervisor Methods ************************
    def _kill_process(self, box_config):
        """ method is called to kill a running process """
        try:
            self.logger.info(f'kill: {box_config.process_name} {{')
            self.logger.info(f'target process pid={box_config.pid}')
            if box_config.pid and psutil.pid_exists(box_config.pid):
                p = psutil.Process(box_config.pid)
                p.kill()
                # reap the process: read its exit status and purge io pipes
                p.wait()
                box_config.pid = None
                self.bc_dao.update(box_config)
            remove_pid_file(box_config.process_name)
        except Exception:
            self.logger.error(f'Exception on killing: {box_config.process_name}', exc_info=True)
        finally:
            self.logger.info('}')

    def _start_process(self, box_config):
        """ spawns the configured process via the PROCESS_STARTER launcher
            and records its pid in the box configuration """
        if not self.bc_dao.ds.is_alive():
            # ping DB to make sure it is alive.
            # otherwise, processes will be spawned uncontrollably
            raise UserWarning(f'DB Down Exception: unable to reach db {self.bc_dao.ds}')

        try:
            self.logger.info(f'start: {box_config.process_name} {{')
            p = psutil.Popen([get_python(), PROJECT_ROOT + '/' + PROCESS_STARTER, box_config.process_name],
                             close_fds=True,
                             cwd=settings.settings['process_cwd'],
                             stdin=DEVNULL,
                             stdout=DEVNULL,
                             stderr=DEVNULL)
            box_config.pid = p.pid
            self.logger.info(f'Started {box_config.process_name} with pid = {p.pid}')
        except Exception:
            # FIX: clear the pid attribute directly, consistent with every other site
            # in this class; box_config.set_process_pid(process_name, None) looks like
            # a stale pre-refactoring API call - confirm BoxConfiguration has no such method
            box_config.pid = None
            self.logger.error(f'Exception on starting: {box_config.process_name}', exc_info=True)
        finally:
            self.bc_dao.update(box_config)
            self.logger.info('}')

    def _poll_process(self, box_config):
        """ between killing a process and its actual termination lies poorly documented requirement -
            <purging process' io pipes and reading exit status>.
            this can be done either by os.wait() or process.wait() """
        try:
            p = psutil.Process(box_config.pid)

            return_code = p.wait(timeout=0.01)
            if return_code is None:
                # process is already terminated
                # NOTE(review): psutil returns None for non-child processes;
                # pid is deliberately left intact in this branch - confirm
                self.logger.info(f'Process {box_config.process_name} is terminated')
                return
            else:
                # process is terminated; possibly by OS
                box_config.pid = None
                self.bc_dao.update(box_config)
                self.logger.info(f'Process {box_config.process_name} got terminated. Cleaning up')
        except TimeoutExpired:
            # process is alive and OK
            pass
        except Exception:
            self.logger.error(f'Exception on polling: {box_config.process_name}', exc_info=True)

    def start(self, *_):
        """ reading box configurations and starting timers to start/monitor/kill processes """
        try:
            box_configurations = self.bc_dao.run_query(QUERY_PROCESSES_FOR_BOX_ID(self.box_id))

            for box_config in box_configurations:
                handler = RepeatTimer(TRIGGER_INTERVAL, self.manage_process, args=[box_config.process_name])
                self.thread_handlers[box_config.process_name] = handler
                handler.start()
                self.logger.info(f'Started Supervisor Thread for {box_config.process_name}, '
                                 f'triggering every {TRIGGER_INTERVAL} seconds')
        except LookupError as e:
            self.logger.error(f'Supervisor failed to start because of: {e}')

    @thread_safe
    def manage_process(self, *args):
        """ reads box configuration and start/kill processes. performs process monitoring """
        process_name = args[0]
        try:
            box_config = self.bc_dao.get_one([self.box_id, process_name])
            if not box_config.is_on:
                # process should not run on this box: kill it if it is still alive
                if box_config.pid is not None:
                    self._kill_process(box_config)
                return

            if not box_config.pid or not psutil.pid_exists(box_config.pid):
                # process is expected to run, but no live pid is known - (re)start it
                self._start_process(box_config)
            elif box_config.pid and psutil.pid_exists(box_config.pid):
                # process is running - poll its status and reap it if terminated
                self._poll_process(box_config)
        except Exception as e:
            self.logger.error(f'Exception: {e}', exc_info=True)
# stand-alone launch of the Supervisor process
if __name__ == '__main__':
    from synergy.supervisor.supervisor_constants import PROCESS_SUPERVISOR
    source = Supervisor(PROCESS_SUPERVISOR)
    source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/supervisor/synergy_supervisor.py",
"copies": "1",
"size": "6001",
"license": "bsd-3-clause",
"hash": -5375747984924264000,
"line_mean": 42.802919708,
"line_max": 108,
"alpha_frac": 0.6033994334,
"autogenerated": false,
"ratio": 4.003335557038025,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5106734990438025,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from threading import Lock
from synergy.scheduler.scheduler_constants import QUEUE_UOW_STATUS, QUEUE_JOB_STATUS
from synergy.db.model.mq_transmission import MqTransmission
from synergy.mq.flopsy import PublishersPool
from synergy.system.decorator import thread_safe
class MqTransmitter(object):
    """ a class hosting several Message Queue helper methods to send MqTransmission """

    def __init__(self, logger):
        self.logger = logger
        self.lock = Lock()
        self.publishers = PublishersPool(self.logger)

    def __del__(self):
        try:
            self.logger.info('Closing Flopsy Publishers Pool...')
            self.publishers.close()
        except Exception as e:
            self.logger.error(f'Exception caught while closing Flopsy Publishers Pool: {e}')

    def _publish(self, publisher_key, mq_request):
        """ fetch a publisher for the given key, emit the transmission document, release the publisher """
        publisher = self.publishers.get(publisher_key)
        publisher.publish(mq_request.document)
        publisher.release()

    @thread_safe
    def publish_managed_uow(self, uow):
        """ route the unit_of_work notification to the queue of its owning process """
        mq_request = MqTransmission(process_name=uow.process_name, record_db_id=uow.db_id)
        self._publish(uow.process_name, mq_request)

    @thread_safe
    def publish_freerun_uow(self, freerun_entry, uow):
        """ route the unit_of_work notification to the queue of its freerun process """
        mq_request = MqTransmission(process_name=freerun_entry.process_name,
                                    entry_name=freerun_entry.entry_name,
                                    record_db_id=uow.db_id)
        self._publish(freerun_entry.process_name, mq_request)

    @thread_safe
    def publish_job_status(self, job_record, finished_only=True):
        """ announce job status on QUEUE_JOB_STATUS; by default only for finished jobs """
        if finished_only and not job_record.is_finished:
            return
        mq_request = MqTransmission(process_name=job_record.process_name, record_db_id=job_record.db_id)
        self._publish(QUEUE_JOB_STATUS, mq_request)

    @thread_safe
    def publish_uow_status(self, uow):
        """ announce unit_of_work status on QUEUE_UOW_STATUS """
        mq_request = MqTransmission(process_name=uow.process_name, record_db_id=uow.db_id)
        self._publish(QUEUE_UOW_STATUS, mq_request)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/mq_transmitter.py",
"copies": "1",
"size": "2217",
"license": "bsd-3-clause",
"hash": 747494494088869600,
"line_mean": 35.95,
"line_max": 104,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 3.6523887973640856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48190554640640854,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from threading import RLock
from bson import ObjectId
from synergy.db.manager import ds_manager
from synergy.db.model import job
from synergy.db.model.job import Job
from synergy.system.decorator import thread_safe
from synergy.system.time_qualifier import *
from synergy.scheduler.scheduler_constants import COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY, \
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY
from synergy.conf import context
# Mongo query template: selects Job records with timeperiod >= the given one.
# NOTE(review): when unprocessed_only is False the STATE clause becomes {'$ne': None},
# which in Mongo also excludes documents whose STATE field is null or missing -
# presumably every job document carries a STATE; confirm before relying on this.
QUERY_GET_LIKE_TIMEPERIOD = lambda timeperiod, unprocessed_only: {
    job.TIMEPERIOD: {'$gte': timeperiod},
    job.STATE: {'$ne': job.STATE_PROCESSED if unprocessed_only else None}
}
class JobDao(object):
    """ Thread-safe Data Access Object from job_XXX collection
        above, XXX could stand for hourly, daily, monthly, yearly """

    def __init__(self, logger):
        super(JobDao, self).__init__()
        self.logger = logger
        self.lock = RLock()
        self.ds = ds_manager.ds_factory(logger)

    @thread_safe
    def _get_job_collection(self, process_name):
        """jobs are stored in 4 collections: hourly, daily, monthly and yearly;
        method looks for the proper job_collection base on process TIME_QUALIFIER"""
        qualifier = context.process_context[process_name].time_qualifier
        qualifier_to_collection = {
            QUALIFIER_HOURLY: COLLECTION_JOB_HOURLY,
            QUALIFIER_DAILY: COLLECTION_JOB_DAILY,
            QUALIFIER_MONTHLY: COLLECTION_JOB_MONTHLY,
            QUALIFIER_YEARLY: COLLECTION_JOB_YEARLY,
        }
        if qualifier not in qualifier_to_collection:
            raise ValueError('Unknown time qualifier: %s for %s' % (qualifier, process_name))
        return self.ds.connection(qualifier_to_collection[qualifier])

    @thread_safe
    def get_one(self, key, timeperiod):
        """ method finds job record and returns it to the caller"""
        collection = self._get_job_collection(key)
        document = collection.find_one({job.PROCESS_NAME: key, job.TIMEPERIOD: timeperiod})
        if document is None:
            raise LookupError('MongoDB has no job record in %s collection for (%s, %s)'
                              % (collection, key, timeperiod))
        return Job.from_json(document)

    @thread_safe
    def get_all(self, collection_name, since=None):
        """ method returns all job records from a particular collection that are older than <since> """
        query = {} if since is None else {job.TIMEPERIOD: {'$gte': since}}
        cursor = self.ds.connection(collection_name).find(query)
        if cursor.count() == 0:
            raise LookupError('MongoDB has no job records in %s collection since %r' % (collection_name, since))
        return [Job.from_json(entry) for entry in cursor]

    @thread_safe
    def run_query(self, collection_name, query):
        """ method runs query on a specified collection and return a list of filtered Job records """
        return [Job.from_json(entry) for entry in self.ds.filter(collection_name, query)]

    @thread_safe
    def update(self, instance):
        """ upserts the given Job into its time-qualifier-specific collection; returns the DB id """
        assert isinstance(instance, Job)
        collection = self._get_job_collection(instance.process_name)
        payload = instance.document
        if instance.db_id:
            payload['_id'] = ObjectId(instance.db_id)
        instance.db_id = collection.save(payload, safe=True)
        return instance.db_id
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/dao/job_dao.py",
"copies": "1",
"size": "3589",
"license": "bsd-3-clause",
"hash": -4499023065256860700,
"line_mean": 40.2528735632,
"line_max": 120,
"alpha_frac": 0.6675954305,
"autogenerated": false,
"ratio": 3.8968512486427795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010924200801492162,
"num_lines": 87
} |
__author__ = 'Bohdan Mushkevych'
from threading import RLock
from bson import ObjectId
from db.model.site_statistics import SiteStatistics, DOMAIN_NAME, TIMEPERIOD
from synergy.db.manager import ds_manager
from synergy.system.decorator import thread_safe
class SiteDao(object):
    """ Thread-safe Data Access Object for site_XXX table/collection """

    def __init__(self, logger):
        super(SiteDao, self).__init__()
        self.logger = logger
        self.lock = RLock()
        self.ds = ds_manager.ds_factory(logger)

    @thread_safe
    def get_one(self, collection_name, domain_name, timeperiod):
        """ retrieves a single SiteStatistics record for the (domain_name, timeperiod) pair """
        source = self.ds.connection(collection_name)
        document = source.find_one(filter={DOMAIN_NAME: domain_name, TIMEPERIOD: timeperiod})
        if document is None:
            raise LookupError(f'MongoDB has no site record in {collection_name} for ({domain_name}, {timeperiod})')
        return SiteStatistics.from_json(document)

    @thread_safe
    def update(self, collection_name, instance):
        """ method finds Site Statistics record and update it DB representation """
        assert isinstance(instance, SiteStatistics)
        # match by DB id when known; otherwise fall back to the natural key
        query = {'_id': ObjectId(instance.db_id)} if instance.db_id \
            else {DOMAIN_NAME: instance.domain_name, TIMEPERIOD: instance.timeperiod}
        self.ds.update(collection_name, query, instance)
        return instance.db_id

    @thread_safe
    def insert(self, collection_name, instance):
        """ inserts a unit of work into MongoDB. """
        assert isinstance(instance, SiteStatistics)
        return self.ds.connection(collection_name).insert_one(instance.document).inserted_id

    @thread_safe
    def remove(self, collection_name, domain_name, timeperiod):
        """ deletes the record matching the (domain_name, timeperiod) natural key """
        self.ds.connection(collection_name).delete_one({DOMAIN_NAME: domain_name, TIMEPERIOD: timeperiod})
| {
"repo_name": "mushkevych/scheduler",
"path": "db/dao/site_dao.py",
"copies": "1",
"size": "2001",
"license": "bsd-3-clause",
"hash": -7587061619936760000,
"line_mean": 38.2352941176,
"line_max": 115,
"alpha_frac": 0.6726636682,
"autogenerated": false,
"ratio": 4.075356415478615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006025398665213211,
"num_lines": 51
} |
__author__ = 'Bohdan Mushkevych'
from threading import RLock
from bson import ObjectId
from odm.document import BaseDocument
from synergy.db.manager import ds_manager
from synergy.system.decorator import thread_safe
def build_db_query(fields_names, field_values):
    """ builds a DB query dictionary by zipping field names with their values
    :param fields_names: a single field name or a list/tuple of field names
    :param field_values: a single value or a list/tuple of values, positionally matching the names
    :raises ValueError: if the number of names differs from the number of values """
    names = fields_names if isinstance(fields_names, (list, tuple)) else [fields_names]
    values = field_values if isinstance(field_values, (list, tuple)) else [field_values]

    if len(names) != len(values):
        raise ValueError(f'Error: unable to build a primary key query due '
                         f'to mismatch in number of fields {len(names)} vs {len(values)}')

    return dict(zip(names, values))
class BaseDao(object):
    """ Thread-safe base Data Access Object """

    def __init__(self, logger, collection_name: str, model_class, primary_key=None):
        """
        :param logger: process-level logger
        :param collection_name: name of the managed DB collection
        :param model_class: ODM model class persisted in the collection
        :param primary_key: optional list of field names forming the primary key;
                            when empty or None, model_class.key_fields() is used
        """
        super(BaseDao, self).__init__()
        self.logger = logger
        self.collection_name = collection_name
        self.model_klass = model_class
        # BUGFIX: the original assigned self.primary_key only when the argument was falsy,
        # leaving the attribute undefined whenever a custom primary_key was supplied
        self.primary_key = primary_key if primary_key else self.model_klass.key_fields()
        self.lock = RLock()
        self.ds = ds_manager.ds_factory(logger)

    @thread_safe
    def get_one(self, key):
        """ method finds single record base on the given primary key and returns it to the caller
        :raises LookupError: if no matching record exists """
        query = build_db_query(self.primary_key, key)
        collection = self.ds.connection(self.collection_name)
        document = collection.find_one(query)
        if document is None:
            raise LookupError(f'{self.model_klass.__name__} with key {query} was not found')
        return self.model_klass.from_json(document)

    @thread_safe
    def run_query(self, query):
        """ method runs query on a specified collection and return a list of filtered Model records
        :raises LookupError: if the query yields no records """
        collection = self.ds.connection(self.collection_name)
        # BUGFIX: cursor.count() was deprecated in pymongo 3.7 and removed in pymongo 4.0;
        # materialize the cursor and inspect the resulting list instead
        records = [self.model_klass.from_json(entry) for entry in collection.find(query)]
        if not records:
            raise LookupError(f'Collection {self.collection_name} has no {self.model_klass.__name__} records')
        return records

    @thread_safe
    def get_all(self):
        """ :return: every record in the collection; propagates LookupError when the collection is empty """
        return self.run_query({})

    @thread_safe
    def update(self, instance):
        """ this is an upsert method: replaces or creates the DB representation of the model instance """
        assert isinstance(instance, self.model_klass)
        if instance.db_id:
            query = {'_id': ObjectId(instance.db_id)}
        else:
            query = build_db_query(self.primary_key, instance.key)
        self.ds.update(self.collection_name, query, instance)
        return instance.db_id

    @thread_safe
    def remove(self, key):
        """ deletes the record matching the given primary key """
        query = build_db_query(self.primary_key, key)
        collection = self.ds.connection(self.collection_name)
        collection.delete_one(query)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/dao/base_dao.py",
"copies": "1",
"size": "3055",
"license": "bsd-3-clause",
"hash": -6190684902809434000,
"line_mean": 35.8072289157,
"line_max": 110,
"alpha_frac": 0.6477905074,
"autogenerated": false,
"ratio": 3.8917197452229297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.503951025262293,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from threading import RLock
from bson import ObjectId
from synergy.conf import context
from synergy.db.manager import ds_manager
from synergy.db.model import job
from synergy.db.model.job import Job
from synergy.scheduler.scheduler_constants import COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY, \
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY
from synergy.system.decorator import thread_safe
from synergy.system.time_qualifier import *
# Mongo query template: selects Job records with timeperiod >= the given one, whose state
# falls into the categories enabled by the include_* flags.
# NOTE(review): disabled categories appear as None entries in the $in list; Mongo will then
# also match documents whose STATE field is null or missing - presumably none exist; confirm.
QUERY_GET_LIKE_TIMEPERIOD = lambda timeperiod, include_running, include_processed, include_noop, include_failed: {
    job.TIMEPERIOD: {'$gte': timeperiod},
    job.STATE: {'$in': [job.STATE_PROCESSED if include_processed else None,
                        job.STATE_IN_PROGRESS if include_running else None,
                        job.STATE_EMBRYO if include_running else None,
                        job.STATE_FINAL_RUN if include_running else None,
                        job.STATE_SKIPPED if include_failed else None,
                        job.STATE_NOOP if include_noop else None]}
}
class JobDao(object):
    """ Thread-safe Data Access Object from job_XXX collection
        above, XXX could stand for hourly, daily, monthly, yearly """

    def __init__(self, logger):
        super(JobDao, self).__init__()
        self.logger = logger
        self.lock = RLock()     # guards every @thread_safe method
        self.ds = ds_manager.ds_factory(logger)

    @thread_safe
    def _get_job_collection_name(self, process_name):
        """jobs are stored in 4 collections: hourly, daily, monthly and yearly;
        method looks for the proper job_collection base on process TIME_QUALIFIER
        :raises ValueError: if the process' time qualifier is not one of the four known ones """
        qualifier = context.process_context[process_name].time_qualifier
        if qualifier == QUALIFIER_HOURLY:
            collection_name = COLLECTION_JOB_HOURLY
        elif qualifier == QUALIFIER_DAILY:
            collection_name = COLLECTION_JOB_DAILY
        elif qualifier == QUALIFIER_MONTHLY:
            collection_name = COLLECTION_JOB_MONTHLY
        elif qualifier == QUALIFIER_YEARLY:
            collection_name = COLLECTION_JOB_YEARLY
        else:
            raise ValueError(f'Unknown time qualifier: {qualifier} for {process_name}')
        return collection_name

    @thread_safe
    def get_by_id(self, process_name, db_id):
        """ method finds a single job record and returns it to the caller
        :raises LookupError: if no record with the given _id exists """
        collection_name = self._get_job_collection_name(process_name)
        collection = self.ds.connection(collection_name)
        document = collection.find_one({'_id': ObjectId(db_id)})
        if document is None:
            raise LookupError(f'MongoDB has no job record in collection {collection} for {db_id}')
        return Job.from_json(document)

    @thread_safe
    def get_one(self, process_name, timeperiod):
        """ method finds a single job record and returns it to the caller
        :raises LookupError: if no record exists for (process_name, timeperiod) """
        collection_name = self._get_job_collection_name(process_name)
        collection = self.ds.connection(collection_name)
        document = collection.find_one({job.PROCESS_NAME: process_name, job.TIMEPERIOD: timeperiod})
        if document is None:
            raise LookupError(f'MongoDB has no job record in collection {collection} for {process_name}@{timeperiod}')
        return Job.from_json(document)

    @thread_safe
    def get_all(self, collection_name, since=None):
        """ method returns all job records from a particular collection that are older than <since>
        :raises LookupError: if no matching records exist """
        if since is None:
            query = {}
        else:
            query = {job.TIMEPERIOD: {'$gte': since}}
        collection = self.ds.connection(collection_name)

        # BUGFIX: cursor.count() was deprecated in pymongo 3.7 and removed in pymongo 4.0
        # (the modern API used elsewhere in this codebase); materialize the cursor instead
        jobs = [Job.from_json(document) for document in collection.find(query)]
        if not jobs:
            raise LookupError(f'MongoDB has no job records in collection {collection_name} since {since}')
        return jobs

    @thread_safe
    def run_query(self, collection_name, query):
        """ method runs query on a specified collection and return a list of filtered Job records """
        cursor = self.ds.filter(collection_name, query)
        return [Job.from_json(document) for document in cursor]

    @thread_safe
    def update(self, instance):
        """ upserts the Job instance; matches by _id when known, otherwise by (process_name, timeperiod)
        :return: the DB id of the updated record """
        assert isinstance(instance, Job)
        collection_name = self._get_job_collection_name(instance.process_name)
        if instance.db_id:
            query = {'_id': ObjectId(instance.db_id)}
        else:
            query = {job.PROCESS_NAME: instance.process_name,
                     job.TIMEPERIOD: instance.timeperiod}
        self.ds.update(collection_name, query, instance)
        return instance.db_id
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/dao/job_dao.py",
"copies": "1",
"size": "4640",
"license": "bsd-3-clause",
"hash": -7649536650191364000,
"line_mean": 42.3644859813,
"line_max": 118,
"alpha_frac": 0.6564655172,
"autogenerated": false,
"ratio": 3.965811965811966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5122277483011966,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from threading import RLock
from bson.objectid import ObjectId
from pymongo import ASCENDING
from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError
from synergy.conf import context
from synergy.db.error import DuplicateKeyError
from synergy.db.manager import ds_manager
from synergy.db.model import unit_of_work
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK
from synergy.system import time_helper
from synergy.system.decorator import thread_safe
from synergy.system.time_qualifier import *
# Mongo query template: selects FREERUN units_of_work with timeperiod >= the given one,
# in any state enabled by the include_* flags.
# NOTE(review): disabled states appear as None entries in the $in list; Mongo will then
# also match documents whose STATE is null or missing - presumably none exist; confirm.
QUERY_GET_FREERUN_SINCE = lambda timeperiod, include_running, include_processed, include_noop, include_failed: {
    unit_of_work.TIMEPERIOD: {'$gte': timeperiod},
    unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN,
    unit_of_work.STATE: {'$in': [unit_of_work.STATE_PROCESSED if include_processed else None,
                                 unit_of_work.STATE_IN_PROGRESS if include_running else None,
                                 unit_of_work.STATE_REQUESTED if include_running else None,
                                 unit_of_work.STATE_CANCELED if include_failed else None,
                                 unit_of_work.STATE_INVALID if include_failed else None,
                                 unit_of_work.STATE_NOOP if include_noop else None]}
}

# Mongo query template: selects FREERUN units_of_work for the given timeperiod whose
# process_name matches the schedulable stem, case-insensitively.
# NOTE(review): in the regex, '*' quantifies the stem's LAST character; a prefix match
# would be stem + '.*' (or an anchored '^' + stem) - confirm the intended semantics.
QUERY_GET_FLOW_FREERUNS = lambda schedulable_stem, timeperiod: {
    unit_of_work.TIMEPERIOD: timeperiod,
    unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN,
    unit_of_work.PROCESS_NAME: {'$regex': schedulable_stem + '*', '$options': 'i'}
}
class UnitOfWorkDao(object):
    """ Thread-safe Data Access Object from units_of_work table/collection """

    def __init__(self, logger):
        super(UnitOfWorkDao, self).__init__()
        self.logger = logger
        # guards every @thread_safe method; RLock so nested decorated calls do not deadlock
        self.lock = RLock()
        self.ds = ds_manager.ds_factory(logger)

    @thread_safe
    def get_one(self, key):
        """ method finds unit_of_work record and returns it to the caller
        :param key: the DB primary key (_id) of the record
        :return: the matching UnitOfWork instance
        :raises LookupError: if no record with the given key exists """
        query = {'_id': ObjectId(key)}
        collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
        document = collection.find_one(query)
        if document is None:
            msg = f'UOW with ID={key} was not found'
            self.logger.warning(msg)
            raise LookupError(msg)
        return UnitOfWork.from_json(document)

    @thread_safe
    def get_reprocessing_candidates(self, since=None):
        """ method queries Unit Of Work whose <start_timeperiod> is younger than <since>
        and who could be candidates for re-processing
        :param since: optional timeperiod string; None returns all candidates
        :return: list of MANAGED UnitOfWork in IN_PROGRESS/INVALID/REQUESTED states, ordered by _id
        :raises LookupError: when no candidates are found """
        collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
        query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS,
                                              unit_of_work.STATE_INVALID,
                                              unit_of_work.STATE_REQUESTED]},
                 unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_MANAGED}
        if since is None:
            cursor = collection.find(query).sort('_id', ASCENDING)
            candidates = [UnitOfWork.from_json(document) for document in cursor]
        else:
            candidates = []
            # coarse DB-side pre-filter: cast <since> to the widest (yearly) granularity,
            # then refine per-process below with each process' own time qualifier
            yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since)
            query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod}
            cursor = collection.find(query).sort('_id', ASCENDING)
            for document in cursor:
                uow = UnitOfWork.from_json(document)
                if uow.process_name not in context.process_context:
                    # this is a decommissioned process
                    continue
                time_qualifier = context.process_context[uow.process_name].time_qualifier
                if time_qualifier == QUALIFIER_REAL_TIME:
                    # real-time processes are compared at hourly granularity
                    time_qualifier = QUALIFIER_HOURLY
                process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since)
                if process_specific_since <= uow.start_timeperiod:
                    candidates.append(uow)
        if len(candidates) == 0:
            raise LookupError('MongoDB has no UOW reprocessing candidates')
        return candidates

    @thread_safe
    def get_by_params(self, process_name, timeperiod, start_id, end_id):
        """ method finds unit_of_work record and returns it to the caller
        :raises LookupError: if no record matches the four-field natural key """
        query = {unit_of_work.PROCESS_NAME: process_name,
                 unit_of_work.TIMEPERIOD: timeperiod,
                 unit_of_work.START_ID: start_id,
                 unit_of_work.END_ID: end_id}
        collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
        document = collection.find_one(query)
        if document is None:
            raise LookupError(f'UOW satisfying query {query} was not found')
        return UnitOfWork.from_json(document)

    @thread_safe
    def update(self, instance):
        """ method finds unit_of_work record and change its status
        matches by _id when known, otherwise by the four-field natural key """
        assert isinstance(instance, UnitOfWork)
        if instance.db_id:
            query = {'_id': ObjectId(instance.db_id)}
        else:
            query = {unit_of_work.PROCESS_NAME: instance.process_name,
                     unit_of_work.TIMEPERIOD: instance.timeperiod,
                     unit_of_work.START_ID: instance.start_id,
                     unit_of_work.END_ID: instance.end_id}
        self.ds.update(COLLECTION_UNIT_OF_WORK, query, instance)
        return instance.db_id

    @thread_safe
    def insert(self, instance):
        """ inserts a unit of work into MongoDB.
        :raises DuplicateKeyError: if such record already exist """
        assert isinstance(instance, UnitOfWork)
        collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
        try:
            return collection.insert_one(instance.document).inserted_id
        except MongoDuplicateKeyError as e:
            # re-wrap the driver error into the project-level DuplicateKeyError,
            # preserving the natural key so callers can recover (see below)
            exc = DuplicateKeyError(instance.process_name,
                                    instance.timeperiod,
                                    instance.start_id,
                                    instance.end_id,
                                    e)
            raise exc

    @thread_safe
    def remove(self, uow_id):
        """ deletes the unit_of_work record with the given _id """
        collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)
        collection.delete_one({'_id': ObjectId(uow_id)})

    @thread_safe
    def run_query(self, query):
        """ method runs the query and returns a list of filtered UnitOfWork records """
        cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query)
        return [UnitOfWork.from_json(document) for document in cursor]

    def recover_from_duplicatekeyerror(self, e):
        """ method tries to recover from DuplicateKeyError
        :return: the already-existing UnitOfWork matching the error's natural key,
                 or None (implicitly) when recovery fails """
        if isinstance(e, DuplicateKeyError):
            try:
                return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id)
            except LookupError as e:
                self.logger.error(f'Unable to recover from DuplicateKeyError error due to {e}', exc_info=True)
        else:
            msg = 'Unable to recover from DuplicateKeyError due to unspecified UOW primary key'
            self.logger.error(msg)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/dao/unit_of_work_dao.py",
"copies": "1",
"size": "7215",
"license": "bsd-3-clause",
"hash": 4244589389484093000,
"line_mean": 43.8136645963,
"line_max": 112,
"alpha_frac": 0.6185724186,
"autogenerated": false,
"ratio": 4.001663893510815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012949117181450555,
"num_lines": 161
} |
__author__ = 'Bohdan Mushkevych'
from threading import Thread
from werkzeug.wrappers import Request
from werkzeug.wsgi import ClosingIterator
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.serving import run_simple
from synergy.conf import settings
from synergy.system.system_logger import get_logger
from synergy.scheduler.scheduler_constants import PROCESS_MX
from synergy.mx.utils import STATIC_PATH, local, local_manager, url_map, jinja_env
from synergy.mx import views
from flow.mx import views as flow_views
from flow.mx import STATIC_FLOW_ENDPOINT, STATIC_FLOW_PATH
import socket
socket.setdefaulttimeout(10.0) # set default socket timeout at 10 seconds
class MX(object):
    """ MX stands for Management Extension and represents HTTP server serving UI front-end for Synergy Scheduler """

    def __init__(self, mbean):
        """ :param mbean: scheduler instance exposed to the Jinja templates as 'mbean' """
        local.application = self
        self.mx_thread = None
        self.mbean = mbean
        jinja_env.globals['mbean'] = mbean

        # serve the two static trees from disk; other URLs fall through to self.dispatch
        self.dispatch = SharedDataMiddleware(self.dispatch, {
            '/scheduler/static': STATIC_PATH,
            f'/{STATIC_FLOW_ENDPOINT}': STATIC_FLOW_PATH,
        })

        # during the get_logger call a 'werkzeug' logger will be created
        # later, werkzeug._internal.py -> _log() will assign the logger to global _logger variable
        self.logger = get_logger(PROCESS_MX)

    def dispatch(self, environ, start_response):
        """ WSGI entry point: resolves the URL to an endpoint and invokes the matching view handler """
        local.application = self
        request = Request(environ)
        local.url_adapter = adapter = url_map.bind_to_environ(environ)
        local.request = request
        try:
            endpoint, values = adapter.match()

            # first - try to read from synergy.mx.views
            handler = getattr(views, endpoint, None)
            if not handler:
                # otherwise - read from flow.mx.views
                handler = getattr(flow_views, endpoint)
            response = handler(request, **values)
        except NotFound:
            response = views.not_found(request)
            response.status_code = 404
        except HTTPException as e:
            response = e
        return ClosingIterator(response(environ, start_response),
                               [local_manager.cleanup])

    def __call__(self, environ, start_response):
        return self.dispatch(environ, start_response)

    def start(self, hostname=None, port=None):
        """ Spawns a new HTTP server, residing on defined hostname and port
        :param hostname: the default hostname the server should listen on.
        :param port: the default port of the server.
        """
        if hostname is None:
            hostname = settings.settings['mx_host']
        if port is None:
            port = settings.settings['mx_port']

        reloader = False         # use_reloader: the default setting for the reloader.
        debugger = False         #
        evalex = True            # should the exception evaluation feature be enabled?
        threaded = False         # True if each request is handled in a separate thread
        processes = 1            # if greater than 1 then handle each request in a new process
        reloader_interval = 1    # the interval for the reloader in seconds.
        static_files = None      # static_files: optional dict of static files.
        extra_files = None       # extra_files: optional list of extra files to track for reloading.
        ssl_context = None       # ssl_context: optional SSL context for running server in HTTPS mode.

        # BUGFIX: the original passed `target=run_simple(...)`, which CALLED the blocking
        # run_simple in the current thread and handed Thread its return value (None).
        # Pass the callable and its kwargs separately so the server runs on the new thread.
        self.mx_thread = Thread(target=run_simple,
                                kwargs=dict(hostname=hostname,
                                            port=port,
                                            application=self,
                                            use_debugger=debugger,
                                            use_evalex=evalex,
                                            extra_files=extra_files,
                                            use_reloader=reloader,
                                            reloader_interval=reloader_interval,
                                            threaded=threaded,
                                            processes=processes,
                                            static_files=static_files,
                                            ssl_context=ssl_context))
        self.mx_thread.daemon = True
        self.mx_thread.start()

    def stop(self):
        """ method stops currently running HTTP server, if any
        :see: `werkzeug.serving.make_environ`
        http://flask.pocoo.org/snippets/67/ """
        # NOTE(review): werkzeug exposes the shutdown hook via
        # request.environ['werkzeug.server.shutdown']; looking it up on jinja_env
        # here looks suspicious - confirm jinja_env.get resolves it as intended
        func = jinja_env.get('werkzeug.server.shutdown')
        if func is None:
            raise RuntimeError('MX Error: no Shutdown Function registered for the Werkzeug Server')
        func()
if __name__ == '__main__':
    from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER
    from synergy.scheduler.synergy_scheduler import Scheduler

    # script entry point: start the MX web console bound to a fresh Scheduler instance
    the_scheduler = Scheduler(PROCESS_SCHEDULER)
    mx_server = MX(the_scheduler)
    mx_server.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/synergy_mx.py",
"copies": "1",
"size": "5162",
"license": "bsd-3-clause",
"hash": 8293099650437550000,
"line_mean": 43.1196581197,
"line_max": 116,
"alpha_frac": 0.5935683843,
"autogenerated": false,
"ratio": 4.727106227106227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010032713723135528,
"num_lines": 117
} |
__author__ = 'Bohdan Mushkevych'
from threading import Thread
from werkzeug.wrappers import Request
from werkzeug.wsgi import ClosingIterator, SharedDataMiddleware
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.serving import run_simple
from synergy.conf import settings
from synergy.mx.utils import STATIC_PATH, local, local_manager, url_map, jinja_env
from synergy.mx import views
class MX(object):
    """ Management Extension: HTTP server serving the UI front-end for Synergy Scheduler """

    def __init__(self, mbean):
        """ :param mbean: scheduler instance exposed to the Jinja templates as 'mbean' """
        local.application = self
        self.mx_thread = None
        self.mbean = mbean
        jinja_env.globals['mbean'] = mbean

        # serve /static/* from disk; other URLs fall through to self.dispatch
        self.dispatch = SharedDataMiddleware(self.dispatch, {
            '/static': STATIC_PATH
        })

    def dispatch(self, environ, start_response):
        """ WSGI entry point: resolves the URL to an endpoint and invokes the matching view handler """
        local.application = self
        request = Request(environ)
        local.url_adapter = adapter = url_map.bind_to_environ(environ)
        try:
            endpoint, values = adapter.match()
            handler = getattr(views, endpoint)
            response = handler(request, **values)
        except NotFound:
            response = views.not_found(request)
            response.status_code = 404
        except HTTPException as e:
            response = e
        return ClosingIterator(response(environ, start_response),
                               [local_manager.cleanup])

    def __call__(self, environ, start_response):
        return self.dispatch(environ, start_response)

    def start_mx_thread(self, hostname=None, port=None):
        """ Spawns a new HTTP server, residing on defined hostname and port
        :param hostname: the default hostname the server should listen on.
        :param port: the default port of the server.
        """
        if hostname is None:
            hostname = settings.settings['mx_host']
        if port is None:
            port = settings.settings['mx_port']

        reloader = False         # use_reloader: the default setting for the reloader.
        debugger = False         #
        evalex = True            # use_evalex: the default setting for the evalex flag of the debugger.
        threaded = False         # threaded: the default threading setting.
        processes = 1            # processes: the default number of processes to start.
        reloader_interval = 1
        static_files = None      # static_files: optional dict of static files.
        extra_files = None       # extra_files: optional list of extra files to track for reloading.
        ssl_context = None       # ssl_context: optional SSL context for running server in HTTPS mode.

        # BUGFIX: the original passed `target=run_simple(...)`, which CALLED the blocking
        # run_simple in the current thread and handed Thread its return value (None).
        # Pass the callable and its kwargs separately so the server runs on the new thread.
        self.mx_thread = Thread(target=run_simple,
                                kwargs=dict(hostname=hostname,
                                            port=port,
                                            application=self,
                                            use_debugger=debugger,
                                            use_evalex=evalex,
                                            extra_files=extra_files,
                                            use_reloader=reloader,
                                            reloader_interval=reloader_interval,
                                            threaded=threaded,
                                            processes=processes,
                                            static_files=static_files,
                                            ssl_context=ssl_context))
        self.mx_thread.daemon = True
        self.mx_thread.start()
if __name__ == '__main__':
    from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER
    from synergy.scheduler.synergy_scheduler import Scheduler

    # script entry point: start the MX web console bound to a fresh Scheduler instance
    scheduler_instance = Scheduler(PROCESS_SCHEDULER)
    mx_server = MX(scheduler_instance)
    mx_server.start_mx_thread()
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/synergy_mx.py",
"copies": "1",
"size": "3761",
"license": "bsd-3-clause",
"hash": 8723149822295527000,
"line_mean": 41.75,
"line_max": 102,
"alpha_frac": 0.5618186652,
"autogenerated": false,
"ratio": 4.8717616580310885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013201961923597738,
"num_lines": 88
} |
__author__ = 'Bohdan Mushkevych'
from threading import Thread
import socket
from amqp import AMQPError
from synergy.conf import settings
from synergy.mq.flopsy import Consumer
from synergy.system.performance_tracker import SimpleTracker
from synergy.system.synergy_process import SynergyProcess
class AbstractMqWorker(SynergyProcess):
    """
    class works as an abstract basement for all workers and aggregators
    it registers in the mq and awaits for the messages
    """

    def __init__(self, process_name):
        """:param process_name: id of the process, the worker will be performing """
        super(AbstractMqWorker, self).__init__(process_name)

        # how long Consumer.wait blocks before giving up; 0 until overridden from settings
        self.mq_timeout_seconds = 0
        self._init_mq_timeout_seconds()
        self.consumer = None
        self._init_mq_consumer()
        self.main_thread = None
        self.performance_ticker = None
        self._init_performance_ticker(self.logger)

        msg_suffix = 'in Production Mode'
        if settings.settings['under_test']:
            msg_suffix = 'in Testing Mode'
        self.logger.info('Started %s %s' % (self.process_name, msg_suffix))

    def __del__(self):
        # best-effort teardown: each resource is closed independently so a failure in one
        # does not prevent releasing the other
        try:
            self.logger.info('Closing Flopsy Consumer...')
            self.consumer.close()
        except Exception as e:
            self.logger.error('Exception caught while closing Flopsy Consumer: %s' % str(e))

        try:
            self.logger.info('Canceling Performance Tracker...')
            self.performance_ticker.cancel()
        except Exception as e:
            self.logger.error('Exception caught while cancelling the performance_ticker: %s' % str(e))
        super(AbstractMqWorker, self).__del__()

    # ********************** abstract methods ****************************
    def _init_performance_ticker(self, logger):
        # subclasses may override to install a richer tracker
        self.performance_ticker = SimpleTracker(logger)
        self.performance_ticker.start()

    def _init_mq_consumer(self):
        # consumer is bound to the queue named after this process
        self.consumer = Consumer(self.process_name)

    def _init_mq_timeout_seconds(self):
        # optional override via settings; otherwise stays at the 0 set in __init__
        if 'mq_timeout_sec' in settings.settings:
            self.mq_timeout_seconds = settings.settings['mq_timeout_sec']

    # ********************** thread-related methods ****************************
    def _mq_callback(self, message):
        """ abstract method to process messages from MQ
        :param message: mq message"""
        pass

    def _run_mq_listener(self):
        # blocks in consumer.wait until timeout or broker error, then tears down
        try:
            self.consumer.register(self._mq_callback)
            self.consumer.wait(self.mq_timeout_seconds)
        except socket.timeout as e:
            self.logger.warn('Queue %s is likely empty. Worker exits due to: %s' % (self.consumer.queue, str(e)))
        except (AMQPError, IOError) as e:
            self.logger.error('AMQPError: %s' % str(e))
        finally:
            # NOTE(review): __del__ is invoked explicitly here to force resource release;
            # it may run again at garbage collection - its handlers swallow the resulting errors
            self.__del__()
            self.logger.info('Exiting main thread. All auxiliary threads stopped.')

    def start(self, *_):
        # runs the MQ listener on a dedicated (non-daemon) thread
        self.main_thread = Thread(target=self._run_mq_listener)
        self.main_thread.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/workers/abstract_mq_worker.py",
"copies": "1",
"size": "3048",
"license": "bsd-3-clause",
"hash": -189063442447169340,
"line_mean": 35.2857142857,
"line_max": 113,
"alpha_frac": 0.6158136483,
"autogenerated": false,
"ratio": 4.021108179419525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007767678888112215,
"num_lines": 84
} |
__author__ = 'Bohdan Mushkevych'
from threading import Thread
from amqp import AMQPError
from synergy.db.model.mq_transmission import MqTransmission
from synergy.db.dao.job_dao import JobDao
from synergy.scheduler.scheduler_constants import QUEUE_JOB_STATUS
from synergy.scheduler.thread_handler import ManagedThreadHandler
from synergy.mq.flopsy import Consumer
class JobStatusListener(object):
    """ class instance listens to the QUEUE_JOB_STATUS queue and triggers ManagedThreadHandlers if applicable """
    def __init__(self, scheduler):
        """ :param scheduler: hosting Synergy Scheduler instance; supplies timetable, logger and handler registry """
        self.scheduler = scheduler
        self.timetable = scheduler.timetable
        self.logger = scheduler.logger
        self.job_dao = JobDao(self.logger)
        self.consumer = Consumer(QUEUE_JOB_STATUS)
        self.main_thread = None
    def __del__(self):
        """ closes the MQ consumer; also invoked explicitly by stop() and by _run_mq_listener() cleanup """
        try:
            self.logger.info('JobStatusListener: Closing Flopsy Consumer...')
            self.consumer.close()
        except Exception as e:
            self.logger.error(f'JobStatusListener: Exception caught while closing Flopsy Consumer: {e}')
    # ********************** thread-related methods ****************************
    def _mq_callback(self, message):
        """ method receives a message from Synergy Scheduler notifying of Job completion,
        builds up a list of dependant TreeNodes/Jobs and triggers their ManagedThreadHandlers, if applicable
        :param message: <MqTransmission> mq message """
        try:
            self.logger.info('JobStatusListener {')
            mq_request = MqTransmission.from_json(message.body)
            job_record = self.job_dao.get_by_id(mq_request.process_name, mq_request.record_db_id)
            # step 1: identify dependant tree nodes
            tree_obj = self.timetable.get_tree(job_record.process_name)
            tree_node = tree_obj.get_node(job_record.process_name, job_record.timeperiod)
            # NOTE(review): reaches into the Timetable's private _find_dependant_tree_nodes API
            dependant_nodes = self.timetable._find_dependant_tree_nodes(tree_node)
            # step 2: form list of handlers to trigger
            # a set de-duplicates handlers when several nodes map to the same process
            handlers_to_trigger = set()
            for node in dependant_nodes:
                state_machine = self.scheduler.state_machine_for(node.process_name)
                if state_machine.run_on_active_timeperiod:
                    # ignore dependant processes whose state machine can run on an active timeperiod
                    # to avoid "over-triggering" them
                    continue
                handlers_to_trigger.add(self.scheduler.managed_handlers[node.process_name])
            # step 3: iterate the list of handlers and trigger them
            for handler in handlers_to_trigger:
                assert isinstance(handler, ManagedThreadHandler)
                handler.trigger()
        except KeyError:
            self.logger.error(f'Access error for {message.body}', exc_info=True)
        except Exception:
            self.logger.error(f'Error during ManagedThreadHandler.trigger call {message.body}', exc_info=True)
        finally:
            # the message is acknowledged unconditionally: failures above are logged, not redelivered
            self.consumer.acknowledge(message.delivery_tag)
            self.logger.info('JobStatusListener }')
    def _run_mq_listener(self):
        """ body of the listener thread: registers the callback and blocks on the consumer """
        try:
            self.consumer.register(self._mq_callback)
            self.logger.info('JobStatusListener: instantiated and activated.')
            self.consumer.wait()
        except (AMQPError, IOError) as e:
            self.logger.error(f'JobStatusListener: AMQPError {e}')
        finally:
            self.__del__()
            self.logger.info('JobStatusListener: Shut down.')
    def start(self, *_):
        """ spawns the MQ-listening daemon thread; *_ absorbs optional positional arguments """
        self.main_thread = Thread(target=self._run_mq_listener)
        self.main_thread.daemon = True
        self.main_thread.start()
    def stop(self):
        """ method stops currently MQ Consumer listener, if any """
        self.__del__()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/job_status_listener.py",
"copies": "1",
"size": "3835",
"license": "bsd-3-clause",
"hash": 4541168738803282000,
"line_mean": 42.0898876404,
"line_max": 113,
"alpha_frac": 0.637809648,
"autogenerated": false,
"ratio": 4.1414686825053995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001550510985969864,
"num_lines": 89
} |
__author__ = 'Bohdan Mushkevych'
from threading import Thread
from amqp import AMQPError
from synergy.db.model.mq_transmission import MqTransmission
from synergy.db.model import unit_of_work
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.scheduler.scheduler_constants import QUEUE_UOW_STATUS
from synergy.mq.flopsy import Consumer
class UowStatusListener(object):
    """ class instance listens to the QUEUE_UOW_STATUS queue and updates Timetable records correspondingly """
    def __init__(self, scheduler):
        """ :param scheduler: hosting Synergy Scheduler instance; supplies timetable, logger and state machines """
        self.scheduler = scheduler
        self.timetable = scheduler.timetable
        self.logger = scheduler.logger
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.consumer = Consumer(QUEUE_UOW_STATUS)
        self.main_thread = None
    def __del__(self):
        """ closes the MQ consumer; also invoked explicitly by stop() and by _run_mq_listener() cleanup """
        try:
            self.logger.info('UowStatusListener: Closing Flopsy Consumer...')
            self.consumer.close()
        except Exception as e:
            self.logger.error(f'UowStatusListener: Exception caught while closing Flopsy Consumer: {e}')
    # ********************** thread-related methods ****************************
    def _mq_callback(self, message):
        """ method processes messages from Synergy Worker and updates corresponding Timetable record,
        as well as the job itself
        :param message: <MqTransmission> mq message """
        try:
            self.logger.info('UowStatusListener {')
            mq_request = MqTransmission.from_json(message.body)
            uow = self.uow_dao.get_one(mq_request.record_db_id)
            # guard 1: only TYPE_MANAGED units of work drive the timetable
            if uow.unit_of_work_type != unit_of_work.TYPE_MANAGED:
                self.logger.info('Received transmission from non-managed UOW execution: {0}. Ignoring it.'
                                 .format(uow.unit_of_work_type))
                return
            tree = self.timetable.get_tree(uow.process_name)
            node = tree.get_node(uow.process_name, uow.timeperiod)
            # guard 2: the job record may have been re-assigned a newer unit_of_work since this one ran
            if uow.db_id != node.job_record.related_unit_of_work:
                self.logger.info('Received transmission is likely outdated. Ignoring it.')
                return
            # guard 3: only final states are propagated
            if not uow.is_finished:
                # rely on Garbage Collector to re-trigger the failing unit_of_work
                self.logger.info('Received transmission from {0}@{1} in non-final state {2}. Ignoring it.'
                                 .format(uow.process_name, uow.timeperiod, uow.state))
                return
            state_machine = self.scheduler.state_machine_for(node.process_name)
            self.logger.info('Commencing StateMachine.notify with UOW from {0}@{1} in {2}.'
                             .format(uow.process_name, uow.timeperiod, uow.state))
            state_machine.notify(uow)
        except KeyError:
            self.logger.error(f'Access error for {message.body}', exc_info=True)
        except Exception:
            self.logger.error(f'Error during StateMachine.notify call {message.body}', exc_info=True)
        finally:
            # the message is acknowledged unconditionally: failures above are logged, not redelivered
            self.consumer.acknowledge(message.delivery_tag)
            self.logger.info('UowStatusListener }')
    def _run_mq_listener(self):
        """ body of the listener thread: registers the callback and blocks on the consumer """
        try:
            self.consumer.register(self._mq_callback)
            self.logger.info('UowStatusListener: instantiated and activated.')
            self.consumer.wait()
        except (AMQPError, IOError) as e:
            self.logger.error(f'UowStatusListener: AMQPError {e}')
        finally:
            self.__del__()
            self.logger.info('UowStatusListener: Shut down.')
    def start(self, *_):
        """ spawns the MQ-listening daemon thread; *_ absorbs optional positional arguments """
        self.main_thread = Thread(target=self._run_mq_listener)
        self.main_thread.daemon = True
        self.main_thread.start()
    def stop(self):
        """ method stops currently MQ Consumer listener, if any """
        self.__del__()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/uow_status_listener.py",
"copies": "1",
"size": "3858",
"license": "bsd-3-clause",
"hash": 9120610434209990000,
"line_mean": 41.3956043956,
"line_max": 110,
"alpha_frac": 0.6168999482,
"autogenerated": false,
"ratio": 3.916751269035533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015271738802411616,
"num_lines": 91
} |
__author__ = 'Bohdan Mushkevych'
from threading import Thread
from amqp import AMQPError
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.scheduler.abstract_state_machine import AbstractStateMachine
from synergy.scheduler.scheduler_constants import TYPE_MANAGED
from synergy.scheduler.scheduler_constants import QUEUE_UOW_REPORT
from synergy.mq.flopsy import Consumer
class StatusBusListener(object):
    """ class instance listens to the QUEUE_UOW_REPORT queue and updates Timetable records correspondingly """

    def __init__(self, scheduler):
        """ :param scheduler: hosting Synergy Scheduler instance; supplies timetable, logger and state machines """
        self.scheduler = scheduler
        self.timetable = scheduler.timetable
        self.logger = scheduler.logger
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.consumer = Consumer(QUEUE_UOW_REPORT)
        self.main_thread = None

    def __del__(self):
        """ closes the MQ consumer; doubles as the explicit cleanup routine invoked by stop() """
        try:
            self.logger.info('StatusBusListener: Closing Flopsy Consumer...')
            self.consumer.close()
        except Exception as e:
            self.logger.error('StatusBusListener: Exception caught while closing Flopsy Consumer: %s' % str(e))

    # ********************** thread-related methods ****************************
    def _mq_callback(self, message):
        """ method processes messages from Synergy Worker and updates corresponding Timetable record,
        as well as the job itself
        :param message: <SynergyMqTransmission> mq message """
        try:
            self.logger.info('StatusBusListener {')
            mq_request = SynergyMqTransmission.from_json(message.body)
            uow = self.uow_dao.get_one(mq_request.unit_of_work_id)
            # guard 1: only TYPE_MANAGED units of work drive the timetable
            if uow.unit_of_work_type != TYPE_MANAGED:
                self.logger.info('Received transmission from TYPE_FREERUN execution. Ignoring it.')
                return
            tree = self.timetable.get_tree(uow.process_name)
            node = tree.get_node(uow.process_name, uow.timeperiod)
            # guard 2: the job record may have been re-assigned a newer unit_of_work since this one ran
            if uow.db_id != node.job_record.related_unit_of_work:
                self.logger.info('Received transmission is likely outdated. Ignoring it.')
                return
            # guard 3: only final states are propagated
            if not uow.is_finished:
                # rely on Garbage Collector to re-trigger the failing unit_of_work
                self.logger.info('Received unit_of_work status report from %s at %s in non-final state %s. Ignoring it.'
                                 % (uow.process_name, uow.timeperiod, uow.state))
                return
            process_entry = self.scheduler.managed_handlers[uow.process_name].process_entry
            state_machine = self.scheduler.state_machines[process_entry.state_machine_name]
            assert isinstance(state_machine, AbstractStateMachine)
            self.logger.info('Commencing shallow state update for unit_of_work from %s at %s in %s.'
                             % (uow.process_name, uow.timeperiod, uow.state))
            state_machine.shallow_state_update(uow)
        except KeyError:
            self.logger.error('Access error for %s' % str(message.body), exc_info=True)
        except LookupError:
            self.logger.error('Can not perform shallow state update for %s' % str(message.body), exc_info=True)
        except Exception:
            self.logger.error('Unexpected error during shallow state update for %s' % str(message.body), exc_info=True)
        finally:
            # the message is acknowledged unconditionally: failures above are logged, not redelivered
            self.consumer.acknowledge(message.delivery_tag)
            self.logger.info('StatusBusListener }')

    def _run_mq_listener(self):
        """ body of the listener thread: registers the callback and blocks on the consumer """
        try:
            self.consumer.register(self._mq_callback)
            self.logger.info('StatusBusListener: instantiated and activated.')
            self.consumer.wait()
        except (AMQPError, IOError) as e:
            self.logger.error('StatusBusListener: AMQPError %s' % str(e))
        finally:
            self.__del__()
            self.logger.info('StatusBusListener: Shutting down... All auxiliary threads stopped.')

    def start(self, *_):
        """ spawns the MQ-listening daemon thread; *_ absorbs optional positional arguments """
        self.main_thread = Thread(target=self._run_mq_listener)
        self.main_thread.daemon = True
        self.main_thread.start()

    def stop(self):
        """ method stops currently MQ Consumer listener, if any;
        added for interface consistency with the sibling Job/Uow status listeners """
        self.__del__()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/status_bus_listener.py",
"copies": "1",
"size": "4195",
"license": "bsd-3-clause",
"hash": 6382535573126743000,
"line_mean": 44.597826087,
"line_max": 120,
"alpha_frac": 0.6407628129,
"autogenerated": false,
"ratio": 3.9990467111534795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5139809524053479,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from werkzeug.utils import cached_property
from synergy.mx.base_request_handler import BaseRequestHandler
from synergy.mx.rest_model_factory import create_rest_timetable_tree, create_rest_managed_scheduler_entry
class TreeDetails(BaseRequestHandler):
    """ MX request handler assembling REST documents for timetable trees """

    def __init__(self, request, **values):
        super(TreeDetails, self).__init__(request, **values)

    def _get_tree_details(self, tree_name):
        """ builds a REST representation of one tree, with an entry per managed process in its hierarchy """
        timetable = self.scheduler.timetable
        tree_obj = timetable.trees[tree_name]
        rest_tree = create_rest_timetable_tree(timetable, tree_obj)
        for process_name in tree_obj.process_hierarchy:
            handler = self.scheduler.managed_handlers[process_name]
            rest_entry = create_rest_managed_scheduler_entry(handler, timetable, self.scheduler.gc)
            rest_tree.processes[process_name] = rest_entry.document
        return rest_tree

    def _all_trees(self):
        """ REST documents for every known tree, keyed by tree name """
        return {name: self._get_tree_details(name).document
                for name in self.scheduler.timetable.trees}

    def _mx_page_trees(self, mx_page):
        """ return trees assigned to given MX Page """
        return {name: self._get_tree_details(name).document
                for name, tree in self.scheduler.timetable.trees.items()
                if tree.mx_page == mx_page}

    @property
    def trees(self):
        """ tree documents for the requested MX page, or every tree when no page was requested """
        mx_page = self.request_arguments.get('mx_page')
        if mx_page:
            return self._mx_page_trees(mx_page)
        return self._all_trees()

    @cached_property
    def tree_details(self):
        """ document for the single requested tree; empty dict when tree_name is absent """
        tree_name = self.request_arguments.get('tree_name')
        return self._get_tree_details(tree_name).document if tree_name else dict()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/tree_details.py",
"copies": "1",
"size": "1985",
"license": "bsd-3-clause",
"hash": -5495629383499992000,
"line_mean": 39.5102040816,
"line_max": 105,
"alpha_frac": 0.6146095718,
"autogenerated": false,
"ratio": 3.97,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5084609571800001,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from werkzeug.utils import cached_property
from synergy.conf import settings
from synergy.system import time_helper
from synergy.conf import context
from synergy.mx.rest_model import RestTimetableTreeNode, RestJob
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request
class TreeNodeDetails(BaseRequestHandler):
    """ MX request handler rendering a timetable tree node together with its immediate children """

    def __init__(self, request, **values):
        super(TreeNodeDetails, self).__init__(request, **values)
        self.process_name = request.args.get('process_name')
        self.timeperiod = request.args.get('timeperiod')
        self.tree = self.scheduler.timetable.get_tree(self.process_name)
        self.is_request_valid = bool(self.tree)

    @classmethod
    def get_details(cls, node, as_model=False):
        """method returns either RestJob instance or corresponding document, depending on the as_model argument """
        job_record = node.job_record
        rest_job = RestJob(
            process_name=node.process_name,
            timeperiod=node.timeperiod,
            time_qualifier=node.time_qualifier,
            number_of_children=len(node.children),
            number_of_failures=job_record.number_of_failures if job_record else 'NA',
            state=job_record.state if job_record else 'NA',
            event_log=job_record.event_log if job_record else [])
        return rest_job if as_model else rest_job.document

    @cached_property
    @valid_action_request
    def details(self):
        """ renders the requested node and its children; with no timeperiod given,
        renders the top-level children of the tree instead """
        rest_node = RestTimetableTreeNode()
        if self.timeperiod:
            time_qualifier = context.process_context[self.process_name].time_qualifier
            self.timeperiod = time_helper.cast_to_time_qualifier(time_qualifier, self.timeperiod)
            node = self.tree.get_node(self.process_name, self.timeperiod)
            rest_node.node = TreeNodeDetails.get_details(node, as_model=True)
            for key, child in node.children.items():
                rest_node.children[key] = TreeNodeDetails.get_details(child)
        else:
            # return list of yearly nodes OR leafs for linear tree
            # limit number of children to return, since a linear tree can holds thousands of nodes
            selected_keys = sorted(self.tree.root.children, reverse=True)
            selected_keys = selected_keys[:settings.settings['mx_children_limit']]
            for key in selected_keys:
                rest_node.children[key] = TreeNodeDetails.get_details(self.tree.root.children[key])
        return rest_node.document
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/tree_node_details.py",
"copies": "1",
"size": "2683",
"license": "bsd-3-clause",
"hash": -8314852819497843000,
"line_mean": 43.7166666667,
"line_max": 115,
"alpha_frac": 0.6649273202,
"autogenerated": false,
"ratio": 3.9630723781388477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010364123357458623,
"num_lines": 60
} |
__author__ = 'Bohdan Mushkevych'
from werkzeug.utils import cached_property
from synergy.mx.base_request_handler import BaseRequestHandler
from synergy.mx.rest_model_factory import create_rest_managed_scheduler_entry, create_rest_freerun_scheduler_entry
from synergy.system.performance_tracker import FootprintCalculator
# Scheduler Entries Details tab
class SchedulerEntries(BaseRequestHandler):
    """ request handler backing the Scheduler Entries Details tab """

    def __init__(self, request, **values):
        super(SchedulerEntries, self).__init__(request, **values)

    @cached_property
    def managed_entries(self):
        """ REST documents of all managed scheduler entries, ordered by process name;
        on error returns whatever rows were assembled before the failure """
        rows = []
        try:
            for key in sorted(self.scheduler.managed_handlers.keys()):
                handler = self.scheduler.managed_handlers[key]
                entry = create_rest_managed_scheduler_entry(handler, self.scheduler.timetable)
                rows.append(entry.document)
        except Exception as e:
            self.logger.error('MX Exception %s' % str(e), exc_info=True)
        return rows

    @cached_property
    def freerun_entries(self):
        """ REST documents of all freerun scheduler entries, ordered by handler key;
        on error returns whatever rows were assembled before the failure """
        rows = []
        try:
            for key in sorted(self.scheduler.freerun_handlers.keys()):
                handler = self.scheduler.freerun_handlers[key]
                entry = create_rest_freerun_scheduler_entry(handler)
                rows.append(entry.document)
        except Exception as e:
            self.logger.error('MX Exception %s' % str(e), exc_info=True)
        return rows

    @cached_property
    def footprint(self):
        """ process footprint document; empty list if the calculation fails """
        try:
            return FootprintCalculator().document
        except Exception as e:
            self.logger.error('MX Exception %s' % str(e), exc_info=True)
            return []
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/scheduler_entries.py",
"copies": "1",
"size": "1892",
"license": "bsd-3-clause",
"hash": -4035476388057499000,
"line_mean": 36.84,
"line_max": 114,
"alpha_frac": 0.6427061311,
"autogenerated": false,
"ratio": 3.925311203319502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5068017334419502,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from werkzeug.utils import cached_property
from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER, PROCESS_MX
from synergy.mx.base_request_handler import BaseRequestHandler
from synergy.mx.rest_model_factory import create_rest_managed_scheduler_entry, create_rest_freerun_scheduler_entry
from synergy.system.performance_tracker import FootprintCalculator
from synergy.system.system_logger import get_log_filename
from synergy.system.utils import tail_file
# Scheduler Entries Details tab
class SchedulerEntries(BaseRequestHandler):
    """ request handler backing the Scheduler Entries Details tab:
    entry listings, footprint and log tail access """

    def __init__(self, request, **values):
        super(SchedulerEntries, self).__init__(request, **values)

    @cached_property
    def managed_entries(self):
        """ REST documents of all managed scheduler entries, ordered by process name;
        on error returns whatever rows were assembled before the failure """
        rows = []
        try:
            for key in sorted(self.scheduler.managed_handlers):
                handler = self.scheduler.managed_handlers[key]
                entry = create_rest_managed_scheduler_entry(handler,
                                                            self.scheduler.timetable,
                                                            self.scheduler.gc)
                rows.append(entry.document)
        except Exception as e:
            self.logger.error(f'MX Exception {e}', exc_info=True)
        return rows

    @cached_property
    def freerun_entries(self):
        """ REST documents of all freerun scheduler entries, ordered by handler key;
        on error returns whatever rows were assembled before the failure """
        rows = []
        try:
            for key in sorted(self.scheduler.freerun_handlers):
                entry = create_rest_freerun_scheduler_entry(self.scheduler.freerun_handlers[key])
                rows.append(entry.document)
        except Exception as e:
            self.logger.error(f'MX Exception {e}', exc_info=True)
        return rows

    @cached_property
    def footprint(self):
        """ process footprint document; empty list if the calculation fails """
        try:
            return FootprintCalculator().document
        except Exception as e:
            self.logger.error(f'MX Exception {e}', exc_info=True)
            return []

    @cached_property
    def reprocess_uows(self):
        """ units of work currently queued for reprocessing by the Garbage Collector """
        return self.scheduler.gc.reprocess_uows

    def tail_scheduler_log(self):
        """ last lines of the Scheduler process log file """
        return tail_file(get_log_filename(PROCESS_SCHEDULER))

    def tail_mx_log(self):
        """ last lines of the MX process log file """
        return tail_file(get_log_filename(PROCESS_MX))
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/scheduler_entries.py",
"copies": "1",
"size": "2523",
"license": "bsd-3-clause",
"hash": 4042877023461293600,
"line_mean": 36.6567164179,
"line_max": 114,
"alpha_frac": 0.627824019,
"autogenerated": false,
"ratio": 4.0368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5164624019,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from workers.abstract_mongo_worker import AbstractMongoWorker
class AbstractVerticalWorker(AbstractMongoWorker):
    """ Class contains common logic for aggregators that work on sequential flow from the DB """

    def __init__(self, process_name):
        super(AbstractVerticalWorker, self).__init__(process_name)

    # ********************** thread-related methods ****************************
    def _process_single_document(self, document):
        """ abstract method that actually processes the document from source collection"""
        pass

    def _process_not_empty_cursor(self, cursor):
        """ drains the DB cursor: ticks the performance counter and delegates every document
        to _process_single_document
        :return: tuple (shall_continue, last_processed_id); shall_continue is False for an empty cursor """
        last_seen_id = None
        for document in cursor:
            last_seen_id = document['_id']
            self.performance_ticker.increment()
            self._process_single_document(document)
        return last_seen_id is not None, last_seen_id
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/abstract_vertical_worker.py",
"copies": "1",
"size": "1062",
"license": "bsd-3-clause",
"hash": -2421151498798266000,
"line_mean": 35.6206896552,
"line_max": 96,
"alpha_frac": 0.631826742,
"autogenerated": false,
"ratio": 4.597402597402597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5729229339402597,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import atexit
from pymongo import MongoClient, ASCENDING, DESCENDING
from bson.objectid import ObjectId
from synergy.conf import settings
from synergy.db.model.unit_of_work import TIMEPERIOD
from odm.document import BaseDocument
QUERY_GET_ALL = {}  # empty filter: matches every document in a collection
if 'ds_factory' not in globals():
    # this block defines module-level variable ds_factory exactly once,
    # guarding against re-execution on repeated module imports
    def factory():
        # `instances` is closure state shared by every get_instance() call,
        # making each DS manager a process-wide singleton per ds_type
        instances = {}
        def get_instance(logger):
            # lazily instantiate the manager matching the configured 'ds_type'
            ds_type = settings.settings['ds_type']
            if ds_type not in instances:
                if ds_type == "mongo_db":
                    instances[ds_type] = MongoDbManager(logger)
                elif ds_type == "hbase":
                    instances[ds_type] = HBaseManager(logger)
                else:
                    raise ValueError(f'Unsupported Data Source type: {ds_type}')
                # let the manager know about interpreter shutdown, so its __del__
                # can distinguish shutdown noise from real errors
                atexit.register(instances[ds_type].interpreter_terminating)
            return instances[ds_type]
        return get_instance
    global ds_factory
    ds_factory = factory()
class BaseManager(object):
    """
    BaseManager holds definition of the Data Source and an interface to read, write, delete and update (CRUD)
    models withing the DataSource
    """

    def __init__(self, logger):
        super(BaseManager, self).__init__()
        self.logger = logger
        # flipped to True by the atexit hook once interpreter shutdown begins
        self.interpreter_is_terminating = False

    def _not_implemented(self, method_name):
        # single shared raiser for every interface stub below
        raise NotImplementedError(f'method {method_name} must be implemented by {self.__class__.__name__}')

    def __str__(self):
        self._not_implemented('__str__')

    def interpreter_terminating(self):
        """ registered with the atexit hook; records that the interpreter shutdown sequence has begun """
        self.interpreter_is_terminating = True

    def is_alive(self):
        """ :return: True if the database server is available. False otherwise """
        self._not_implemented('is_alive')

    def get(self, table_name, primary_key):
        self._not_implemented('get')

    def filter(self, table_name, query):
        self._not_implemented('filter')

    def update(self, table_name, primary_key, instance):
        self._not_implemented('update')

    def delete(self, table_name, primary_key):
        self._not_implemented('delete')

    def highest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        self._not_implemented('highest_primary_key')

    def lowest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        self._not_implemented('lowest_primary_key')

    def cursor_fine(self,
                    table_name,
                    start_id_obj,
                    end_id_obj,
                    iteration,
                    start_timeperiod,
                    end_timeperiod):
        """ method returns DB cursor based on precise boundaries """
        self._not_implemented('cursor_fine')

    def cursor_batch(self,
                     table_name,
                     start_timeperiod,
                     end_timeperiod):
        """ method returns batched DB cursor """
        self._not_implemented('cursor_batch')
class MongoDbManager(BaseManager):
    """ MongoDB-backed implementation of the BaseManager CRUD interface """
    def __init__(self, logger):
        super(MongoDbManager, self).__init__(logger)
        self._db_client = MongoClient(settings.settings['mongodb_host_list'])
        self._db = self._db_client[settings.settings['mongo_db_name']]
    def __del__(self):
        """ closes the MongoClient; tolerant of double-cleanup during interpreter shutdown """
        try:
            self._db_client.close()
        except Exception as e:
            if self.interpreter_is_terminating:
                # during shutdown the MongoClient may already be partially torn down
                self.logger.error(f'MongoDbManager cleanup likely followed MongoClient cleanup: {e}')
            else:
                self.logger.error(f'Exception on closing MongoClient: {e}', exc_info=True)
        finally:
            self._db = None
            self._db_client = None
    def __str__(self):
        return f'MongoDbManager: {settings.settings["mongodb_host_list"]}@{settings.settings["mongo_db_name"]}'
    def is_alive(self):
        # 'ping' is a cheap server round-trip; raises on an unreachable server
        return self._db_client.admin.command('ping')
    def connection(self, table_name):
        """ :return: raw pymongo Collection for the given table/collection name """
        return self._db[table_name]
    def filter(self, table_name, query):
        """ :return: pymongo Cursor over documents matching the query """
        conn = self._db[table_name]
        return conn.find(query)
    def delete(self, table_name, primary_key: dict):
        # deletes at most one document matching the primary_key filter
        conn = self._db[table_name]
        conn.delete_one(primary_key)
    def get(self, table_name, primary_key: dict):
        """ :return: single document matching the primary_key
        :raises LookupError: if no matching document exists """
        conn = self._db[table_name]
        db_entry = conn.find_one(primary_key)
        if db_entry is None:
            msg = f'Instance with ID={primary_key} was not found'
            self.logger.warning(msg)
            raise LookupError(msg)
        return db_entry
    def insert(self, table_name, instance: BaseDocument):
        """ :return: the _id of the newly inserted document """
        conn = self._db[table_name]
        return conn.insert_one(instance.document).inserted_id
    def update(self, table_name, primary_key: dict, instance: BaseDocument):
        """ replaces document identified by the primary_key or creates one if a matching document does not exist"""
        collection = self._db[table_name]
        # work with a copy of the document, as the direct type change of the _id field
        # is later negated by the `BaseDocument.to_json` method
        document = instance.document
        if '_id' in document:
            document['_id'] = ObjectId(document['_id'])
        update_result = collection.replace_one(filter=primary_key, replacement=document, upsert=True)
        if update_result.upserted_id:
            instance['_id'] = update_result.upserted_id
        return update_result.upserted_id
    def highest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        """ :return: single '_id' from [timeperiod_low, timeperiod_high)
        NOTE(review): ascending sort + limit(1) yields the SMALLEST _id, while the method
        name suggests the largest - compare with lowest_primary_key below and confirm
        NOTE(review): Cursor.count() was removed in PyMongo 4.x - assumes PyMongo 3.x; verify
        :raises LookupError: if the timeperiod range holds no records """
        query = {TIMEPERIOD: {'$gte': timeperiod_low, '$lt': timeperiod_high}}
        conn = self._db[table_name]
        asc_search = conn.find(filter=query, projection='_id').sort('_id', ASCENDING).limit(1)
        if asc_search.count() == 0:
            raise LookupError(
                f'No records in timeperiod: [{timeperiod_low} : {timeperiod_high}) in collection {table_name}')
        return asc_search[0]['_id']
    def lowest_primary_key(self, table_name, timeperiod_low, timeperiod_high):
        """ :return: single '_id' from [timeperiod_low, timeperiod_high)
        NOTE(review): descending sort + limit(1) yields the LARGEST _id - see the
        mirrored concern in highest_primary_key above
        :raises LookupError: if the timeperiod range holds no records """
        query = {TIMEPERIOD: {'$gte': timeperiod_low, '$lt': timeperiod_high}}
        conn = self._db[table_name]
        dec_search = conn.find(filter=query, projection='_id').sort('_id', DESCENDING).limit(1)
        if dec_search.count() == 0:
            raise LookupError(
                f'No records in timeperiod: [{timeperiod_low} : {timeperiod_high}) in collection {table_name}')
        return dec_search[0]['_id']
    def cursor_fine(self,
                    table_name,
                    start_id_obj,
                    end_id_obj,
                    iteration,
                    start_timeperiod,
                    end_timeperiod):
        """ method returns DB cursor based on precise boundaries
        :param iteration: 0 makes the lower _id boundary inclusive ($gte);
        subsequent iterations exclude the already-processed start id ($gt) """
        if iteration == 0:
            queue = {'_id': {'$gte': ObjectId(start_id_obj), '$lte': ObjectId(end_id_obj)}}
        else:
            queue = {'_id': {'$gt': ObjectId(start_id_obj), '$lte': ObjectId(end_id_obj)}}
        if start_timeperiod is not None and end_timeperiod is not None:
            # remove all accident objects that may be in [start_id_obj : end_id_obj] range
            queue[TIMEPERIOD] = {'$gte': start_timeperiod, '$lt': end_timeperiod}
        conn = self._db[table_name]
        batch_size = settings.settings['batch_size']
        return conn.find(queue).sort('_id', ASCENDING).limit(batch_size)
    def cursor_batch(self, table_name, start_timeperiod, end_timeperiod):
        """ method returns batched DB cursor over all documents in [start_timeperiod, end_timeperiod) """
        assert start_timeperiod is not None and end_timeperiod is not None
        conn = self._db[table_name]
        batch_size = settings.settings['batch_size']
        queue = {TIMEPERIOD: {'$gte': start_timeperiod, '$lt': end_timeperiod}}
        return conn.find(queue).batch_size(batch_size)
class HBaseManager(BaseManager):
    # placeholder: HBase support is not implemented; inherits the raising stubs from BaseManager
    pass
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/manager/ds_manager.py",
"copies": "1",
"size": "8459",
"license": "bsd-3-clause",
"hash": -3122381683259775000,
"line_mean": 39.4736842105,
"line_max": 115,
"alpha_frac": 0.6149663081,
"autogenerated": false,
"ratio": 4.0396370582617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51546033663617,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import calendar
from pymongo.errors import AutoReconnect
from db.model.single_session import SingleSession
from db.model.raw_data import *
from db.dao.single_session_dao import SingleSessionDao
from synergy.system import time_helper
from synergy.system.performance_tracker import SessionPerformanceTracker
from synergy.system.time_qualifier import QUALIFIER_REAL_TIME, QUALIFIER_HOURLY
from synergy.workers.abstract_mq_worker import AbstractMqWorker
class SingleSessionWorker(AbstractMqWorker):
    """ illustration suite worker:
        - reads stream of messages from the RabbitMQ and dumps them into the MongoDB """
    def __init__(self, process_name):
        super(SingleSessionWorker, self).__init__(process_name)
        self.ss_dao = SingleSessionDao(self.logger)
    # ********************** abstract methods ****************************
    def _init_performance_tracker(self, logger):
        # session-specific tracker exposes separate `insert` and `update` success counters
        self.performance_tracker = SessionPerformanceTracker(logger)
        self.performance_tracker.start()
    def _mq_callback(self, message):
        """ wraps call of abstract method with try/except
            in case exception breaks the abstract method, this method:
            - catches the exception
            - logs the exception
            - marks unit of work as INVALID"""
        try:
            raw_data = RawData.from_json(message.body)
            try:
                # happy path: the session already exists - update it in place
                session = self.ss_dao.find_by_session_id(raw_data.domain_name, raw_data.session_id)
                # update the click_xxx info
                session = self.update_session_body(raw_data, session)
                epoch_current = calendar.timegm(raw_data.timestamp.replace(tzinfo=None).utctimetuple())
                epoch_start = time_helper.session_to_epoch(session.timeperiod)
                # NOTE(review): timegm returns seconds, so dividing by 1000 looks like a
                # ms/sec mix-up - confirm the intended unit of total_duration
                session.browsing_history.total_duration = (epoch_current - epoch_start) / 1000
                index = session.browsing_history.number_of_entries
                self.add_entry(session, index, raw_data)
                self.performance_tracker.update.increment_success()
            except LookupError:
                # insert the record
                session = SingleSession()
                # input data constraints - both session_id and user_id must be present in MQ message
                session.key = (raw_data.domain_name,
                               time_helper.datetime_to_synergy(QUALIFIER_HOURLY, raw_data.timestamp),
                               raw_data.session_id)
                session.ip = raw_data.ip
                session.total_duration = 0
                session = self.update_session_body(raw_data, session)
                self.add_entry(session, 0, raw_data)
                self.performance_tracker.insert.increment_success()
            self.ss_dao.update(session)
            self.consumer.acknowledge(message.delivery_tag)
        except AutoReconnect as e:
            # transient DB outage: requeue the message so it is retried later
            self.logger.error(f'MongoDB connection error: {e}\nRe-queueing message & exiting the worker')
            self.consumer.reject(message.delivery_tag)
            raise e
        except (KeyError, IndexError) as e:
            # malformed payload: drop the message, since a retry would fail identically
            self.logger.error(f'Error is considered Unrecoverable: {e}\nCancelled message: {message.body}')
            self.consumer.cancel(message.delivery_tag)
        except Exception as e:
            self.logger.error(f'Error is considered Recoverable: {e}\nRe-queueing message: {message.body}')
            self.consumer.reject(message.delivery_tag)
    def update_session_body(self, raw_data, session):
        """ copies user-profile attributes present in the raw event onto the session;
        increments the pageview counter for page-view events
        :return: the mutated session """
        if raw_data.browser is not None:
            session.user_profile.browser = raw_data.browser
        if raw_data.screen_resolution[0] is not None and raw_data.screen_resolution[1] is not None:
            session.user_profile.screen_resolution = raw_data.screen_resolution
        if raw_data.os is not None:
            session.user_profile.os = raw_data.os
        if raw_data.language is not None:
            session.user_profile.language = raw_data.language
        if raw_data.country is not None:
            session.user_profile.country = raw_data.country
        if raw_data.is_page_view:
            session.browsing_history.number_of_pageviews += 1
        return session
    def add_entry(self, session, index, raw_data):
        """ records the timestamp of browsing-history entry number `index` and bumps number_of_entries """
        session.browsing_history.number_of_entries = index + 1
        session.browsing_history.set_entry_timestamp(
            index, time_helper.datetime_to_synergy(QUALIFIER_REAL_TIME, raw_data.timestamp))
if __name__ == '__main__':
    # entry point: run this worker standalone, consuming the session queue for process 00
    from constants import PROCESS_SESSION_WORKER_00
    source = SingleSessionWorker(PROCESS_SESSION_WORKER_00)
    source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "workers/single_session_worker.py",
"copies": "1",
"size": "4670",
"license": "bsd-3-clause",
"hash": 8644365975636366000,
"line_mean": 43.9038461538,
"line_max": 107,
"alpha_frac": 0.644111349,
"autogenerated": false,
"ratio": 4.092900964066608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5237012313066608,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import calendar
import collections
import collections.abc

from synergy.system import time_helper
from synergy.system.time_qualifier import *
class TimeperiodDict(collections.abc.MutableMapping):
    """ module represents a _smart_ dictionary, where key is a timeperiod
        TimeperiodDict allows timeperiod grouping, such as *every 3 hours* or *quarterly* or *weekly*

        BUGFIX: base class is now collections.abc.MutableMapping - the
        collections.MutableMapping alias was deprecated since Python 3.3
        and removed in Python 3.10 """

    def __init__(self, time_qualifier, time_grouping, *args, **kwargs):
        """
        :param time_qualifier: one of QUALIFIER_HOURLY/DAILY/MONTHLY/YEARLY
        :param time_grouping: grouping factor, validated against the qualifier's upper boundary
        """
        assert time_qualifier in [QUALIFIER_HOURLY, QUALIFIER_DAILY, QUALIFIER_MONTHLY, QUALIFIER_YEARLY], \
            f'time qualifier {time_qualifier} is not supported by TimeperiodDict'

        super(TimeperiodDict, self).__init__()
        self.time_grouping = time_grouping
        self.time_qualifier = time_qualifier

        # validation section: grouping factor must fit into the qualifier's range
        upper_boundary = self._get_stem_upper_boundary()
        assert 1 <= time_grouping <= upper_boundary

        # format: {grouped_timeperiod: value}
        self.data = dict()
        self.update(dict(*args, **kwargs))

    def _get_stem_upper_boundary(self, timeperiod=None):
        """
        :param timeperiod: optional parameter, applicable for QUALIFIER_DAILY qualifier only
        :return: upper boundary for dictionary's time_qualifier
        :raise ValueError: if the time qualifier is not recognized
        """
        if self.time_qualifier == QUALIFIER_HOURLY:
            upper_boundary = 23
        elif self.time_qualifier == QUALIFIER_DAILY:
            if timeperiod:
                # DAILY upper boundary is month-dependent
                # i.e. it is 28 for Feb 2015; and 31 for Mar 2015
                year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)
                monthrange_tuple = calendar.monthrange(int(year), int(month))
                upper_boundary = monthrange_tuple[1]
            else:
                # without a concrete timeperiod fall back to the smallest month length
                upper_boundary = 28
        elif self.time_qualifier == QUALIFIER_MONTHLY:
            upper_boundary = 12
        elif self.time_qualifier == QUALIFIER_YEARLY:
            upper_boundary = 1
        else:
            raise ValueError(f'unknown time qualifier: {self.time_qualifier}')
        return upper_boundary

    def _do_stem_grouping(self, timeperiod, stem):
        """
        method performs *timeperiod's stem grouping*.
        :param timeperiod: timeperiod to augment
        :param stem: inclusive lower boundary for the timeperiod's corresponding token. For instance:
                     - for 2015010520 and QUALIFIER_MONTHLY, stem would be 01
                     - for 2015010520 and QUALIFIER_DAILY, stem would be 05
                     - for 2015010520 and QUALIFIER_HOURLY, stem would be 20
        :return: grouped stem. For instance:
                     - for 2015010520 and QUALIFIER_MONTHLY and time_grouping=3, stem would be 03
                     - for 2015010520 and QUALIFIER_DAILY and time_grouping=2, stem would be 06
                     - for 2015010520 and QUALIFIER_HOURLY and time_grouping=8, stem would be 23
        """
        # exclude 00 from lower boundary, unless the grouping == 1
        lower_boundary = 0 if self.time_grouping == 1 else 1
        upper_boundary = self._get_stem_upper_boundary(timeperiod)
        for i in range(lower_boundary, upper_boundary):
            candidate = i * self.time_grouping
            if stem <= candidate <= upper_boundary:
                return candidate
        # no multiple of time_grouping fits: snap to the qualifier's upper boundary
        return upper_boundary

    def _translate_timeperiod(self, timeperiod):
        """ method translates given timeperiod to the grouped timeperiod """
        if self.time_grouping == 1:
            # no translation is performed for identity grouping
            return timeperiod

        # step 1: tokenize timeperiod into: (year, month, day, hour)
        # for instance: daily 2015031400 -> ('2015', '03', '14', '00')
        year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)

        # step 2: perform grouping on the stem
        # ex1: stem of 14 with grouping 20 -> 20
        # ex2: stem of 21 with grouping 20 -> 23
        if self.time_qualifier == QUALIFIER_HOURLY:
            stem = self._do_stem_grouping(timeperiod, int(hour))
            result = '{0}{1}{2}{3:02d}'.format(year, month, day, stem)
        elif self.time_qualifier == QUALIFIER_DAILY:
            stem = self._do_stem_grouping(timeperiod, int(day))
            result = '{0}{1}{2:02d}{3}'.format(year, month, stem, hour)
        else:  # self.time_qualifier == QUALIFIER_MONTHLY:
            stem = self._do_stem_grouping(timeperiod, int(month))
            result = '{0}{1:02d}{2}{3}'.format(year, stem, day, hour)
        return result

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        # lookups are performed against the *grouped* timeperiod
        grouped_timeperiod = self._translate_timeperiod(key)
        return self.data.__getitem__(grouped_timeperiod)

    def __setitem__(self, key, value):
        grouped_timeperiod = self._translate_timeperiod(key)
        self.data.__setitem__(grouped_timeperiod, value)

    def __delitem__(self, key):
        # NOTE: deletion expects an already-grouped key; no translation is applied
        self.data.__delitem__(key)

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        grouped_timeperiod = self._translate_timeperiod(key)
        return self.data.__contains__(grouped_timeperiod)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/timeperiod_dict.py",
"copies": "1",
"size": "5218",
"license": "bsd-3-clause",
"hash": -8424773339265604000,
"line_mean": 41.7704918033,
"line_max": 108,
"alpha_frac": 0.6234189345,
"autogenerated": false,
"ratio": 3.989296636085627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112715570585626,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import collections
from datetime import datetime
from threading import RLock
from synergy.db.dao.job_dao import JobDao
from synergy.db.model.job import Job
from synergy.conf import context
from synergy.conf import settings
from synergy.system import time_helper, utils
from synergy.system.time_qualifier import *
from synergy.system.decorator import thread_safe
from synergy.scheduler.scheduler_constants import COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY, \
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY
from synergy.scheduler.tree import MultiLevelTree
from synergy.scheduler.state_machine_recomputing import StateMachineRecomputing
from synergy.scheduler.state_machine_continuous import StateMachineContinuous
from synergy.scheduler.state_machine_discrete import StateMachineDiscrete
from synergy.scheduler.state_machine_freerun import StateMachineFreerun
class Timetable(object):
    """ Timetable holds all known process trees, where every node presents a timeperiod-driven job"""

    def __init__(self, logger):
        self.lock = RLock()
        self.logger = logger
        self.job_dao = JobDao(self.logger)

        # state_machines must be constructed before the trees
        self.state_machines = self._construct_state_machines()

        # self.trees contain all of the trees and manages much of their life cycle
        # remember to enlist here all trees the system is working with
        self.trees = self._construct_trees_from_context()

        self._register_dependencies()
        # populate trees from persisted job records, extend them to utc_now, verify integrity
        self.load_tree()
        self.build_trees()
        self.validate()

    def _construct_state_machines(self):
        """ :return: dict in format <state_machine_common_name: instance_of_the_state_machine> """
        state_machines = dict()
        for state_machine in [StateMachineRecomputing(self.logger, self),
                              StateMachineContinuous(self.logger, self),
                              StateMachineDiscrete(self.logger, self),
                              StateMachineFreerun(self.logger)]:
            state_machines[state_machine.name] = state_machine
        return state_machines

    def _construct_trees_from_context(self):
        """ builds a MultiLevelTree for every entry of the timetable context
        :return: dict in format <tree_name: instance_of_MultiLevelTree> """
        trees = dict()
        for tree_name, context_entry in context.timetable_context.items():
            tree = MultiLevelTree(process_names=context_entry.enclosed_processes,
                                  timetable=self,
                                  tree_name=tree_name,
                                  mx_name=context_entry.mx_name,
                                  mx_page=context_entry.mx_page)
            trees[tree_name] = tree
        return trees

    def _register_dependencies(self):
        """ register dependencies between trees"""
        for tree_name, context_entry in context.timetable_context.items():
            tree = self.trees[tree_name]
            assert isinstance(tree, MultiLevelTree)
            for dependent_on in context_entry.dependent_on:
                dependent_on_tree = self.trees[dependent_on]
                assert isinstance(dependent_on_tree, MultiLevelTree)
                tree.register_dependent_on(dependent_on_tree)

    # *** node manipulation methods ***
    def _find_dependant_trees(self, tree_obj):
        """ returns list of trees that are dependent_on given tree_obj """
        dependant_trees = []
        for tree_name, tree in self.trees.items():
            if tree_obj in tree.dependent_on:
                dependant_trees.append(tree)
        return dependant_trees

    def _find_dependant_tree_nodes(self, node_a):
        """ :return: set of nodes from dependant trees that correspond
            to node_a's (process, timeperiod) coordinates """
        dependant_nodes = set()
        for tree_b in self._find_dependant_trees(node_a.tree):
            node_b = node_a.find_counterpart_in(tree_b)
            if node_b is None:
                # the dependant tree has no counterpart for this timeperiod
                continue
            dependant_nodes.add(node_b)
        return dependant_nodes

    @thread_safe
    def reprocess_tree_node(self, tree_node, tx_context=None):
        """ method reprocesses the node and all its dependants and parent nodes """
        if not tx_context:
            # create transaction context if one was not provided
            # format: {process_name: {timeperiod: AbstractTreeNode} }
            tx_context = collections.defaultdict(dict)

        if tree_node.parent is None:
            # do not process 'root' - the only node that has None as 'parent'
            return tx_context

        if tree_node.timeperiod in tx_context[tree_node.process_name]:
            # the node has already been marked for re-processing
            return tx_context

        if tree_node.job_record.is_embryo:
            # the node does not require re-processing
            pass
        else:
            # delegate the job-state transition to the process' state machine
            state_machine_name = context.process_context[tree_node.process_name].state_machine_name
            state_machine = self.state_machines[state_machine_name]
            state_machine.reprocess_job(tree_node.job_record)
        tx_context[tree_node.process_name][tree_node.timeperiod] = tree_node

        # recursion: walk up to the parent, then fan out to dependant trees
        self.reprocess_tree_node(tree_node.parent, tx_context)

        dependant_nodes = self._find_dependant_tree_nodes(tree_node)
        for node in dependant_nodes:
            self.reprocess_tree_node(node, tx_context)

        return tx_context

    @thread_safe
    def skip_tree_node(self, tree_node, tx_context=None):
        """ method skips the node and all its dependants and child nodes """
        if not tx_context:
            # create transaction context if one was not provided
            # format: {process_name: {timeperiod: AbstractTreeNode} }
            tx_context = collections.defaultdict(dict)

        if tree_node.timeperiod in tx_context[tree_node.process_name]:
            # the node has already been marked for skipping
            return tx_context

        if tree_node.job_record.is_finished:
            # the node is finished and does not require skipping
            pass
        else:
            state_machine_name = context.process_context[tree_node.process_name].state_machine_name
            state_machine = self.state_machines[state_machine_name]
            state_machine.skip_job(tree_node.job_record)
        tx_context[tree_node.process_name][tree_node.timeperiod] = tree_node

        # recursion: walk down to children, then fan out to dependant trees
        for timeperiod, node in tree_node.children.items():
            self.skip_tree_node(node, tx_context)

        dependant_nodes = self._find_dependant_tree_nodes(tree_node)
        for node in dependant_nodes:
            self.skip_tree_node(node, tx_context)

        return tx_context

    @thread_safe
    def assign_job_record(self, tree_node):
        """ - looks for an existing job record in the DB, and if not found
            - creates a job record in STATE_EMBRYO and bind it to the given tree node """
        try:
            job_record = self.job_dao.get_one(tree_node.process_name, tree_node.timeperiod)
        except LookupError:
            # no job record exists yet - let the state machine create one
            state_machine_name = context.process_context[tree_node.process_name].state_machine_name
            state_machine = self.state_machines[state_machine_name]
            job_record = state_machine.create_job(tree_node.process_name, tree_node.timeperiod)
        tree_node.job_record = job_record

    # *** Tree-manipulation methods ***
    @thread_safe
    def get_tree(self, process_name):
        """ return tree that is managing time-periods for given process;
            implicitly returns None when no tree is registered for the process """
        for tree_name, tree in self.trees.items():
            if process_name in tree:
                return tree

    @thread_safe
    def _build_tree_by_level(self, time_qualifier, collection_name, since):
        """ method iterated thru all documents in all job collections and builds a tree of known system state"""
        invalid_tree_records = dict()
        invalid_tq_records = dict()

        try:
            job_records = self.job_dao.get_all(collection_name, since)
            for job_record in job_records:
                tree = self.get_tree(job_record.process_name)
                if tree is None:
                    # no tree is managing this process - count and skip the record
                    utils.increment_family_property(job_record.process_name, invalid_tree_records)
                    continue

                job_time_qualifier = context.process_context[job_record.process_name].time_qualifier
                if time_qualifier != job_time_qualifier:
                    # record was stored in a collection of a different time granularity
                    utils.increment_family_property(job_record.process_name, invalid_tq_records)
                    continue

                tree.update_node(job_record)
        except LookupError:
            self.logger.warning(f'No job records in {collection_name}.')

        for name, counter in invalid_tree_records.items():
            self.logger.warning(f'Skipping {counter} job records for {name} since no tree is handling it.')

        for name, counter in invalid_tq_records.items():
            self.logger.warning(f'Skipping {counter} job records for {name} since the process '
                                f'has different time qualifier.')

    @thread_safe
    def load_tree(self):
        """ method iterates thru all objects older than synergy_start_timeperiod parameter in job collections
            and loads them into this timetable"""
        timeperiod = settings.settings['synergy_start_timeperiod']
        yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
        monthly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
        daily_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_DAILY, timeperiod)
        hourly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_HOURLY, timeperiod)

        self._build_tree_by_level(QUALIFIER_HOURLY, COLLECTION_JOB_HOURLY, since=hourly_timeperiod)
        self._build_tree_by_level(QUALIFIER_DAILY, COLLECTION_JOB_DAILY, since=daily_timeperiod)
        self._build_tree_by_level(QUALIFIER_MONTHLY, COLLECTION_JOB_MONTHLY, since=monthly_timeperiod)
        self._build_tree_by_level(QUALIFIER_YEARLY, COLLECTION_JOB_YEARLY, since=yearly_timeperiod)

    @thread_safe
    def build_trees(self):
        """ method iterates thru all trees and ensures that all time-period nodes are created up till <utc_now>"""
        for tree_name, tree in self.trees.items():
            tree.build_tree()

    @thread_safe
    def validate(self):
        """validates that none of nodes in tree is improperly finalized and that every node has job_record"""
        for tree_name, tree in self.trees.items():
            tree.validate()

    @thread_safe
    def dependent_on_summary(self, job_record):
        """ :return instance of <tree_node.DependencySummary> """
        assert isinstance(job_record, Job)
        tree = self.get_tree(job_record.process_name)
        node = tree.get_node(job_record.process_name, job_record.timeperiod)
        return node.dependent_on_summary()

    # *** Job manipulation methods ***
    @thread_safe
    def skip_if_needed(self, job_record):
        """ method is called from abstract_state_machine.manage_job to notify about job's failed processing
            if should_skip_node returns True - the node's job_record is transferred to STATE_SKIPPED """
        tree = self.get_tree(job_record.process_name)
        node = tree.get_node(job_record.process_name, job_record.timeperiod)

        if tree.should_skip_tree_node(node):
            self.skip_tree_node(node)

    @thread_safe
    def get_next_job_record(self, process_name):
        """ :returns: the next job record to work on for the given process"""
        tree = self.get_tree(process_name)
        node = tree.get_next_node(process_name)

        if node.job_record is None:
            # lazily create a job record for the freshly-built node
            self.assign_job_record(node)
        return node.job_record

    @thread_safe
    def is_job_record_finalizable(self, job_record):
        """ :return: True, if the node and all its children are in [STATE_PROCESSED, STATE_SKIPPED, STATE_NOOP] """
        assert isinstance(job_record, Job)
        tree = self.get_tree(job_record.process_name)
        node = tree.get_node(job_record.process_name, job_record.timeperiod)
        return node.is_finalizable()

    @thread_safe
    def add_log_entry(self, process_name, timeperiod, msg):
        """ adds a log entry to the job{process_name@timeperiod}.event_log """
        tree = self.get_tree(process_name)
        node = tree.get_node(process_name, timeperiod)
        node.add_log_entry([datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), msg])
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/timetable.py",
"copies": "1",
"size": "12412",
"license": "bsd-3-clause",
"hash": -6639105178559903000,
"line_mean": 44.4652014652,
"line_max": 115,
"alpha_frac": 0.6475990976,
"autogenerated": false,
"ratio": 3.9478371501272265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012343023170080358,
"num_lines": 273
} |
__author__ = 'Bohdan Mushkevych'
import collections
from threading import Lock
from datetime import datetime, timedelta
from synergy.conf import settings
from synergy.system.system_logger import get_logger
from synergy.system.decorator import thread_safe
from synergy.system.priority_queue import PriorityEntry, PriorityQueue, compute_release_time
from synergy.system.repeat_timer import RepeatTimer
from synergy.system.mq_transmitter import MqTransmitter
from synergy.scheduler.scheduler_constants import PROCESS_GC
from synergy.scheduler.thread_handler import ManagedThreadHandler
from synergy.db.model import unit_of_work
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
class GarbageCollector(object):
    """ GC is triggered directly by Synergy Scheduler.
        It scans for invalid or stalled unit_of_work and re-triggers them.
        GC is vital for the health of the system.
        Deployment with no GC is considered invalid """

    def __init__(self, scheduler):
        """ :param scheduler: Synergy Scheduler instance providing managed_handlers and timetable """
        self.logger = get_logger(PROCESS_GC, append_to_console=False, redirect_stdstream=False)
        self.managed_handlers = scheduler.managed_handlers
        self.mq_transmitter = MqTransmitter(self.logger)
        self.timetable = scheduler.timetable

        self.lock = Lock()
        self.uow_dao = UnitOfWorkDao(self.logger)
        # format: {process_name: PriorityQueue of PriorityEntry}
        self.reprocess_uows = collections.defaultdict(PriorityQueue)
        self.timer = RepeatTimer(settings.settings['gc_run_interval'], self._run)

    @thread_safe
    def scan_uow_candidates(self):
        """ method performs two actions:
            - enlist stale or invalid units of work into reprocessing queue
            - cancel UOWs that are older than 2 days and have been submitted more than 1 hour ago """
        try:
            since = settings.settings['synergy_start_timeperiod']
            uow_list = self.uow_dao.get_reprocessing_candidates(since)
        except LookupError as e:
            self.logger.info(f'flow: no UOW candidates found for reprocessing: {e}')
            return

        for uow in uow_list:
            try:
                if uow.process_name not in self.managed_handlers:
                    self.logger.debug('process {0} is not known to the Synergy Scheduler. Skipping its UOW.'
                                      .format(uow.process_name))
                    continue

                thread_handler = self.managed_handlers[uow.process_name]
                assert isinstance(thread_handler, ManagedThreadHandler)

                if not thread_handler.process_entry.is_on:
                    self.logger.debug(f'process {uow.process_name} is inactive. Skipping its UOW.')
                    continue

                entry = PriorityEntry(uow)
                if entry in self.reprocess_uows[uow.process_name]:
                    # given UOW is already registered in the reprocessing queue
                    continue

                # ASSUMPTION: UOW is re-created by a state machine during reprocessing
                # thus - any UOW older 2 days could be marked as STATE_CANCELED
                # NOTE: canceling UOW is not identical to cancelling a Job.
                # The Job lifecycle is managed by:
                # - synergy.scheduler.abstract_state_machine.AbstractStateMachine.notify
                # - synergy.scheduler.abstract_state_machine.AbstractStateMachine.manage_job
                # - synergy.scheduler.timetable.Timetable.validate (via GarbageCollector._run)
                if datetime.utcnow() - uow.created_at > timedelta(hours=settings.settings['gc_life_support_hours']):
                    self._cancel_uow(uow)
                    continue

                # if the UOW has been idle for more than 1 hour - resubmit it
                if datetime.utcnow() - uow.submitted_at > timedelta(hours=settings.settings['gc_resubmit_after_hours'])\
                        or uow.is_invalid:
                    # enlist the UOW into the reprocessing queue
                    self.reprocess_uows[uow.process_name].put(entry)

            except Exception as e:
                self.logger.error(f'flow exception: {e}', exc_info=True)

    def _flush_queue(self, q: PriorityQueue, ignore_priority=False):
        """
        :param q: PriorityQueue instance holding GarbageCollector entries
        :param ignore_priority: If True - all GarbageCollector entries should be resubmitted
                If False - only those entries whose waiting time has expired will be resubmitted
        """
        current_timestamp = compute_release_time(lag_in_minutes=0)

        for _ in range(len(q)):
            entry = q.pop()
            assert isinstance(entry, PriorityEntry)

            if ignore_priority or entry.release_time < current_timestamp:
                self._resubmit_uow(entry.entry)
            else:
                # queue is priority-ordered: the first not-yet-due entry means
                # all remaining entries are not due either - put it back and stop
                q.put(entry)
                break

    @thread_safe
    def flush(self, ignore_priority=False):
        """ method iterates over each reprocessing queues and re-submits UOW whose waiting time has expired """
        for process_name, q in self.reprocess_uows.items():
            self._flush_queue(q, ignore_priority)

    @thread_safe
    def validate(self):
        """ method iterates over the reprocessing queue and synchronizes state of every UOW with the DB
            should it change via the MX to STATE_CANCELED - remove the UOW from the queue """
        for process_name, q in self.reprocess_uows.items():
            if not q:
                continue

            invalid_entries = list()
            for entry in q.queue:
                assert isinstance(entry, PriorityEntry)
                uow = self.uow_dao.get_one(entry.entry.db_id)
                if uow.is_canceled:
                    invalid_entries.append(entry)
                    # BUGFIX: proceed to the next entry; previously an entry whose UOW
                    # was canceled AND whose process was inactive got appended twice,
                    # and the second q.queue.remove(entry) below raised ValueError
                    continue

                thread_handler = self.managed_handlers[uow.process_name]
                assert isinstance(thread_handler, ManagedThreadHandler)
                if not thread_handler.process_entry.is_on:
                    invalid_entries.append(entry)

            for entry in invalid_entries:
                q.queue.remove(entry)

        self.logger.info('reprocessing queue validated')

    @thread_safe
    def flush_one(self, process_name, ignore_priority=False):
        """ method iterates over the reprocessing queue for the given process
            and re-submits UOW whose waiting time has expired """
        q = self.reprocess_uows[process_name]
        self._flush_queue(q, ignore_priority)

    def _resubmit_uow(self, uow):
        # re-read UOW from the DB, in case it was STATE_CANCELLED by MX
        uow = self.uow_dao.get_one(uow.db_id)

        if uow.is_canceled:
            self.logger.info('suppressed re-submission of UOW {0} for {1}@{2} in {3};'
                             .format(uow.db_id, uow.process_name, uow.timeperiod, uow.state))
            return

        thread_handler = self.managed_handlers[uow.process_name]
        assert isinstance(thread_handler, ManagedThreadHandler)
        if not thread_handler.process_entry.is_on:
            self.logger.debug('suppressed re-submission of UOW {0} for {1}@{2} in {3}, since the process is inactive.'
                              .format(uow.db_id, uow.process_name, uow.timeperiod, uow.state))
            return

        if uow.is_invalid:
            uow.number_of_retries += 1

        uow.state = unit_of_work.STATE_REQUESTED
        uow.submitted_at = datetime.utcnow()
        self.uow_dao.update(uow)
        self.mq_transmitter.publish_managed_uow(uow)
        self.logger.info('re-submitted UOW {0} for {1}@{2}; attempt {3}'
                         .format(uow.db_id, uow.process_name, uow.timeperiod, uow.number_of_retries))

    def _cancel_uow(self, uow):
        # mark the UOW canceled in the DB and notify listeners over MQ
        uow.state = unit_of_work.STATE_CANCELED
        self.uow_dao.update(uow)
        self.mq_transmitter.publish_uow_status(uow)
        self.logger.info('canceled UOW {0} for {1}@{2}; attempt {3}; created at {4}'
                         .format(uow.db_id, uow.process_name, uow.timeperiod, uow.number_of_retries, uow.created_at))

    def _run(self):
        """ timer callback: full GC cycle - validate queues, scan candidates,
            repost expired entries and perform timetable housekeeping """
        try:
            self.logger.info('run {')

            self.logger.debug('step 1: validate existing queue entries')
            self.validate()

            self.logger.debug('step 2: scan reprocessing candidates')
            self.scan_uow_candidates()

            self.logger.debug('step 3: repost after timeout')
            self.flush()

            self.logger.debug('step 4: timetable housekeeping')
            self.timetable.build_trees()

            self.logger.debug('step 5: timetable validation')
            self.timetable.validate()
        except Exception as e:
            self.logger.error(f'GC run exception: {e}')
        finally:
            self.logger.info('}')

    def start(self):
        self.timer.start()

    def stop(self):
        self.timer.cancel()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/garbage_collector.py",
"copies": "1",
"size": "8902",
"license": "bsd-3-clause",
"hash": 2949330332909956600,
"line_mean": 43.0693069307,
"line_max": 120,
"alpha_frac": 0.6194113682,
"autogenerated": false,
"ratio": 4.02987777274785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149289140947849,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import collections
from synergy.system import time_helper
from synergy.conf import context
from synergy.mx.base_request_handler import valid_action_request
from synergy.mx.abstract_action_handler import AbstractActionHandler
from synergy.mx.tree_node_details import TreeNodeDetails
class ManagedActionHandler(AbstractActionHandler):
    """ MX action handler for processes managed by the Timetable
        (reprocess/skip a tree node, fetch its event log) """

    def __init__(self, request, **values):
        super(ManagedActionHandler, self).__init__(request, **values)
        self.process_name = self.request_arguments.get('process_name')
        self.timeperiod = self.request_arguments.get('timeperiod')
        # idiomatic bool(...) instead of `True if ... else False`
        self.is_request_valid = bool(self.process_name and self.timeperiod)

        if self.is_request_valid:
            self.process_name = self.process_name.strip()
            self.timeperiod = self.timeperiod.strip()

    def _get_tree_node(self):
        """ :return: tree node for (self.process_name, self.timeperiod);
            self.timeperiod is normalized to the process' time qualifier as a side effect
            :raise UserWarning: if no Timetable tree manages the process """
        tree = self.scheduler.timetable.get_tree(self.process_name)
        if tree is None:
            raise UserWarning(f'No Timetable tree is registered for process {self.process_name}')

        time_qualifier = context.process_context[self.process_name].time_qualifier
        self.timeperiod = time_helper.cast_to_time_qualifier(time_qualifier, self.timeperiod)
        node = tree.get_node(self.process_name, self.timeperiod)
        return node

    @AbstractActionHandler.thread_handler.getter
    def thread_handler(self):
        handler_key = self.process_name
        return self.scheduler.managed_handlers[handler_key]

    @AbstractActionHandler.process_entry.getter
    def process_entry(self):
        return self.thread_handler.process_entry

    @AbstractActionHandler.uow_id.getter
    def uow_id(self):
        node = self._get_tree_node()
        return None if not node.job_record else node.job_record.related_unit_of_work

    def _apply_node_action(self, action_name, timetable_action):
        """ shared implementation for reprocess_tree_node/skip_tree_node:
            logs the request, applies the Timetable action to the node and renders
            the resulting transaction context as {process_name: {timeperiod: details}}
        :param action_name: human-readable action name for logging, e.g. 'REPROCESS' or 'SKIP'
        :param timetable_action: bound Timetable method accepting the tree node """
        node = self._get_tree_node()

        msg = f'MX: requesting {action_name} for {self.process_name} in timeperiod {self.timeperiod}'
        self.scheduler.timetable.add_log_entry(self.process_name, self.timeperiod, msg)
        self.logger.info(msg + ' {')

        tx_context = timetable_action(node)
        # let the GC drop any queued UOW that the action just canceled
        self.scheduler.gc.validate()

        resp = collections.defaultdict(dict)
        for process_name, nodes_context in tx_context.items():
            for timeperiod, node in nodes_context.items():
                resp[process_name][timeperiod] = TreeNodeDetails.get_details(node)

        self.logger.info('MX }')
        return resp

    @valid_action_request
    def reprocess_tree_node(self):
        return self._apply_node_action('REPROCESS', self.scheduler.timetable.reprocess_tree_node)

    @valid_action_request
    def skip_tree_node(self):
        return self._apply_node_action('SKIP', self.scheduler.timetable.skip_tree_node)

    @valid_action_request
    def get_event_log(self):
        node = self._get_tree_node()
        return {'event_log': [] if not node.job_record else node.job_record.event_log}
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/managed_action_handler.py",
"copies": "1",
"size": "3474",
"license": "bsd-3-clause",
"hash": -5368164516215626000,
"line_mean": 39.3953488372,
"line_max": 97,
"alpha_frac": 0.6784686241,
"autogenerated": false,
"ratio": 3.7475728155339807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.492604143963398,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed
from flow.conf import flows
from flow.flow_constants import *
from flow.db.model import step, flow
from flow.core.abstract_cluster import AbstractCluster
from flow.core.emr_cluster import EmrCluster
from flow.core.flow_graph import FlowGraph
from flow.core.execution_context import ExecutionContext
from flow.core.ephemeral_cluster import EphemeralCluster
def launch_cluster(logger, context, cluster_name):
    """ Instantiate and launch a computing cluster of the configured type.

    :param logger: kept for call-site compatibility; not used in this function
    :param context: execution context providing settings['cluster_type']
    :param cluster_name: display name for the new cluster
    :return: the launched cluster instance
    """
    cluster_klass = EmrCluster if context.settings['cluster_type'] == 'emr' else EphemeralCluster
    cluster = cluster_klass(cluster_name, context)
    cluster.launch()
    return cluster
def parallel_flow_execution(logger, context, cluster, flow_graph_obj):
    """ function fetches next available GraphNode/Step
        from the FlowGraph and executes it on the given cluster

    :param logger: logger to report step failures to
    :param context: ExecutionContext shared by all steps
    :param cluster: AbstractCluster instance to run the steps on
    :param flow_graph_obj: FlowGraph yielding unblocked step names
    :return: True once all steps yielded by the graph have completed
    :raise Exception: re-raises any exception thrown by a step
    """
    assert isinstance(context, ExecutionContext)
    assert isinstance(cluster, AbstractCluster)
    assert isinstance(flow_graph_obj, FlowGraph)

    for step_name in flow_graph_obj:
        try:
            graph_node = flow_graph_obj[step_name]
            graph_node.set_context(context)
            graph_node.run(cluster)
        except Exception:
            logger.error('Exception during Step {0}'.format(step_name), exc_info=True)
            raise

    # BUGFIX: return an explicit success flag. ExecutionEngine._run_engine checks
    # `if not future.result()` - with the previous implicit None return, every
    # successful execution was logged as 'Execution failed at cluster ...'
    return True
class ExecutionEngine(object):
""" Engine that triggers and supervises execution of the flow:
- spawning multiple Execution Clusters (such as AWS EMR)
- assigns execution steps to the clusters and monitor their progress
- tracks dependencies and terminate execution should the Flow Critical Path fail """
def __init__(self, logger, flow_name):
    """
    :param logger: logger for engine progress and errors
    :param flow_name: *name* of the workflow, used as a key into flow.conf.flows
    :raise ValueError: if the workflow name is not registered
    """
    # BUGFIX: the original asserted isinstance(flow_name, FlowGraph), but flow_name
    # is used as a lookup key into flows.flows and formatted into the error message
    # below - it is the workflow name (a string), not the FlowGraph itself
    assert isinstance(flow_name, str)
    self.logger = logger
    if flow_name not in flows.flows:
        raise ValueError('workflow {0} not registered among workflows: {1}'
                         .format(flow_name, list(flows.flows.keys())))

    # deep copy: keep the registered graph template pristine across runs
    self.flow_graph_obj = copy.deepcopy(flows.flows[flow_name])

    # list of execution clusters (such as AWS EMR) available for processing
    self.execution_clusters = list()
def _spawn_clusters(self, context):
    """ launches context.number_of_clusters computing clusters concurrently
        and collects the successfully launched ones into self.execution_clusters """
    self.logger.info('spawning clusters...')
    with ThreadPoolExecutor(max_workers=context.number_of_clusters) as executor:
        launch_futures = [
            executor.submit(launch_cluster, self.logger, context,
                            'EmrComputingCluster-{0}'.format(index))
            for index in range(context.number_of_clusters)
        ]
        for completed in as_completed(launch_futures):
            try:
                self.execution_clusters.append(completed.result())
            except Exception as exc:
                # a failed launch is logged but does not abort the other launches
                self.logger.error('Cluster launch generated an exception: {0}'.format(exc))
def _run_engine(self, context):
    """ dispatches flow execution onto every spawned cluster concurrently
        and logs per-cluster failures as the futures complete """
    self.logger.info('starting engine...')
    with ThreadPoolExecutor(max_workers=len(self.execution_clusters)) as executor:
        # Start the GraphNode/Step as soon as the step is unblocked and available for run
        # each future is marked with the execution_cluster
        future_to_worker = {executor.submit(parallel_flow_execution, self.logger,
                                            context, cluster, self.flow_graph_obj): cluster
                            for cluster in self.execution_clusters}

        for future in as_completed(future_to_worker):
            cluster = future_to_worker[future]
            try:
                is_step_complete = future.result()
                # NOTE(review): parallel_flow_execution in this file has no explicit
                # return statement, so future.result() is None and this branch logs
                # a failure even after a successful run - verify the return contract
                if not is_step_complete:
                    self.logger.error('Execution failed at cluster {0}'.format(cluster))
            except Exception as exc:
                self.logger.error('Execution generated an exception at worker {0}: {1}'
                                  .format(cluster, exc))
def run(self, context):
    """ method executes the flow by:
        - spawning clusters
        - traversing the FlowGraph and assigning
          steps for concurrent execution (if permitted by the Graph layout)
        - terminating clusters after the flow has completed or failed
    """
    self.logger.info('starting Engine in {0}: {{'.format(flow.RUN_MODE_NOMINAL))
    graph = self.flow_graph_obj
    try:
        # reset the graph to a clean state and record the start of the run
        graph.set_context(context)
        graph.clear_steps()
        graph.mark_start()

        self._spawn_clusters(context)
        self._run_engine(context)

        graph.mark_success()
    except Exception:
        self.logger.error('Exception on starting Engine', exc_info=True)
        graph.mark_failure()
    finally:
        # TODO: do not terminate failed cluster to be able to retrieve and analyze the processing errors
        for cluster in self.execution_clusters:
            cluster.terminate()
        self.logger.info('}')
def recover(self, context):
    """ method tries to recover the failed flow by:
        - verifying that the flow has failed before
        - spawning clusters
        - locating the failed steps and resetting their state
        - starting the flow processing from the last known successful step
        - terminating clusters after the flow has completed or failed
    """
    self.logger.info('starting Engine in {0}: {{'.format(flow.RUN_MODE_RECOVERY))
    graph = self.flow_graph_obj
    try:
        # unlike run(): restore persisted step states instead of clearing them
        graph.set_context(context)
        graph.load_steps()
        graph.mark_start()

        self._spawn_clusters(context)
        self._run_engine(context)

        graph.mark_success()
    except Exception:
        self.logger.error('Exception on starting Engine', exc_info=True)
        graph.mark_failure()
    finally:
        # TODO: do not terminate failed cluster to be able to retrieve and analyze the processing errors
        for cluster in self.execution_clusters:
            cluster.terminate()
        self.logger.info('}')
    def run_one(self, context, step_name):
        """ method tries to execute a single step:
            - verifying that the flow has steps preceding to the one completed
            - spawning at most 1 cluster
            - resetting state for the requested node
            - starting the step processing
            - terminating clusters after the step has completed or failed

            :param context: execution context propagated to the flow graph and the step
            :param step_name: name of the single step to execute
            :raise ValueError: if the step is still blocked by unprocessed dependencies
        """
        self.logger.info('starting Engine in {0}: {{'.format(RUN_MODE_RUN_ONE))
        try:
            self.flow_graph_obj.set_context(context)
            self.flow_graph_obj.load_steps()
            if not self.flow_graph_obj.is_step_unblocked(step_name):
                raise ValueError('can not execute step {0}, as it is blocked by unprocessed dependencies'
                                 .format(step_name))
            # resetting requested step state
            # NOTE(review): this assigns the state constant to `step_entry` itself rather than
            # to a field such as `step_entry.state` - confirm this is intended
            graph_node = self.flow_graph_obj[step_name]
            if graph_node.step_entry:
                graph_node.step_entry = step.STATE_EMBRYO
            # overriding number of clusters to spawn to 1
            context.number_of_clusters = 1
            self.flow_graph_obj.mark_start()
            self._spawn_clusters(context)
            cluster = self.execution_clusters[0]
            self.logger.info('cluster spawned. starting step {0} execution'.format(step_name))
            graph_node = self.flow_graph_obj[step_name]
            graph_node.set_context(context)
            graph_node.run(cluster)
            self.flow_graph_obj.mark_success()
        except Exception:
            self.logger.error('Exception on starting Engine', exc_info=True)
            self.flow_graph_obj.mark_failure()
        finally:
            # TODO: do not terminate failed cluster to be able to retrieve and analyze the processing errors
            for cluster in self.execution_clusters:
                cluster.terminate()
            self.logger.info('}')
    def run_from(self, context, step_name):
        """ method tries to execute this and all sequential steps:
            - verifying that the flow has steps preceding to the one completed
            - resetting state for the requested node
            - locating the failed steps and resetting their state
            - locating all steps derived from this step and resetting their states
            - computing the number of steps to process and spawning clusters as ratio:
              cluster_number = max(1, (steps_to_run/total_steps) * nominal_cluster_number)
            - starting the flow processing from the given step
            - terminating clusters after the flow has completed or failed

            :param context: execution context propagated to the flow graph and clusters
            :param step_name: name of the step to restart the flow from
            :raise ValueError: if the step is still blocked by unprocessed dependencies
        """
        self.logger.info('starting Engine in {0}: {{'.format(RUN_MODE_RUN_FROM))
        try:
            self.flow_graph_obj.set_context(context)
            self.flow_graph_obj.load_steps()
            if not self.flow_graph_obj.is_step_unblocked(step_name):
                raise ValueError('can not start execution from step {0}, as it is blocked by unprocessed dependencies'
                                 .format(step_name))
            # the requested step plus every step depending on it must be re-run
            steps_to_reset = self.flow_graph_obj.all_dependant_steps(step_name)
            steps_to_reset.append(step_name)
            # resetting requested step state
            # NOTE(review): assigns the state constant to `step_entry` itself rather than
            # to a field such as `step_entry.state` - confirm this is intended
            for reset_step_name in steps_to_reset:
                graph_node = self.flow_graph_obj[reset_step_name]
                if graph_node.step_entry:
                    graph_node.step_entry = step.STATE_EMBRYO
            # overriding number of clusters to spawn
            # NOTE(review): after `*=` the value is a float; confirm _spawn_clusters accepts it
            context.number_of_clusters *= float(len(steps_to_reset)) / len(self.flow_graph_obj)
            context.number_of_clusters = max(1, context.number_of_clusters)
            self.flow_graph_obj.mark_start()
            self._spawn_clusters(context)
            self._run_engine(context)
            self.flow_graph_obj.mark_success()
        except Exception:
            self.logger.error('Exception on starting Engine', exc_info=True)
            self.flow_graph_obj.mark_failure()
        finally:
            # TODO: do not terminate failed cluster to be able to retrieve and analyze the processing errors
            for cluster in self.execution_clusters:
                cluster.terminate()
            self.logger.info('}')
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/execution_engine.py",
"copies": "1",
"size": "10621",
"license": "bsd-3-clause",
"hash": -7947795780359732000,
"line_mean": 44.1957446809,
"line_max": 118,
"alpha_frac": 0.6133132473,
"autogenerated": false,
"ratio": 4.473883740522325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5587196987822325,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import copy
from datetime import datetime
from flow.core.abstract_action import AbstractAction
from flow.core.flow_graph import FlowGraph
from flow.core.flow_graph_node import FlowGraphNode
from flow.core.step_executor import Actionset
from flow.db.model import flow, step
from flow.flow_constants import STEP_NAME_START, STEP_NAME_FINISH
from flow.mx.rest_model import *
from flow.mx.terminal_graph_node import TerminalGraphNode
def create_rest_action(action_obj):
    """ converts an AbstractAction instance into its RestAction representation """
    assert isinstance(action_obj, AbstractAction)
    return RestAction(action_name=action_obj.action_name,
                      kwargs=action_obj.kwargs)
def create_rest_actionset(actionset_obj):
    """ converts an Actionset instance into its RestActionset representation """
    assert isinstance(actionset_obj, Actionset)
    action_documents = [create_rest_action(entry).document for entry in actionset_obj.actions]
    return RestActionset(state=actionset_obj.state,
                         actions=action_documents)
def create_rest_step(graph_node_obj):
    """ converts a FlowGraphNode into its RestStep representation;
        fields of the associated StepEntry are copied over when one exists,
        otherwise the step is reported as an embryo with zero duration """
    assert isinstance(graph_node_obj, FlowGraphNode)

    executor = graph_node_obj.step_executor
    rest_model = RestStep(
        step_name=graph_node_obj.step_name,
        pre_actionset=create_rest_actionset(executor.pre_actionset),
        main_actionset=create_rest_actionset(executor.main_actionset),
        post_actionset=create_rest_actionset(executor.post_actionset),
        previous_nodes=[node.step_name for node in graph_node_obj._prev],
        next_nodes=[node.step_name for node in graph_node_obj._next]
    )

    def _elapsed_seconds(delta):
        # whole days + whole seconds; sub-second remainder is ignored
        return 3600 * 24 * delta.days + delta.seconds

    step_entry = graph_node_obj.step_entry
    if step_entry:
        rest_model.db_id = step_entry.db_id
        rest_model.flow_name = step_entry.flow_name
        rest_model.timeperiod = step_entry.timeperiod
        rest_model.state = step_entry.state
        rest_model.created_at = step_entry.created_at
        rest_model.started_at = step_entry.started_at
        rest_model.finished_at = step_entry.finished_at
        rest_model.related_flow = step_entry.related_flow

        if step_entry.started_at and step_entry.finished_at:
            # step has finished
            rest_model.duration = _elapsed_seconds(step_entry.finished_at - step_entry.started_at)
        elif step_entry.started_at and step_entry.is_in_progress:
            # step is still running
            rest_model.duration = _elapsed_seconds(datetime.utcnow() - step_entry.started_at)
        else:
            # step has never ran
            rest_model.duration = 0
    else:
        # defaults
        rest_model.state = step.STATE_EMBRYO
        rest_model.duration = 0
    return rest_model
def create_rest_flow(flow_graph_obj):
    """ converts a FlowGraph into its RestFlow representation:
        `steps` maps step names to their RestStep documents;
        `graph` augments a copy of those documents with synthetic START/FINISH
        terminal nodes, anchoring any step with no predecessors/successors """
    assert isinstance(flow_graph_obj, FlowGraph)

    steps = {name: create_rest_step(node).document
             for name, node in flow_graph_obj._dict.items()}

    graph = {
        STEP_NAME_START: create_rest_step(TerminalGraphNode(STEP_NAME_START)).document,
        STEP_NAME_FINISH: create_rest_step(TerminalGraphNode(STEP_NAME_FINISH)).document,
    }
    for step_name, rest_step_doc in steps.items():
        node_doc = copy.deepcopy(rest_step_doc)
        graph[step_name] = node_doc
        if not node_doc.get(FIELD_PREVIOUS_NODES):
            # orphan entry point: anchor it to the synthetic START node
            node_doc[FIELD_PREVIOUS_NODES] = [STEP_NAME_START]
            graph[STEP_NAME_START][FIELD_NEXT_NODES].append(step_name)
        if not node_doc.get(FIELD_NEXT_NODES):
            # terminal step: anchor it to the synthetic FINISH node
            node_doc[FIELD_NEXT_NODES] = [STEP_NAME_FINISH]
            graph[STEP_NAME_FINISH][FIELD_PREVIOUS_NODES].append(step_name)

    timeperiod = flow_graph_obj.context.timeperiod
    rest_model = RestFlow(
        flow_name=flow_graph_obj.flow_name,
        timeperiod=timeperiod if timeperiod else 'NA',
        steps=steps,
        graph=graph
    )

    flow_entry = flow_graph_obj.context.flow_entry
    if flow_entry:
        rest_model.db_id = flow_entry.db_id
        rest_model.created_at = flow_entry.created_at
        rest_model.started_at = flow_entry.started_at
        rest_model.finished_at = flow_entry.finished_at
        rest_model.state = flow_entry.state
    else:
        # defaults
        rest_model.state = flow.STATE_EMBRYO
    return rest_model
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/mx/rest_model_factory.py",
"copies": "1",
"size": "4345",
"license": "bsd-3-clause",
"hash": 7424350987022254000,
"line_mean": 35.8220338983,
"line_max": 104,
"alpha_frac": 0.6708860759,
"autogenerated": false,
"ratio": 3.4456780333068995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46165641092068993,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import copy
from flow.core.abstract_action import AbstractAction
from flow.core.execution_context import ContextDriven, get_step_logger, valid_context
# NOTICE: actionset states carry different names (pending vs embryo, etc),
# as they have no persistence and have different CSS coloring schema
# lifecycle of an Actionset: PENDING -> RUNNING -> COMPLETE | FAILED
ACTIONSET_PENDING = 'actionset_pending'
ACTIONSET_RUNNING = 'actionset_running'
ACTIONSET_COMPLETE = 'actionset_complete'
ACTIONSET_FAILED = 'actionset_failed'
def validate_action_param(param, klass):
    """ asserts that *param* is a list/tuple whose elements are all instances of *klass*;
        an empty list/tuple is accepted; raises AssertionError otherwise """
    assert isinstance(param, (tuple, list)), \
        'Expected list of {0} or an empty list. Instead got {1}'.format(klass.__name__, param.__class__.__name__)
    for element in param:
        assert isinstance(element, klass), \
            'Expected list of {0}. Not all elements of the list were of this type'.format(klass.__name__)
class Actionset(ContextDriven):
    """ an ordered set of Actions executed together as one unit;
        tracks its own lifecycle via the ACTIONSET_* state constants """

    def __init__(self, actions, step_name):
        super(Actionset, self).__init__()
        actions = [] if actions is None else actions
        validate_action_param(actions, AbstractAction)

        self.step_name = step_name
        # actions are cloned so set_context can be applied in a concurrency-safe manner
        self.actions = copy.deepcopy(actions)
        self.state = ACTIONSET_PENDING

    def get_logger(self):
        return get_step_logger(self.flow_name, self.step_name, self.settings)

    @valid_context
    def do(self, execution_cluster):
        """ runs every action in order; the first failure marks the set FAILED
            and re-raises; each action is cleaned up regardless of outcome """
        self.state = ACTIONSET_RUNNING
        for action in self.actions:
            try:
                action.set_context(self.context, step_name=self.step_name)
                action.do(execution_cluster)
            except Exception as exc:
                self.state = ACTIONSET_FAILED
                self.logger.error('Execution Error: {0}'.format(exc), exc_info=True)
                raise
            finally:
                action.cleanup()
        self.state = ACTIONSET_COMPLETE
class StepExecutor(ContextDriven):
    """ Step runner class for the GraphNode, encapsulating means to run and track execution progress
        NOTICE: during __init__ all actions are cloned
        so that set_context can be applied to an action in concurrency-safe manner """

    def __init__(self, step_name, main_action, pre_actions=None, post_actions=None, skip=False):
        """
        :param step_name: name of the step this executor is bound to
        :param main_action: single AbstractAction performing the step's main work
        :param pre_actions: optional list of AbstractAction run before the main action
        :param post_actions: optional list of AbstractAction run after the main action
        :param skip: when True, `do` marks all actionsets complete without running them
        """
        super(StepExecutor, self).__init__()
        assert isinstance(main_action, AbstractAction)

        self.step_name = step_name
        self.pre_actionset = Actionset(pre_actions, step_name)
        self.main_actionset = Actionset([main_action], step_name)
        self.post_actionset = Actionset(post_actions, step_name)
        self.skip = skip

    def get_logger(self):
        return get_step_logger(self.flow_name, self.step_name, self.settings)

    @property
    def is_complete(self):
        """ True if all three actionsets have completed successfully """
        return self.pre_actionset.state == ACTIONSET_COMPLETE \
            and self.main_actionset.state == ACTIONSET_COMPLETE \
            and self.post_actionset.state == ACTIONSET_COMPLETE

    @valid_context
    def do(self, execution_cluster):
        """ runs pre/main/post actionsets in order; a failure is logged but not
            re-raised - callers observe it via `is_complete` returning False """
        if self.skip:
            # FIXED: was `block.state.is_success = ACTIONSET_COMPLETE`, which raised
            # AttributeError since `state` is a plain (immutable) string
            for block in [self.pre_actionset, self.main_actionset, self.post_actionset]:
                block.state = ACTIONSET_COMPLETE
            return

        try:
            for block in [self.pre_actionset, self.main_actionset, self.post_actionset]:
                block.set_context(self.context)
                block.do(execution_cluster)
        except Exception as e:
            # FIXED: was a silent `pass`; log the failure instead of swallowing it
            # invisibly. The exception is deliberately not re-raised.
            self.logger.error('Step {0} execution failed: {1}'.format(self.step_name, e), exc_info=True)
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/step_executor.py",
"copies": "1",
"size": "3471",
"license": "bsd-3-clause",
"hash": -6626659680924903000,
"line_mean": 38.8965517241,
"line_max": 113,
"alpha_frac": 0.6473638721,
"autogenerated": false,
"ratio": 3.926470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5073834460335294,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import datetime
import random
import time
import math
from threading import Thread
from amqp import AMQPError
from db.model.raw_data import RawData
from synergy.mq.flopsy import Publisher
from synergy.system.performance_tracker import SimpleTracker
from synergy.system.synergy_process import SynergyProcess
SLEEP_TIME = 0.03   # seconds between generated events; implies a nominal rate of ~33 events/sec
TICK_INTERVAL = 10  # seconds between session-group rollovers in _run_stream_generation
class EventStreamGenerator(SynergyProcess):
    """ publishes a continuous stream of pseudo-random RawData page-view events
        to the MQ from a dedicated worker thread """

    def __init__(self, process_name):
        super(EventStreamGenerator, self).__init__(process_name)
        self.main_thread = None
        self.publisher = Publisher(process_name)
        self.performance_ticker = SimpleTracker(self.logger)
        self.previous_tick = time.time()
        self.thread_is_running = True
        # encode the current UTC time into a decimal of form YYYYMMDDhhmmss00
        # NOTE(review): math.pow returns float, so number_of_groups is a float;
        # the '%d' formatting below truncates it - confirm this is intended
        utc_date = datetime.datetime.utcnow()
        self.number_of_groups = utc_date.year * math.pow(10, 12) + \
                                utc_date.month * math.pow(10, 10) + \
                                utc_date.day * math.pow(10, 8) + \
                                utc_date.hour * math.pow(10, 6) + \
                                utc_date.minute * math.pow(10, 4) + \
                                utc_date.second * math.pow(10, 2)
        self.logger.info('Started %s' % self.process_name)

    def __del__(self):
        # release MQ and tracker resources before tearing the process down
        self.publisher.close()
        self.performance_ticker.cancel()
        super(EventStreamGenerator, self).__del__()
        self.logger.info('Exiting main thread. All auxiliary threads stopped.')

    def _generate_key(self):
        """ :return: tuple (random 'domainNNNNN.com' name, current unix timestamp) """
        _id = random.randint(0, 100000)
        domain_name = 'domain%d.com' % _id
        return domain_name, time.time()

    def _run_stream_generation(self):
        """ worker-thread body: builds and publishes one RawData document every
            SLEEP_TIME seconds until cancelled or the MQ connection fails """
        self.logger.info('Stream Generator: ON. Expected rate: %d/s, %d/m, %d/h, %d/d' %
                         (1 / SLEEP_TIME, 1 / SLEEP_TIME * 60, 1 / SLEEP_TIME * 3600, 1 / SLEEP_TIME * 86400))
        self.performance_ticker.start()
        # fixed seed: the generated stream is reproducible between runs
        random.seed('RANDOM_SEED_OBJECT')
        document = RawData()
        while self.thread_is_running:
            if time.time() - self.previous_tick > TICK_INTERVAL:
                #increment group number every TICK_INTERVAL seconds
                self.number_of_groups += 100
                self.previous_tick = time.time()
            try:
                key = self._generate_key()
                document.key = (key[0], key[1])
                session_no = self.number_of_groups + random.randint(0, 99)
                document.session_id = 'session_%d' % session_no
                document.ip = '%d.%d.%d.%d' % (random.randint(0, 255),
                                               random.randint(0, 255),
                                               random.randint(0, 255),
                                               random.randint(0, 255))
                document.screen_res = (random.randrange(340, 1080, 100), random.randrange(240, 980, 100))
                # vary OS/browser/locale fields based on the running success counter
                if self.performance_ticker.tracker.success.per_tick % 7 == 0:
                    document.os = 'OSX'
                    document.browser = 'Safari-1.0'
                    document.language = 'en_us'
                    document.country = 'usa'
                elif self.performance_ticker.tracker.success.per_tick % 5 == 0:
                    document.os = 'Linux'
                    document.browser = 'FireFox-4.0'
                    document.language = 'en_ca'
                    document.country = 'canada'
                elif self.performance_ticker.tracker.success.per_tick % 3 == 0:
                    document.os = 'Windows'
                    document.browser = 'IE-6.0'
                    document.language = 'ge_de'
                    document.country = 'germany'
                else:
                    document.os = 'Android'
                    document.browser = 'FireMini-2.0'
                    document.language = 'es'
                    document.country = 'eu'
                document.is_page_view = True
                self.publisher.publish(document.document)
                self.performance_ticker.tracker.increment_success()
                time.sleep(SLEEP_TIME)
            except (AMQPError, IOError) as e:
                # connection-level failure: stop the generator thread
                self.thread_is_running = False
                self.performance_ticker.cancel()
                self.logger.error('AMQPError: %s' % str(e))
            except Exception as e:
                # any other error: count it as a failure and keep streaming
                self.performance_ticker.tracker.increment_failure()
                self.logger.info('safety fuse: %s' % str(e))

    def start(self, *_):
        """ spawns the generator thread; extra positional args are ignored """
        self.main_thread = Thread(target=self._run_stream_generation)
        self.main_thread.start()

    def cancel(self):
        """ signals the generator thread to exit its loop """
        self.thread_is_running = False
# script entry point: start the generator as a standalone process
if __name__ == '__main__':
    from constants import PROCESS_STREAM_GEN

    generator = EventStreamGenerator(PROCESS_STREAM_GEN)
    generator.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/event_stream_generator.py",
"copies": "1",
"size": "4739",
"license": "bsd-3-clause",
"hash": -1850698089942780400,
"line_mean": 38.4916666667,
"line_max": 110,
"alpha_frac": 0.5503270732,
"autogenerated": false,
"ratio": 3.936046511627907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49863735848279067,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import functools
from werkzeug.wrappers import Request
from synergy.mx.utils import jinja_env
def valid_action_request(method):
    """ decorator: verifies that the handler's request passed validation
        (is_request_valid) before invoking the wrapped action method;
        replies *bad request* on failed validation and *server error*
        when the wrapped method raises """

    @functools.wraps(method)
    def _wrapper(self, *args, **kwargs):
        assert isinstance(self, BaseRequestHandler)
        if not self.is_request_valid:
            return self.reply_bad_request()

        try:
            return method(self, *args, **kwargs)
        except Exception as e:
            # FIXED: collapsed the duplicated `except UserWarning` branch -
            # it performed exactly the same handling (UserWarning is a subclass of Exception)
            return self.reply_server_error(e)

    return _wrapper
class BaseRequestHandler(object):
    """ base class for Synergy MX request handlers: holds the request, its
        arguments and a reference to the Scheduler mbean, and provides
        uniform reply helpers """

    def __init__(self, request, **values):
        assert isinstance(request, Request)
        self.request = request
        self.values = values
        self.request_arguments = request.args or request.form
        self.scheduler = jinja_env.globals['mbean']
        self.logger = self.scheduler.logger
        self.is_request_valid = False

    def reply_ok(self):
        """ standard acknowledgement payload """
        return {'status': 'OK'}

    def reply_bad_request(self):
        """ logs the offending request and replies with an empty document """
        self.logger.error('Bad request: {0}'.format(self.request))
        return {}

    def reply_server_error(self, e):
        """ logs the exception with traceback and replies with an error document """
        self.logger.error('MX Processing Exception: {0}'.format(e), exc_info=True)
        return {'status': 'Server Internal Error'}
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/base_request_handler.py",
"copies": "1",
"size": "1418",
"license": "bsd-3-clause",
"hash": 1843886393094308900,
"line_mean": 29.170212766,
"line_max": 82,
"alpha_frac": 0.6396332863,
"autogenerated": false,
"ratio": 3.9719887955182074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002563445270443476,
"num_lines": 47
} |
__author__ = 'Bohdan Mushkevych'
import gc
from synergy.conf import context
from synergy.db.model import unit_of_work
from synergy.db.manager import ds_manager
from synergy.workers.abstract_uow_aware_worker import AbstractUowAwareWorker
class AbstractMongoWorker(AbstractUowAwareWorker):
    """ Abstract class is inherited by all workers of the illustration suite
        Module holds logic on handling unit_of_work and declaration of abstract methods """

    def __init__(self, process_name):
        super(AbstractMongoWorker, self).__init__(process_name, perform_db_logging=True)
        # {composite_key: aggregated document} accumulated between flushes
        self.aggregated_objects = dict()
        self.source = context.process_context[self.process_name].source
        self.sink = context.process_context[self.process_name].sink
        self.ds = ds_manager.ds_factory(self.logger)

    def __del__(self):
        # best-effort flush of anything aggregated but not yet written to the sink
        self._flush_aggregated_objects()
        super(AbstractMongoWorker, self).__del__()

    # **************** Abstract Methods ************************
    def _flush_aggregated_objects(self):
        """ method inserts aggregated objects into MongoDB
            :return number_of_aggregated_objects """
        if len(self.aggregated_objects) == 0:
            # nothing to do
            return 0

        number_of_aggregated_objects = len(self.aggregated_objects)
        self.logger.info(f'Aggregated {number_of_aggregated_objects} documents. Performing flush.')
        for key in self.aggregated_objects:
            document = self.aggregated_objects[key]
            mongo_pk = self._mongo_sink_key(*key)
            self.ds.update(self.sink, mongo_pk, document)
        self.logger.info('Flush successful.')
        # drop the flushed map and nudge the garbage collector to reclaim it
        del self.aggregated_objects
        self.aggregated_objects = dict()
        gc.collect()
        return number_of_aggregated_objects

    def _get_aggregated_object(self, composite_key):
        """ method talks with the map of instances of aggregated objects
            :param composite_key: tuple, comprising of domain_name and timeperiod
            :return: existing sink object for the key, lazily creating one if absent """
        if composite_key not in self.aggregated_objects:
            self.aggregated_objects[composite_key] = self._init_sink_object(composite_key)
        return self.aggregated_objects[composite_key]

    def _init_sink_key(self, *args):
        """ abstract method to create composite key from source compounds like domain_name and timeperiod"""
        pass

    def _mongo_sink_key(self, *args):
        """ abstract method to create MongoDB primary key from source compounds like domain_name and timeperiod"""
        pass

    def _init_sink_object(self, composite_key):
        """ abstract method to instantiate new object that will be holding aggregated data """
        pass

    def _init_source_object(self, document):
        """ abstract method to initialise object with map from source collection """
        pass

    # ********************** thread-related methods ****************************
    def _process_single_document(self, document):
        """ abstract method that actually processes the document from source collection"""
        pass

    def _cursor_exploited(self):
        """ abstract method notifying users that cursor was exploited """
        pass

    def _run_custom_data_engine(self, start_id_obj, end_id_obj, start_timeperiod, end_timeperiod):
        """ fine-tuned data engine. MongoDB legacy:
            pages through the [start_id : end_id] range, resuming each iteration
            from the last _id seen in the previous batch """
        collection_name = context.process_context[self.process_name].source
        iteration = 0
        while True:
            cursor = self.ds.cursor_fine(collection_name,
                                         start_id_obj,
                                         end_id_obj,
                                         iteration,
                                         start_timeperiod,
                                         end_timeperiod)
            # NOTE(review): cursor.count() was removed in recent pymongo releases - verify driver version
            if iteration == 0 and cursor.count(with_limit_and_skip=True) == 0:
                msg = f'No entries in {collection_name} at range [{start_id_obj} : {end_id_obj}]'
                self.logger.warning(msg)
                break

            start_id_obj = None
            for document in cursor:
                # remember the last seen _id so the next iteration resumes after it
                start_id_obj = document['_id']
                self._process_single_document(document)
                self.performance_tracker.increment_success()

            if start_id_obj is None:
                # empty batch: the range is exhausted
                break
            iteration += 1

        self._cursor_exploited()
        msg = f'Cursor exploited after {iteration} iterations'
        self.logger.info(msg)

    def _run_data_engine(self, start_timeperiod, end_timeperiod):
        """ regular data engine: single batch cursor over the timeperiod range """
        collection_name = context.process_context[self.process_name].source
        cursor = self.ds.cursor_batch(collection_name,
                                      start_timeperiod,
                                      end_timeperiod)
        for document in cursor:
            self._process_single_document(document)
            self.performance_tracker.increment_success()

        self._cursor_exploited()
        msg = f'Cursor exploited after fetching {self.performance_tracker.success_per_job} documents'
        self.logger.info(msg)

    def _process_uow(self, uow):
        """ processes a unit_of_work: id-bounded legacy engine when both ids are
            present, otherwise the timeperiod-only batch engine;
            flushes aggregated documents at the end
            :return: tuple (number of processed documents, terminal uow state) """
        if not uow.start_id or not uow.end_id:
            self._run_data_engine(uow.start_timeperiod, uow.end_timeperiod)
        else:
            self._run_custom_data_engine(uow.start_id, uow.end_id, uow.start_timeperiod, uow.end_timeperiod)

        self._flush_aggregated_objects()
        return self.performance_tracker.success_per_job, unit_of_work.STATE_PROCESSED
| {
"repo_name": "mushkevych/scheduler",
"path": "workers/abstract_mongo_worker.py",
"copies": "1",
"size": "5572",
"license": "bsd-3-clause",
"hash": 3716970972911907000,
"line_mean": 41.534351145,
"line_max": 114,
"alpha_frac": 0.6109117014,
"autogenerated": false,
"ratio": 4.282859338970023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012881541874825366,
"num_lines": 131
} |
__author__ = 'Bohdan Mushkevych'
import heapq
from datetime import datetime, timedelta
from synergy.conf import settings
from synergy.system import time_helper
from synergy.system.time_qualifier import QUALIFIER_REAL_TIME
def compute_release_time(lag_in_minutes):
    """ computes *now (UTC)* + *lag_in_minutes*, expressed as an integer
        Synergy real-time timeperiod """
    release_dt = datetime.utcnow() + timedelta(minutes=lag_in_minutes)
    return int(time_helper.datetime_to_synergy(QUALIFIER_REAL_TIME, release_dt))
class PriorityEntry(object):
    """ an entry for Priority Queue, where priority is *entry creation time* + *waiting time*;
        equal release times are tie-broken by declaration order """

    # Creation counter keeps track of PriorityEntry declaration order
    # Each time an instance is created the counter should be increased
    creation_counter = 0

    def __init__(self, entry, lag_in_minutes=settings.settings['gc_release_lag_minutes']):
        """ :param entry: the unit_of_work to reprocess """
        self.entry = entry
        self.release_time = compute_release_time(lag_in_minutes)  # SYNERGY_SESSION_PATTERN: time in the future
        self.creation_counter = PriorityEntry.creation_counter + 1
        PriorityEntry.creation_counter += 1

    def _ordering_key(self):
        # lexicographic tuple comparison: release time first, then declaration order
        return self.release_time, self.creation_counter

    def __eq__(self, other):
        return self.entry == other.entry

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        """Defines behavior for the less-than operator, <."""
        return self._ordering_key() < other._ordering_key()

    def __gt__(self, other):
        """Defines behavior for the greater-than operator, >."""
        return self._ordering_key() > other._ordering_key()

    def __le__(self, other):
        """Defines behavior for the less-than-or-equal-to operator, <=."""
        return self._ordering_key() <= other._ordering_key()

    def __ge__(self, other):
        """Defines behavior for the greater-than-or-equal-to operator, >=."""
        return self._ordering_key() >= other._ordering_key()

    def __hash__(self):
        return hash(self._ordering_key())
class PriorityQueue(object):
    """ Priority Queue that retrieves entries in the priority order (lowest first) """

    def __init__(self):
        self.queue = list()

    def __len__(self):
        return len(self.queue)

    def __contains__(self, item):
        return item in self.queue

    def put(self, item):
        """ adds *item* to the queue, maintaining the heap invariant """
        heapq.heappush(self.queue, item)

    def pop(self):
        """ :return: minimal element is removed from the queue and returned to the caller
            :raise IndexError: if the queue is empty """
        return heapq.heappop(self.queue)

    def peek(self):
        """ :return: minimal element without being removed from the queue
            :raise IndexError: if the queue is empty

            FIXED: was `min(self.queue)` - an O(n) scan that raised ValueError on an
            empty queue; the heap invariant guarantees the minimum at index 0, making
            this O(1) and consistent with `pop` on an empty queue """
        return self.queue[0]
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/priority_queue.py",
"copies": "1",
"size": "3195",
"license": "bsd-3-clause",
"hash": -3134111781814840000,
"line_mean": 34.5,
"line_max": 111,
"alpha_frac": 0.6478873239,
"autogenerated": false,
"ratio": 3.983790523690773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007083163074411373,
"num_lines": 90
} |
__author__ = 'Bohdan Mushkevych'
import json
from os import path
from datetime import datetime
from jinja2 import Environment, FileSystemLoader
from werkzeug.local import Local, LocalManager
from werkzeug.wrappers import Response
from werkzeug.routing import Map, Rule
from synergy.conf import context
from synergy.conf import settings
from flow.mx import STATIC_FLOW_ENDPOINT, STATIC_FLOW_PATH
# template and static asset folders resolved relative to this module
TEMPLATE_PATH = path.join(path.dirname(__file__), 'templates')
STATIC_PATH = path.join(path.dirname(__file__), 'static')

# werkzeug context-locals; url_adapter is expected to be bound per-request
local = Local()
local_manager = LocalManager([local])

# Synergy MX map of URL routing
url_map = Map()
url_map.add(Rule('/scheduler/static/<file>', endpoint='scheduler/static', build_only=True))
url_map.add(Rule(f'/{STATIC_FLOW_ENDPOINT}/<file>', endpoint=STATIC_FLOW_ENDPOINT, build_only=True))

# tree/group of trees will be shown on a separate page defined by tree property MX_PAGE
# mx_page_context is a dictionary in format: {MX_PAGE: MX PAGE}
mx_page_context = {tree_entry.mx_page: tree_entry.mx_page.replace('_', ' ')
                   for tree_entry in context.timetable_context.values()}

# loop sets a Rule per every mx_page from mx_page_context to be processed by
# 'mx_page_tiles' method from mx.views.py
# NOTICE: given approach renders template snippet {{ url_for ('function_name') }} invalid,
# since all mx_page are processed by the single function 'mx_page_tiles'
for rule in mx_page_context:
    url_map.add(Rule(f'/scheduler/{rule}/', endpoint='mx_page_tiles'))
def expose(rule, methods=None, **kw):
    """ decorator factory: registers the decorated view under *rule* in the URL map,
        using the function name as the routing endpoint """
    def decorate(handler):
        url_map.add(Rule(rule, methods=methods, endpoint=handler.__name__))
        return handler
    return decorate
def url_for(endpoint, _external=False, **values):
    """ builds a URL for *endpoint* via the request-bound URL adapter """
    adapter = local.url_adapter
    return adapter.build(endpoint, values, force_external=_external)
def render_template(template, **context):
    """ renders the named Jinja2 template into an HTML Response """
    body = jinja_env.get_template(template).render(**context)
    return Response(body, mimetype='text/html')
def scheduler_uptime():
    """ :return: scheduler uptime formatted as 'DD:HH:MM:SS' """
    time_diff = datetime.utcnow() - settings.settings['process_start_time']
    hours, remainder = divmod(time_diff.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{days:02d}:{hours:02d}:{minutes:02d}:{seconds:02d}'.format(
        days=time_diff.days, hours=hours, minutes=minutes, seconds=seconds)
# shared Jinja2 environment: templates resolved from both MX and Flow template folders
jinja_env = Environment(loader=FileSystemLoader([TEMPLATE_PATH, STATIC_FLOW_PATH]), autoescape=True)
jinja_env.add_extension('jinja2.ext.do')

# helpers and globals exposed to every template
jinja_env.globals['url_for'] = url_for
jinja_env.globals['local'] = local
jinja_env.globals['get_current_time'] = lambda: datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S %Z')
jinja_env.globals['scheduler_version'] = lambda: settings.settings['version']
jinja_env.globals['scheduler_uptime'] = scheduler_uptime
jinja_env.globals['mx_processing_context'] = mx_page_context
jinja_env.globals['mx_title'] = settings.settings['mx_title']
jinja_env.globals['synergy_process_context'] = context.process_context

# template filters: json serialization and leading-slash stripping
jinja_env.filters['jsonify'] = json.dumps
jinja_env.filters['lstrip_slash'] = lambda x: x.lstrip('/')
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/utils.py",
"copies": "1",
"size": "3003",
"license": "bsd-3-clause",
"hash": 3282229977534730000,
"line_mean": 39.04,
"line_max": 100,
"alpha_frac": 0.7212787213,
"autogenerated": false,
"ratio": 3.3855693348365277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46068480561365277,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import json
from rest_client.restful_lib import Connection
from synergy.db.model.unit_of_work import TIMEPERIOD
from synergy.conf import settings
class ConnectionPool(object):
    """ maintains a round-robin pool of REST connections to the remote hosts;
        hosts that fail to connect are logged and excluded from the pool """

    def __init__(self, logger, login, pwd, hosts):
        self.logger = logger
        self.index = 0
        self.connection_pool = []
        for host in hosts:
            try:
                connection = Connection(host, username=login, password=pwd)
                connection.h.disable_ssl_certificate_validation = True
                self.connection_pool.append(connection)
            except Exception as e:
                self.logger.error('Exception occurred while connecting to %s:%s ' % (host, str(e)), exc_info=True)

    def get_connection(self):
        """ :return: the next connection in round-robin order
            :raise EnvironmentError: if the pool holds no connections """
        pool_size = len(self.connection_pool)
        if not pool_size:
            raise EnvironmentError('ConnectionPool is empty. Unable to serve connection')

        if self.index >= pool_size:
            self.index = 0
        connection = self.connection_pool[self.index]
        self.index += 1
        return connection
class RestClient(object):
    """ RestClient performs REST-protocol communication with the remote REST tier """
    REQUEST_CLIENT = '/admin/clients'
    ARGUMENT_DOMAINS = 'domains'
    ARGUMENT_TIMEPERIOD = TIMEPERIOD

    def __init__(self, logger):
        login = settings.settings['construction_login']
        pwd = settings.settings['construction_password']
        hosts = settings.settings['construction_hosts']
        self.logger = logger
        self.connection_pool = ConnectionPool(logger, login, pwd, hosts)

    def _perform_communication(self, request, body_as_dict):
        """ POSTs *body_as_dict* as JSON to the *request* path;
            :return: parsed response body as dict, or an empty dict on failure """
        conn = self.connection_pool.get_connection()
        resp = conn.request_post(request,
                                 body=json.dumps(body_as_dict),
                                 headers={'content-type': 'application/json', 'accept': 'application/json'})

        status = resp[u'headers']['status']
        # 200 - fresh successful response; 304 - previously retrieved but still valid
        if status in ('200', '304'):
            return json.loads(resp[u'body'])
        self.logger.error('Request failed with status %s' % str(status))
        return dict()

    def get_client_mapping(self, timeperiod, domain_list):
        """ :return: dict in format {<string> domain_name: <string> client_id} """
        payload = {self.ARGUMENT_TIMEPERIOD: timeperiod,
                   self.ARGUMENT_DOMAINS: domain_list}
        return self._perform_communication(self.REQUEST_CLIENT, payload)
# module is importable only; no standalone entry point
if __name__ == '__main__':
    pass
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/system/restful_client.py",
"copies": "1",
"size": "2714",
"license": "bsd-3-clause",
"hash": 3401535397039502000,
"line_mean": 36.6944444444,
"line_max": 120,
"alpha_frac": 0.6186440678,
"autogenerated": false,
"ratio": 4.1753846153846155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008461338579079936,
"num_lines": 72
} |
__author__ = 'Bohdan Mushkevych'
import json
import httplib
from werkzeug.wrappers import Response
from synergy.mx.freerun_action_handler import FreerunActionHandler
from synergy.mx.managed_action_handler import ManagedActionHandler
from synergy.mx.scheduler_entries import SchedulerEntries
from synergy.mx.dashboard_handler import DashboardHandler
from synergy.mx.utils import render_template, expose
from synergy.mx.tree_node_details import TreeNodeDetails
from synergy.mx.tree_details import TreeDetails
@expose('/entries/managed/')
def scheduler_managed_entries(request, **values):
    """ renders the page listing managed scheduler entries """
    return render_template('scheduler_managed_entries.html',
                           details=SchedulerEntries(request, **values))
@expose('/entries/freerun/')
def scheduler_freerun_entries(request, **values):
    """ renders the page listing freerun scheduler entries """
    return render_template('scheduler_freerun_entries.html',
                           details=SchedulerEntries(request, **values))
@expose('/open_schedulable_form/')
def open_schedulable_form(request, **values):
if 'is_new_entry' in request.args and request.args['is_new_entry'] in ('True', 'true', '1'):
handler = None
else:
handler = FreerunActionHandler(request, **values)
return render_template('schedulable_form.html', handler=handler)
@expose('/')
@expose('/dashboard/managed/')
def dashboard_managed(request, **values):
    """Render the managed-processes dashboard (also serves the MX root URL)."""
    return render_template('dashboard_managed.html',
                           details=DashboardHandler(request, **values))


@expose('/dashboard/freeruns/')
def dashboard_freeruns(request, **values):
    """Render the freerun-processes dashboard."""
    return render_template('dashboard_freeruns.html',
                           details=DashboardHandler(request, **values))


@expose('/details/tree_nodes/')
def details_tree_nodes(request, **values):
    """Return JSON details for a single timetable tree node."""
    node_details = TreeNodeDetails(request, **values)
    return Response(response=json.dumps(node_details.details),
                    mimetype='application/json')


@expose('/details/trees/')
def details_trees(request, **values):
    """Return JSON details for the timetable trees shown on the MX page."""
    tree_details = TreeDetails(request, **values)
    return Response(response=json.dumps(tree_details.mx_page_entries),
                    mimetype='application/json')
@expose('/action/update_freerun_entry/')
def action_update_freerun_entry(request, **values):
    """Thin wrapper: delegate the submitted schedulable form to FreerunActionHandler."""
    FreerunActionHandler(request, **values).action_update_entry()
    return Response(status=httplib.NO_CONTENT)


@expose('/action/reprocess/')
def action_reprocess(request, **values):
    """Thin wrapper: delegate reprocessing to ManagedActionHandler."""
    ManagedActionHandler(request, **values).action_reprocess()
    return Response(status=httplib.NO_CONTENT)


@expose('/action/skip/')
def action_skip(request, **values):
    """Thin wrapper: delegate skipping to ManagedActionHandler."""
    ManagedActionHandler(request, **values).action_skip()
    return Response(status=httplib.NO_CONTENT)


@expose('/action/cancel_uow/')
def action_cancel_uow(request, **values):
    """Thin wrapper: delegate unit_of_work cancellation to FreerunActionHandler."""
    FreerunActionHandler(request, **values).action_cancel_uow()
    return Response(status=httplib.NO_CONTENT)


@expose('/action/get_uow/')
def action_get_uow(request, **values):
    """Return the handler's action_get_uow() result as JSON."""
    handler = get_action_handler(request, **values)
    return Response(response=json.dumps(handler.action_get_uow()),
                    mimetype='application/json')


@expose('/action/get_log/')
def action_get_log(request, **values):
    """Return the handler's action_get_log() result as JSON."""
    handler = get_action_handler(request, **values)
    return Response(response=json.dumps(handler.action_get_log()),
                    mimetype='application/json')


@expose('/action/change_interval/')
def action_change_interval(request, **values):
    """Thin wrapper: delegate trigger-interval change to the matching handler."""
    get_action_handler(request, **values).action_change_interval()
    return Response(status=httplib.NO_CONTENT)


@expose('/action/trigger_now/')
def action_trigger_now(request, **values):
    """Thin wrapper: fire the addressed entry immediately."""
    get_action_handler(request, **values).action_trigger_now()
    return Response(status=httplib.NO_CONTENT)


@expose('/action/deactivate_trigger/')
def action_deactivate_trigger(request, **values):
    """Thin wrapper: switch the addressed entry's trigger off."""
    get_action_handler(request, **values).action_deactivate_trigger()
    return Response(status=httplib.NO_CONTENT)


@expose('/action/activate_trigger/')
def action_activate_trigger(request, **values):
    """Thin wrapper: switch the addressed entry's trigger on."""
    get_action_handler(request, **values).action_activate_trigger()
    return Response(status=httplib.NO_CONTENT)
def get_action_handler(request, **values):
    """Instantiate the action handler matching the request.

    The MX front-end flags freerun requests via the ``is_freerun`` query
    argument; any of 'True'/'true'/'1' selects FreerunActionHandler,
    everything else (including absence of the flag) selects ManagedActionHandler.

    :param request: werkzeug-style request carrying the ``args`` mapping
    :return: FreerunActionHandler or ManagedActionHandler instance
    """
    # single .get lookup replaces the original "key in args and args[key] in ..." double lookup;
    # .get returns None when absent, which is not in the tuple -> same behavior
    if request.args.get('is_freerun') in ('True', 'true', '1'):
        return FreerunActionHandler(request, **values)
    return ManagedActionHandler(request, **values)
@expose('/object_viewer/')
def object_viewer(request, **values):
    """Render the generic object-viewer page."""
    return render_template('object_viewer.html')


@expose('/mx_page_tiles/')
def mx_page_tiles(request, **values):
    """Render the tiles of a single MX page from the timetable tree details."""
    tree_details = TreeDetails(request, **values)
    return render_template('mx_page_tiles.html', details=tree_details.mx_page_entries)


# referenced from mx.synergy_mx.py module
def not_found(request, **values):
    """Fallback view rendered for unmatched URLs."""
    return render_template('not_found.html')
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/views.py",
"copies": "1",
"size": "5039",
"license": "bsd-3-clause",
"hash": -3845456215408023000,
"line_mean": 31.7207792208,
"line_max": 96,
"alpha_frac": 0.7207779321,
"autogenerated": false,
"ratio": 3.6330209084354723,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48537988405354726,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import json
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.mx.base_request_handler import valid_action_request
from synergy.mx.abstract_action_handler import AbstractActionHandler
from synergy.scheduler.scheduler_constants import STATE_MACHINE_FREERUN
class FreerunActionHandler(AbstractActionHandler):
    """ MX action handler for freerun schedulable entries:
        CRUD on FreerunProcessEntry plus UOW cancellation and event log retrieval """

    def __init__(self, request, **values):
        super(FreerunActionHandler, self).__init__(request, **values)
        self.process_name = self.request_arguments.get('process_name')
        self.entry_name = self.request_arguments.get('entry_name')
        self.freerun_process_dao = FreerunProcessDao(self.logger)
        # both components of the (process_name, entry_name) handler key must be present
        self.is_request_valid = bool(self.process_name and self.entry_name)

        if self.is_request_valid:
            self.process_name = self.process_name.strip()
            self.entry_name = self.entry_name.strip()
            self.is_requested_state_on = self.request_arguments.get('is_on') == 'on'

    def _parsed_arguments(self):
        """ :return: the request's 'arguments' field parsed into a dict;
            empty dict when the field is absent or empty """
        arguments = self.request_arguments['arguments']
        if not arguments:
            return {}
        if isinstance(arguments, bytes):
            arguments = arguments.decode('unicode-escape')
        return json.loads(arguments)

    @AbstractActionHandler.thread_handler.getter
    def thread_handler(self):
        handler_key = (self.process_name, self.entry_name)
        return self.scheduler.freerun_handlers[handler_key]

    @AbstractActionHandler.process_entry.getter
    def process_entry(self):
        return self.thread_handler.process_entry

    @AbstractActionHandler.uow_id.getter
    def uow_id(self):
        return self.process_entry.related_unit_of_work

    @valid_action_request
    def cancel_uow(self):
        """ cancels the related unit_of_work via the freerun state machine """
        freerun_state_machine = self.scheduler.timetable.state_machines[STATE_MACHINE_FREERUN]
        freerun_state_machine.cancel_uow(self.process_entry)
        return self.reply_ok()

    @valid_action_request
    def get_event_log(self):
        return {'event_log': self.process_entry.event_log}

    @valid_action_request
    def create_entry(self):
        """ creates a FreerunProcessEntry from the request and registers it with the scheduler """
        process_entry = FreerunProcessEntry()
        process_entry.process_name = self.process_name
        process_entry.entry_name = self.entry_name
        process_entry.arguments = self._parsed_arguments()
        process_entry.description = self.request_arguments['description']
        process_entry.is_on = self.is_requested_state_on
        process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
        self.freerun_process_dao.update(process_entry)

        self.scheduler._register_process_entry(process_entry, self.scheduler.fire_freerun_worker)
        return self.reply_ok()

    @valid_action_request
    def delete_entry(self):
        handler_key = (self.process_name, self.entry_name)
        self.thread_handler.deactivate()
        self.freerun_process_dao.remove(handler_key)
        del self.scheduler.freerun_handlers[handler_key]
        self.logger.info(f'MX: Deleted FreerunThreadHandler for {handler_key}')
        return self.reply_ok()

    @valid_action_request
    def update_entry(self):
        """ applies the request to an existing entry; re-triggers interval/state changes """
        # BUGFIX: capture change flags BEFORE overwriting process_entry fields.
        # The original compared process_entry.is_on to is_requested_state_on AFTER
        # the assignment, so the activate/deactivate branch could never fire.
        is_interval_changed = self.process_entry.trigger_frequency != self.request_arguments['trigger_frequency']
        is_state_changed = self.process_entry.is_on != self.is_requested_state_on

        self.process_entry.arguments = self._parsed_arguments()
        self.process_entry.description = self.request_arguments['description']
        self.process_entry.is_on = self.is_requested_state_on
        self.process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
        self.freerun_process_dao.update(self.process_entry)

        if is_interval_changed:
            self.change_interval()
        if is_state_changed:
            if self.is_requested_state_on:
                self.activate_trigger()
            else:
                self.deactivate_trigger()
        return self.reply_ok()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/freerun_action_handler.py",
"copies": "1",
"size": "4312",
"license": "bsd-3-clause",
"hash": -7808365063927433000,
"line_mean": 40.4615384615,
"line_max": 113,
"alpha_frac": 0.6783395176,
"autogenerated": false,
"ratio": 3.8091872791519434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9983915262802376,
"avg_score": 0.0007223067899132311,
"num_lines": 104
} |
__author__ = 'Bohdan Mushkevych'
import json
from synergy.db.model import unit_of_work
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.mx.base_request_handler import valid_action_request
from synergy.mx.abstract_action_handler import AbstractActionHandler
class FreerunActionHandler(AbstractActionHandler):
    """ MX action handler for freerun schedulable entries:
        CRUD via the schedulable form plus unit_of_work inspection/cancellation """

    def __init__(self, request, **values):
        super(FreerunActionHandler, self).__init__(request, **values)
        self.process_name = self.request_arguments.get('process_name')
        self.entry_name = self.request_arguments.get('entry_name')
        self.freerun_process_dao = FreerunProcessDao(self.logger)
        self.uow_dao = UnitOfWorkDao(self.logger)
        # both components of the (process_name, entry_name) handler key must be present
        self.is_request_valid = bool(self.process_name and self.entry_name)

        if self.is_request_valid:
            self.process_name = self.process_name.strip()
            self.entry_name = self.entry_name.strip()
            self.is_requested_state_on = 'is_on' in self.request_arguments and self.request_arguments['is_on']

    def _parsed_arguments(self):
        """ :return: the request's 'arguments' field parsed into a dict;
            empty dict when the field is absent or empty """
        if not self.request_arguments['arguments']:
            return {}
        arguments = self.request_arguments['arguments'].decode('unicode-escape')
        return json.loads(arguments)

    @AbstractActionHandler.thread_handler.getter
    def thread_handler(self):
        handler_key = (self.process_name, self.entry_name)
        return self.scheduler.freerun_handlers[handler_key]

    @AbstractActionHandler.process_entry.getter
    def process_entry(self):
        return self.thread_handler.process_entry

    @valid_action_request
    def action_cancel_uow(self):
        uow_id = self.process_entry.related_unit_of_work
        if uow_id is None:
            resp = {'response': 'no related unit_of_work'}
        else:
            uow = self.uow_dao.get_one(uow_id)
            uow.state = unit_of_work.STATE_CANCELED
            self.uow_dao.update(uow)
            resp = {'response': 'updated unit_of_work %r' % uow_id}
        return resp

    @valid_action_request
    def action_get_uow(self):
        uow_id = self.process_entry.related_unit_of_work
        if uow_id is None:
            resp = {'response': 'no related unit_of_work'}
        else:
            resp = self.uow_dao.get_one(uow_id).document
            for key in resp:
                resp[key] = str(resp[key])
        return resp

    @valid_action_request
    def action_get_log(self):
        return {'log': self.process_entry.log}

    @valid_action_request
    def action_update_entry(self):
        """ dispatches on the form button that was pressed: insert/update/delete/cancel """
        if 'insert_button' in self.request_arguments:
            process_entry = FreerunProcessEntry()
            process_entry.process_name = self.process_name
            process_entry.entry_name = self.entry_name
            process_entry.arguments = self._parsed_arguments()
            process_entry.description = self.request_arguments['description']
            process_entry.is_on = self.is_requested_state_on
            process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
            self.freerun_process_dao.update(process_entry)

            self.scheduler._register_process_entry(process_entry, self.scheduler.fire_freerun_worker)

        elif 'update_button' in self.request_arguments:
            # BUGFIX: capture change flags BEFORE overwriting process_entry fields.
            # The original compared process_entry.is_on to is_requested_state_on AFTER
            # the assignment, so the activate/deactivate branch was unreachable.
            is_interval_changed = self.process_entry.trigger_frequency != self.request_arguments['trigger_frequency']
            is_state_changed = self.process_entry.is_on != self.is_requested_state_on

            self.process_entry.arguments = self._parsed_arguments()
            self.process_entry.description = self.request_arguments['description']
            self.process_entry.is_on = self.is_requested_state_on
            self.process_entry.trigger_frequency = self.request_arguments['trigger_frequency']
            self.freerun_process_dao.update(self.process_entry)

            if is_interval_changed:
                self.action_change_interval()
            if is_state_changed:
                if self.is_requested_state_on:
                    self.action_activate_trigger()
                else:
                    self.action_deactivate_trigger()

        elif 'delete_button' in self.request_arguments:
            handler_key = (self.process_name, self.entry_name)
            self.thread_handler.deactivate()
            self.freerun_process_dao.remove(handler_key)
            del self.scheduler.freerun_handlers[handler_key]

        elif 'cancel_button' in self.request_arguments:
            pass

        else:
            self.logger.error('Unknown action requested by schedulable_form.html')

        return self.reply_ok()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/freerun_action_handler.py",
"copies": "1",
"size": "4937",
"license": "bsd-3-clause",
"hash": -6075300587448770000,
"line_mean": 40.8389830508,
"line_max": 117,
"alpha_frac": 0.6420903383,
"autogenerated": false,
"ratio": 3.836052836052836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9973620751282491,
"avg_score": 0.000904484614068777,
"num_lines": 118
} |
__author__ = 'Bohdan Mushkevych'
import logging
from datetime import datetime
from synergy.db.model.log_recording import LogRecording
from synergy.db.dao.log_recording_dao import LogRecordingDao
class LogRecordingHandler(logging.Handler):
    """ logging.Handler that mirrors every emitted record into a LogRecording
        DB document identified by parent_object_id """

    def __init__(self, logger, parent_object_id):
        super(LogRecordingHandler, self).__init__()
        self.logger = logger                      # logger this handler will be attached to
        self.parent_object_id = parent_object_id  # id of the DB document the log lines are appended to
        self.log_recording_dao = LogRecordingDao(logger)

    def attach(self):
        """ method clears existing log_recorder entries for given parent_object_id,
            creates a new one and attaches this handler to the logger
            from this moment every log record will be recorded in the DB """
        log_recording = LogRecording(parent_object_id=self.parent_object_id, created_at=datetime.utcnow())
        # remove-then-update: drops any stale document before inserting the fresh one
        self.log_recording_dao.remove(self.parent_object_id)
        self.log_recording_dao.update(log_recording)

        formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
                                      datefmt='%Y-%m-%d %H:%M:%S')
        self.setFormatter(formatter)
        self.logger.addHandler(self)

    def detach(self):
        """ method detaches this handler from the logger """
        self.logger.removeHandler(self)

    def emit(self, record):
        # format the record and append it to the DB document;
        # on DB failure - detach FIRST, so logging the error below cannot re-enter emit()
        msg = self.format(record)
        try:
            self.log_recording_dao.append_log(self.parent_object_id, msg.rstrip())
        except Exception as e:
            self.detach()
            self.logger.error(f'Detached LogRecordingHandler. Exception on LogRecordingDao.append_log: {e}',
                              exc_info=True)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/log_recording_handler.py",
"copies": "1",
"size": "1676",
"license": "bsd-3-clause",
"hash": 8932302223393531000,
"line_mean": 39.8780487805,
"line_max": 108,
"alpha_frac": 0.6473747017,
"autogenerated": false,
"ratio": 4.019184652278177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013228723628616597,
"num_lines": 41
} |
__author__ = 'Bohdan Mushkevych'
import mock
import types
import unittest
from settings import enable_test_mode
enable_test_mode()  # switch settings into test mode before the modules under test are imported below
import process_starter
from six import class_types, PY2, PY3
def main_function(*args):
    """Module-level fixture function: echoes its positional args back as a tuple."""
    return tuple(args)
class OldClass:
    """Classic-style fixture class; starter_method echoes its positional args."""

    def starter_method(self, *args):
        return tuple(args)
class NewClass(object):
    """New-style fixture class; starter_method echoes its positional args."""

    def starter_method(self, *args):
        return tuple(args)
class TestProcessStarter(unittest.TestCase):
    """Verifies process_starter.get_class type resolution and start_by_process_name."""

    def test_type_old_class(self):
        t, m, starter = process_starter.get_class('tests.test_process_starter.OldClass')
        self.assertIn(t, class_types)
        self.assertIsInstance(m, class_types)
        self.assertIsNone(starter)

    def test_type_new_class(self):
        t, m, starter = process_starter.get_class('tests.test_process_starter.NewClass')
        self.assertIn(t, class_types)
        self.assertIsInstance(m, class_types)
        self.assertIsNone(starter)

    def test_type_function(self):
        t, m, starter = process_starter.get_class('tests.test_process_starter.main_function')
        self.assertEqual(t, types.FunctionType)
        self.assertIsInstance(m, types.FunctionType)
        self.assertIsNone(starter)

    def test_old_class_method(self):
        t, m, starter = process_starter.get_class('tests.test_process_starter.OldClass.starter_method')
        self.assertIn(t, class_types)
        self.assertIsInstance(m, class_types)
        self.assertEqual(starter, 'starter_method')

    def test_not_class(self):
        # NOTE: overlaps test_type_function; kept for the extra assertNotIsInstance check
        t, m, starter = process_starter.get_class('tests.test_process_starter.main_function')
        self.assertEqual(t, types.FunctionType)
        self.assertIsInstance(m, types.FunctionType)
        self.assertNotIsInstance(m, class_types)
        self.assertIsNone(starter)

    def test_starter_method(self):
        t, m, starter = process_starter.get_class('tests.test_process_starter.NewClass.starter_method')
        self.assertIn(t, class_types)
        self.assertIsInstance(m, class_types)
        self.assertEqual(starter, 'starter_method')
        self.assertIsInstance(getattr(m(), starter), types.MethodType)
        if PY2:
            self.assertIsInstance(getattr(m, starter), types.MethodType)
        if PY3:
            self.assertIsInstance(getattr(m, starter), types.FunctionType)

    @mock.patch('synergy.workers.abstract_mq_worker.SimpleTracker')
    @mock.patch('synergy.workers.abstract_mq_worker.Consumer')
    def test_starting_method(self, mock_consumer, mock_tracker):
        """
        performance_ticker and Flopsy consumer must be mocked
        otherwise they will instantiate threads
        and cause Unit Tests to fail to finish

        BUGFIX: patch decorators are applied bottom-up, so the Consumer patch
        (closest to the def) supplies the FIRST mock argument; the original
        parameter names were swapped
        """
        from tests.ut_context import PROCESS_CLASS_EXAMPLE
        process_starter.start_by_process_name(PROCESS_CLASS_EXAMPLE, None)

    def test_starting_function(self):
        from tests.ut_context import PROCESS_SCRIPT_EXAMPLE
        process_starter.start_by_process_name(PROCESS_SCRIPT_EXAMPLE, 'parameters')
if __name__ == '__main__':
    # allows executing this test module directly
    unittest.main()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "tests/test_process_starter.py",
"copies": "1",
"size": "3056",
"license": "bsd-3-clause",
"hash": 2725260517317970000,
"line_mean": 33.3370786517,
"line_max": 103,
"alpha_frac": 0.6842277487,
"autogenerated": false,
"ratio": 3.6642685851318944,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4848496333831894,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import mock
import unittest
from settings import enable_test_mode
enable_test_mode()  # switch settings into test mode before the modules under test are imported below
from constants import PROCESS_SITE_HOURLY
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.model import job, unit_of_work
from synergy.db.manager.ds_manager import BaseManager
from synergy.system.data_logging import get_logger
from synergy.scheduler.timetable import Timetable
from synergy.scheduler.state_machine_dicrete import StateMachineDiscrete
from tests.state_machine_testing_utils import *
from tests.base_fixtures import create_unit_of_work
from tests.ut_context import PROCESS_UNIT_TEST
class DiscreteSMUnitTest(unittest.TestCase):
    """Unit tests for StateMachineDiscrete with mocked Timetable, UnitOfWorkDao and data source."""

    def setUp(self):
        self.logger = get_logger(PROCESS_UNIT_TEST)
        self.time_table_mocked = mock.create_autospec(Timetable)
        self.uow_dao_mocked = mock.create_autospec(UnitOfWorkDao)
        self.ds_mocked = mock.create_autospec(BaseManager)

        self.sm_real = StateMachineDiscrete(self.logger, self.time_table_mocked)
        self.sm_real.uow_dao = self.uow_dao_mocked
        self.sm_real.ds = self.ds_mocked
        # wrap state transitions in Mock(side_effect=...) so that calls are counted
        # while the real implementation still executes
        self.sm_real._process_state_final_run = mock.Mock(
            side_effect=self.sm_real._process_state_final_run)
        self.sm_real._process_state_in_progress = mock.Mock(
            side_effect=self.sm_real._process_state_in_progress)

    def tearDown(self):
        pass

    def test_state_embryo(self):
        """ method tests job records in STATE_EMBRYO state"""
        self.sm_real.insert_and_publish_uow = then_return_uow
        self.ds_mocked.highest_primary_key = mock.MagicMock(return_value=1)
        self.ds_mocked.lowest_primary_key = mock.MagicMock(return_value=0)

        job_record = get_job_record(job.STATE_EMBRYO, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)
        self.time_table_mocked.update_job_record.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)

    def test_duplicatekeyerror_state_embryo(self):
        """ method tests job records in STATE_EMBRYO state"""
        self.sm_real._insert_uow = then_raise_uw
        job_record = get_job_record(job.STATE_EMBRYO, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        # idiomatic replacement for the try/except + assertTrue(False) scaffolding
        with self.assertRaises(UserWarning):
            self.sm_real.manage_job(job_record)

    def test_future_timeperiod_state_in_progress(self):
        """ method tests timetable records in STATE_IN_PROGRESS state"""
        job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_FUTURE_TIMEPERIOD, PROCESS_SITE_HOURLY)
        manual_uow = create_unit_of_work(PROCESS_SITE_HOURLY, 0, 1, None)
        self.uow_dao_mocked.get_one = mock.MagicMock(return_value=manual_uow)
        self.time_table_mocked.is_job_record_finalizable = mock.MagicMock(return_value=True)
        self.sm_real.insert_and_publish_uow = then_raise_uw

        self.sm_real.manage_job(job_record)
        # assertEqual (instead of assertTrue(x == [])) for consistency with sibling tests
        self.assertEqual(self.time_table_mocked.update_job_record.call_args_list, [])  # called 0 times

    def test_preset_timeperiod_state_in_progress(self):
        """ method tests timetable records in STATE_IN_PROGRESS state"""
        self.time_table_mocked.is_job_record_finalizable = mock.MagicMock(return_value=True)
        self.uow_dao_mocked.get_one = mock.MagicMock(
            side_effect=lambda *_: create_unit_of_work(PROCESS_SITE_HOURLY, 0, 1, None))
        self.sm_real.insert_and_publish_uow = then_return_uow

        job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)

        self.assertEqual(len(self.time_table_mocked.update_job_record.call_args_list), 0)
        self.assertEqual(len(self.sm_real._process_state_final_run.call_args_list), 0)

    def test_transfer_to_final_state_from_in_progress(self):
        """ method tests timetable records in STATE_IN_PROGRESS state"""
        self.time_table_mocked.is_job_record_finalizable = mock.MagicMock(return_value=True)
        self.uow_dao_mocked.get_one = mock.MagicMock(
            side_effect=lambda *_: create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, None, unit_of_work.STATE_PROCESSED))
        self.sm_real.insert_and_publish_uow = then_return_duplicate_uow

        job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)

        self.assertEqual(len(self.time_table_mocked.update_job_record.call_args_list), 1)
        self.assertEqual(len(self.sm_real._process_state_in_progress.call_args_list), 1)
        self.assertEqual(len(self.sm_real._process_state_final_run.call_args_list), 0)

    def test_retry_state_in_progress(self):
        """ method tests timetable records in STATE_IN_PROGRESS state"""
        self.time_table_mocked.is_job_record_finalizable = mock.MagicMock(return_value=True)
        self.uow_dao_mocked.get_one = mock.MagicMock(
            side_effect=lambda *_: create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, None, unit_of_work.STATE_PROCESSED))
        self.sm_real.insert_and_publish_uow = then_return_uow

        job_record = get_job_record(job.STATE_IN_PROGRESS, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)

        self.assertEqual(len(self.time_table_mocked.update_job_record.call_args_list), 1)
        self.assertEqual(len(self.sm_real._process_state_in_progress.call_args_list), 1)
        self.assertEqual(len(self.sm_real._process_state_final_run.call_args_list), 0)

    def test_processed_state_final_run(self):
        """method tests timetable records in STATE_FINAL_RUN state"""
        self.uow_dao_mocked.get_one = mock.MagicMock(
            side_effect=lambda *_: create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, None, unit_of_work.STATE_PROCESSED))

        job_record = get_job_record(job.STATE_FINAL_RUN, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)

        self.assertEqual(len(self.time_table_mocked.update_job_record.call_args_list), 1)
        self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 1)

    def test_cancelled_state_final_run(self):
        """method tests timetable records in STATE_FINAL_RUN state"""
        self.uow_dao_mocked.get_one = mock.MagicMock(
            side_effect=lambda *_: create_unit_of_work(PROCESS_SITE_HOURLY, 1, 1, None, unit_of_work.STATE_CANCELED))

        job_record = get_job_record(job.STATE_FINAL_RUN, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)

        self.assertEqual(len(self.time_table_mocked.update_job_record.call_args_list), 1)
        self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 1)

    def test_state_skipped(self):
        """method tests timetable records in STATE_SKIPPED state"""
        job_record = get_job_record(job.STATE_SKIPPED, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)
        self.assertEqual(len(self.time_table_mocked.update_job_record.call_args_list), 0)
        self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 0)

    def test_state_processed(self):
        """method tests timetable records in STATE_PROCESSED state"""
        job_record = get_job_record(job.STATE_PROCESSED, TEST_PRESET_TIMEPERIOD, PROCESS_SITE_HOURLY)
        self.sm_real.manage_job(job_record)
        self.assertEqual(len(self.time_table_mocked.update_job_record.call_args_list), 0)
        self.assertEqual(len(self.time_table_mocked.get_tree.call_args_list), 0)
if __name__ == '__main__':
    # allows executing this test module directly
    unittest.main()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "tests/test_state_machine_discrete.py",
"copies": "1",
"size": "7751",
"license": "bsd-3-clause",
"hash": -6679468696990141000,
"line_mean": 47.748427673,
"line_max": 118,
"alpha_frac": 0.69694233,
"autogenerated": false,
"ratio": 3.2095238095238097,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9387312113383877,
"avg_score": 0.003830805227986729,
"num_lines": 159
} |
__author__ = 'Bohdan Mushkevych'
import os
from flow.conf import global_flows
from synergy.conf import ImproperlyConfigured, LazyObject, Settings, empty
ENVIRONMENT_FLOWS_VARIABLE = 'SYNERGY_FLOWS_MODULE'
class LazyFlows(LazyObject):
    """ A lazy proxy for Synergy Flows """

    def _setup(self):
        """
        Resolve and load the flows definition module on first access.
        The module name is read from the SYNERGY_FLOWS_MODULE environment
        variable, defaulting to 'flows'.
        """
        module_name = os.environ.get(ENVIRONMENT_FLOWS_VARIABLE, 'flows')
        if not module_name:
            raise ImproperlyConfigured(
                'Requested flows module points to an empty variable. '
                'You must either define the environment variable {0} '
                'or call flows.configure() before accessing the settings.'
                .format(ENVIRONMENT_FLOWS_VARIABLE))
        self._wrapped = Settings(module_name, default_settings=global_flows)

    def __getattr__(self, name):
        # lazily initialize the wrapped Settings on first attribute access
        if self._wrapped is empty:
            self._setup()
        return getattr(self._wrapped, name)
# module-level singleton; the wrapped Settings object is built lazily on first attribute access
flows = LazyFlows()
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/conf/__init__.py",
"copies": "1",
"size": "1195",
"license": "bsd-3-clause",
"hash": -103765259714162050,
"line_mean": 33.1428571429,
"line_max": 81,
"alpha_frac": 0.6510460251,
"autogenerated": false,
"ratio": 4.222614840989399,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007011657418161483,
"num_lines": 35
} |
__author__ = 'Bohdan Mushkevych'
import os
import functools
from synergy.system.system_logger import Logger
# process-wide cache of Logger wrappers, keyed by flow name or '<flow_name>.<step_name>'
LOGS = dict()
def get_flow_logger(flow_name, settings):
    """Return (and cache in LOGS) the logger for the given flow.

    :param flow_name: name of the flow; used as log sub-folder and file name
    :param settings: environment-specific settings dict; reads keys
                     'log_directory' and 'under_test'
    """
    if flow_name in LOGS:
        return LOGS[flow_name].get_logger()

    # make sure the path exist
    log_folder = os.path.join(settings['log_directory'], flow_name)
    if not os.path.exists(log_folder):
        os.makedirs(log_folder)

    log_file = os.path.join(settings['log_directory'], flow_name, '{0}.log'.format(flow_name))
    # BUGFIX: the original assignment carried a trailing comma, turning the flag
    # into a 1-element tuple - which is ALWAYS truthy; a plain bool is intended
    append_to_console = settings['under_test']
    redirect_stdstream = not settings['under_test']
    LOGS[flow_name] = Logger(log_file, flow_name, append_to_console, redirect_stdstream)
    return LOGS[flow_name].get_logger()
def get_cluster_logger(flow_name, cluster_name, settings):
    """Return a child of the flow logger, named after the cluster."""
    return get_flow_logger(flow_name, settings).getChild(cluster_name)
def get_step_logger(flow_name, step_name, settings):
    """Return (and cache in LOGS) the logger for the given step of the flow.

    :param flow_name: name of the owning flow (log sub-folder)
    :param step_name: name of the step (log file name)
    :param settings: environment-specific settings dict; reads keys
                     'log_directory' and 'under_test'
    """
    fqlt = '{0}.{1}'.format(flow_name, step_name)
    if fqlt in LOGS:
        return LOGS[fqlt].get_logger()

    # NOTE(review): assumes the flow's log folder already exists
    # (created by get_flow_logger) - confirm call ordering
    log_file = os.path.join(settings['log_directory'], flow_name, '{0}.log'.format(step_name))
    # BUGFIX: the original assignment carried a trailing comma, turning the flag
    # into a 1-element tuple - which is ALWAYS truthy; a plain bool is intended
    append_to_console = settings['under_test']
    redirect_stdstream = not settings['under_test']
    LOGS[fqlt] = Logger(log_file, step_name, append_to_console, redirect_stdstream)
    return LOGS[fqlt].get_logger()
def get_action_logger(flow_name, step_name, action_name, settings):
    """Return a child of the step logger, named after the action."""
    return get_step_logger(flow_name, step_name, settings).getChild(action_name)
def valid_context(method):
    """Decorator: verify that set_context() was called before *method* runs."""
    @functools.wraps(method)
    def context_checked(self, *args, **kwargs):
        assert isinstance(self, ContextDriven)
        assert self.is_context_set is True, \
            'ERROR: Calling {0}.{1} without initialized context'.format(self.__class__.__name__, method.__name__)
        return method(self, *args, **kwargs)

    return context_checked
class ExecutionContext(object):
    """ Identifies one Flow execution:
        - the timeperiod boundaries of the run
        - the environment-specific settings the run executes under
    """

    def __init__(self, flow_name, timeperiod, start_timeperiod, end_timeperiod,
                 settings, number_of_clusters=2, flow_entry=None):
        """
        :param flow_name: name of the flow
        :param timeperiod: job's timeperiod
        :param start_timeperiod: lower inclusive boundary of time-window to process
        :param end_timeperiod: upper exclusive boundary of time-window to process
        :param settings: key-value dictionary of environment-specific settings
        :param number_of_clusters: number of clusters to spawn
        :param flow_entry: data model (db record) representing flow state
        """
        assert isinstance(settings, dict)
        self.flow_name = flow_name
        self.timeperiod = timeperiod
        self.start_timeperiod = start_timeperiod
        self.end_timeperiod = end_timeperiod
        self.settings = settings
        self.number_of_clusters = number_of_clusters
        self.flow_entry = flow_entry

    @property
    def flow_id(self):
        # db id of the underlying flow record; requires flow_entry to be set
        return self.flow_entry.db_id
class ContextDriven(object):
    """ common ancestor for all types that require *context*,
        and perform same set of initialization of it """

    def __init__(self):
        # all context-derived attributes start out unset
        for attr_name in ('context', 'flow_name', 'start_timeperiod',
                          'end_timeperiod', 'timeperiod', 'settings', 'logger'):
            setattr(self, attr_name, None)
        self.is_context_set = False

    def set_context(self, context, **kwargs):
        """Copy the ExecutionContext fields onto this instance and mark it initialized."""
        assert isinstance(context, ExecutionContext)
        self.context = context
        self.flow_name = context.flow_name
        self.start_timeperiod = context.start_timeperiod
        self.end_timeperiod = context.end_timeperiod
        self.timeperiod = context.timeperiod
        self.settings = context.settings
        self.logger = self.get_logger()
        self.is_context_set = True

    def get_logger(self):
        # subclasses must supply their own logger factory
        raise NotImplementedError('method get_logger must be implemented by {0}'.format(self.__class__.__name__))
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/execution_context.py",
"copies": "1",
"size": "4235",
"license": "bsd-3-clause",
"hash": 8393979438382824000,
"line_mean": 35.5086206897,
"line_max": 113,
"alpha_frac": 0.6632821724,
"autogenerated": false,
"ratio": 3.78125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49445321723999996,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import os
import random
import shutil
import string
import tempfile
import unittest
import socket
from synergy.conf import settings
from synergy.system.data_logging import get_logger
from synergy.system import time_helper
from synergy.system.time_qualifier import *
from workers.abstract_file_collector_worker import AbstractFileCollectorWorker
from tests.ut_context import PROCESS_UNIT_TEST
def string_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*."""
    return ''.join([random.choice(chars) for _ in range(size)])
class DummyFileCollector(AbstractFileCollectorWorker):
    """ test double used by FileCollectorUnitTest;
        presumably concretizes AbstractFileCollectorWorker hooks - confirm against the base class """

    def _get_file_pattern(self, timeperiod):
        # glob pattern of the archives to collect, e.g. '2015010100-*.gz'
        return '%s-*.gz' % timeperiod

    def _get_source_folder(self):
        # folder on the remote host where the source archives reside
        return settings.settings['remote_source_folder']
class FileCollectorUnitTest(unittest.TestCase):
    """
    Following steps are required at OS-level to run this Unit Test on local box:
    1. ssh-keygen -t rsa
       Press enter for each line
    2. cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
    3. chmod 600 ~/.ssh/authorized_keys
    To run this unit test against remote box, it has to contain public .ssh keys:
    1. cat ~/.ssh/id_rsa.pub
       copy that key
    On remote machine:
    1. mkdir ~/.ssh && chmod 700 ~/.ssh
    2. touch ~/.ssh/authorized_keys2 && chmod 600 ~/.ssh/authorized_keys2
    3. Paste copied key into authorized_keys2
    """
    ACTUAL_TIMEPERIOD = time_helper.actual_timeperiod(QUALIFIER_HOURLY)
    TABLES = ['table_alpha', 'table_beta', 'table_gama']
    TEST_FILE_SIZE = 1024

    # synthetic source archive and header file names, 10 hosts per table
    TEST_FILE_LIST = []
    TEST_HEADER_LIST = []
    for table_name in TABLES:
        TEST_FILE_LIST += [ACTUAL_TIMEPERIOD + '-' + table_name + '-host_%r.domain.com.log.gz' % i for i in range(10)]
        TEST_HEADER_LIST += [table_name + '-host_%r.domain.com.header' % i for i in range(10)]

    def create_file_collector(self):
        # factory hook: subclasses may substitute another worker implementation
        return DummyFileCollector(PROCESS_UNIT_TEST)

    def setUp(self):
        # remember settings being overridden so tearDown can restore them
        self.original_source_host_list = settings.settings['remote_source_host_list']
        self.original_source_folder = settings.settings['remote_source_folder']

        self.logger = get_logger(PROCESS_UNIT_TEST)
        self.worker = self.create_file_collector()
        self.actual_timeperiod = self.ACTUAL_TIMEPERIOD

        # switch off auxiliary threads
        self.worker.performance_ticker.cancel()

        self.worker._create_directories()
        self.tempdir_copying = tempfile.mkdtemp()

        fqsf = os.path.join(self.tempdir_copying, self.actual_timeperiod[:-2])
        if not os.path.exists(fqsf):
            os.makedirs(fqsf)

        fqhf = os.path.join(self.tempdir_copying, AbstractFileCollectorWorker.HEADER_FOLDER)
        if not os.path.exists(fqhf):
            os.makedirs(fqhf)

        # FIX: use context managers so file descriptors are closed even if a write fails
        # (the original used bare open/write/close)
        for file_name in self.TEST_FILE_LIST:
            with open(os.path.join(fqsf, file_name), 'w') as output:
                output.write(string_generator(self.TEST_FILE_SIZE))

        for file_name in self.TEST_HEADER_LIST:
            with open(os.path.join(fqhf, file_name), 'w') as output:
                output.write(','.join(['column_%r' % x for x in range(5)]))

        settings.settings['remote_source_host_list'] = {socket.getfqdn(): ''}
        settings.settings['remote_source_folder'] = self.tempdir_copying

    def tearDown(self):
        settings.settings['remote_source_host_list'] = self.original_source_host_list
        settings.settings['remote_source_folder'] = self.original_source_folder

        # killing the worker
        self.worker.performance_ticker.cancel()
        del self.worker

        if self.tempdir_copying:
            self.logger.info('Cleaning up %r' % self.tempdir_copying)
            shutil.rmtree(self.tempdir_copying, True)
            self.tempdir_copying = None

    def test_copying(self):
        # remote_source_host_list defines where the source files are located
        copied_files = self.worker.copy_archives_from_source(self.actual_timeperiod)
        self.assertEqual(len(copied_files), len(self.TEST_FILE_LIST))
if __name__ == '__main__':
    # allows running this test module directly from the command line
    unittest.main()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "tests/test_abstract_file_collector.py",
"copies": "1",
"size": "4155",
"license": "bsd-3-clause",
"hash": 2452010759459439600,
"line_mean": 33.625,
"line_max": 118,
"alpha_frac": 0.6657039711,
"autogenerated": false,
"ratio": 3.6479367866549604,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48136407577549606,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import os
import shutil
import tempfile
import fabric.operations
from synergy.system.utils import compute_gzip_md5
from synergy.conf import settings
from synergy.workers.abstract_uow_aware_worker import AbstractUowAwareWorker
from synergy.db.model import unit_of_work
class AbstractFileCollectorWorker(AbstractUowAwareWorker):
    """
    module holds common logic to process unit_of_work, access remote locations and copy files to temporary local folder
    individual files are later passed to child classes for processing
    """
    # sub-folder (under the remote source folder) where header files are expected
    HEADER_FOLDER = "header"
    def __init__(self, process_name):
        super(AbstractFileCollectorWorker, self).__init__(process_name)
        # fully qualified path of the local temporary folder; set by _create_directories()
        self.tempdir_copying = None
    def __del__(self):
        # remove the temporary folder before the parent tears the worker down
        self._clean_up()
        super(AbstractFileCollectorWorker, self).__del__()
    # **************** Abstract Methods ************************
    def _get_source_folder(self):
        """ Abstract method: identifies a folder with source files """
        pass
    def _get_file_pattern(self, timeperiod):
        """ Abstract method: identifies file pattern"""
        pass
    def _get_header_file_pattern(self, timeperiod):
        """ Abstract method: identifies header file pattern"""
        pass
    def copy_header_files_from_source(self, timeperiod):
        """ method accesses remote location and copies files, specified by _get_source_folder
        and _get_header_file_pattern to local temporary folder
        :return: list of file names that were copied to local file system and are available for further processing
        :raise LookupError: in case no file names on remote location were found or copied requested date"""
        # do not abort on non-zero remote exit codes or interactive prompts
        fabric.operations.env.warn_only = True
        fabric.operations.env.abort_on_prompts = True
        fqsf = os.path.join(self._get_source_folder(), self.HEADER_FOLDER)
        for host_name in settings.settings['remote_source_host_list']:
            self.logger.info('Initiating header files copy procedure from source location %s:%s'
                             % (host_name, self._get_source_folder()))
            fabric.operations.env.host_string = host_name
            file_list = fabric.operations.get(os.path.join(fqsf, self._get_header_file_pattern(timeperiod)),
                                              self.tempdir_copying)
            # NOTE: returns after the first host that yields any header files;
            # a host with no headers raises instead of falling through to the next host
            if len(file_list) > 0:
                self.logger.info('Copied %d header files from remote location: %s' % (len(file_list), host_name))
                return file_list
            else:
                raise LookupError('No header files found at %s' % host_name + '/' + fqsf)
    def copy_archives_from_source(self, timeperiod):
        """ method accesses remote location and copies files, specified by _get_source_folder and _get_file_pattern
        to local temporary folder
        :return: list of file names that were copied to local file system and are available for further processing
        :raise LookupError: in case no file names on remote location were found or copied requested date"""
        fabric.operations.env.warn_only = True
        fabric.operations.env.abort_on_prompts = True
        summary_file_list = []
        # archives for an hourly timeperiod live under the daily folder: timeperiod with last 2 digits dropped
        fqsf = os.path.join(self._get_source_folder(), timeperiod[:-2])
        for host_name in settings.settings['remote_source_host_list']:
            self.logger.info('Initiating copy procedure from source location %s:%s' % (host_name, fqsf))
            fabric.operations.env.host_string = host_name
            file_list = fabric.operations.get(os.path.join(fqsf, self._get_file_pattern(timeperiod)),
                                              self.tempdir_copying)
            if len(file_list) > 0:
                self.logger.info('Copied %d files from remote location: %s' % (len(file_list), host_name))
                summary_file_list.extend(file_list)
            else:
                self.logger.info('No data files found for %s at %s' % (timeperiod, host_name + fqsf))
        return summary_file_list
    def _parse_metadata(self, file_name):
        """ Abstract method: parses metadata from filename (such as hostname, timeperiod, client_id, etc)"""
        pass
    def _parse_header_metadata(self, file_name):
        """ Abstract method: parses metadata from header filename (such as hostname, timeperiod, client_id, etc)"""
        pass
    def process_report_archive(self, file_name, metadata):
        """ Abstract method: takes archived report and process it
        :return number of document processed in this report """
        pass
    def process_header_file(self, file_name, metadata):
        """ Abstract method: takes header file and process it
        :return None """
        pass
    def perform_post_processing(self, timeperiod):
        """ abstract method to perform post-processing """
        pass
    def _process_uow(self, uow):
        # main entry point, invoked by AbstractUowAwareWorker once per unit_of_work
        self._create_directories()
        number_of_aggregated_objects = 0
        processed_log = dict()
        # NOTE(review): fqsf is built from the *remote* source folder, while fabric.get above
        # stores copies under self.tempdir_copying - confirm the joins below resolve to existing local files
        fqsf = os.path.join(self._get_source_folder(), uow.start_timeperiod)
        list_of_archives = self.copy_archives_from_source(uow.start_timeperiod)
        list_of_headers = self.copy_header_files_from_source(uow.start_timeperiod)
        for file_name in list_of_headers:
            metadata = self._parse_header_metadata(file_name)
            self.process_header_file(os.path.join(fqsf, file_name), metadata)
        for file_name in list_of_archives:
            metadata = self._parse_metadata(file_name)
            number_of_processed_docs = self.process_report_archive(os.path.join(fqsf, file_name), metadata)
            number_of_aggregated_objects += number_of_processed_docs
            self.performance_ticker.increment()
            # per-file audit entry: file name, number of parsed documents and (optionally) archive MD5
            tiny_log = dict()
            if settings.settings['compute_gzip_md5']:
                tiny_log[unit_of_work.MD5] = compute_gzip_md5(os.path.join(fqsf, file_name))
            tiny_log[unit_of_work.FILE_NAME] = file_name
            tiny_log[unit_of_work.NUMBER_OF_PROCESSED_DOCUMENTS] = number_of_processed_docs
            # presumably dots are replaced because they are illegal in document keys - verify
            processed_log[file_name.replace('.', '-')] = tiny_log
        # NOTE(review): processed_log is assembled but neither persisted nor returned - confirm intent
        self.perform_post_processing(uow.start_timeperiod)
        return number_of_aggregated_objects, unit_of_work.STATE_PROCESSED
    def _create_directories(self):
        """ method creates temporary directories:
            - to store files copied from remote locations to local filesystem
            - uncompressed files """
        self.tempdir_copying = tempfile.mkdtemp()
    def _clean_up(self):
        """ method verifies if temporary folder exists and removes it (and nested content) """
        if self.tempdir_copying:
            self.logger.info('Cleaning up %r' % self.tempdir_copying)
            shutil.rmtree(self.tempdir_copying, True)
            self.tempdir_copying = None
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/abstract_file_collector_worker.py",
"copies": "1",
"size": "6860",
"license": "bsd-3-clause",
"hash": -9159970598490079000,
"line_mean": 43.5454545455,
"line_max": 119,
"alpha_frac": 0.6421282799,
"autogenerated": false,
"ratio": 4.061574896388396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013805015895103998,
"num_lines": 154
} |
__author__ = 'Bohdan Mushkevych'
import os
import shutil
import tempfile
import psycopg2
from flow.core.execution_context import valid_context
from flow.core.abstract_action import AbstractAction
class ExportAction(AbstractAction):
    """ performs UNLOAD from the selected Postgres DB
    to the local filesystem and to S3 afterwards """
    def __init__(self, table_name, **kwargs):
        super(ExportAction, self).__init__('postgres->s3 export action', kwargs)
        # name of the table to export
        self.table_name = table_name
        # local scratch folder that holds the exported CSV until it is uploaded
        self.tempdir_copying = tempfile.mkdtemp()
    def set_context(self, context, step_name=None, **kwargs):
        super(ExportAction, self).set_context(context, step_name, **kwargs)
    def cleanup(self):
        """ method verifies if temporary folder exists and removes it (and nested content) """
        if self.tempdir_copying:
            self.logger.info('Cleaning up {0}'.format(self.tempdir_copying))
            shutil.rmtree(self.tempdir_copying, True)
            self.tempdir_copying = None
    def get_file(self):
        # open <tempdir>/<table_name>.csv for the COPY TO output
        # NOTE(review): handle is never explicitly closed/rewound by this class - verify
        file_uri = os.path.join(self.tempdir_copying, self.table_name + '.csv')
        return open(file_uri, 'w+')  # writing and reading
    def table_to_file(self):
        """ method connects to the remote PostgreSQL and copies requested table into a local file """
        # NOTE(review): the log messages reference aws_redshift_db while the connection
        # is made with aws_postgres_db - confirm which database name is intended in the logs
        self.logger.info('Executing COPY_TO command for {0}.{1}\n.'
                         .format(self.settings['aws_redshift_db'], self.table_name))
        with psycopg2.connect(host=self.settings['aws_postgres_host'],
                              database=self.settings['aws_postgres_db'],
                              user=self.settings['aws_postgres_user'],
                              password=self.settings['aws_postgres_password'],
                              port=self.settings['aws_postgres_port']) as conn:
            with conn.cursor() as cursor:
                try:
                    f = self.get_file()
                    # http://initd.org/psycopg/docs/cursor.html#cursor.copy_to
                    cursor.copy_to(file=f, table=self.table_name, sep=',', null='null')
                    self.logger.info('SUCCESS for {0}.{1} COPY_TO command. Status message: {2}'
                                     .format(self.settings['aws_redshift_db'], self.table_name,
                                             cursor.statusmessage))
                    return f
                except Exception:
                    self.logger.error('FAILURE for {0}.{1} COPY command.'
                                      .format(self.settings['aws_redshift_db'], self.table_name), exc_info=True)
                    return None
    @valid_context
    def run(self, execution_cluster):
        # NOTE(review): table_to_file() returns an open file object (or None), not a path string;
        # confirm that filesystem.copyFromLocal accepts a file object, and that it is flushed first
        file_uri = self.table_to_file()
        if not file_uri:
            raise UserWarning('Table {0} was not exported. Aborting the action'.format(self.table_name))
        target_file_uri = '{0}/{1}.csv'.format(self.timeperiod, self.table_name)
        execution_cluster.filesystem.mkdir(self.timeperiod)
        execution_cluster.filesystem.copyFromLocal(file_uri, target_file_uri)
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/aws_actions.py",
"copies": "1",
"size": "3108",
"license": "bsd-3-clause",
"hash": -8778975961634092000,
"line_mean": 45.3880597015,
"line_max": 112,
"alpha_frac": 0.5904118404,
"autogenerated": false,
"ratio": 4.132978723404255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016023378807171426,
"num_lines": 67
} |
__author__ = 'Bohdan Mushkevych'
import os
import sys
import gzip
import hashlib
from collections import deque
from synergy.conf import settings
from synergy.conf import context
def create_s3_file_uri(s3_bucket, timeperiod, file_name):
    """Assemble a fully qualified S3 URI: s3://<bucket>/<timeperiod>/<file_name>."""
    return f's3://{s3_bucket}/{timeperiod}/{file_name}'
def break_s3_file_uri(fully_qualified_file):
    """
    :param fully_qualified_file: in form s3://{0}/{1}/{2}
        where {0} is bucket name, {1} is timeperiod and {2} - file name
    :return: tuple (s3://{0}, {1}/{2})
    """
    bucket_uri, timeperiod, file_name = fully_qualified_file.rsplit('/', 2)
    return bucket_uri, f'{timeperiod}/{file_name}'
def unicode_truncate(s, length, encoding='utf-8'):
    """Truncate *s* to at most *length* bytes in *encoding*, dropping any partially-cut character."""
    clipped = s.encode(encoding)[:length]
    return clipped.decode(encoding, errors='ignore')
def compute_gzip_md5(fqfn):
    """ method traverses the compressed file and calculates MD5 of its *uncompressed* content

    :param fqfn: fully qualified file name of the gzip archive
    :return: hex digest string
    """
    md5 = hashlib.md5()
    # BUGFIX: sentinel must be b'' - gzip.open(..., 'rb') yields bytes, and the former
    # str sentinel '' never compared equal, so the loop never terminated at EOF in Python 3
    with gzip.open(fqfn, 'rb') as file_obj:
        for chunk in iter(lambda: file_obj.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest()
def increment_family_property(key, family):
    """Increment the counter family[key], creating the entry on first use; a None key is ignored."""
    if key is None:
        return
    str_key = key if isinstance(key, str) else str(key)
    family[str_key] = family.get(str_key, 0) + 1
def copy_and_sum_families(family_source, family_target):
    """ methods iterates thru source family and copies its entries to target family
    in case key already exists in both families - then the values are added"""
    for key, value in family_source.items():
        if key in family_target:
            family_target[key] += value
        else:
            family_target[key] = value
def ensure_dir(fqdp):
    """ make sure the given directory exists, creating intermediate folders if needed

    :param fqdp: fully qualified directory path
    """
    if os.path.isdir(fqdp):
        # directory exists - nothing to do
        return
    try:
        print(f'Attempting to create a dirs: {fqdp}...', file=sys.stdout)
        # exist_ok=True closes the race window between the isdir() check above and the
        # actual creation: another process may create the same path in-between
        os.makedirs(fqdp, exist_ok=True)
        print(f'Path {fqdp} created successfully', file=sys.stdout)
    except OSError as e:
        print(f'Unable to create path: {fqdp}, because of: {e}', file=sys.stderr)
def get_pid_filename(process_name):
    """method returns path for the PID FILENAME """
    pid_folder = settings.settings['pid_directory']
    pid_file = context.process_context[process_name].pid_filename
    return os.path.join(pid_folder, pid_file)
def create_pid_file(process_name):
    """ creates the pid file for *process_name* and stores the current process id in it """
    fqfn = get_pid_filename(process_name)
    try:
        # the file is closed by the context manager even if write fails
        with open(fqfn, mode='w') as pid_file:
            pid_file.write(str(os.getpid()))
    except Exception as e:
        print(f'Unable to create pid file at: {fqfn}, because of: {e}', file=sys.stderr)
def remove_pid_file(process_name):
    """ removes the pid file of the given process, if one exists """
    fqfn = get_pid_filename(process_name)
    if os.path.exists(fqfn):
        try:
            os.remove(fqfn)
            print(f'Removed pid file at: {fqfn}', file=sys.stdout)
        except Exception as e:
            print(f'Unable to remove pid file at: {fqfn}, because of: {e}', file=sys.stderr)
def tail_file(fqfn, num_lines=128):
    """Return up to the last *num_lines* lines of the file, with newline characters removed."""
    with open(fqfn) as log_file:
        last_lines = deque(log_file, maxlen=num_lines)
    return [line.replace('\n', '') for line in last_lines]
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/utils.py",
"copies": "1",
"size": "3401",
"license": "bsd-3-clause",
"hash": -3335119729580708400,
"line_mean": 28.8333333333,
"line_max": 111,
"alpha_frac": 0.6395177889,
"autogenerated": false,
"ratio": 3.4423076923076925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4581825481207692,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import os
import sys
import logging
import logging.handlers
from synergy.conf import context, settings
from synergy.db.model.daemon_process_entry import DaemonProcessEntry
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.model.managed_process_entry import ManagedProcessEntry
class Logger(object):
    """ Logger presents wrapper around standard API enriched with formaters and roto handlers """
    def __init__(self, file_name, log_tag, append_to_console, redirect_stdstream):
        """
        :param file_name: path+name of the output file
        :param log_tag: tag that is printed ahead of every logged message
        :param append_to_console: True if messages should be printed to the terminal console
        :param redirect_stdstream: True if stdout and stderr should be redirected to this Logger instance
        """
        self.logger = logging.getLogger(log_tag)
        if append_to_console:
            # ATTENTION: while running as stand-alone process, stdout and stderr must be muted and redirected to file
            # otherwise the their pipes get overfilled, and process halts
            stream_handler = logging.StreamHandler()
            stream_formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
            stream_handler.setFormatter(stream_formatter)
            self.logger.addHandler(stream_handler)
        if settings.settings['debug']:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)
        # ROTO FILE HANDLER: 2 MB per file, keeping up to 10 rotated backups
        roto_file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=2097152, backupCount=10)
        roto_file_formatter = logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                                                datefmt='%Y-%m-%d %H:%M:%S')
        roto_file_handler.setFormatter(roto_file_formatter)
        self.logger.addHandler(roto_file_handler)
        if redirect_stdstream:
            # While under_test, tools as xml_unittest_runner are doing complex sys.stdXXX reassignments
            # this instance poses as a writable stream via write()/flush()/isatty() below
            sys.stderr = self
            sys.stdout = self
    def get_logger(self):
        # the underlying logging.Logger instance
        return self.logger
    def write(self, msg, level=logging.INFO):
        """ method implements stream write interface, allowing to redirect stdout to logger """
        # whitespace-only writes (e.g. bare newlines from print) are skipped
        if msg is not None and len(msg.strip()) > 0:
            self.logger.log(level, msg)
    def flush(self):
        """ method implements stream flush interface, allowing to redirect stdout to logger """
        for handler in self.logger.handlers:
            handler.flush()
    def isatty(self):
        """ is the sys.stdout attached to the terminal?
        python -c "import sys; print(sys.stdout.isatty())" (should write True)
        python -c "import sys; print(sys.stdout.isatty())" | grep . (should write False).
        :return: False, indicating that the output is pipped or redirected
        """
        return False
# holds Logger instance per process name (and optional suffix);
# lets get_logger() below reuse a configured Logger instead of attaching duplicate handlers
logger_pool = dict()
def get_logger(process_name, append_to_console=None, redirect_stdstream=None):
    """Return the (cached) logging.Logger for *process_name*, creating the wrapper on first use."""
    # default behavior depends on the under_test setting:
    # under test - echo to console and leave std streams alone; in production - the opposite
    if append_to_console is None:
        append_to_console = settings.settings['under_test']
    if redirect_stdstream is None:
        redirect_stdstream = not settings.settings['under_test']

    if process_name in logger_pool:
        return logger_pool[process_name].get_logger()

    wrapper = Logger(get_log_filename(process_name),
                     get_log_tag(process_name),
                     append_to_console=append_to_console,
                     redirect_stdstream=redirect_stdstream)
    logger_pool[process_name] = wrapper
    return wrapper.get_logger()
def get_log_filename(process_name):
    """method returns path for the Log filename"""
    log_folder = settings.settings['log_directory']
    log_file = context.process_context[process_name].log_filename
    return os.path.join(log_folder, log_file)
def get_log_tag(process_name):
    """method returns tag that all logging messages will be preceded with"""
    process_obj = context.process_context[process_name]
    # NOTE(review): keep this isinstance order - the entry classes may subclass one
    # another, so reordering could change which branch wins; verify the class hierarchy
    if isinstance(process_obj, FreerunProcessEntry):
        tag = str(process_obj.token)
    elif isinstance(process_obj, ManagedProcessEntry):
        tag = str(process_obj.token) + str(process_obj.time_qualifier)
    elif isinstance(process_obj, DaemonProcessEntry):
        tag = str(process_obj.token)
    else:
        raise ValueError(f'Unknown process type: {process_obj.__class__.__name__}')
    return tag
if __name__ == '__main__':
    # manual smoke-test: emit one message via the logger and one via redirected stdout
    from tests.ut_context import PROCESS_UNIT_TEST, register_processes
    register_processes()
    logger = get_logger(PROCESS_UNIT_TEST)
    logger.info('test_message')
    print('regular print message')
    sys.stdout.flush()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/system_logger.py",
"copies": "1",
"size": "4887",
"license": "bsd-3-clause",
"hash": 9196501485024233000,
"line_mean": 40.4152542373,
"line_max": 117,
"alpha_frac": 0.664825046,
"autogenerated": false,
"ratio": 4.065723793677205,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001310942178906569,
"num_lines": 118
} |
__author__ = 'Bohdan Mushkevych'
import os
import time
import threading
from invoke import Context
from fabric2 import Connection
from datetime import datetime
from synergy.conf import settings
from synergy.workers.abstract_mq_worker import AbstractMqWorker
from synergy.workers.worker_constants import *
from synergy.db.model import unit_of_work
from synergy.db.model.mq_transmission import MqTransmission
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
class BashRunnable(threading.Thread):
    """Process starts remote or local bash script job, supervises its execution and updates mq"""
    def __init__(self, logger, message, consumer, performance_tracker):
        self.logger = logger
        self.message = message
        # MQ payload identifies the unit_of_work to execute
        self.mq_request = MqTransmission.from_json(message.body)
        self.consumer = consumer
        self.performance_tracker = performance_tracker
        # execution state shared with _poll_process(): is the script running + its exit code
        self.alive = False
        self.return_code = -1
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.thread_name = str(self.mq_request)
        super(BashRunnable, self).__init__(name=self.thread_name)
    def _poll_process(self):
        # snapshot of (is_running, exit_code)
        return self.alive, self.return_code
    def _start_process(self):
        try:
            uow = self.uow_dao.get_one(self.mq_request.record_db_id)
            if not uow.is_requested:
                # accept only UOW in STATE_REQUESTED
                self.logger.warning(f'Skipping UOW: id {self.message.body}; state {uow.state};', exc_info=False)
                self.consumer.acknowledge(self.message.delivery_tag)
                return
        except Exception:
            self.logger.error(f'Safety fuse. Can not identify UOW {self.message.body}', exc_info=True)
            self.consumer.acknowledge(self.message.delivery_tag)
            return
        try:
            self.logger.info(f'start: {self.thread_name} {{')
            self.alive = True
            uow.state = unit_of_work.STATE_IN_PROGRESS
            uow.started_at = datetime.utcnow()
            self.uow_dao.update(uow)
            # command line: <cmd_path>/<cmd_file> <cmd_args>
            command = os.path.join(uow.arguments[ARGUMENT_CMD_PATH],
                                   uow.arguments[ARGUMENT_CMD_FILE])
            command += ' {0}'.format(uow.arguments[ARGUMENT_CMD_ARGS])
            # Fabric1
            # fabric.operations.env.warn_only = True
            # fabric.operations.env.abort_on_prompts = True  # removed
            # fabric.operations.env.use_ssh_config = True  # True by default
            # fabric.operations.env.host_string = uow.arguments[ARGUMENT_CMD_HOST]
            # run_result = fabric.operations.run(command, pty=False)
            # remote host configured -> run over SSH via Fabric2; otherwise run locally via Invoke
            if uow.arguments[ARGUMENT_CMD_HOST]:
                runner = Connection(host=uow.arguments[ARGUMENT_CMD_HOST])
            else:
                runner = Context()
            run_result = runner.run(command, warn=True, pty=False)
            if run_result.succeeded:
                self.return_code = 0
            # NOTE(review): uow is marked STATE_PROCESSED even when run_result failed
            # (return_code stays -1) - confirm this is intended
            uow.finished_at = datetime.utcnow()
            uow.state = unit_of_work.STATE_PROCESSED
            self.uow_dao.update(uow)
            self.logger.info(f'Completed {self.thread_name} with result = {self.return_code}')
        except Exception:
            self.logger.error(f'Exception on starting: {self.thread_name}', exc_info=True)
            uow.state = unit_of_work.STATE_INVALID
            self.uow_dao.update(uow)
        finally:
            self.logger.info('}')
            self.alive = False
    def run(self):
        try:
            # _start_process() is synchronous; self.alive is already False when it returns,
            # so the poll loop below exits after one iteration - presumably kept from an
            # asynchronous design; verify before simplifying
            self._start_process()
            code = None
            alive = True
            while alive:
                alive, code = self._poll_process()
                time.sleep(0.1)
            if code == 0:
                self.performance_tracker.tracker.increment_success()
            else:
                self.performance_tracker.tracker.increment_failure()
            self.logger.info(f'BashDriver for {self.thread_name} return code is {code}')
        except Exception as e:
            self.performance_tracker.tracker.increment_failure()
            self.logger.error(f'Safety fuse while processing request {self.message.body}: {e}', exc_info=True)
        finally:
            # always acknowledge the MQ message, success or failure
            self.consumer.acknowledge(self.message.delivery_tag)
class BashDriver(AbstractMqWorker):
    """Process facilitates threads running local or remote bash scripts"""

    def __init__(self, process_name):
        super(BashDriver, self).__init__(process_name)
        # thread count observed at start-up; the throttling below keeps the number of
        # *extra* threads (the BashRunnable instances) within bash_runnable_count
        self.initial_thread_count = threading.active_count()

    def _mq_callback(self, message):
        """ reads JSON request from the mq message and delivers it for processing """
        # back off until one of the running scripts completes and frees a slot
        while threading.active_count() > settings.settings['bash_runnable_count'] + self.initial_thread_count:
            time.sleep(0.1)

        runnable = BashRunnable(self.logger, message, self.consumer, self.performance_tracker)
        runnable.daemon = True
        runnable.start()
if __name__ == '__main__':
    # stand-alone entry point: start the MQ-driven bash driver
    source = BashDriver(PROCESS_BASH_DRIVER)
    source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/workers/bash_driver.py",
"copies": "1",
"size": "5027",
"license": "bsd-3-clause",
"hash": -7359011735083419000,
"line_mean": 37.0833333333,
"line_max": 112,
"alpha_frac": 0.6176646111,
"autogenerated": false,
"ratio": 3.9090202177293936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5026684828829393,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import os
import time
import threading
import fabric.operations
from datetime import datetime
from synergy.conf import settings
from synergy.workers.abstract_mq_worker import AbstractMqWorker
from synergy.workers.worker_constants import *
from synergy.db.model import unit_of_work
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
class BashRunnable(threading.Thread):
    """Process starts remote or local bash script job, supervises its execution and updates mq"""
    def __init__(self, logger, message, consumer, performance_ticker):
        self.logger = logger
        self.message = message
        # MQ payload identifies the unit_of_work to execute
        self.mq_request = SynergyMqTransmission.from_json(message.body)
        self.consumer = consumer
        self.performance_ticker = performance_ticker
        # execution state shared with _poll_process(): is the script running + its exit code
        self.alive = False
        self.return_code = -1
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.thread_name = str(self.mq_request)
        super(BashRunnable, self).__init__(name=self.thread_name)
    def _poll_process(self):
        # snapshot of (is_running, exit_code)
        return self.alive, self.return_code
    def _start_process(self):
        try:
            uow = self.uow_dao.get_one(self.mq_request.unit_of_work_id)
            if not uow.is_requested:
                # accept only UOW in STATE_REQUESTED
                self.logger.warn('Skipping unit_of_work: id %s; state %s;' % (str(self.message.body), uow.state),
                                 exc_info=False)
                self.consumer.acknowledge(self.message.delivery_tag)
                return
        except Exception:
            self.logger.error('Safety fuse. Can not identify unit_of_work %s' % str(self.message.body), exc_info=True)
            self.consumer.acknowledge(self.message.delivery_tag)
            return
        try:
            self.logger.info('start: %s {' % self.thread_name)
            self.alive = True
            uow.state = unit_of_work.STATE_IN_PROGRESS
            uow.started_at = datetime.utcnow()
            self.uow_dao.update(uow)
            # tolerate non-zero exit codes and abort on prompts; host resolved from ~/.ssh/config
            fabric.operations.env.warn_only = True
            fabric.operations.env.abort_on_prompts = True
            fabric.operations.env.use_ssh_config = True
            fabric.operations.env.host_string = uow.arguments[ARGUMENT_CMD_HOST]
            # command line: <cmd_path>/<cmd_file> <cmd_args>
            command = os.path.join(uow.arguments[ARGUMENT_CMD_PATH],
                                   uow.arguments[ARGUMENT_CMD_FILE])
            command += ' %s' % uow.arguments[ARGUMENT_CMD_ARGS]
            run_result = fabric.operations.run(command, pty=False)
            if run_result.succeeded:
                self.return_code = 0
            # NOTE(review): uow is marked STATE_PROCESSED even when run_result failed
            # (return_code stays -1) - confirm this is intended
            uow.finished_at = datetime.utcnow()
            uow.state = unit_of_work.STATE_PROCESSED
            self.uow_dao.update(uow)
            self.logger.info('Completed %s with result = %r' % (self.thread_name, self.return_code))
        except Exception:
            self.logger.error('Exception on starting: %s' % self.thread_name, exc_info=True)
            uow.state = unit_of_work.STATE_INVALID
            self.uow_dao.update(uow)
        finally:
            self.logger.info('}')
            self.alive = False
    def run(self):
        try:
            # _start_process() is synchronous; self.alive is already False when it returns,
            # so the poll loop below exits after one iteration - presumably kept from an
            # asynchronous design; verify before simplifying
            self._start_process()
            code = None
            alive = True
            while alive:
                alive, code = self._poll_process()
                time.sleep(0.1)
            if code == 0:
                self.performance_ticker.tracker.increment_success()
            else:
                self.performance_ticker.tracker.increment_failure()
            self.logger.info('BashDriver for %s return code is %r' % (self.thread_name, code))
        except Exception as e:
            self.performance_ticker.tracker.increment_failure()
            self.logger.error('Safety fuse while processing request %r: %r' % (self.message.body, e), exc_info=True)
        finally:
            # always acknowledge the MQ message, success or failure
            self.consumer.acknowledge(self.message.delivery_tag)
class BashDriver(AbstractMqWorker):
    """Process facilitates threads running local or remote bash scripts"""

    def __init__(self, process_name):
        super(BashDriver, self).__init__(process_name)
        # thread count observed at start-up; the throttling below keeps the number of
        # *extra* threads (the BashRunnable instances) within bash_runnable_count
        self.initial_thread_count = threading.active_count()

    def _mq_callback(self, message):
        """ reads JSON request from the mq message and delivers it for processing """
        # back off until one of the running scripts completes and frees a slot
        while threading.active_count() > settings.settings['bash_runnable_count'] + self.initial_thread_count:
            time.sleep(0.1)

        runnable = BashRunnable(self.logger, message, self.consumer, self.performance_ticker)
        runnable.daemon = True
        runnable.start()
if __name__ == '__main__':
    # stand-alone entry point: start the MQ-driven bash driver
    source = BashDriver(PROCESS_BASH_DRIVER)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/workers/bash_driver.py",
"copies": "1",
"size": "4781",
"license": "bsd-3-clause",
"hash": -3750884610695120000,
"line_mean": 37.248,
"line_max": 118,
"alpha_frac": 0.616398243,
"autogenerated": false,
"ratio": 3.871255060728745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9983189974797507,
"avg_score": 0.0008926657862475961,
"num_lines": 125
} |
__author__ = 'Bohdan Mushkevych'
import os
from system.system_logger import Logger
from settings import settings
# User fields
# Framework fields
_NAME = 'process_name'
_LOG_FILENAME = 'log_filename'
_LOG_TAG = 'log_tag'
_PID_FILENAME = 'pid_filename'
_CLASSNAME = 'classname'
def create_context_entry(process_name,
                         classname,
                         token,
                         pid_file=None,
                         log_file=None):
    """Build a process-context dict; pid/log file names default to '<token>.pid' / '<token>.log'."""
    if pid_file is None:
        pid_file = '{0}.pid'.format(token)
    if log_file is None:
        log_file = '{0}.log'.format(token)
    return {
        _NAME: process_name,
        _PID_FILENAME: os.path.join(settings['pid_directory'], pid_file),
        _CLASSNAME: classname,
        _LOG_FILENAME: os.path.join(settings['log_directory'], log_file),
        _LOG_TAG: token,
    }
class ProcessContext(object):
    # registry of per-process configuration dicts, keyed by process name;
    # entries are produced by create_context_entry() above.
    # format: "process_name": {
    # process_name
    # pid_filename
    # classname
    # log_filename
    # log_tag
    # }
    CONTEXT = {
    }
    # format: {"process_name" : system_logger.Logger}
    LOGGER_POOL = dict()
    @classmethod
    def create_pid_file(cls, process_name, process_id=None):
        """ creates pid file and writes os.pid() in there """
        pid_filename = cls.get_pid_filename(process_name, process_id)
        try:
            with open(pid_filename, mode='w') as pid_file:
                pid_file.write(str(os.getpid()))
        except Exception as e:
            cls.get_logger(process_name).error('Unable to create pid file at: {0}, because of: {1}'.
                                               format(pid_filename, e))
    @classmethod
    def remove_pid_file(cls, process_name, process_id=None):
        """ removes pid file """
        pid_filename = cls.get_pid_filename(process_name, process_id)
        try:
            os.remove(pid_filename)
            cls.get_logger(process_name).info('Removed pid file at: {0}'.format(pid_filename))
        except Exception as e:
            cls.get_logger(process_name).error('Unable to remove pid file at: {0}, because of: {1}'.
                                               format(pid_filename, e))
    @classmethod
    def get_logger(cls, process_name, process_id=None):
        """ method returns initiated logger"""
        # Logger wrapper is created once per process name and cached in LOGGER_POOL
        if process_name not in cls.LOGGER_POOL:
            file_name = cls.get_log_filename(process_name)
            tag = cls.get_log_tag(process_name)
            cls.LOGGER_POOL[process_name] = Logger(file_name, tag)
        logger = cls.LOGGER_POOL[process_name].get_logger()
        if process_id:
            # getChild() returns a logger namespaced under the parent logger
            return logger.getChild(str(process_id))
        else:
            return logger
    @classmethod
    def get_pid_filename(cls, process_name, process_id=None):
        """method returns path for the PID FILENAME """
        pid_filename = cls.CONTEXT[process_name][_PID_FILENAME]
        if process_id:
            # insert the process_id right before the 4-character '.pid' extension
            pid_filename = pid_filename[:-4] + str(process_id) + pid_filename[-4:]
        return pid_filename
    @classmethod
    def get_classname(cls, process_name):
        """ method returns fully qualified classname of the instance running as process"""
        return cls.CONTEXT[process_name][_CLASSNAME]
    @classmethod
    def get_log_filename(cls, process_name):
        """method returns path for the Log filename"""
        return cls.CONTEXT[process_name][_LOG_FILENAME]
    @classmethod
    def get_log_tag(cls, process_name):
        """method returns tag that all logging messages will be marked with"""
        return cls.CONTEXT[process_name][_LOG_TAG]
if __name__ == '__main__':
    # module is import-only; no stand-alone behavior
    pass
| {
"repo_name": "mushkevych/launch.py",
"path": "system/process_context.py",
"copies": "1",
"size": "3724",
"license": "bsd-3-clause",
"hash": -8514572814111157000,
"line_mean": 32.5495495495,
"line_max": 100,
"alpha_frac": 0.5888829216,
"autogenerated": false,
"ratio": 3.843137254901961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4932020176501961,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import os
try:
# python 2.x
import subprocess32 as subprocess
except ImportError:
# python 3.3+
import subprocess
from flow.core.abstract_cluster import AbstractCluster
from flow.core.ephemeral_filesystem import EphemeralFilesystem
class EphemeralCluster(AbstractCluster):
    """ implementation of the abstract API for the local, non-distributed environment:
        every *run_xxx_step* method assembles a command line and executes it in a local shell """

    def __init__(self, name, context, **kwargs):
        super(EphemeralCluster, self).__init__(name, context, **kwargs)
        self._filesystem = EphemeralFilesystem(self.logger, context, **kwargs)

    @property
    def filesystem(self):
        """ :return: the EphemeralFilesystem instance backing this cluster """
        return self._filesystem

    def _run(self, command):
        """ executes the command in a local shell and captures its output
        `https://docs.python.org/3.2/library/subprocess.html#frequently-used-arguments`
        :param command: shell command line to run
        :return: list of output lines (stdout and stderr combined)
        :raise subprocess.CalledProcessError: if the command exits with a non-zero status
        """
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
        return output.split(os.linesep)

    def run_pig_step(self, uri_script, **kwargs):
        """ runs a Pig script; every kwarg is passed as a `-p key=value` parameter """
        step_args = []
        for k, v in kwargs.items():
            step_args.append('-p')
            step_args.append('{0}={1}'.format(k, v))

        return self._run('pig -f {0} {1}'.format(uri_script, ' '.join(step_args)))

    def run_spark_step(self, uri_script, **kwargs):
        """ runs a Spark script; every kwarg is passed as a `key value` pair """
        step_args = []
        for k, v in kwargs.items():
            step_args.append('{0} {1}'.format(k, v))

        # BUGFIX: the format string previously contained only the {0} placeholder,
        # so the assembled step_args were silently dropped from the command line
        return self._run('spark-submit {0} {1}'.format(uri_script, ' '.join(step_args)))

    def run_hadoop_step(self, uri_script, **kwargs):
        """ runs a Hadoop jar; every kwarg is passed as a `-D key=value` definition """
        step_args = []
        for k, v in kwargs.items():
            step_args.append('-D')
            step_args.append('{0}={1}'.format(k, v))

        return self._run('hadoop jar {0} {1}'.format(uri_script, ' '.join(step_args)))

    def run_shell_command(self, uri_script, **kwargs):
        """ runs an arbitrary shell command; every kwarg is passed as a `key value` pair """
        step_args = []
        for k, v in kwargs.items():
            step_args.append('{0} {1}'.format(k, v))

        return self._run('{0} {1}'.format(uri_script, ' '.join(step_args)))
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/ephemeral_cluster.py",
"copies": "1",
"size": "2032",
"license": "bsd-3-clause",
"hash": 6338233446974770000,
"line_mean": 35.9454545455,
"line_max": 112,
"alpha_frac": 0.6126968504,
"autogenerated": false,
"ratio": 3.5524475524475525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9659173085863288,
"avg_score": 0.0011942633968527667,
"num_lines": 55
} |
__author__ = 'Bohdan Mushkevych'
import pymongo
from synergy.db.manager import ds_manager
from synergy.db.model.freerun_process_entry import ENTRY_NAME
from synergy.db.model.managed_process_entry import PROCESS_NAME, ManagedProcessEntry
from synergy.db.model.unit_of_work import TIMEPERIOD, START_ID, END_ID
from synergy.db.model.log_recording import PARENT_OBJECT_ID, CREATED_AT
from synergy.db.dao.managed_process_dao import ManagedProcessDao
from synergy.conf import context, settings
from synergy.scheduler.scheduler_constants import *
from synergy.system.system_logger import get_logger
from flow.db import db_manager
def synch_db():
    """ function reads managed_process and updates context entries appropriately """
    logger = get_logger(PROCESS_SCHEDULER)
    managed_process_dao = ManagedProcessDao(logger)

    try:
        process_entries = managed_process_dao.get_all()
    except LookupError:
        logger.error('Synergy DB is not initialized. Aborting.')
        exit(1)

    for process_entry in process_entries:
        process_name = process_entry.process_name

        # guard 1: the process must be known to the context
        if process_name not in context.process_context:
            logger.warning(f'Process {process_name} has no reflection in the context. Skipping it.')
            continue

        # guard 2: the registered context entry must be of the managed type
        registered_type = context.process_context[process_name].__class__
        if not issubclass(registered_type, ManagedProcessEntry):
            logger.error('Process entry {0} of non-managed type {1} found in managed_process table. Skipping it.'
                         .format(process_name, registered_type.__name__))
            continue

        context.process_context[process_name] = process_entry
        logger.info(f'Context updated with process entry {process_entry.key}.')
def update_db():
    """ writes to managed_process table records from the context.process_context """
    logger = get_logger(PROCESS_SCHEDULER)
    managed_process_dao = ManagedProcessDao(logger)
    managed_process_dao.clear()

    # only ManagedProcessEntry records are persisted; other entry types are ignored
    managed_entries = [e for e in context.process_context.values() if isinstance(e, ManagedProcessEntry)]
    for process_entry in managed_entries:
        managed_process_dao.update(process_entry)
        logger.info(f'Updated DB with process entry {process_entry.key} from the context.')
def reset_db():
    """ drops the *scheduler* database, resets schema

    Recreates the unique indexes for the managed/freerun process, unit_of_work,
    log_recording and job collections, then resets the Synergy Flow tables.
    """
    logger = get_logger(PROCESS_SCHEDULER)
    logger.info('Starting *scheduler* DB reset')

    ds = ds_manager.ds_factory(logger)
    # NOTE: reaches into the datasource's private client to drop the whole database
    ds._db_client.drop_database(settings.settings['mongo_db_name'])
    logger.info('*scheduler* db has been dropped')

    # one managed process record per process name
    connection = ds.connection(COLLECTION_MANAGED_PROCESS)
    connection.create_index([(PROCESS_NAME, pymongo.ASCENDING)], unique=True)

    # freerun records are unique per (process name, entry name) pair
    connection = ds.connection(COLLECTION_FREERUN_PROCESS)
    connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (ENTRY_NAME, pymongo.ASCENDING)], unique=True)

    # a unit_of_work is unique per (process name, timeperiod, start id, end id)
    connection = ds.connection(COLLECTION_UNIT_OF_WORK)
    connection.create_index([(PROCESS_NAME, pymongo.ASCENDING),
                             (TIMEPERIOD, pymongo.ASCENDING),
                             (START_ID, pymongo.ASCENDING),
                             (END_ID, pymongo.ASCENDING)], unique=True)

    # log records are looked up by the id of the object they belong to
    connection = ds.connection(COLLECTION_LOG_RECORDING)
    connection.create_index([(PARENT_OBJECT_ID, pymongo.ASCENDING)], unique=True)

    # expireAfterSeconds: <int> Used to create an expiring (TTL) collection.
    # MongoDB will automatically delete documents from this collection after <int> seconds.
    # The indexed field must be a UTC datetime or the data will not expire.
    ttl_seconds = settings.settings['db_log_ttl_days'] * 86400  # number of seconds for TTL
    connection.create_index(CREATED_AT, expireAfterSeconds=ttl_seconds)

    # job collections: one record per (process name, timeperiod)
    for collection_name in [COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY,
                            COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY]:
        connection = ds.connection(collection_name)
        connection.create_index([(PROCESS_NAME, pymongo.ASCENDING), (TIMEPERIOD, pymongo.ASCENDING)], unique=True)

    # reset Synergy Flow tables
    db_manager.reset_db()
    logger.info('*scheduler* db has been recreated')
if __name__ == '__main__':
    # module exposes synch_db/update_db/reset_db for programmatic use; nothing runs as a script
    pass
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/manager/db_manager.py",
"copies": "1",
"size": "4240",
"license": "bsd-3-clause",
"hash": -6010987381347007000,
"line_mean": 41.8282828283,
"line_max": 114,
"alpha_frac": 0.7002358491,
"autogenerated": false,
"ratio": 3.9368616527390903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5137097501839091,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import pymongo
from synergy.db.manager import ds_manager
from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER
from synergy.system.system_logger import get_logger
from flow.db.model.flow import FLOW_NAME, TIMEPERIOD
from flow.db.model.step import STEP_NAME
from flow.flow_constants import COLLECTION_FLOW, COLLECTION_STEP
def reset_db():
    """ drops *synergy.flow* tables and re-creates them """
    logger = get_logger(PROCESS_SCHEDULER)
    logger.info('Starting *synergy.flow* tables reset')

    ds = ds_manager.ds_factory(logger)
    for collection_name in (COLLECTION_STEP, COLLECTION_FLOW):
        ds._db.drop_collection(collection_name)

    # step records are unique per (flow name, step name, timeperiod)
    step_index = [(FLOW_NAME, pymongo.ASCENDING),
                  (STEP_NAME, pymongo.ASCENDING),
                  (TIMEPERIOD, pymongo.ASCENDING)]
    ds.connection(COLLECTION_STEP).create_index(step_index, unique=True)

    # flow records are unique per (flow name, timeperiod)
    flow_index = [(FLOW_NAME, pymongo.ASCENDING),
                  (TIMEPERIOD, pymongo.ASCENDING)]
    ds.connection(COLLECTION_FLOW).create_index(flow_index, unique=True)

    logger.info('*synergy.flow* tables have been recreated')
if __name__ == '__main__':
    # module exposes reset_db for programmatic use; nothing runs as a script
    pass
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/db/db_manager.py",
"copies": "1",
"size": "1209",
"license": "bsd-3-clause",
"hash": -3921409849704019500,
"line_mean": 33.5428571429,
"line_max": 75,
"alpha_frac": 0.6865177833,
"autogenerated": false,
"ratio": 3.697247706422018,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9883765489722018,
"avg_score": 0,
"num_lines": 35
} |
__author__ = 'Bohdan Mushkevych'
import random
from db.model.site_statistics import SiteStatistics, TIMEPERIOD
from db.model.client_statistics import ClientStatistics, CLIENT_ID
from synergy.system.utils import copy_and_sum_families
from synergy.system import time_helper
from workers.abstract_mongo_worker import AbstractMongoWorker
random.seed(9001)
class ClientDailyAggregator(AbstractMongoWorker):
    """ illustration suite worker:
        - an aggregator from the site_daily into the client_daily """

    def __init__(self, process_name):
        super(ClientDailyAggregator, self).__init__(process_name)

    def _init_sink_key(self, *args):
        # sink key: (client_id, daily timeperiod derived from the hourly one)
        return args[0], time_helper.hour_to_day(args[1])

    def _mongo_sink_key(self, *args):
        # mongo lookup document for the sink collection
        return {CLIENT_ID: args[0], TIMEPERIOD: args[1]}

    def _init_source_object(self, document):
        # deserialize a raw mongo document into the source model
        return SiteStatistics.from_json(document)

    def _init_sink_object(self, composite_key):
        # create an empty target record keyed by (client_id, timeperiod)
        obj = ClientStatistics()
        obj.key = composite_key
        return obj

    def _process_single_document(self, document):
        """ folds a single site-statistics document into the per-client daily aggregate """
        source_obj = self._init_source_object(document)
        try:
            # illustration-suite shortcut: client_id is chosen pseudo-randomly
            # (the module seeds the RNG at import time for reproducibility);
            # presumably stands in for a real domain->client ownership lookup - TODO confirm
            client_id = random.randint(1, 100)
            composite_key = self._init_sink_key(client_id, source_obj.timeperiod)
            target_obj = self._get_aggregated_object(composite_key)

            target_obj.number_of_visits += source_obj.number_of_visits
            target_obj.number_of_pageviews += source_obj.number_of_pageviews
            target_obj.total_duration += source_obj.total_duration

            # merge per-family counters (os, browsers, etc.) from source into target
            copy_and_sum_families(source_obj.os, target_obj.os)
            copy_and_sum_families(source_obj.browsers, target_obj.browsers)
            copy_and_sum_families(source_obj.screen_resolution, target_obj.screen_resolution)
            copy_and_sum_families(source_obj.languages, target_obj.languages)
            copy_and_sum_families(source_obj.countries, target_obj.countries)
        except KeyError:
            self.logger.error(f'domain name {source_obj.key[0]} has no valid owner client_id')
if __name__ == '__main__':
from constants import PROCESS_CLIENT_DAILY
source = ClientDailyAggregator(PROCESS_CLIENT_DAILY)
source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "workers/client_daily_aggregator.py",
"copies": "1",
"size": "2234",
"license": "bsd-3-clause",
"hash": -4316907288298963000,
"line_mean": 36.8644067797,
"line_max": 94,
"alpha_frac": 0.681736795,
"autogenerated": false,
"ratio": 3.6090468497576738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9787956548519281,
"avg_score": 0.0005654192476785359,
"num_lines": 59
} |
__author__ = 'Bohdan Mushkevych'
import re
import decimal
import datetime
from odm.errors import ValidationError
DEFAULT_DT_FORMAT = '%Y-%m-%d %H:%M:%S'
class BaseField:
    """A base class for fields in a Synergy ODM document. Instances of this class
    may be added to subclasses of `Document` to define a document's schema.

    Fields are Python descriptors: __get__/__set__ store the actual value
    in the owning document's `_data` dict under `self.name`. """

    # Creation counter keeps track of Fields declaration order in the Document
    # Each time a Field instance is created the counter should be increased
    creation_counter = 0

    def __init__(self, name:str=None, default=None, choices=None, verbose_name:str=None, null:bool=False):
        """
        :param name: (optional) name of the field in the JSON document
                     if not set, variable name will be taken as the name
                     i.e. `a = Field() -> a.name == 'a'`
        :param default: (optional) The default value for this field if no value
                        has been set (or if the value has been unset). It can be a
                        callable.
        :param choices: (optional) The valid choices
        :param verbose_name: (optional) The human readable, verbose name for the field.
        :param null: (optional) Is the field value can be null. If no and there is a default value
                     then the default value is set
        """
        self.name = name
        self._default = default
        self.choices = choices
        self.verbose_name = verbose_name
        self.null = null
        # record the declaration order of this field within the document
        self.creation_counter = BaseField.creation_counter + 1
        BaseField.creation_counter += 1

    @property
    def default(self):
        """ :return: the default value; a callable default is invoked on every access """
        if self._default is None:
            return None

        value = self._default
        if callable(value):
            value = value()
        return value

    def initialized(self, instance):
        """ :return: True if the field holds a non-None value in the given document instance """
        if instance is None:
            # Document class being used rather than a document object. Guessing True
            return True
        return instance._data.get(self.name) is not None

    def __get__(self, instance, owner):
        """ Descriptor for retrieving a value from a field in a document. """
        if instance is None:
            # Document class being used rather than a document object
            return self

        # retrieve value from a BaseDocument instance if available
        value = instance._data.get(self.name)
        if value is not None or self.null:
            return value

        # value is None at this point
        if self.default is not None:
            # materialize the default on first access: validate it and store it
            value = self.default
            self.validate(value)
            instance._data[self.name] = value
        return value

    def __set__(self, instance, value):
        """ Descriptor for assigning a value to a field in a document. """
        if value is not None:
            self.validate(value)
            instance._data[self.name] = value
        elif self.null:
            # value is None and self.null is True
            # skip validation; force setting value to None
            instance._data[self.name] = value
        elif self.default is not None:
            # value is None and self.null is False and self.default is not None
            value = self.default
            self.validate(value)
            instance._data[self.name] = value
        else:
            # value is None and self.null is False and self.default is None
            # let the self.validate take care of reporting the exception
            self.validate(value)
            instance._data[self.name] = value

    def __delete__(self, instance):
        # remove the stored value from the owning document, if present
        if self.name in instance._data:
            del instance._data[self.name]

    def __set_name__(self, owner, name):
        # called by Python when the owning class is created;
        # adopt the attribute name unless a custom name was given to __init__
        if hasattr(self, 'name') and self.name is not None:
            # field was initialized with a custom name
            pass
        else:
            self.name = name

    def raise_error(self, message='', errors=None, name=None):
        """Raises a ValidationError. """
        raise ValidationError(message, errors=errors, field_name=name if name else self.name)

    def from_json(self, value):
        """Convert a JSON-variable to a Python type. """
        return value

    def to_json(self, value):
        """Convert a Python type to a JSON-friendly type. """
        return self.from_json(value)

    def validate(self, value):
        """Performs validation of the value.
        :param value: value to validate
        :raise ValidationError if the value is invalid"""
        # check choices
        if self.choices:
            if isinstance(self.choices[0], (list, tuple)):
                # choices given as (key, label) pairs: compare against the keys only
                option_keys = [k for k, v in self.choices]
                if value not in option_keys:
                    msg = f'Value {value} is not listed among valid choices {option_keys}'
                    self.raise_error(msg)
            elif value not in self.choices:
                msg = f'Value {value} is not listed among valid choices {self.choices}'
                self.raise_error(msg)
class NestedDocumentField(BaseField):
    """ Field wraps a stand-alone Document """

    def __init__(self, nested_klass, **kwargs):
        """
        :param nested_klass: BaseDocument-derived class
        :param kwargs: standard set of arguments from the BaseField
        """
        self.nested_klass = nested_klass
        if 'default' not in kwargs:
            # by default, an empty instance of the wrapped class is created on demand
            kwargs['default'] = lambda: nested_klass()
        super(NestedDocumentField, self).__init__(**kwargs)

    def validate(self, value):
        """ Reject any value that is not an instance of the wrapped class. """
        if isinstance(value, self.nested_klass):
            super(NestedDocumentField, self).validate(value)
        else:
            self.raise_error('NestedClass is of the wrong type: {0} vs expected {1}'
                             .format(value.__class__.__name__, self.nested_klass.__name__))
class ListField(BaseField):
    """ Field represents standard Python collection `list` """

    def __init__(self, **kwargs):
        if 'default' not in kwargs:
            # fresh empty list per document instance
            kwargs['default'] = list
        super(ListField, self).__init__(**kwargs)

    def validate(self, value):
        """ Accept only `list` or `tuple` values. """
        if isinstance(value, (list, tuple)):
            super(ListField, self).validate(value)
        else:
            self.raise_error(f'Only lists and tuples may be used in the ListField vs provided {type(value).__name__}')
class DictField(BaseField):
    """A dictionary field that wraps a standard Python dictionary. This is
    similar to an embedded document, but the structure is not defined. """

    def __init__(self, **kwargs):
        if 'default' not in kwargs:
            # fresh empty dict per document instance
            kwargs['default'] = dict
        super(DictField, self).__init__(**kwargs)

    def validate(self, value):
        """ Accept only `dict` values. """
        if isinstance(value, dict):
            super(DictField, self).validate(value)
        else:
            self.raise_error(f'Only Python dict may be used in the DictField vs provided {type(value).__name__}')
class StringField(BaseField):
    """A unicode string field. """

    def __init__(self, regex=None, min_length=None, max_length=None, **kwargs):
        """
        :param regex: (optional) pattern the value must match
        :param min_length: (optional) minimum acceptable length
        :param max_length: (optional) maximum acceptable length
        """
        self.regex = re.compile(regex) if regex else None
        self.min_length = min_length
        self.max_length = max_length
        super(StringField, self).__init__(**kwargs)

    def __set__(self, instance, value):
        super(StringField, self).__set__(instance, self.from_json(value))

    def from_json(self, value):
        """ Coerce the value to `str`: bytes are utf-8 decoded (best-effort),
        other types are stringified. """
        if value is None or isinstance(value, str):
            # NoneType values are not jsonified by BaseDocument
            return value
        if isinstance(value, bytes):
            try:
                return value.decode('utf-8')
            except:
                return value
        return str(value)

    def validate(self, value):
        if not isinstance(value, (bytes, str)):
            self.raise_error(f'Only string types may be used in the StringField vs provided {type(value).__name__}')

        length = len(value)
        if self.max_length is not None and length > self.max_length:
            self.raise_error('StringField value {0} length {1} is longer than max_length {2}'
                             .format(value, length, self.max_length))
        if self.min_length is not None and length < self.min_length:
            self.raise_error('StringField value {0} length {1} is shorter than min_length {2}'
                             .format(value, length, self.min_length))
        if self.regex is not None and self.regex.match(value) is None:
            self.raise_error(f'StringField value "{value}" did not match validation regex "{self.regex}"')
        super(StringField, self).validate(value)
class IntegerField(BaseField):
    """ An integer field. """

    def __init__(self, min_value=None, max_value=None, **kwargs):
        """
        :param min_value: validation rule for the minimum acceptable value
        :param max_value: validation rule for the maximum acceptable value
        """
        self.min_value, self.max_value = min_value, max_value
        super(IntegerField, self).__init__(**kwargs)

    def __set__(self, instance, value):
        value = self.from_json(value)
        super(IntegerField, self).__set__(instance, value)

    def from_json(self, value):
        """ Convert the value to `int` when possible; otherwise return it unchanged
        and let `validate` report the failure. """
        if value is None:
            # NoneType values are not jsonified by BaseDocument
            return value
        if isinstance(value, int):
            return value
        try:
            value = int(value)
        except (ValueError, TypeError):
            # BUGFIX: int() raises TypeError (not ValueError) for non-string/non-number
            # inputs such as lists; previously that exception escaped this method
            pass
        return value

    def validate(self, value):
        try:
            value = int(value)
        except (ValueError, TypeError):
            # narrowed from a bare `except`, which also swallowed system exceptions
            self.raise_error(f'Could not parse {value} into an Integer')
        if self.min_value is not None and value < self.min_value:
            self.raise_error(f'IntegerField value {value} is lower than min value {self.min_value}')
        if self.max_value is not None and value > self.max_value:
            self.raise_error(f'IntegerField value {value} is larger than max value {self.max_value}')
        super(IntegerField, self).validate(value)
class DecimalField(BaseField):
    """A fixed-point decimal number field. """

    def __init__(self, min_value=None, max_value=None, force_string=False,
                 precision=2, rounding=decimal.ROUND_HALF_UP, **kwargs):
        """
        :param min_value: Validation rule for the minimum acceptable value.
        :param max_value: Validation rule for the maximum acceptable value.
        :param force_string: Store as a string.
        :param precision: Number of decimal places to store.
        :param rounding: The rounding rule from the python decimal library:
            - decimal.ROUND_CEILING (towards Infinity)
            - decimal.ROUND_DOWN (towards zero)
            - decimal.ROUND_FLOOR (towards -Infinity)
            - decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
            - decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
            - decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
            - decimal.ROUND_UP (away from zero)
            - decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5;
                                  otherwise towards zero)
            Defaults to: ``decimal.ROUND_HALF_UP``
        """
        self.force_string = force_string
        # precision/rounding must be assigned before from_json is applied to min/max below
        self.precision = precision
        self.rounding = rounding
        self.min_value = self.from_json(min_value)
        self.max_value = self.from_json(max_value)
        super(DecimalField, self).__init__(**kwargs)

    def __get__(self, instance, owner):
        value = super(DecimalField, self).__get__(instance, owner)
        if value is self:
            # accessed on the class itself: return the descriptor
            return value
        return self.to_json(value)

    def __set__(self, instance, value):
        value = self.from_json(value)
        super(DecimalField, self).__set__(instance, value)

    def from_json(self, value):
        """ Convert the value to a Decimal quantized to `precision` places;
        unparsable values are returned unchanged for `validate` to report. """
        if value is None:
            # NoneType values are not jsonified by BaseDocument
            return value
        if isinstance(value, decimal.Decimal):
            return value

        try:
            value = decimal.Decimal(str(value))
        except decimal.InvalidOperation:
            return value
        return value.quantize(decimal.Decimal('.{0}'.format('0' * self.precision)), rounding=self.rounding)

    def to_json(self, value):
        """ Render the value as str (when force_string) or as float. """
        if value is None:
            # NoneType values are not jsonified by BaseDocument
            return value
        if self.force_string:
            return str(value)
        else:
            return float(self.from_json(value))

    def validate(self, value):
        if not isinstance(value, decimal.Decimal):
            if not isinstance(value, (bytes, str)):
                value = str(value)
            try:
                value = decimal.Decimal(value)
            except Exception:
                self.raise_error(f'Could not parse {value} into a Decimal')

        if self.min_value is not None and value < self.min_value:
            self.raise_error(f'DecimalField value {value} is lower than min value {self.min_value}')
        if self.max_value is not None and value > self.max_value:
            self.raise_error(f'DecimalField value {value} is larger than max value {self.max_value}')

        # super.validate() checks if the value is among the list of allowed choices
        # most likely, it will be the list of floats and integers
        # as the Decimal does not support automatic comparison with the float, we will cast it
        super(DecimalField, self).validate(float(value))
class BooleanField(BaseField):
    """A boolean field type. """

    def __init__(self, true_values=None, false_values=None, **kwargs):
        """
        :param true_values: string tokens recognized as True (default: 'true', 'yes', '1')
        :param false_values: string tokens recognized as False (default: 'false', 'no', '0')
        """
        self.true_values = true_values or ['true', 'yes', '1']
        self.false_values = false_values or ['false', 'no', '0']
        super(BooleanField, self).__init__(**kwargs)

    def __set__(self, instance, value):
        super(BooleanField, self).__set__(instance, self.from_json(value))

    def from_json(self, value):
        """ Pass bools through; parse strings/numbers against the configured token lists. """
        if value is None:
            # NoneType values are not jsonified by BaseDocument
            return value
        if isinstance(value, bool):
            return value

        if not isinstance(value, (bytes, str)):
            # cast numbers if needed to the string
            value = str(value)
        value = value.lower().strip()

        if value in self.true_values:
            return True
        if value in self.false_values:
            return False
        raise ValueError(f'Could not parse {value} into a bool')

    def validate(self, value):
        if not isinstance(value, bool):
            self.raise_error(f'Only boolean type may be used in the BooleanField vs provided {type(value).__name__}')
class DateTimeField(BaseField):
    """ A datetime field. Features:
        - During runtime, value is stored in datetime format
        - If a string value is assigned to the field, then it is assumed to be in dt_format
          and converted to the datetime object
        - If an integer is assigned to the field, then it is considered to represent number of seconds since epoch
          in UTC and converted to the datetime object
        - During json serialization, value is converted to the string accordingly to dt_format. """

    def __init__(self, dt_format=DEFAULT_DT_FORMAT, **kwargs):
        """ :param dt_format: strftime/strptime format used for (de)serialization """
        self.dt_format = dt_format
        super(DateTimeField, self).__init__(**kwargs)

    def __set__(self, instance, value):
        value = self.from_json(value)
        super(DateTimeField, self).__set__(instance, value)

    def validate(self, value):
        # round-trip through to_json: any value that cannot be rendered as a string is invalid
        new_value = self.to_json(value)
        if not isinstance(new_value, (bytes, str)):
            self.raise_error(f'Could not parse "{value}" into a date')

    def to_json(self, value):
        """ Serialize a datetime/date (or a callable producing one) into a dt_format string. """
        if value is None:
            # NoneType values are not jsonified by BaseDocument
            return value
        if callable(value):
            value = value()
        if isinstance(value, (datetime.datetime, datetime.date)):
            return value.strftime(self.dt_format)
        raise ValueError(f'DateTimeField.to_json unknown datetime type: {type(value).__name__}')

    def from_json(self, value):
        """ Parse a dt_format string or an epoch-seconds number; datetimes pass through. """
        if value is None:
            # NoneType values are not jsonified by BaseDocument
            return value
        if isinstance(value, (datetime.datetime, datetime.date)):
            return value
        if isinstance(value, (bytes, str)):
            return datetime.datetime.strptime(value, self.dt_format)
        if isinstance(value, (int, float)):
            # NOTE(review): utcfromtimestamp returns a *naive* UTC datetime and is
            # deprecated since Python 3.12 - consider a timezone-aware replacement
            return datetime.datetime.utcfromtimestamp(value)
        raise ValueError(f'DateTimeField.from_json expects data of string/int/float types vs {type(value).__name__}')
class ObjectIdField(BaseField):
    """A field wrapper around ObjectIds. """

    def __get__(self, instance, owner):
        value = super(ObjectIdField, self).__get__(instance, owner)
        # accessing the field on the class (not an instance) yields the descriptor itself
        return value if value is self else self.from_json(value)

    def __set__(self, instance, value):
        super(ObjectIdField, self).__set__(instance, self.from_json(value))

    def from_json(self, value):
        """ Represent the ObjectId as a string; string/bytes values pass through. """
        if value is None:
            # NoneType values are not jsonified by BaseDocument
            return value
        if isinstance(value, (bytes, str)):
            return value
        return str(value)

    def validate(self, value):
        try:
            str(value)
        except:
            self.raise_error(f'Could not parse {value} into a unicode')
| {
"repo_name": "mushkevych/synergy_odm",
"path": "odm/fields.py",
"copies": "1",
"size": "17669",
"license": "bsd-3-clause",
"hash": -8594575174720137000,
"line_mean": 36.9978494624,
"line_max": 118,
"alpha_frac": 0.6075046692,
"autogenerated": false,
"ratio": 4.3032148075986365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5410719476798637,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import setproctitle
from settings import settings
from system.process_context import ProcessContext
class SynergyProcess(object):
    """ Fundamental class for all processes. Registers logger and renames process to SynergyYYY"""

    def __init__(self, process_name, process_id=None):
        """ renames process to SynergyYYY and creates PID file
        :param process_name: name of the process, used to look up logger and PID file settings
        :param process_id: optional id distinguishing multiple instances of the same process
        """
        self.process_name = process_name
        self.process_id = process_id
        self.logger = ProcessContext.get_logger(process_name, process_id=self.process_id)

        # process-related activities
        # rename the OS-level process title to <prefix><process_name>[<process_id>]
        process_title = settings['process_prefix'] + self.process_name
        if self.process_id:
            process_title += str(self.process_id)
        setproctitle.setproctitle(process_title)
        ProcessContext.create_pid_file(self.process_name, process_id=self.process_id)

    def __del__(self):
        """ removes PID file """
        # NOTE(review): __del__ is not guaranteed to run on interpreter shutdown;
        # the PID file may be left behind on abnormal termination - confirm if acceptable
        ProcessContext.remove_pid_file(self.process_name, process_id=self.process_id)
        self.logger.info('Shutdown {0}'.format(self.process_name))
| {
"repo_name": "mushkevych/launch.py",
"path": "system/synergy_process.py",
"copies": "1",
"size": "1089",
"license": "bsd-3-clause",
"hash": -1040972032495139800,
"line_mean": 37.8928571429,
"line_max": 98,
"alpha_frac": 0.6877869605,
"autogenerated": false,
"ratio": 3.794425087108014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.997427134489987,
"avg_score": 0.0015881405416289138,
"num_lines": 28
} |
__author__ = 'Bohdan Mushkevych'
import sys
import logging
import logging.handlers
from settings import settings
class Logger(object):
    """ Logger presents wrapper around standard API enriched with formaters and roto handlers """

    def __init__(self, file_name, log_tag, append_to_console=None, redirect_stdstream=None):
        """
        :param file_name: path+name of the output file
        :param log_tag: tag that is printed ahead of every logged message
        :param append_to_console: True if messages should be printed to the terminal console
        :param redirect_stdstream: True if stdout and stderr should be redirected to this Logger instance
        """
        # the two switches default to opposite sides of the `under_test` setting:
        # tests log to the console; stand-alone processes capture stdout/stderr instead
        if append_to_console is None:
            append_to_console = settings['under_test']
        if redirect_stdstream is None:
            redirect_stdstream = not settings['under_test']

        self.logger = logging.getLogger(log_tag)
        if append_to_console:
            # ATTENTION: while running as stand-alone process, stdout and stderr must be muted and redirected to file
            # otherwise the their pipes get overfilled, and process halts
            stream_handler = logging.StreamHandler()
            stream_formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
            stream_handler.setFormatter(stream_formatter)
            self.logger.addHandler(stream_handler)

        if settings['debug']:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)

        # ROTO FILE HANDLER: rotates at 2 MiB, keeping up to 10 backups
        roto_file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=2097152, backupCount=10)
        roto_file_formatter = logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                                                datefmt='%Y-%m-%d %H:%M:%S')
        roto_file_handler.setFormatter(roto_file_formatter)
        self.logger.addHandler(roto_file_handler)

        if redirect_stdstream:
            # While under_test, tools as xml_unittest_runner are doing complex sys.stdXXX reassignments
            sys.stderr = self
            sys.stdout = self

    def get_logger(self):
        """ :return: the wrapped `logging.Logger` instance """
        return self.logger

    def write(self, msg, level=logging.INFO):
        """ method implements stream write interface, allowing to redirect stdout to logger """
        if msg is not None and len(msg.strip()) > 0:
            self.logger.log(level, msg)

    def flush(self):
        """ method implements stream flush interface, allowing to redirect stdout to logger """
        for handler in self.logger.handlers:
            handler.flush()

    def isatty(self):
        """ is the sys.stdout attached to the terminal?
            python -c "import sys; print(sys.stdout.isatty())" (should write True)
            python -c "import sys; print(sys.stdout.isatty())" | grep . (should write False).
        :return: False, indicating that the output is pipped or redirected
        """
        return False
if __name__ == '__main__':
    # manual smoke-test: requires the unit-test process context to be registered first
    from system.process_context import ProcessContext
    from tests.ut_process_context import PROCESS_UNIT_TEST, register_unit_test_context

    register_unit_test_context()
    logger = ProcessContext.get_logger(PROCESS_UNIT_TEST)
    logger.info('test_message')
    print('regular print message')
    sys.stdout.flush()
| {
"repo_name": "mushkevych/launch.py",
"path": "system/system_logger.py",
"copies": "1",
"size": "3362",
"license": "bsd-3-clause",
"hash": -8009176904955917000,
"line_mean": 40,
"line_max": 117,
"alpha_frac": 0.6487209994,
"autogenerated": false,
"ratio": 4.207759699624531,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356480699024531,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import sys
import logging
import logging.handlers
from synergy.conf import settings
from synergy.conf import context
from synergy.db.model.daemon_process_entry import DaemonProcessEntry
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.model.managed_process_entry import ManagedProcessEntry
class Logger(object):
    """
    Wrapper around the standard `logging` API: attaches a rotating file handler,
    optionally mirrors records to the console, and can stand in for stdout/stderr
    """

    def __init__(self, file_name, log_tag, append_to_console):
        """
        Constructor: dictionary of loggers available for this Python process
        :param file_name: path+name of the output file
        :param log_tag: tag that is printed ahead of every logged message
        :param append_to_console: True if messages should be printed to the terminal console
        """
        self.logger = logging.getLogger(log_tag)

        if append_to_console:
            # ATTENTION: while running as stand-alone process, stdout and stderr must be muted
            # and redirected to file; otherwise their pipes get overfilled, and the process halts
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
            self.logger.addHandler(console_handler)
        else:
            # While under_test, tools as xml_unittest_runner are doing complex sys.stdXXX reassignments
            sys.stdout = self
            sys.stderr = self

        level = logging.DEBUG if settings.settings['debug'] else logging.INFO
        self.logger.setLevel(level)

        # ROTO FILE HANDLER: rotates at 2 MiB, keeping up to 10 backups
        roto_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=2097152, backupCount=10)
        roto_handler.setFormatter(logging.Formatter(
            fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'))
        self.logger.addHandler(roto_handler)

    def get_logger(self):
        """ :return: the wrapped `logging.Logger` instance """
        return self.logger

    def write(self, msg, level=logging.INFO):
        """ method implements stream write interface, allowing to redirect stdout to logger """
        if msg is not None and msg.strip():
            self.logger.log(level, msg)

    def flush(self):
        """ method implements stream flush interface, allowing to redirect stdout to logger """
        for handler in self.logger.handlers:
            handler.flush()

    def isatty(self):
        """ is the sys.stdout attached to the terminal?
            python -c "import sys; print(sys.stdout.isatty())" (should write True)
            python -c "import sys; print(sys.stdout.isatty())" | grep . (should write False).
        :return: False, indicating that the output is pipped or redirected
        """
        return False
# holds Logger instance per process name (and optional suffix)
logger_pool = dict()


def get_logger(process_name, append_to_console=settings.settings['under_test']):
    """ method returns initiated logger"""
    if process_name not in logger_pool:
        logger_pool[process_name] = Logger(get_log_filename(process_name),
                                           get_log_tag(process_name),
                                           append_to_console=append_to_console)
    return logger_pool[process_name].get_logger()
def get_log_filename(process_name):
    """ method returns path of the Log file for the given process
    :param process_name: key into the context.process_context registry
    :return: fully-qualified path of the log file
    """
    import os.path
    # os.path.join tolerates a missing trailing separator in log_directory,
    # whereas the former plain string concatenation silently produced a broken path
    return os.path.join(settings.settings['log_directory'],
                        context.process_context[process_name].log_filename)
def get_log_tag(process_name):
    """method returns tag that all messages will be preceded with"""
    process_obj = context.process_context[process_name]
    if isinstance(process_obj, FreerunProcessEntry):
        tag = str(process_obj.token)
    elif isinstance(process_obj, ManagedProcessEntry):
        # managed processes carry a time qualifier in their tag
        tag = str(process_obj.token) + str(process_obj.time_qualifier)
    elif isinstance(process_obj, DaemonProcessEntry):
        tag = str(process_obj.token)
    else:
        raise ValueError('Unknown process type: %s' % process_obj.__class__.__name__)
    return tag
if __name__ == '__main__':
    # manual smoke test: emit one logged message and one console message
    process_name = 'TestAggregator'
    test_logger = get_logger(process_name)
    test_logger.info('test_message')
    print('regular print message')
    sys.stdout.flush()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/system/data_logging.py",
"copies": "1",
"size": "4441",
"license": "bsd-3-clause",
"hash": 3974238331341114400,
"line_mean": 39.3727272727,
"line_max": 117,
"alpha_frac": 0.6692186444,
"autogenerated": false,
"ratio": 4.037272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5206491371672727,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import sys
import time
import functools
import traceback
def thread_safe(method):
    """ wraps method with lock acquire/release cycle

    decorator requires class instance to have field self.lock of type
    threading.Lock or threading.RLock
    :raises AssertionError: if the instance carries no *lock* attribute """

    @functools.wraps(method)
    def _locker(self, *args, **kwargs):
        assert hasattr(self, 'lock'), \
            'thread_safe decorator applied to method {0}.{1}: missing required field {0}.lock'\
            .format(self.__class__.__name__, method.__name__)

        # the context manager guarantees the release even when *method* raises,
        # replacing the former bare-except that could swallow KeyboardInterrupt
        # and hide genuine release failures
        with self.lock:
            return method(self, *args, **kwargs)
    return _locker
def with_reconnect(func):
    """
    Handle when AutoReconnect is raised from pymongo. This is the standard error
    raised for everything from "host disconnected" to "couldn't connect to host"
    and more.
    The sleep handles the edge case when the state of a replica set changes, and
    the cursor raises AutoReconnect because the master may have changed. It can
    take some time for the replica set to stop raising this exception, and the
    small sleep and iteration count gives us a couple of seconds before we fail
    completely.
    """
    from pymongo.errors import AutoReconnect

    @functools.wraps(func)
    def _reconnector(*args, **kwargs):
        last_error = None
        attempt = 0
        while attempt < 20:
            try:
                return func(*args, **kwargs)
            except AutoReconnect as ar:
                last_error = ar
                time.sleep(0.250)
            attempt += 1
        if last_error:
            raise last_error
    return _reconnector
def singleton(cls):
    """
    turns class to singleton
    :param cls: class itself
    :return: accessor function that lazily creates the single instance and
             returns it on every subsequent call.  Generalized: the first call
             may pass constructor arguments (previously only no-arg constructors
             worked); arguments on later calls are ignored, since the already
             built instance is returned.
    """
    # the only way to implement nonlocal closure variables in Python 2.X
    instances = {}

    def get_instance(*args, **kwargs):
        # construct lazily; arguments are honored only on the very first call
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return get_instance
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/decorator.py",
"copies": "1",
"size": "2260",
"license": "bsd-3-clause",
"hash": -1958881657771678700,
"line_mean": 28.7368421053,
"line_max": 108,
"alpha_frac": 0.6216814159,
"autogenerated": false,
"ratio": 4.431372549019608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016875035397334673,
"num_lines": 76
} |
__author__ = 'Bohdan Mushkevych'
import sys
import types
from six import class_types
from synergy.conf import context
def get_class(kls):
    """
    :param kls - string of fully identified starter function or starter method path
    for instance:
        - workers.abstract_worker.AbstractWorker.start
        - workers.example_script_worker.main
    :return tuple (type, object, starter)
    for instance:
        - (FunctionType, <function_main>, None)
        - (type, <Class_...>, 'start')
    """
    parts = kls.split('.')
    try:
        # first assumption: the trailing component is a starter function
        m = __import__('.'.join(parts[:-1]))
    except ImportError:
        # fallback: the trailing component is a method of a class
        m = __import__('.'.join(parts[:-2]))

    t = None
    starter = None
    for index in range(1, len(parts)):
        starter = parts[index:]
        m = getattr(m, parts[index])

        if isinstance(m, class_types):
            t = type
            remainder = parts[index + 1:]
            starter = '.'.join(remainder) if remainder else None
            break
        elif isinstance(m, types.FunctionType):
            t = types.FunctionType
            starter = None
            break
    return t, m, starter
def start_by_process_name(process_name, *args):
    """
    Function starts the process by:
    1. retrieving its fully specified path name
    2. if the path name ends with starter method - then creates an instance of the wrapping class
       and calls <code>starter(*args)</code> method on it
    3. if the path name ends with starter function - then retrieves its module
       and calls <code>starter(*args)</code> function on it
    """
    classname = context.process_context[process_name].classname
    sys.stdout.write('INFO: Starter path {0} \n'.format(classname))

    t, m, starter = get_class(classname)
    if isinstance(m, class_types):
        sys.stdout.write('INFO: Starting process by calling starter method {0} \n'.format(starter))
        instance = m(process_name)
        getattr(instance, starter)(*args)
    elif isinstance(m, types.FunctionType):
        sys.stdout.write('INFO: Starting module.\n')
        m(*args)
    else:
        raise ValueError('Improper starter path {0}'.format(classname))
if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.stderr.write('ERROR: no Process Name specified to start \n')
    else:
        process_name = sys.argv[1]
        extra = sys.argv[2:]
        if extra:
            start_by_process_name(process_name, extra)
        else:
            start_by_process_name(process_name, None)
| {
"repo_name": "mushkevych/scheduler",
"path": "process_starter.py",
"copies": "1",
"size": "2812",
"license": "bsd-3-clause",
"hash": -2038598630955410400,
"line_mean": 32.8795180723,
"line_max": 109,
"alpha_frac": 0.6173541963,
"autogenerated": false,
"ratio": 3.7493333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48666875296333334,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
import sys
import types
from six import class_types
from system.process_context import ProcessContext
def get_class(kls):
    """
    :param kls - string of fully identified starter function or starter method path
    for instance:
        - workers.abstract_worker.AbstractWorker.start
        - workers.example_script_worker.main
    :return tuple (type, object, starter)
    for instance:
        - (FunctionType, <function_main>, None)
        - (type, <Class_...>, 'start')
    """
    parts = kls.split('.')
    try:
        # first assumption: the trailing component is a starter function
        m = __import__('.'.join(parts[:-1]))
    except ImportError:
        # fallback: the trailing component is a method of a class
        m = __import__('.'.join(parts[:-2]))

    t = None
    starter = None
    for index in range(1, len(parts)):
        starter = parts[index:]
        m = getattr(m, parts[index])

        if isinstance(m, class_types):
            t = type
            remainder = parts[index + 1:]
            starter = '.'.join(remainder) if remainder else None
            break
        elif isinstance(m, types.FunctionType):
            t = types.FunctionType
            starter = None
            break
    return t, m, starter
def start_by_process_name(process_name, *args):
    """
    Function starts the process by:
    1. retrieving its fully specified path name
    2. if the path name ends with starter method - then creates an instance of the wrapping class
       and calls <code>starter(*args)</code> method on it
    3. if the path name ends with starter function - then retrieves its module
       and calls <code>starter(*args)</code> function on it
    """
    classname = ProcessContext.get_classname(process_name)
    sys.stdout.write('INFO: Starter path {0} \n'.format(classname))

    t, m, starter = get_class(classname)
    if isinstance(m, class_types):
        sys.stdout.write('INFO: Starting process by calling starter method {0} \n'.format(starter))
        instance = m(process_name)
        getattr(instance, starter)(*args)
    elif isinstance(m, types.FunctionType):
        sys.stdout.write('INFO: Starting module.\n')
        m(*args)
    else:
        raise ValueError('Improper starter path {0}'.format(classname))
if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.stderr.write('ERROR: no Process Name specified to start \n')
    else:
        process_name = sys.argv[1]
        extra = sys.argv[2:]
        if extra:
            start_by_process_name(process_name, extra)
        else:
            start_by_process_name(process_name, None)
| {
"repo_name": "mushkevych/launch.py",
"path": "process_starter.py",
"copies": "1",
"size": "2796",
"license": "bsd-3-clause",
"hash": -4981911235537339000,
"line_mean": 32.686746988,
"line_max": 104,
"alpha_frac": 0.6180257511,
"autogenerated": false,
"ratio": 3.804081632653061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9918268301517432,
"avg_score": 0.0007678164471259506,
"num_lines": 83
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.