text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'bkeroack'
from types import FunctionType
import functools
import logging
import collections
import type_check
def log_wrapper(func):
    """
    Decorator that logs every call to *func* — its positional/keyword arguments
    and its return value — at DEBUG level.

    functools.wraps preserves the wrapped function's __name__/__doc__ so the
    log lines (and introspection) report the real function.
    """
    @functools.wraps(func)
    def log(*args, **kwargs):
        # BUGFIX: original message was missing the closing ')' after kwargs.
        logging.debug("CALLING: {} (args: {}, kwargs: {})".format(func.__name__, args, kwargs))
        ret = func(*args, **kwargs)
        logging.debug("{} returned: {}".format(func.__name__, ret))
        return ret
    return log
class LoggingMetaClass(type):
    """
    Metaclass that wraps every plain-function attribute of the class being
    created with log_wrapper, so all method calls are traced at DEBUG level.
    Non-function attributes pass through untouched.
    """
    def __new__(mcs, classname, bases, class_dict):
        instrumented = {
            name: log_wrapper(value) if type(value) == FunctionType else value
            for name, value in class_dict.items()
        }
        return type.__new__(mcs, classname, bases, instrumented)
def change_dict_keys(obj, char, rep):
    '''
    Recursively replace *char* with *rep* in nested dict keys (for sanitizing
    input to mongo, for example).

    Modifies object in place! Returns None.

    *obj* may also be a list, in which case any list items are recursed into
    (dict items inside lists are not descended into — unchanged legacy
    behavior; TODO confirm that is intended).
    '''
    # BUGFIX: the original iterated the live dict while inserting/deleting
    # keys, which raises RuntimeError on Python 3. Iterate a snapshot instead.
    for k in list(obj):
        if isinstance(k, list):
            change_dict_keys(k, char, rep)
        if isinstance(obj, dict):
            if isinstance(obj[k], dict):
                change_dict_keys(obj[k], char, rep)
            if char in k:
                # move the value to the sanitized key, drop the old key
                obj[k.replace(char, rep)] = obj[k]
                del obj[k]
def split_seq(seq, n):
    '''
    Split a sliceable sequence into n approximately equal-length chunks.
    Empty chunks (possible when n > len(seq)) are omitted from the result.
    '''
    step = 1.0 / n * len(seq)
    # rounded cut points: 0 .. len(seq), n+1 boundaries for n chunks
    cuts = [int(round(i * step)) for i in range(n + 1)]
    return [seq[lo:hi] for lo, hi in zip(cuts, cuts[1:]) if seq[lo:hi]]
def set_dict_key(obj, path, value):
    '''
    In the dict-like obj (assumed to be a nested set of dicts), walk path and
    insert value, creating intermediate dicts as needed.
    For example,
        obj = root_tree
        path = ('app', 'myapp', 'builds')
        value = { [build_reference_doc] }
    '''
    node = obj
    for key in path[:-1]:
        if key not in node:
            node[key] = {}
        node = node[key]
    node[path[-1]] = value
def paths_from_nested_dict(dict_obj, path=None):
    '''
    Given an arbitrarily-nested dict-like object, generate a list of unique tree path tuples.
    The last object in any path will be the deepest leaf value in that path.
    Ex:
        dict_obj = {
            'a': {
                0: 1,
                1: 2
            },
            'b': {
                'foo': 'bar'
            }
        }
    returns:
        [
            ('a', 0, 1),
            ('a', 1, 2),
            ('b', 'foo', 'bar')
        ]
    @type dict_obj: dict
    @type path: list
    '''
    assert not path or hasattr(path, '__getitem__')
    assert type_check.is_dictlike(dict_obj)
    assert not path or isinstance(path, list)
    path = path if path else list()
    unique_paths = list()
    # FIX: the original used the Python-2-only .iteritems(), needlessly
    # wrapped in enumerate() with an unused index. .items() iterates the
    # same pairs on both Python 2 and 3.
    for key, value in dict_obj.items():
        if type_check.is_dictlike(value):
            # recurse: accumulate this key onto the path prefix
            unique_paths.extend(paths_from_nested_dict(value, path=path + [key]))
        else:
            # leaf: path tuple ends with the leaf value itself
            unique_paths.append(tuple(path + [key] + [value]))
    return unique_paths
def flatten(list_obj):
    '''
    Given a nested n-dimensional iterable, lazily yield its leaf items.
    Strings are treated as leaves (not iterated character-by-character).

    FIX: the original `assert list_obj` only fired on first iteration (this
    is a generator) and wrongly rejected legitimately-empty input; an empty
    iterable now simply yields nothing.
    '''
    # collections.Iterable moved to collections.abc (alias removed in
    # Python 3.10); resolve whichever location this interpreter provides.
    iterable_type = getattr(collections, 'abc', collections).Iterable
    for item in list_obj:
        if isinstance(item, iterable_type) and not type_check.is_string(item):
            for leaf in flatten(item):
                yield leaf
        else:
            yield item
def flatten_list(list_obj):
    '''
    Eager counterpart of flatten(): return the flattened elements as a list.
    '''
    # list() over the generator replaces the redundant identity comprehension.
    return list(flatten(list_obj))
| {
"repo_name": "bkeroack/elita",
"path": "elita/util/__init__.py",
"copies": "1",
"size": "3592",
"license": "apache-2.0",
"hash": 2348633151828624000,
"line_mean": 27.9677419355,
"line_max": 103,
"alpha_frac": 0.5662583519,
"autogenerated": false,
"ratio": 3.617321248741188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9671180544049669,
"avg_score": 0.00247981131830369,
"num_lines": 124
} |
__author__ = 'bkeroack'
import deployment.salt_control
import deployment.sshconfig
def setup_new_server(datasvc, name):
    '''
    Do any initial server setup so it works for elita.
    Initially we just check if it's a Windows server and if so set up the git wrapper. At some point maybe we should
    verify that git is available (for all platforms) and whatever else is needed.

    Thin module-level entry point: delegates to ServerSetup.new().

    @type datasvc: elita.models.DataService
    @type name: str  # server name as known to salt -- TODO confirm
    '''
    ss = ServerSetup(datasvc)
    ss.new(name)
class ServerSetup:
    '''
    Initialization routines for new servers.

    All remote interaction goes through the RemoteCommands object ("rc")
    passed into each method; results are persisted via self.datasvc.
    '''
    def __init__(self, datasvc):
        '''
        @type datasvc: elita.models.DataService
        '''
        self.datasvc = datasvc

    def set_server_type(self, rc, name):
        '''Detect the server OS via salt, persist it as server_type, and return the OS string.'''
        os_type = rc.get_os_text(name)
        self.datasvc.serversvc.UpdateServer(name, {'server_type': os_type})
        return os_type

    def set_server_status(self, rc, name):
        '''Persist the server's connectivity status; return True iff the server is reachable.'''
        if rc.sc.verify_connectivity(name):
            status = "ok"
        else:
            status = "not connectable (check salt)"
        self.datasvc.serversvc.UpdateServer(name, {'status': status})
        return status == "ok"

    def do_server_setup(self, rc, name, os_type):
        '''
        Perform per-OS initialization on the (reachable) server and record the
        resulting status. For Windows this pushes and runs the git wrapper
        script, then checks that git is invocable.
        '''
        #create key/config dirs
        sshc = deployment.sshconfig.SSHController()
        sshc.create_key_dir(rc, [name])
        if os_type == "windows":
            # push git wrapper script. run it.
            # make sure git works
            wrapper_setup_path = "C:/git_wrapper_setup.ps1"
            rc.rm_file_if_exists([name], wrapper_setup_path)
            rc.push_files([name], {"salt://elita/files/win/git_wrapper_setup.ps1": wrapper_setup_path})
            rc.run_powershell_script([name], wrapper_setup_path)
            # run_shell_command returns a mapping keyed by server name; the
            # quoted text is cmd.exe's "command not found" message.
            if 'not recognized as an internal or external command' in rc.run_shell_command([name], 'git')[name]:
                status = 'git not available'
            else:
                self.datasvc.jobsvc.NewJobData({'note': 'IMPORTANT!! For the Windows git wrapper script to take effect '
                                                        '(necessary for key/hostname management), you *must* either '
                                                        'reboot or restart salt-minion at a minimum! Any git operation '
                                                        'will fail until this happens.'})
                status = 'ok, initialized'
        else:
            status = 'ok, initialized'
        self.datasvc.serversvc.UpdateServer(name, {'status': status})

    def new(self, name):
        '''Entry point: detect OS, verify connectivity, and run setup only if the server is reachable.'''
        sc = deployment.salt_control.SaltController(self.datasvc)
        rc = deployment.salt_control.RemoteCommands(sc)
        ost = self.set_server_type(rc, name)
        if self.set_server_status(rc, name):
            self.do_server_setup(rc, name, ost)
| {
"repo_name": "bkeroack/elita",
"path": "elita/servers.py",
"copies": "1",
"size": "2777",
"license": "apache-2.0",
"hash": -5600264720181478000,
"line_mean": 38.6714285714,
"line_max": 120,
"alpha_frac": 0.5819229384,
"autogenerated": false,
"ratio": 3.9785100286532953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5060432967053295,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import fnmatch
import logging
import elita.util
from elita.dataservice.models import User
class ValidatePermissionsObject:
    '''
    Structural validator for a user permissions object: a dict whose top-level
    keys are limited to "apps", "actions" and "servers", with iterable values
    beneath each. run() returns True only if every check passes.
    '''
    __metaclass__ = elita.util.LoggingMetaClass

    def __init__(self, permissions):
        self.permissions = permissions

    def verify_top_dict(self):
        '''Top-level object must be a dict.'''
        return isinstance(self.permissions, dict)

    def verify_top_keys(self):
        '''All top-level keys must be one of: apps, actions, servers.'''
        if not self.permissions:
            return False
        # BUGFIX: the original used try/assert for this check; asserts are
        # stripped under `python -O`, which silently disabled the validation.
        return all(t in ("apps", "actions", "servers") for t in self.permissions)

    def verify_perm_keys(self):
        #perm keys can be basically anything
        return True

    def verify_iterable(self):
        '''Every permission entry must itself be iterable.'''
        for t in self.permissions:
            for p in self.permissions[t]:
                try:
                    _ = (e for e in p)  # check if iterable (genexp calls iter() eagerly)
                except TypeError:
                    return False
        return True

    def run(self):
        '''Run all checks in order; short-circuits exactly like the original nested ifs.'''
        return (self.verify_top_dict()
                and self.verify_top_keys()
                and self.verify_perm_keys()
                and self.verify_iterable())
class UserPermissions:
    '''
    Resolves what a token's user may see and do. On construction the token is
    validated against the user service; all get_* methods expand the wildcard
    (fnmatch) patterns stored in the user's permissions object against the
    live lists of apps/actions/servers.
    '''
    __metaclass__ = elita.util.LoggingMetaClass

    def __init__(self, usersvc, token, datasvc=None):
        '''
        @type usersvc: user service providing GetAllTokens/GetUserFromToken/GetUser(s)
        @type token: str
        @type datasvc: elita.models.DataService or None (required by the get_allowed_* methods)
        '''
        self.usersvc = usersvc
        self.token = token
        self.userobj = False
        self.valid_token = False
        self.datasvc = datasvc
        self.username = ""
        if self.validate_token():
            self.valid_token = True
            logging.debug("valid token")
            self.username = self.usersvc.GetUserFromToken(token)
            logging.debug("username: {}".format(self.username))
        else:
            logging.debug("INVALID token")

    def validate_token(self):
        '''True iff the token exists in the user service.'''
        return self.token in self.usersvc.GetAllTokens()

    def get_allowed_apps(self, username=None):
        '''
        Return {permission_string: [app, ...]} for the user, expanding each
        app pattern in the permissions object against all known applications
        (plus the synthetic '_global' app).
        '''
        if not username:
            username = self.username
        user = self.usersvc.GetUser(username)
        assert self.datasvc is not None
        assert 'permissions' in user
        permissions = user['permissions']
        apps = self.datasvc.appsvc.GetApplications()
        apps.append('_global')  #not returned by GetApplications()
        logging.debug("get_allowed_apps: username: {}".format(username))
        # list of (matching_apps, permission_string) per pattern
        raw_app_list = [(fnmatch.filter(apps, a),
                         permissions['apps'][a]) for a in permissions['apps']]
        logging.debug("get_allowed_apps: raw_app_list: {}".format(raw_app_list))
        perms_dict = dict()
        for l in raw_app_list:  # coalesce the list, really wish this could be a dict comprehension
            perm = l[1]
            apps = l[0]
            if perm in perms_dict:
                perms_dict[perm].update(apps)
            else:
                perms_dict[perm] = set(apps)
        return {k: list(perms_dict[k]) for k in perms_dict}

    def get_allowed_app_list(self, permission="read", username=None):
        '''
        Return a flat list of apps whose permission string contains
        *permission* (substring match, e.g. "read" matches "read;write").
        '''
        if not username:
            username = self.username
        allowed_apps = self.get_allowed_apps(username=username)
        output = set()
        for k in allowed_apps:
            if permission in k:
                [output.add(a) for a in allowed_apps[k]]
        return list(output)

    def get_allowed_actions(self, username):
        '''Returns list of tuples: (appname, actionname). If present, 'execute' permission is implicit'''
        user = self.usersvc.GetUser(username)
        assert self.datasvc is not None
        assert 'permissions' in user
        permissions = user['permissions']
        logging.debug("get_allowed_actions: username: {}".format(username))
        allowed_actions = dict()
        # NOTE(review): actually returns {app: [action, ...]}, not a list of
        # tuples as the docstring claims -- confirm against callers.
        for a in permissions['actions']:
            app_list = fnmatch.filter(self.datasvc.appsvc.GetApplications(), a)
            for app in app_list:
                app_actions_allowed = set()
                for action_pattern in permissions['actions'][a]:
                    app_actions_allowed.update(tuple(fnmatch.filter(self.datasvc.jobsvc.GetAllActions(app),
                                                                    action_pattern)))
                allowed_actions[app] = list(app_actions_allowed)
        return allowed_actions

    def get_allowed_servers(self, username):
        '''Returns list'''
        user = self.usersvc.GetUser(username)
        assert self.datasvc is not None
        assert 'permissions' in user
        assert 'servers' in user['permissions']
        logging.debug("get_allowed_servers: username: {}".format(username))
        servers = self.datasvc.serversvc.GetServers()
        # NOTE(review): returns a list of lists (one sub-list per pattern),
        # not a flat list -- confirm callers expect that shape.
        return ([fnmatch.filter(servers, s) for s in user['permissions']['servers']])

    def get_action_permissions(self, app, action):
        '''
        Return the permission string for (app, action), checking in order:
        admin short-circuit, exact app, app wildcard, then global '*' app.
        Returns "deny" when nothing matches or the token/user is invalid.
        '''
        logging.debug("get_action_permissions: {}: {}".format(app, action))
        if self.valid_token and self.username in self.usersvc.GetUsers():
            user = self.usersvc.GetUser(self.username)
            assert 'permissions' in user
            permissions = user['permissions']
            if self.username == 'admin':
                logging.debug("returning admin permissions")
                return "execute"
            if app in permissions['actions']:
                logging.debug("{} in permissions['actions']".format(app))
                if action in permissions['actions'][app]:
                    return permissions['actions'][app][action]
                if '*' in permissions['actions'][app]:
                    return permissions['actions'][app]['*']
            if "*" in permissions['actions']:
                logging.debug("* in permissions['actions']")
                if action in permissions['actions']['*']:
                    return permissions['actions']['*'][action]
                if '*' in permissions['actions']['*']:
                    return permissions['actions']['*']['*']
        logging.debug("returning deny")
        return "deny"

    def get_app_permissions(self, app):
        '''
        Return the permission string for *app*: admin gets "read;write",
        otherwise the wildcard entry wins over an exact app entry.
        Returns "" for an invalid token/user or no match.
        '''
        logging.debug("get_permissions: app: {}".format(app))
        if self.valid_token and self.username in self.usersvc.GetUsers():
            user = self.usersvc.GetUser(self.username)
            if user['username'] == 'admin':
                logging.debug("returning admin permissions")
                return "read;write"
            elif "*" in user['permissions']['apps']:
                logging.debug("found wildcard perms")
                return user['permissions']['apps']['*']
            elif app in user['permissions']['apps']:
                logging.debug("returning perms: {}".format(user['permissions']['apps'][app]))
                return user['permissions']['apps'][app]
        logging.debug("invalid user or token: {}; {}".format(self.username, self.token))
        return ""

    def validate_pw(self, username, password):
        '''True iff *password* matches the stored credentials for *username*.'''
        userobj = User(self.usersvc.GetUser(username))
        return userobj.validate_password(password)
| {
"repo_name": "bkeroack/elita",
"path": "elita/auth.py",
"copies": "1",
"size": "7103",
"license": "apache-2.0",
"hash": -6390413687966621000,
"line_mean": 39.1299435028,
"line_max": 107,
"alpha_frac": 0.570322399,
"autogenerated": false,
"ratio": 4.495569620253165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5565892019253165,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import logging
import bson
import collections
import elita.util
import models
# Credentials and permission set seeded for the default admin account when no
# admin user document exists yet (see DataValidator.check_users).
DEFAULT_ADMIN_USERNAME = 'admin'
# NOTE(review): well-known default password; deployments should change it.
DEFAULT_ADMIN_PASSWORD = 'elita'
# Wildcard grants: full read/write on every app (and the synthetic '_global'
# app), execute on every action of every app, access to every server.
DEFAULT_ADMIN_PERMISSIONS = {
    'apps': {
        '*': 'read/write',
        '_global': 'read/write'
    },
    'actions': {
        '*': {
            '*': 'execute'
        }
    },
    'servers': ['*']
}
class DataValidator:
    '''
    Responsible for:
      - Creating proper data model/root_tree on first run
      - Validating that data isn't broken/inconsistent
      - Running migrations between versions

    Pattern used throughout: scan a mongo collection (or a root_tree subtree),
    log a warning for every inconsistency found, collect fixes/deletions in a
    list, then apply them after iteration to avoid mutating while iterating.
    '''
    __metaclass__ = elita.util.LoggingMetaClass

    def __init__(self, settings, root, db):
        '''
        @type settings: pyramid.registry.Registry
        @type root: dict
        @type db: pymongo.database.Database
        '''
        # root is optional if this is the first time elita runs
        assert settings and db
        self.settings = settings
        self.root = root
        self.db = db

    def run(self):
        '''Run all checks/migrations, then persist the (possibly repaired) root tree.'''
        # order is very significant
        logging.debug("running")
        self.check_root()
        self.check_toplevel()
        self.check_global()
        self.check_root_references()
        self.check_users()
        self.check_user_permissions()
        self.check_doc_consistency()
        self.check_containers()
        self.check_apps()
        self.check_jobs()
        self.check_deployments()
        self.check_servers()
        self.check_gitdeploys()
        self.check_gitrepos()
        self.check_groups()
        self.SaveRoot()

    def SaveRoot(self):
        '''Persist the in-memory root tree back to the root_tree collection.'''
        logging.debug('saving root_tree')
        self.db['root_tree'].save(self.root)

    def NewContainer(self, class_name, name, parent):
        '''Insert a new container document and return a DBRef to it.'''
        cdoc = self.db['containers'].insert({'_class': class_name,
                                             'name': name,
                                             'parent': parent})
        return bson.DBRef('containers', cdoc)

    def check_doc_consistency(self):
        '''
        For each collection: delete documents missing a _class marker, re-link
        orphan documents into the root tree, and delete documents whose root
        tree entry points at a different _id ("unlinked").
        '''
        #enforce collection rules and verify connectivity b/w objects and root tree
        collects = [
            'applications',
            'builds',
            'gitproviders',
            'gitrepos',
            'gitdeploys',
            'jobs',
            'tokens',
            'users',
            'servers',
            'groups',
            'packagemaps'
        ]
        for c in collects:
            rm_docs = list()
            for d in self.db[c].find():
                if '_class' not in d:
                    logging.warning("class specification not found in object {} from collection {}"
                                    .format(d['_id'], c))
                    logging.debug("...deleting malformed object")
                    self.db[c].remove({'_id': d['_id']})
                if c == 'applications':
                    if d['app_name'] not in self.root['app']:
                        logging.warning("orphan application object: '{}', adding to root tree".format(d['app_name']))
                        self.root['app'][d['app_name']] = {'_doc': bson.DBRef('applications', d['_id'])}
                    if self.root['app'][d['app_name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked application document found, deleting")
                        rm_docs.append(d['_id'])
                if c == 'builds':
                    if d['build_name'] not in self.root['app'][d['app_name']]['builds']:
                        logging.warning("orphan build object: '{}/{}', adding to root tree".format(d['app_name'], d['build_name']))
                        self.root['app'][d['app_name']]['builds'][d['build_name']] = {'_doc': bson.DBRef('builds', d['_id'])}
                    if self.root['app'][d['app_name']]['builds'][d['build_name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked build document found, deleting")
                        rm_docs.append(d['_id'])
                if c == 'gitproviders':
                    if d['name'] not in self.root['global']['gitproviders']:
                        logging.warning("orphan gitprovider object: '{}', adding to root tree".format(d['name']))
                        self.root['global']['gitproviders'][d['name']] = {'_doc': bson.DBRef('gitproviders', d['_id'])}
                    if self.root['global']['gitproviders'][d['name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked gitprovider document found, deleting")
                        rm_docs.append(d['_id'])
                if c == 'gitrepos':
                    if d['name'] not in self.root['app'][d['application']]['gitrepos']:
                        logging.warning("orphan gitrepo object: '{}/{}', adding to root tree".format(d['application'], d['name']))
                        self.root['app'][d['application']]['gitrepos'][d['name']] = {'_doc': bson.DBRef('gitrepos', d['_id'])}
                    if self.root['app'][d['application']]['gitrepos'][d['name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked gitrepo document found, deleting")
                        rm_docs.append(d['_id'])
                if c == 'gitdeploys':
                    if d['name'] not in self.root['app'][d['application']]['gitdeploys']:
                        logging.warning("orphan gitdeploy object: '{}/{}', adding to root tree".format(d['application'], d['name']))
                        self.root['app'][d['application']]['gitdeploys'][d['name']] = {'_doc': bson.DBRef('gitdeploys', d['_id'])}
                    if self.root['app'][d['application']]['gitdeploys'][d['name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked gitdeploy document found, deleting")
                        rm_docs.append(d['_id'])
                if c == 'jobs':
                    if d['job_id'] not in self.root['job']:
                        logging.warning("orphan job object: '{}', adding to root tree".format(d['job_id']))
                        self.root['job'][d['job_id']] = {'_doc': bson.DBRef('jobs', d['_id'])}
                    #we don't really care about unlinked jobs
                if c == 'tokens':
                    if d['token'] not in self.root['global']['tokens']:
                        logging.warning("orphan token object: '{}', adding to root tree".format(d['token']))
                        self.root['global']['tokens'][d['token']] = {'_doc': bson.DBRef('tokens', d['_id'])}
                    if self.root['global']['tokens'][d['token']]['_doc'].id != d['_id']:
                        logging.warning("unlinked token document found, deleting")
                        rm_docs.append(d['_id'])
                if c == 'users':
                    if d['username'] not in self.root['global']['users']:
                        logging.warning("orphan user object: '{}', adding to root tree".format(d['username']))
                        self.root['global']['users'][d['username']] = {'_doc': bson.DBRef('users', d['_id'])}
                    if self.root['global']['users'][d['username']]['_doc'].id != d['_id']:
                        logging.warning("unlinked user document found ({}), deleting".format(d['username']))
                        rm_docs.append(d['_id'])
                if c == 'servers':
                    if d['name'] not in self.root['server']:
                        logging.warning("orphan server object: '{}', adding to root tree".format(d['name']))
                        self.root['server'][d['name']] = {'_doc': bson.DBRef('servers', d['_id'])}
                    if self.root['server'][d['name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked server document found, deleting")
                        rm_docs.append(d['_id'])
                if c == 'groups':
                    if d['name'] not in self.root['app'][d['application']]['groups']:
                        logging.warning("orphan group object: '{}', adding to root tree".format(d['name']))
                        self.root['app'][d['application']]['groups'][d['name']] = {'_doc': bson.DBRef('groups', d['_id'])}
                    if self.root['app'][d['application']]['groups'][d['name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked group document found ({}), deleting".format(d['name']))
                        rm_docs.append(d['_id'])
                if c == 'packagemaps':
                    if d['name'] not in self.root['app'][d['application']]['packagemaps']:
                        logging.warning("orphan packagemap object: '{}', adding to root tree".format(d['name']))
                        self.root['app'][d['application']]['packagemaps'][d['name']] = {'_doc': bson.DBRef('packagemaps', d['_id'])}
                    if self.root['app'][d['application']]['packagemaps'][d['name']]['_doc'].id != d['_id']:
                        logging.warning("unlinked packagemap document found ({}), deleting".format(d['name']))
                        rm_docs.append(d['_id'])
            # deletions deferred until after the cursor is exhausted
            for id in rm_docs:
                self.db[c].remove({'_id': id})

    @staticmethod
    def check_root_refs(db, obj):
        '''
        Recurse into obj, find DBRefs, check if they are valid
        '''
        for k in obj:
            if k == '_doc':
                if not db.dereference(obj[k]):
                    logging.warning("Invalid dbref found! {}".format(obj[k].collection))
            elif k[0] != '_' and not isinstance(obj[k], bson.ObjectId):
                DataValidator.check_root_refs(db, obj[k])

    def check_root_references(self):
        '''
        Check that every DBRef in the root tree points to a valid document
        '''
        DataValidator.check_root_refs(self.db, self.root)

    def check_global(self):
        '''Ensure the standard containers exist under root['global'] and the admin user is linked.'''
        global_levels = {
            'users': {
                'class': "UserContainer"
            },
            'tokens': {
                'class': "TokenContainer"
            },
            'gitproviders': {
                'class': "GitProviderContainer"
            },
            'keypairs': {
                'class': "KeyPairContainer"
            }
        }
        for l in global_levels:
            if l not in self.root['global']:
                logging.warning("'{}' not found under global".format(l))
                self.root['global'][l] = dict()
                self.root['global'][l]['_doc'] = self.NewContainer(global_levels[l]['class'], l, "")
        if DEFAULT_ADMIN_USERNAME not in self.root['global']['users']:
            logging.warning("admin user not found")
            users = [u for u in self.db['users'].find()]
            # only relink if the admin *document* already exists; creation of a
            # missing admin document happens in check_users()
            if DEFAULT_ADMIN_USERNAME in [u['username'] for u in users]:
                admin = self.db['users'].find_one({'username': DEFAULT_ADMIN_USERNAME})
                assert admin
                self.root['global']['users'][DEFAULT_ADMIN_USERNAME] = {
                    '_doc': bson.DBRef('users', admin['_id'])
                }

    def check_users(self):
        '''Repair user entries: permissions containers, missing username fields, missing admin.'''
        for u in self.root['global']['users']:
            if u != '_doc':
                if "permissions" not in self.root['global']['users'][u]:
                    logging.warning("permissions container object not found under user {} in root tree; "
                                    "fixing".format(u))
                    pid = self.db['userpermissions'].insert({
                        "_class": "UserPermissions",
                        "username": u,
                        "applications": list(),
                        "actions": dict(),
                        "servers": list()
                    })
                    self.root['global']['users'][u]['permissions'] = {
                        '_doc': bson.DBRef('userpermissions', pid)
                    }
        users = [u for u in self.db['users'].find()]
        for u in users:
            if 'username' not in u:
                logging.warning("user found without username property; fixing {}".format(u['_id']))
                if 'name' in u:
                    u['username'] = u['name']
                    self.db['users'].save(u)
                else:
                    # NOTE(review): an unfixable user here will make the
                    # u['username'] list comp below raise KeyError -- confirm.
                    logging.warning("...couldn't fix user because name field not found!")
        if DEFAULT_ADMIN_USERNAME not in [u['username'] for u in users]:
            logging.warning("admin user document not found; creating")
            userobj = models.User({
                'username': DEFAULT_ADMIN_USERNAME,
                'permissions': DEFAULT_ADMIN_PERMISSIONS,
                'password': DEFAULT_ADMIN_PASSWORD
            })
            doc = userobj.get_doc()
            doc['_class'] = 'User'
            uid = self.db['users'].insert(doc)
            pid = self.db['userpermissions'].insert({
                "_class": "UserPermissions",
                "username": DEFAULT_ADMIN_USERNAME,
                "applications": list(),
                "actions": dict(),
                "servers": list()
            })
            self.root['global']['users'][DEFAULT_ADMIN_USERNAME] = {
                '_doc': bson.DBRef('users', uid),
                'permissions': {
                    '_doc': bson.DBRef('userpermissions', pid)
                }
            }

    def check_user_permissions(self):
        '''Normalize every user's permissions object: ensure apps/actions dicts and servers list exist.'''
        for u in self.db['users'].find():
            if 'permissions' not in u:
                logging.warning("permissions object not found for user {}; fixing with empty obj"
                                .format(u['username']))
                u['permissions'] = {
                    'apps': {},
                    'actions': {},
                    'servers': []
                }
            for o in ('apps', 'actions', 'servers'):
                if o not in u['permissions']:
                    logging.warning("{} not found in permissions object for user {}; fixing with empty "
                                    "obj".format(o, u['username']))
                    u['permissions'][o] = list() if o == 'servers' else dict()
            if not isinstance(u['permissions']['servers'], list):
                logging.warning("servers key under permissions object for user {} is not a list; fixing"
                                .format(u['username']))
                u['permissions']['servers'] = list(u['permissions']['servers'])
            for o in ('apps', 'actions'):
                if not isinstance(u['permissions'][o], dict):
                    logging.warning("{} key under permissions object for user {} is not a dict; invalid "
                                    "so replacing with empty dict".format(o, u['username']))
                    u['permissions'][o] = dict()
            self.db['users'].save(u)

    def check_jobs(self):
        '''Drop dangling job refs from the root tree, re-link orphan jobs, backfill missing job fields.'''
        djs = list()
        for j in self.root['job']:
            if j[0] != '_':
                if self.db.dereference(self.root['job'][j]['_doc']) is None:
                    logging.warning("found dangling job ref in root tree: {}; deleting".format(j))
                    djs.append(j)
        for j in djs:
            del self.root['job'][j]
        job_fixlist = list()
        for doc in self.db['jobs'].find():
            if doc['job_id'] not in self.root['job']:
                job_id = doc['job_id']
                logging.warning("found orphan job: {}; adding to root tree".format(doc['job_id']))
                self.root['job'][job_id] = {'_doc': bson.DBRef('jobs', doc['_id'])}
            for k in ('job_type', 'data', 'name'):
                if k not in doc:
                    logging.warning("found job without {}: {}; adding blank".format(k, doc['job_id']))
                    doc[k] = ""
                    job_fixlist.append(doc)
        for d in job_fixlist:
            self.db['jobs'].save(d)

    def check_apps(self):
        '''Ensure each app has the standard sub-containers; drop dangling refs and the legacy 'action' container.'''
        app_sublevels = {
            'builds': {
                'class': "BuildContainer"
            },
            'actions': {
                'class': "ActionContainer"
            },
            'gitrepos': {
                'class': "GitRepoContainer"
            },
            'gitdeploys': {
                'class': "GitDeployContainer"
            },
            'deployments': {
                'class': "DeploymentContainer"
            },
            'groups': {
                'class': "GroupContainer"
            },
            'packagemaps': {
                'class': "PackageMapContainer"
            }
        }
        for a in self.root['app']:
            if a[0] != '_':
                if 'action' in self.root['app'][a]:
                    logging.warning("found old 'action' container under app {}; deleting".format(a))
                    del self.root['app'][a]['action']
                for sl in app_sublevels:
                    if sl not in self.root['app'][a]:
                        logging.warning("'{}' not found under {}".format(sl, a))
                        self.root['app'][a][sl] = dict()
                        self.root['app'][a][sl]['_doc'] = self.NewContainer(app_sublevels[sl]['class'], sl, a)
                    d_o = list()
                    for o in self.root['app'][a][sl]:
                        if o[0] != '_' and '_doc' in self.root['app'][a][sl][o]:
                            if self.db.dereference(self.root['app'][a][sl][o]['_doc']) is None:
                                logging.warning("dangling {} reference '{}' in app {}; deleting".format(sl, o, a))
                                d_o.append(o)
                    for d in d_o:
                        del self.root['app'][a][sl][d]

    def check_servers(self):
        '''Drop dangling server refs from the root tree; backfill environment/status fields on server docs.'''
        ds = list()
        for s in self.root['server']:
            if s[0] != '_':
                if self.db.dereference(self.root['server'][s]['_doc']) is None:
                    logging.warning("found dangling server ref in root tree: {}; deleting".format(s))
                    ds.append(s)
        for s in ds:
            del self.root['server'][s]
        update_list = list()
        for d in self.db['servers'].find():
            update = False
            if 'environment' not in d:
                logging.warning("environment field not found for server {}; fixing".format(d['_id']))
                d['environment'] = ""
                update = True
            if 'status' not in d:
                logging.warning("status field not found for server {}; fixing".format(d['_id']))
                d['status'] = "ok"
                update = True
            if update:
                update_list.append(d)
        for d in update_list:
            self.db['servers'].save(d)

    def check_deployments(self):
        '''Migrate old-style deployment docs, backfill missing fields, and name deployments from the root tree.'''
        update_list = list()
        for d in self.db['deployments'].find():
            if 'server_specs' in d:
                logging.warning("found deployment with old-style server_specs object: {}; fixing".format(
                    d['_id']))
                d['deployment'] = {
                    'servers': d['server_specs']['spec'],
                    'gitdeploys': d['server_specs']['gitdeploys']
                }
                update_list.append(d)
            if 'job_id' not in d:
                logging.warning("found deployment without job_id: {}; fixing with blank job".format(d[
                    '_id']))
                d['job_id'] = ""
                update_list.append(d)
            if 'results' in d:
                logging.warning("found deployment with old-style results field: {}; removing"
                                .format(d['_id']))
                update_list.append(d)
            if 'status' not in d:
                logging.warning("found deployment without status: {}; fixing with blank".format(d['_id']))
                d['status'] = ""
                update_list.append(d)
            if 'progress' not in d:
                logging.warning("found deployment without progress: {}; fixing with empty dict".format(d['_id']))
                d['progress'] = dict()
                update_list.append(d)
            if 'username' not in d:
                logging.warning("found deployment without username: {}; fixing with empty string".format(d['_id']))
                d['username'] = ""
                update_list.append(d)
        for d in update_list:
            # legacy fields are stripped at save time
            if 'server_specs' in d:
                del d['server_specs']
            if 'results' in d:
                del d['results']
            self.db['deployments'].save(d)
        update_list = list()
        for a in self.root['app']:
            if a[0] != '_':
                for d in self.root['app'][a]['deployments']:
                    if d[0] != '_':
                        doc = self.db.dereference(self.root['app'][a]['deployments'][d]['_doc'])
                        assert doc is not None
                        if 'name' not in doc:
                            logging.warning("name not found under deployment {}; fixing".format(d))
                            doc['name'] = d
                            update_list.append(doc)
        if len(update_list) > 0:
            for d in update_list:
                self.db['deployments'].save(d)

    def check_gitdeploys(self):
        '''Delete gitdeploys missing mandatory keys; backfill optional fields; dedupe server lists.'''
        dlist = list()
        fixlist = list()
        for d in self.db['gitdeploys'].find():
            delete = False
            for k in ('application', 'name', 'package', 'location'):
                if k not in d:
                    logging.warning("mandatory key '{}' not found under gitdeploy {}; removing".
                                    format(k, d['_id']))
                    dlist.append(d['_id'])
                    delete = True
            fix = False
            if 'attributes' not in d and not delete:
                logging.warning("attributes not found under gitdeploy {}; fixing".format(d['_id']))
                d['attributes'] = dict()
                fix = True
            if 'actions' not in d and not delete:
                logging.warning("actions not found under gitdeploy {}; fixing".format(d['_id']))
                d['actions'] = {
                    'prepull': dict(),
                    'postpull': dict()
                }
                fix = True
            if 'servers' not in d and not delete:
                logging.warning("servers not found under gitdeploy {}; fixing".format(d['_id']))
                d['servers'] = list()
                fix = True
            if 'server' in d:
                logging.warning("gitdeploy found with obsolete server field {}; removing".format(d['_id']))
                del d['server']
                fix = True
            if len([x for x, y in collections.Counter(d['servers']).items() if y > 1]) > 0 and not delete:
                logging.warning("duplicate server entries found in gitdeploy {}; fixing".format(d['_id']))
                d['servers'] = list(set(tuple(d['servers'])))
                fix = True
            if 'deployed_build' not in d:
                logging.warning("deployed_build not found in gitdeploy {}".format(d['_id']))
                gr_doc = self.db.dereference(d['location']['gitrepo'])
                if not gr_doc or 'last_build' not in gr_doc:
                    logging.debug("...ERROR: referenced gitrepo not found! Aborting fix...")
                else:
                    d['deployed_build'] = gr_doc['last_build']
                    fix = True
            if fix:
                fixlist.append(d)
        for f in fixlist:
            self.db['gitdeploys'].find_and_modify(query={'_id': f['_id']}, update=f, upsert=True,
                                                  new=True)
        for dl in dlist:
            self.db['gitdeploys'].remove({'_id': dl})

    def check_gitrepos(self):
        '''Normalize gitrepo URIs (':' -> '/') and backfill missing uri/last_build fields.'''
        fixlist = list()
        for d in self.db['gitrepos'].find():
            if 'uri' in d:
                if d['uri'] is not None and ':' in d['uri']:
                    logging.warning("found gitrepo URI with ':'; replacing with '/' ({})".format(d['name']))
                    d['uri'] = d['uri'].replace(':', '/')
                    fixlist.append(d)
            else:
                logging.warning("found gitrepo without URI ({}); adding empty field".format(d['name']))
                d['uri'] = ""
                fixlist.append(d)
            if 'last_build' not in d:
                logging.warning("found gitrepo without last_build: {}; adding empty field".format(d['name']))
                d['last_build'] = None
                fixlist.append(d)
        for d in fixlist:
            self.db['gitrepos'].save(d)

    def check_groups(self):
        '''Backfill description/rolling_deploy on group docs; drop the obsolete explicit servers list.'''
        fixlist = list()
        for d in self.db['groups'].find():
            fix = False
            if 'description' not in d:
                logging.warning("found group without description: {}; fixing".format(d['name']))
                d['description'] = None
                fix = True
            if 'servers' in d:
                logging.warning("found group with explicit server list: {}; removing".format(d['name']))
                del d['servers']
                fix = True
            if 'rolling_deploy' not in d:
                logging.warning("found group without rolling_deploy flag: {}; fixing".format(d['name']))
                d['rolling_deploy'] = False
                fix = True
            if fix:
                fixlist.append(d)
        for d in fixlist:
            self.db['groups'].save(d)

    def check_toplevel(self):
        '''Ensure the four top-level containers exist in the root tree, plus the server environments endpoint.'''
        top_levels = {
            'app': {
                'class': "ApplicationContainer"
            },
            'global': {
                'class': "GlobalContainer"
            },
            'job': {
                'class': 'JobContainer'
            },
            'server': {
                'class': 'ServerContainer'
            }
        }
        for tl in top_levels:
            if tl not in self.root:
                logging.warning("'{}' not found under root".format(tl))
                self.root[tl] = dict()
                self.root[tl]['_doc'] = self.NewContainer(top_levels[tl]['class'], tl, "")
            if tl == 'server':
                # environments must be a nested {'_doc': DBRef} entry, not a bare DBRef
                if "environments" not in self.root[tl] or isinstance(self.root[tl]['environments'], bson.DBRef):
                    logging.warning("environments endpoint not found under servers container; fixing")
                    eid = self.db['environments'].insert({
                        '_class': "Environment",
                        'environments': ""
                    })
                    self.root[tl]['environments'] = {
                        "_doc": bson.DBRef("environments", eid)
                    }

    def check_containers(self):
        '''Migrate containers with the legacy 'AppContainer' class name to 'ApplicationContainer'.'''
        update_list = list()
        for c in self.db['containers'].find():
            if c['_class'] == 'AppContainer':
                logging.warning("found Application container with incorrect 'AppContainer' class; fixing")
                c['_class'] = 'ApplicationContainer'
                update_list.append(c)
        for c in update_list:
            self.db['containers'].save(c)

    def check_root(self):
        '''Bootstrap an empty root tree if needed; strip stray _class; dedupe root_tree documents.'''
        if not self.root:
            logging.warning("no root_tree found!")
        self.root = dict() if not self.root else self.root
        if '_class' in self.root:
            logging.warning("'_class' found in base of root; deleting")
            del self.root['_class']
        if '_doc' not in self.root:
            logging.warning("'_doc' not found in base of root")
            self.root['_doc'] = self.NewContainer('Root', 'Root', '')
        rt_list = [d for d in self.db['root_tree'].find()]
        if len(rt_list) > 1:
            logging.warning("duplicate root_tree docs found! Removing all but the first")
            for d in rt_list[1:]:
                self.db['root_tree'].remove({'_id': d['_id']})
| {
"repo_name": "bkeroack/elita",
"path": "elita/dataservice/datavalidator.py",
"copies": "1",
"size": "27790",
"license": "apache-2.0",
"hash": 1252719993124415200,
"line_mean": 45.8634064081,
"line_max": 132,
"alpha_frac": 0.4686577906,
"autogenerated": false,
"ratio": 4.38743290179981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356090692399811,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import logging
import collections
import pprint
class RootTree(collections.MutableMapping):
    '''
    Dict-like lazy view over the persisted root_tree structure. Child lookups
    dereference the node's '_doc' DBRef from mongo on demand and wrap the
    subtree in a new RootTree. ActionContainer nodes are returned raw, since
    actions are populated dynamically per request rather than persisted.

    NOTE(review): collections.MutableMapping is the pre-3.3 location
    (removed in Python 3.10); collections.abc.MutableMapping is the
    modern spelling.
    '''
    def __init__(self, db, updater, tree, doc, *args, **kwargs):
        '''
        @type db: pymongo.database.Database
        @type updater: RootTreeUpdater (vestigial; see __setitem__)
        @type tree: dict  # the raw subtree this node wraps
        @type doc: dict or None  # dereferenced '_doc' for this node
        '''
        self.pp = pprint.PrettyPrinter(indent=4)
        self.db = db
        self.tree = tree
        self.doc = doc
        self.updater = updater

    def is_action(self):
        # ActionContainer children are plain values, not DBRef-backed nodes
        return self.doc and self.doc['_class'] == 'ActionContainer'

    def __getitem__(self, key):
        key = self.__keytransform__(key)
        if self.is_action():
            return self.tree[key]
        if key in self.tree:
            if key == '_doc':
                # the DBRef itself, not a wrapped subtree
                return self.tree[key]
            doc = self.db.dereference(self.tree[key]['_doc'])
            if doc is None:
                # dangling DBRef behaves like a missing key
                logging.debug("RootTree: __getitem__: {}: doc is None: KeyError".format(key))
                raise KeyError
            return RootTree(self.db, self.updater, self.tree[key], doc)
        else:
            logging.debug("RootTree: __getitem__: {}: key not in self.tree: KeyError".format(key))
            raise KeyError

    def __setitem__(self, key, value):
        # mutates only the in-memory tree; persistence via the updater is
        # intentionally disabled (vestigial design, see RootTreeUpdater)
        self.tree[key] = value
        if not self.is_action():  # dynamically populated each request
            pass
            #self.updater.update()

    def __delitem__(self, key):
        del self.tree[key]
        #self.updater.update()
        return

    def __iter__(self):
        return iter(self.tree)

    def __len__(self):
        return len(self.tree)

    def __keytransform__(self, key):
        # identity hook required by the MutableMapping recipe
        return key
class RootTreeUpdater:
    '''
    Leftover from an abandoned ZODB-style design in which RootTree would
    persist itself on mutation. Still used to strip non-serializable action
    entries before writing the tree back to mongo.
    '''
    def __init__(self, tree, db):
        self.tree = tree
        self.db = db

    def clean_actions(self):
        # action entries hold callables and cannot be serialized into mongo,
        # so drop every non-internal action key from each application subtree
        apps = self.tree['app']
        for app_name in apps:
            doomed = []
            if app_name[0] != '_' and "actions" in apps[app_name]:
                doomed = [ac for ac in apps[app_name]['actions'] if ac[0] != '_']
            for ac in doomed:
                del apps[app_name]['actions'][ac]

    def update(self):
        # strip actions, then overwrite the single persisted root_tree doc
        self.clean_actions()
        root_tree = self.db['root_tree'].find_one()
        self.db['root_tree'].update({"_id": root_tree['_id']}, self.tree)
| {
"repo_name": "bkeroack/elita",
"path": "elita/dataservice/root_tree.py",
"copies": "1",
"size": "2449",
"license": "apache-2.0",
"hash": -8585424347718574000,
"line_mean": 30.3974358974,
"line_max": 109,
"alpha_frac": 0.5300122499,
"autogenerated": false,
"ratio": 3.9756493506493507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9994270666121018,
"avg_score": 0.002278186885666519,
"num_lines": 78
} |
__author__ = 'bkeroack'
import logging
import elita.util
import bson
class MongoService:
    '''
    Low-level CRUD wrapper around a pymongo Database handle. Helpers treat
    the `keys` dict as a document's uniqueness constraint and opportunistically
    remove duplicate documents whenever they are detected.
    '''
    # logspam
    #__metaclass__ = elita.util.LoggingMetaClass
    def __init__(self, db):
        '''
        @type db = pymongo.database.Database
        '''
        assert db
        self.db = db
    def create_new(self, collection, keys, classname, doc, remove_existing=True):
        '''
        Creates new document in collection. Optionally, remove any existing according to keys (which specify how the
        new document is unique)

        Returns id of new document
        '''
        assert elita.util.type_check.is_string(collection)
        assert elita.util.type_check.is_dictlike(keys)
        assert elita.util.type_check.is_optional_str(classname)
        assert elita.util.type_check.is_dictlike(doc)
        assert collection
        # keys/classname are only mandatory if remove_existing=True
        assert (keys and classname and remove_existing) or not remove_existing
        if classname:
            doc['_class'] = classname
        existing = None
        if remove_existing:
            # snapshot duplicates *before* saving so the new doc is excluded
            existing = [d for d in self.db[collection].find(keys)]
        # fold the uniqueness keys into the stored document itself
        for k in keys:
            doc[k] = keys[k]
        if '_id' in doc:
            # strip any stale _id so save() performs an insert, not an upsert
            del doc['_id']
        id = self.db[collection].save(doc, fsync=True)
        logging.debug("new id: {}".format(id))
        if existing and remove_existing:
            logging.warning("create_new found existing docs! deleting...(collection: {}, keys: {})".format(collection, keys))
            # remove everything matching keys except the doc we just saved
            keys['_id'] = {'$ne': id}
            self.db[collection].remove(keys)
        return id
    def modify(self, collection, keys, path, doc_or_obj):
        '''
        Modifies document with the keys in doc. Does so atomically but remember that any key will overwrite the existing
        key.

        doc_or_obj could be None, zero, etc.

        Returns boolean indicating success
        '''
        assert hasattr(path, '__iter__')
        assert path
        assert elita.util.type_check.is_string(collection)
        assert isinstance(keys, dict)
        assert collection and keys
        dlist = [d for d in self.db[collection].find(keys)]
        assert dlist
        canonical_id = dlist[0]['_id']
        if len(dlist) > 1:
            # keep the first match, drop the rest
            logging.warning("Found duplicate entries for query {} in collection {}; using the first and removing others"
                            .format(keys, collection))
            keys['_id'] = {'$ne': canonical_id}
            self.db[collection].remove(keys)
        # mongo dot notation addresses the nested field for $set
        path_dot_notation = '.'.join(path)
        result = self.db[collection].update({'_id': canonical_id}, {'$set': {path_dot_notation: doc_or_obj}}, fsync=True)
        return result['n'] == 1 and result['updatedExisting'] and not result['err']
    def save(self, collection, doc):
        '''
        Replace a document completely with a new one. Must have an '_id' field
        '''
        assert collection
        assert elita.util.type_check.is_string(collection)
        assert elita.util.type_check.is_dictlike(doc)
        assert '_id' in doc
        return self.db[collection].save(doc)
    def delete(self, collection, keys):
        '''
        Drop a document from the collection

        Return whatever pymongo returns for deletion
        '''
        assert elita.util.type_check.is_string(collection)
        assert isinstance(keys, dict)
        assert collection and keys
        dlist = [d for d in self.db[collection].find(keys)]
        assert dlist
        if len(dlist) > 1:
            # unlike get/modify, duplicates here are removed wholesale
            logging.warning("Found duplicate entries for query {} in collection {}; removing all".format(keys,
                                                                                                         collection))
        return self.db[collection].remove(keys, fsync=True)
    def update_roottree(self, path, collection, id, doc=None):
        '''
        Update the root tree at path [must be a tuple of indices: ('app', 'myapp', 'builds', '123-foo')] with DBRef

        Optional doc can be passed in which will be inserted into the tree after adding DBRef field

        Return boolean indicating success
        '''
        assert hasattr(path, '__iter__')
        assert elita.util.type_check.is_string(collection)
        assert id.__class__.__name__ == 'ObjectId'
        assert elita.util.type_check.is_optional_dict(doc)
        path_dot_notation = '.'.join(path)
        root_tree_doc = doc if doc else {}
        root_tree_doc['_doc'] = bson.DBRef(collection, id)
        # empty filter: assumes exactly one root_tree document exists
        result = self.db['root_tree'].update({}, {'$set': {path_dot_notation: root_tree_doc}}, fsync=True)
        return result['n'] == 1 and result['updatedExisting'] and not result['err']
    def rm_roottree(self, path):
        '''
        Delete/remove the root_tree reference at path
        '''
        assert hasattr(path, '__iter__')
        assert path
        path_dot_notation = '.'.join(path)
        result = self.db['root_tree'].update({}, {'$unset': {path_dot_notation: ''}}, fsync=True)
        return result['n'] == 1 and result['updatedExisting'] and not result['err']
    def get(self, collection, keys, multi=False, empty=False):
        '''
        Thin wrapper around find()

        Retrieve a document from Mongo, keyed by name. Optionally, if duplicates are found, delete all but the first.
        If empty, it's ok to return nothing when no document matches.

        NOTE(review): when nothing matches and empty=True this returns the
        empty list, not None, regardless of multi.

        Returns document
        @rtype: dict | list(dict)
        '''
        assert elita.util.type_check.is_string(collection)
        assert isinstance(keys, dict)
        assert collection
        dlist = [d for d in self.db[collection].find(keys)]
        assert dlist or empty
        if len(dlist) > 1 and not multi:
            # single-doc request but multiple matches: keep the first only
            logging.warning("Found duplicate entries ({}) for query {} in collection {}; dropping all but the first"
                            .format(len(dlist), keys, collection))
            keys['_id'] = {'$ne': dlist[0]['_id']}
            self.db[collection].remove(keys)
        return dlist if multi else (dlist[0] if dlist else dlist)
    def dereference(self, dbref):
        '''
        Simple wrapper around db.dereference()

        Returns document pointed to by DBRef
        @type dbref: bson.DBRef
        '''
        assert dbref
        assert dbref.__class__.__name__ == 'DBRef'
        return self.db.dereference(dbref)
| {
"repo_name": "bkeroack/elita",
"path": "elita/dataservice/mongo_service.py",
"copies": "1",
"size": "6422",
"license": "apache-2.0",
"hash": 7265936299309052000,
"line_mean": 39.1375,
"line_max": 125,
"alpha_frac": 0.5879788228,
"autogenerated": false,
"ratio": 4.167423750811162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5255402573611162,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import logging
import os
import shutil
import bson
import datetime
import pytz
import copy
import sys
import jsonpatch
import traceback
import elita.util
import elita.elita_exceptions
import models
from root_tree import RootTree
from mongo_service import MongoService
from elita.deployment.gitservice import EMBEDDED_YAML_DOT_REPLACEMENT
from elita.actions.action import ActionService
from elita.deployment import deploy, salt_control
class GenericChildDataService:
    '''
    Base class for the child dataservices. Holds the shared MongoService,
    the thread-local root tree, pyramid settings and (optionally) the async
    job_id, and provides generic helpers for creating containers, applying
    document updates, and keeping the thread-local root_tree in sync.
    '''
    __metaclass__ = elita.util.LoggingMetaClass
    def __init__(self, mongo_service, root, settings, job_id=None):
        '''
        @type mongo_service: MongoService
        @type root: RootTree
        @type settings: pyramid.registry.Registry
        @type job_id: None | str
        '''
        assert isinstance(mongo_service, MongoService)
        assert isinstance(root, RootTree)
        assert elita.util.type_check.is_optional_str(job_id)
        self.mongo_service = mongo_service
        self.root = root
        self.settings = settings
        self.job_id = job_id
    def populate_dependencies(self, dependency_objs):
        '''
        Child dataservice classes may need to access methods of siblings. This allows parent dataservice to inject
        cross dependencies as needed without generating a big reference cycle.

        dependency_objs = { 'FooDataService': FooDataService }

        @type dependency_objs: dict
        '''
        assert elita.util.type_check.is_dictlike(dependency_objs)
        self.deps = dependency_objs
    def NewContainer(self, class_name, name, parent):
        '''
        Create new container object suitable for a root_tree reference.
        Returns the ObjectId of the new containers-collection document.
        '''
        assert class_name and name and parent
        assert elita.util.type_check.is_string(class_name)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_string(parent)
        return self.mongo_service.create_new('containers', {'name': name, 'parent': parent}, class_name,
                                             {'name': name, 'parent': parent}, remove_existing=False)
    def UpdateObjectFromPatch(self, collection, keys, patch):
        '''
        Generic method to update an object (document) with a JSON Patch document.
        Raises (propagates jsonpatch errors) if the patch cannot be applied.
        '''
        assert collection and keys and patch
        assert elita.util.type_check.is_string(collection)
        assert elita.util.type_check.is_dictlike(keys)
        assert elita.util.type_check.is_seq(patch)
        assert all([len(str(op["path"]).split('/')) > 1 for op in patch])  # well-formed path for every op
        assert not any([str(op["path"]).split('/')[1][0] == '_' for op in patch])  # not trying to operate on internal fields
        original_doc = self.mongo_service.get(collection, keys)
        assert original_doc
        result = jsonpatch.apply_patch(original_doc, patch)
        self.mongo_service.save(collection, result)
    def UpdateObject(self, collection, keys, doc):
        '''
        Generic method to update a particular object (document) with the data in doc.
        Each leaf of the (possibly nested) doc is written with an individual $set.
        '''
        assert collection and keys and doc
        assert elita.util.type_check.is_string(collection)
        assert elita.util.type_check.is_dictlike(keys)
        assert elita.util.type_check.is_dictlike(doc)
        paths = elita.util.paths_from_nested_dict(doc)
        assert paths
        for path in paths:
            # path[:-1] is the key path; path[-1] is the leaf value
            self.mongo_service.modify(collection, keys, path[:-1], path[-1])
    def AddThreadLocalRootTree(self, path):
        '''
        Add new node to thread-local root_tree (copy from mongo's root_tree)
        '''
        assert path
        assert elita.util.type_check.is_seq(path)
        root_tree = self.mongo_service.get('root_tree', {})
        assert root_tree
        # walk to the parent node in the local tree and graft in the subtree
        # fetched from mongo (py2 builtin reduce performs the tree walk)
        reduce(lambda d, k: d[k], path[:-1], self.root)[path[-1]] = reduce(lambda d, k: d[k], path, root_tree)
    def RmThreadLocalRootTree(self, path):
        '''
        Remove deleted node from thread-local root_tree
        '''
        assert path
        assert elita.util.type_check.is_seq(path)
        # walk to the parent node, then drop the leaf key
        node = reduce(lambda d, k: d[k], path[:-1], self.root)
        del node[path[-1]]
class BuildDataService(GenericChildDataService):
    '''
    CRUD operations for build objects: mongo documents, root_tree references
    and on-disk build storage.
    '''
    def GetBuilds(self, app_name):
        '''
        Get all builds for application.

        When getting a list of all objects of a given type, the convention is to pull from in-memory root_tree instead
        of directly from mongo. This keeps it fast so we can do it frequently for things like parameter validation, etc
        '''
        assert app_name
        assert elita.util.type_check.is_string(app_name)
        assert app_name in self.root['app']
        # keys beginning with '_' are internal (e.g. '_doc'), not build names
        return [build for build in self.root['app'][app_name]['builds'] if build[0] != '_']
    def NewBuild(self, app_name, build_name, attribs):
        '''
        Create new build document, index it in root_tree and mirror it into
        the thread-local tree. Always returns True.
        '''
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(build_name)
        assert isinstance(attribs, dict)
        assert app_name and build_name
        # the application must already exist
        app_doc = self.mongo_service.get('applications', {'app_name': app_name})
        assert app_doc
        buildobj = models.Build({
            'app_name': app_name,
            'build_name': build_name,
            'attributes': attribs
        })
        bid = self.mongo_service.create_new('builds', {'app_name': app_name, 'build_name': build_name}, 'Build',
                                            buildobj.get_doc())
        self.mongo_service.update_roottree(('app', app_name, 'builds', build_name), 'builds', bid)
        self.AddThreadLocalRootTree(('app', app_name, 'builds', build_name))
        return True
    def AddPackages(self, app, build, packages):
        '''
        Add new packages fields to existing build. Regenerate legacy 'files' field (which is a flat array of files
        associated with build)
        '''
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(build)
        assert isinstance(packages, dict)
        assert app in self.root['app']
        assert build in self.root['app'][app]['builds']
        keys = {'app_name': app, 'build_name': build}
        # one atomic $set per package
        for p in packages:
            path = ('packages', p)
            self.mongo_service.modify('builds', keys, path, packages[p])
        # verify all packages landed in the stored document
        build_doc = self.mongo_service.get('builds', keys)
        assert all([p in build_doc['packages'] for p in packages])
        #generate files from packages (avoid dupes)
        files = [{"file_type": packages[p]['file_type'], "path": packages[p]['filename']} for p in packages]
        self.mongo_service.modify('builds', keys, ('files',), files)
    def UpdateBuild(self, app, name, doc):
        '''
        Update build with doc (JSON Patch or keys to update).
        Returns False if applying a JSON Patch fails, True otherwise.
        '''
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert app and name
        assert app in self.root['app']
        assert name in self.root['app'][app]['builds']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('builds', {'app_name': app, 'build_name': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('builds', {'app_name': app, 'build_name': name}, doc)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
                return False
        return True
    def DeleteBuildStorage(self, app_name, build_name):
        '''
        Delete all stored files associated with build.
        '''
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(build_name)
        assert app_name and build_name
        app_doc = self.mongo_service.get('applications', {'app_name': app_name})
        assert app_doc
        # NOTE(review): 'dir' shadows the builtin
        dir = self.settings['elita.builds.dir']
        path = "{root_dir}/{app}/{build}".format(root_dir=dir, app=app_name, build=build_name)
        logging.debug("DeleteBuildStorage: path: {}".format(path))
        if os.path.isdir(path):
            logging.debug("DeleteBuildStorage: remove_build: deleting")
            shutil.rmtree(path)
    def DeleteBuild(self, app_name, build_name):
        '''
        Delete build object, root_tree reference and all stored files.
        '''
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(build_name)
        assert app_name and build_name
        root_path = ('app', app_name, 'builds', build_name)
        self.mongo_service.rm_roottree(root_path)
        self.RmThreadLocalRootTree(root_path)
        self.mongo_service.delete('builds', {'app_name': app_name, 'build_name': build_name})
        self.DeleteBuildStorage(app_name, build_name)
    def GetBuild(self, app_name, build_name):
        '''
        Get build document (internal '_'-prefixed fields stripped, creation
        time derived from the ObjectId).
        '''
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(build_name)
        assert app_name and build_name
        assert app_name in self.root['app']
        assert build_name in self.root['app'][app_name]['builds']
        doc = self.mongo_service.get('builds', {'app_name': app_name, 'build_name': build_name})
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'}
class UserDataService(GenericChildDataService):
    '''
    CRUD operations for users, auth tokens and computed user permissions.
    '''
    def NewUser(self, name, pw, perms, attribs):
        '''
        Create a new user object and insert root_tree references for both the user and the computed permissions
        endpoint. Pipe parameters into User object to get the pw hashed, etc.

        Returns (user ObjectId, userpermissions ObjectId).
        '''
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_string(pw)
        assert isinstance(attribs, dict)
        assert isinstance(perms, dict)
        assert name and pw and perms
        # models.User hashes the password and generates the salt
        userobj = models.User({
            'username': name,
            'permissions': perms,
            'password': pw,
            'attributes': attribs
        })
        uid = self.mongo_service.create_new('users', {'username': userobj.username}, 'User', userobj.get_doc())
        # computed permissions start empty; filled in elsewhere
        pid = self.mongo_service.create_new('userpermissions', {'username': userobj.username}, 'UserPermissions', {
            "username": userobj.username,
            "applications": list(),
            "actions": dict(),
            "servers": list()
        })
        self.mongo_service.update_roottree(('global', 'users', userobj.username), 'users', uid)
        self.AddThreadLocalRootTree(('global', 'users', userobj.username))
        self.mongo_service.update_roottree(('global', 'users', userobj.username, 'permissions'), 'userpermissions', pid)
        self.AddThreadLocalRootTree(('global', 'users', userobj.username, 'permissions'))
        return uid, pid
    def GetUserTokens(self, username):
        '''
        Get all auth tokens associated with user (empty list if none)
        '''
        assert elita.util.type_check.is_string(username)
        assert username
        return [d['token'] for d in self.mongo_service.get('tokens', {'username': username}, multi=True, empty=True)]
    def GetUserFromToken(self, token):
        '''
        Get username associated with token
        '''
        assert elita.util.type_check.is_string(token)
        assert token
        return self.mongo_service.get('tokens', {'token': token})['username']
    def GetAllTokens(self):
        '''
        Get all valid tokens (from the in-memory root_tree)
        '''
        return [token for token in self.root['global']['tokens'] if token[0] != '_']
    def NewToken(self, username):
        '''
        Create new auth token associated with username and insert reference into root_tree.
        Returns the models.Token object.
        '''
        assert elita.util.type_check.is_string(username)
        assert username
        # token value is generated by the model
        token = models.Token({
            'username': username
        })
        tid = self.mongo_service.create_new('tokens', {'username': username, 'token': token.token}, 'Token',
                                            token.get_doc())
        self.mongo_service.update_roottree(('global', 'tokens', token.token), 'tokens', tid)
        self.AddThreadLocalRootTree(('global', 'tokens', token.token))
        return token
    def GetUsers(self):
        '''
        Get all valid users (from the in-memory root_tree)
        '''
        return [user for user in self.root['global']['users'] if user[0] != '_']
    def GetUser(self, username):
        '''
        Get user document (internal '_'-prefixed fields stripped, creation
        time derived from the ObjectId)
        '''
        assert elita.util.type_check.is_string(username)
        assert username
        doc = self.mongo_service.get('users', {'username': username})
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'}
    def DeleteUser(self, name):
        '''
        Delete a single user and root_tree reference
        '''
        assert name
        assert elita.util.type_check.is_string(name)
        self.mongo_service.rm_roottree(('global', 'users', name))
        self.RmThreadLocalRootTree(('global', 'users', name))
        self.mongo_service.delete('users', {'username': name})
    def DeleteToken(self, token):
        '''
        Delete a token and root_tree reference
        '''
        assert token
        assert elita.util.type_check.is_string(token)
        self.mongo_service.rm_roottree(('global', 'tokens', token))
        self.RmThreadLocalRootTree(('global', 'tokens', token))
        self.mongo_service.delete('tokens', {'token': token})
    def UpdateUser(self, name, doc):
        '''
        Update user with keys in doc (dict of fields or a JSON Patch list).
        Returns False on a malformed/unapplicable patch, True otherwise.
        '''
        assert name and doc
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert name in self.root['global']['users']
        if elita.util.type_check.is_dictlike(doc) and "password" in doc:
            # dict update with a password change: hash it and never store plaintext
            user = models.User(doc)
            doc['hashed_pw'] = user.hashed_pw
            doc['salt'] = user.salt
            doc['password'] = None
        elif elita.util.type_check.is_seq(doc):
            # If this is a JSON Patch, we need to do magic if user is trying to change password
            # we replace JSON Patch operation to replace password field with operations to replace
            # hashed_pw and salt instead. We have to do it in-place by splicing because the JSON Patch
            # could be arbitrarily complex.
            # NOTE(review): if multiple password ops appear, only the last
            # splice_index is used while splices keeps accumulating — verify.
            splice_index = None
            splices = []
            for i, op in enumerate(doc):
                if not ("op" in op and "path" in op):
                    logging.debug("Bad JSON patch: missing required key")
                    return False
                for k in ("salt", "hashed_pw"):
                    if k in op["path"]:
                        logging.debug("Bad JSON patch: trying to modify hashed_pw or salt")
                        return False
                path_split = str(op["path"]).split('/')
                if len(path_split) < 2:
                    logging.debug("Bad JSON patch: bad path")
                    return False
                if path_split[1] == "password":
                    if elita.util.type_check.is_string(op["value"]) and op["op"] == "replace":
                        user = models.User({"password": op["value"]})
                        splice_index = i
                        splices.append({"op": "replace", "path": "/password", "value": None})
                        splices.append({"op": "replace", "path": "/hashed_pw", "value": user.hashed_pw})
                        splices.append({"op": "replace", "path": "/salt", "value": user.salt})
                    else:
                        logging.debug("Bad JSON patch: password op not replace")
                        return False
            if splice_index is not None:  # don't use truthiness because 0 is valid
                doc[splice_index] = splices[0]
                doc.insert(splice_index+1, splices[1])
                doc.insert(splice_index+2, splices[2])
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('users', {'username': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('users', {'username': name}, doc)
            except:
                # log the patch failure with the final traceback line
                exc_type, exc_obj, tb = sys.exc_info()
                tbf = traceback.format_exception(exc_type, exc_obj, tb)
                logging.debug("Error applying JSON patch: {}".format(tbf[-1]))
                return False
        return True
class ApplicationDataService(GenericChildDataService):
    '''
    CRUD operations for applications and their child containers, plus the
    deployment census report.
    '''
    def GetApplications(self):
        '''
        Get all applications. Pull from in-memory root_tree rather than mongo for speed.
        '''
        return [app for app in self.root['app'] if app[0] != '_']
    def GetApplication(self, app_name):
        '''
        Get application document (internal '_'-prefixed fields stripped,
        creation time derived from the ObjectId)
        '''
        assert app_name
        assert elita.util.type_check.is_string(app_name)
        doc = self.mongo_service.get('applications', {'app_name': app_name})
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'}
    def NewApplication(self, app_name):
        '''
        Create new application and all subcontainers and root_tree sub-references.
        Returns the result of the root_tree update (boolean).
        '''
        assert app_name
        assert elita.util.type_check.is_string(app_name)
        aid = self.mongo_service.create_new('applications', {'app_name': app_name}, 'Application', {})
        # one container doc + DBRef per child endpoint of the application
        root_doc = {
            "builds": {"_doc": bson.DBRef('containers', self.NewContainer("BuildContainer", "builds", app_name))},
            "actions": {"_doc": bson.DBRef('containers', self.NewContainer("ActionContainer", "actions", app_name))},
            "gitrepos": {"_doc": bson.DBRef('containers', self.NewContainer("GitRepoContainer", "gitrepos", app_name))},
            "gitdeploys": {"_doc": bson.DBRef('containers', self.NewContainer("GitDeployContainer", "gitdeploys", app_name))},
            "deployments": {"_doc": bson.DBRef('containers', self.NewContainer("DeploymentContainer", "deployments", app_name))},
            "groups": {"_doc": bson.DBRef('containers', self.NewContainer("GroupContainer", "groups", app_name))},
            "packagemaps": {"_doc": bson.DBRef('containers', self.NewContainer("PackageMapContainer", "packagemaps", app_name))}
        }
        res = self.mongo_service.update_roottree(('app', app_name), 'applications', aid, doc=root_doc)
        self.AddThreadLocalRootTree(('app', app_name))
        return res
    def DeleteApplication(self, app_name):
        '''
        Delete application and all root_tree references and sub-objects.
        '''
        assert app_name
        assert elita.util.type_check.is_string(app_name)
        self.mongo_service.rm_roottree(('app', app_name))
        self.mongo_service.delete('applications', {'app_name': app_name})
        # purge every collection that keys documents to this application
        self.mongo_service.delete('builds', {'app_name': app_name})
        self.mongo_service.delete('gitrepos', {'application': app_name})
        self.mongo_service.delete('gitdeploys', {'application': app_name})
        self.mongo_service.delete('deployments', {'application': app_name})
        self.mongo_service.delete('groups', {'application': app_name})
        self.mongo_service.delete('packagemaps', {'application': app_name})
        self.RmThreadLocalRootTree(('app', app_name))
    def GetApplicationCensus(self, app_name):
        '''
        Generates a census of all environments, groups, servers and builds deployed:

        {
            "env_name": {
                "group_name": {
                    "server_name": {
                        "gitdeploy_name": {
                            "committed": "build_name",
                            "deployed": "build_name"
                        }
                    }
                }
            }
        }

        Empty servers/groups/environments are pruned from the result.
        '''
        assert app_name
        assert elita.util.type_check.is_string(app_name)
        groups = [d['name'] for d in self.mongo_service.get('groups', {'application': app_name}, multi=True, empty=True)]
        envs = list({d['environment'] for d in self.mongo_service.get('servers', {}, multi=True, empty=True)})
        census = dict()
        for e in envs:
            census[e] = dict()
            for g in groups:
                g_servers = self.deps['GroupDataService'].GetGroupServers(app_name, g, environments=[e])
                census[e][g] = dict()
                for s in g_servers:
                    census[e][g][s] = dict()
                    group_doc = self.deps['GroupDataService'].GetGroup(app_name, g)
                    # gitdeploys may be a list of names or a list of lists of names
                    for gd in group_doc['gitdeploys']:
                        gdl = gd if isinstance(gd, list) else [gd]
                        for gd in gdl:
                            gd_doc = self.deps['GitDataService'].GetGitDeploy(app_name, gd)
                            census[e][g][s][gd] = {
                                "committed": gd_doc['location']['gitrepo']['last_build'],
                                "deployed": gd_doc['deployed_build']
                            }
                    if len(census[e][g][s]) == 0:
                        del census[e][g][s]
                if len(census[e][g]) == 0:
                    del census[e][g]
            if len(census[e]) == 0:
                del census[e]
        return census
    def UpdateApplication(self, app, doc):
        '''
        Update application with keys in doc (dict of fields or a JSON Patch).
        Returns False if applying a JSON Patch fails, True otherwise.
        '''
        assert app and doc
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert app in self.root['app']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('applications', {'app_name': app}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('applications', {'app_name': app}, doc)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
                return False
        return True
class PackageMapDataService(GenericChildDataService):
    '''
    CRUD operations for packagemap objects (named mappings of package
    definitions scoped to an application).
    '''
    def GetPackageMaps(self, app):
        '''
        Get all packagemap names for application, read from the in-memory
        root_tree for speed. Keys beginning with '_' are internal.
        '''
        assert app
        assert elita.util.type_check.is_string(app)
        assert app in self.root['app']
        return [pm for pm in self.root['app'][app]['packagemaps'] if pm[0] != '_']
    def GetPackageMap(self, app, name):
        '''
        Get document for packagemap (internal '_'-prefixed fields stripped,
        creation time derived from the ObjectId)
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        doc = self.mongo_service.get('packagemaps', {'application': app, 'name': name})
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'}
    def NewPackageMap(self, app, name, packages, attributes=None):
        '''
        Create new packagemap, index it in root_tree and mirror it into the
        thread-local tree.

        @type app: str
        @type name: str
        @type packages: dict
        @type attributes: dict | None
        '''
        assert app and name and packages
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(packages)
        assert elita.util.type_check.is_optional_dict(attributes)
        attributes = attributes if attributes else {}
        pm = models.PackageMap({
            'application': app,
            'name': name,
            'packages': packages,
            'attributes': attributes
        })
        pmid = self.mongo_service.create_new('packagemaps', {'application': app, 'name': name}, 'PackageMap', pm.get_doc())
        self.mongo_service.update_roottree(('app', app, 'packagemaps', name), 'packagemaps', pmid)
        self.AddThreadLocalRootTree(('app', app, 'packagemaps', name))
    def DeletePackageMap(self, app, name):
        '''
        Delete a packagemap object and root_tree reference
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        self.mongo_service.rm_roottree(('app', app, 'packagemaps', name))
        self.RmThreadLocalRootTree(('app', app, 'packagemaps', name))
        self.mongo_service.delete('packagemaps', {'application': app, 'name': name})
    def UpdatePackageMap(self, app, name, doc):
        '''
        Update packagemap with keys in doc (dict of fields or a JSON Patch).
        Returns False if applying a JSON Patch fails, True otherwise.
        '''
        assert app and name and doc
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert app in self.root['app']
        assert name in self.root['app'][app]['packagemaps']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('packagemaps', {'application': app, 'name': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('packagemaps', {'application': app, 'name': name}, doc)
            except Exception:
                # narrowed from a bare except: a bare clause would also swallow
                # KeyboardInterrupt/SystemExit; patch failures are still reported
                # as False to the caller
                return False
        return True
class GroupDataService(GenericChildDataService):
    '''
    CRUD operations for application groups (named sets of gitdeploys whose
    member servers are computed by intersection).
    '''
    def GetGroups(self, app):
        '''
        Get all groups for application (from the in-memory root_tree).
        '''
        assert app
        assert elita.util.type_check.is_string(app)
        assert app in self.root['app']
        return [group for group in self.root['app'][app]['groups'] if group[0] != '_']
    def GetGroup(self, app, name):
        '''
        Get document for application group, with the computed 'servers' list
        added and internal '_'-prefixed fields stripped.
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        doc = self.mongo_service.get('groups', {'application': app, 'name': name})
        # pass group_doc to avoid GetGroupServers -> GetGroup recursion
        doc['servers'] = self.GetGroupServers(app, name, group_doc=doc)
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'}
    def NewGroup(self, app, name, gitdeploys, rolling_deploy=False, description="", attributes=None):
        '''
        Create new application group.

        @type app: str
        @type name: str
        @type gitdeploys: list[str]
        @type rolling_deploy: True | False
        @type description: str
        @type attributes: dict | None
        '''
        assert app and name and gitdeploys
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_seq(gitdeploys)
        assert elita.util.type_check.is_string(description)
        assert elita.util.type_check.is_optional_dict(attributes)
        attributes = attributes if attributes else {}
        gp = models.Group({
            "application": app,
            "name": name,
            "description": description,
            "gitdeploys": gitdeploys,
            "attributes": attributes,
            "rolling_deploy": rolling_deploy
        })
        gid = self.mongo_service.create_new('groups', {'application': app, 'name': name}, 'Group', gp.get_doc())
        self.mongo_service.update_roottree(('app', app, 'groups', name), 'groups', gid)
        self.AddThreadLocalRootTree(('app', app, 'groups', name))
    def DeleteGroup(self, app, name):
        '''
        Delete a group object and root_tree reference
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        self.mongo_service.rm_roottree(('app', app, 'groups', name))
        self.RmThreadLocalRootTree(('app', app, 'groups', name))
        self.mongo_service.delete('groups', {'application': app, 'name': name})
    def GetGroupServers(self, app, name, environments=None, group_doc=None):
        '''
        Build sets from initialized servers in each gitdeploy in the group, then take intersection of all the sets

        If environments specified, take intersection with that set as well

        Allow caller to provide group_doc to prevent infinite recursion from GetGroup calling GetGroupServers
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert app in self.root['app']
        assert name in self.root['app'][app]['groups']
        if not group_doc:
            group = self.GetGroup(app, name)
        else:
            group = group_doc
        # this is ugly. gitdeploys can either be a list of strings or a list of lists. We have to flatten the
        # list of lists if necessary (the shape is sniffed from the first element)
        server_sets = [set(self.deps['GitDataService'].GetGitDeploy(app, gd)['servers']) for sublist in group['gitdeploys'] for gd in sublist] if isinstance(group['gitdeploys'][0], list) else [set(self.deps['GitDataService'].GetGitDeploy(app, gd)['servers']) for gd in group['gitdeploys']]
        if environments:
            # union of all servers in the requested environments becomes one
            # more set in the intersection
            server_env_set = set()
            envs = self.deps['ServerDataService'].GetEnvironments()
            for e in environments:
                assert e in envs
                server_env_set = set(envs[e]).union(server_env_set)
            server_sets.append(server_env_set)
        return list(set.intersection(*server_sets))
    def UpdateGroup(self, app, name, doc):
        '''
        Update group with keys in doc (dict of fields or a JSON Patch).
        Returns False if applying a JSON Patch fails, True otherwise.
        '''
        assert app and name and doc
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert app in self.root['app']
        assert name in self.root['app'][app]['groups']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('groups', {'application': app, 'name': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('groups', {'application': app, 'name': name}, doc)
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
                return False
        return True
class JobDataService(GenericChildDataService):
    '''
    Data layer for async job objects, their progress/log records (job_data), and
    dynamically-registered plugin actions.
    '''
    def GetAllActions(self, app_name):
        '''
        Get all actions associated with application. Get it from root_tree because the actions are dynamically populated
        at the start of each request.
        Returns None (implicitly) if the application has no 'actions' key.
        '''
        assert app_name
        assert elita.util.type_check.is_string(app_name)
        if 'actions' in self.root['app'][app_name]:
            return [action for action in self.root['app'][app_name]['actions'] if action[0] != '_']
    def GetAction(self, app_name, action_name):
        '''
        Get details (name, description, parameters) about a single action associated with application.
        The non-serializable 'callable' entry is stripped from the result.
        '''
        assert app_name and action_name
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(action_name)
        actions = self.deps['ActionService'].get_action_details(app_name, action_name)
        # bugfix: was 'k is not "callable"' -- identity comparison against a string literal is
        # not guaranteed to match equal strings; equality is the correct test here
        return {k: actions[k] for k in actions if k != "callable"}
    def NewJob(self, name, job_type, data):
        '''
        Create new job object.
        data parameter can be anything serializable (including None) so that's all we will check for
        '''
        assert name and job_type
        assert elita.util.type_check.is_serializable(data)
        job = models.Job({
            'status': "running",
            'name': name,
            'job_type': job_type,
            'data': data,
            'attributes': {
                'name': name
            }
        })
        # persist the job, reference it from root_tree, and mirror into this thread's root_tree
        jid = self.mongo_service.create_new('jobs', {'job_id': str(job.job_id)}, 'Job', job.get_doc())
        self.mongo_service.update_roottree(('job', str(job.job_id)), 'jobs', jid)
        self.AddThreadLocalRootTree(('job', str(job.job_id)))
        return job
    def NewJobData(self, data):
        '''
        Insert new job_data record. Called by async jobs to log progress. Data inserted here can be viewed by user
        by polling the respective job object endpoint.
        Only valid to be called in an async context, so assert we have a valid job_id
        '''
        assert data
        assert elita.util.type_check.is_serializable(data)
        assert self.job_id
        # mongo cannot store dotted keys, so sanitize them in place first
        elita.util.change_dict_keys(data, '.', '_')
        self.mongo_service.create_new('job_data', {}, None, {
            'job_id': self.job_id,
            'data': data
        }, remove_existing=False)
    def GetJobs(self, active):
        '''
        Get all actively running jobs. Pulling from mongo could possibly be more efficient (maybe) than using
        in-memory root_tree because we're querying on the status field

        NOTE(review): the 'active' parameter is currently ignored; the query always filters on
        status == 'running' -- confirm intended semantics with callers
        '''
        return [d['job_id'] for d in self.mongo_service.get('jobs', {'status': 'running'}, multi=True)]
    def GetJobData(self, job_id):
        '''
        Get job data for a specific job sorted by created_datetime (ascending)
        '''
        assert job_id
        assert elita.util.type_check.is_string(job_id)
        # creation time is derived from the ObjectId; sort on the ISO string (lexicographic == chronological)
        return sorted([{'created_datetime': d['_id'].generation_time.isoformat(' '), 'data': d['data']} for
                       d in self.mongo_service.get('job_data', {'job_id': job_id}, multi=True, empty=True)],
                      key=lambda k: k['created_datetime'])
    def SaveJobResults(self, results):
        '''
        Called at the end of async jobs. Changes state of job object to reflect job completion.
        '''
        assert self.job_id
        assert elita.util.type_check.is_serializable(results)
        now = datetime.datetime.now(tz=pytz.utc)
        doc = self.mongo_service.get('jobs', {'job_id': self.job_id})
        assert doc and elita.util.type_check.is_dictlike(doc) and '_id' in doc
        # sanitize a copy so the caller's results object is not mutated
        results_sanitized = copy.deepcopy(results)
        elita.util.change_dict_keys(results_sanitized, '.', '_')
        # duration measured from the job doc's ObjectId creation time
        diff = (now - doc['_id'].generation_time).total_seconds()
        self.mongo_service.modify('jobs', {'job_id': self.job_id}, ('status',), "completed")
        self.mongo_service.modify('jobs', {'job_id': self.job_id}, ('completed_datetime',), now)
        self.mongo_service.modify('jobs', {'job_id': self.job_id}, ('duration_in_seconds',), diff)
        self.NewJobData({"completed_results": results_sanitized})
    def NewAction(self, app_name, action_name, params):
        '''
        Register new dynamically-loaded action in root_tree. These are loaded from plugins at the start of each request
        Note that action is added to in-memory root_tree object (not the root_tree record in mongo) because it is not
        persistent. Note further that this is *our* (meaning this thread's) root_tree and will only be in effect for the
        duration of this request, so we don't care about any root_tree updates by other threads running concurrently
        '''
        assert app_name and action_name
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(action_name)
        assert elita.util.type_check.is_optional_seq(params)
        logging.debug("NewAction: app_name: {}".format(app_name))
        logging.debug("NewAction: action_name: {}".format(action_name))
        if app_name in self.deps['ApplicationDataService'].GetApplications():
            assert app_name in self.root['app']
            assert 'actions' in self.root['app'][app_name]
            assert elita.util.type_check.is_dictlike(self.root['app'][app_name]['actions'])
            self.root['app'][app_name]['actions'][action_name] = models.Action(app_name, action_name, params, self)
        else:
            logging.debug("NewAction: application '{}' not found".format(app_name))
    def ExecuteAction(self, app_name, action_name, params):
        '''
        Spawn async job for an action
        '''
        assert app_name and action_name and params
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(action_name)
        assert elita.util.type_check.is_dictlike(params)
        # 'async' became a reserved keyword in Python 3.7, making the attribute-access form a
        # SyntaxError; getattr is behaviorally identical and parses on all Python versions
        return getattr(self.deps['ActionService'], 'async')(app_name, action_name, params)
class ServerDataService(GenericChildDataService):
    '''
    Data layer for server objects and the environment census derived from them.
    '''
    def GetServers(self):
        '''
        Return a list of all extant server objects
        '''
        # 'environments' is a synthetic sibling key under root['server'] and '_'-prefixed keys
        # are internal metadata; both are excluded
        return [k for k in self.root['server'].keys() if k[0] != '_' and k != 'environments']
    def NewServer(self, name, attribs, environment, existing=False):
        '''
        Create a new server object

        NOTE(review): the 'existing' parameter is unused in this method -- confirm callers' expectations
        '''
        server = models.Server({
            'name': name,
            'status': 'new',
            'server_type': 'unknown',
            'environment': environment,
            'attributes': attribs
        })
        # persist the doc, attach a gitdeploys container under the new root_tree node, then
        # mirror the node into this thread's in-memory root_tree
        sid = self.mongo_service.create_new('servers', {'name': name}, 'Server', server.get_doc())
        self.mongo_service.update_roottree(('server', name), 'servers', sid, doc={
            "gitdeploys": self.NewContainer('GitDeployContainer', name, "gitdeploys")
        })
        self.AddThreadLocalRootTree(('server', name))
        return {
            'NewServer': {
                'name': name,
                'environment': environment,
                'attributes': attribs,
                'status': 'ok'
            }
        }
    def UpdateServer(self, name, doc):
        '''
        Change existing server object with data in doc.
        doc may be a dict (merged) or a patch-style sequence; returns False if the patch fails.
        '''
        assert name and doc
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert name in self.root['server']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('servers', {'name': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('servers', {'name': name}, doc)
            except:
                return False
        return True
    def DeleteServer(self, name):
        '''
        Delete a server object: root_tree reference, thread-local mirror, then the mongo document
        '''
        assert name
        assert elita.util.type_check.is_string(name)
        assert name in self.root['server']
        self.mongo_service.rm_roottree(('server', name))
        self.RmThreadLocalRootTree(('server', name))
        self.mongo_service.delete('servers', {'name': name})
    def GetGitDeploys(self, name):
        '''
        Get all gitdeploys initialized on a server. The canonical data source is the gitdeploy object (which contains
        a list of servers it's been initialized on)
        '''
        assert name
        assert elita.util.type_check.is_string(name)
        assert name in self.root['server']
        # find gitdeploy docs whose 'servers' array contains this server
        gitdeploys = self.mongo_service.get('gitdeploys', {'servers': {'$in': [name]}}, multi=True, empty=True)
        if gitdeploys:
            return [{'application': gd['application'], 'gitdeploy_name': gd['name']} for gd in gitdeploys]
        else:
            return []
    def GetEnvironments(self):
        '''
        Get a census of all environments. "environment" is just a tag associated with a server upon creation, so get all
        tags and dedupe.
        Returns a mapping of environment name -> list of server names.
        '''
        environments = dict()
        for sd in self.mongo_service.get('servers', {}, multi=True):
            assert elita.util.type_check.is_dictlike(sd)
            assert 'environment' in sd
            if sd['environment'] not in environments:
                environments[sd['environment']] = list()
            environments[sd['environment']].append(sd['name'])
        return environments
class GitDataService(GenericChildDataService):
    '''
    Data layer for git-related objects: gitdeploys, gitproviders and gitrepos.
    '''
    def NewGitDeploy(self, name, app_name, package, options, actions, location, attributes):
        '''
        Create new gitdeploy object. One of a few New* methods that returns status of success/failure to the view layer
        @rtype dict
        '''
        assert name and app_name and location
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_dictlike(location)
        assert all([k in location for k in ('gitrepo', 'path', 'default_branch')])
        assert app_name in self.root['app']
        assert elita.util.type_check.is_optional_str(package)
        assert elita.util.type_check.is_optional_dict(options)
        assert elita.util.type_check.is_optional_dict(actions)
        assert elita.util.type_check.is_optional_dict(attributes)
        #get associated gitrepo
        gitrepo_doc = self.mongo_service.get('gitrepos', {'name': location['gitrepo']})
        logging.debug("NewGitDeploy: gitrepo_doc: {}".format(gitrepo_doc))
        if not gitrepo_doc:
            return {'error': "invalid gitrepo (not found)"}
        #replace gitrepo name with DBRef
        location['gitrepo'] = bson.DBRef("gitrepos", gitrepo_doc['_id'])
        #construct gitdeploy document: start from model defaults, overlay caller-supplied values
        gd_obj = models.GitDeploy({})
        gd_doc = gd_obj.get_doc()
        gd_doc['name'] = name
        gd_doc['application'] = app_name
        gd_doc['location'] = location
        gd_doc['attributes'] = attributes if attributes else {}
        gd_doc['package'] = package
        #override defaults if specified (keys unknown to the model are silently ignored)
        if options:
            for k in options:
                if k in gd_doc['options']:
                    gd_doc['options'][k] = options[k]
        if actions:
            #mongo cannot store dotted keys, so sanitize them first
            elita.util.change_dict_keys(actions, '.', EMBEDDED_YAML_DOT_REPLACEMENT)
            for k in actions:
                if k in gd_doc['actions']:
                    gd_doc['actions'][k] = actions[k]
        gdid = self.mongo_service.create_new('gitdeploys', {'name': name, 'application': app_name}, 'GitDeploy', gd_doc)
        self.mongo_service.update_roottree(('app', app_name, 'gitdeploys', name), 'gitdeploys', gdid)
        self.AddThreadLocalRootTree(('app', app_name, 'gitdeploys', name))
        return {"ok": "done"}
    def GetGitDeploys(self, app):
        '''
        Get all gitdeploys associated with application
        @rtype: list
        '''
        assert app
        assert elita.util.type_check.is_string(app)
        assert app in self.root['app']
        return [k for k in self.root['app'][app]['gitdeploys'] if k[0] != '_']
    def GetGitDeploy(self, app, name):
        '''
        Get gitdeploy document. Dereference embedded DBrefs for convenience.
        @rtype: dict
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert app in self.root['app']
        assert name in self.root['app'][app]['gitdeploys']
        doc = self.mongo_service.get('gitdeploys', {'name': name, 'application': app})
        # creation time is derived from the mongo ObjectId
        doc['created_datetime'] = doc['_id'].generation_time
        assert 'location' in doc
        assert 'gitrepo' in doc['location']
        #dereference embedded dbrefs
        doc['location']['gitrepo'] = self.mongo_service.dereference(doc['location']['gitrepo'])
        assert doc['location']['gitrepo']
        assert all([k in doc['location']['gitrepo'] for k in ('keypair', 'gitprovider')])
        doc['location']['gitrepo']['keypair'] = self.mongo_service.dereference(doc['location']['gitrepo']['keypair'])
        assert doc['location']['gitrepo']['keypair']
        doc['location']['gitrepo']['gitprovider'] = self.mongo_service.dereference(doc['location']['gitrepo']['gitprovider'])
        assert doc['location']['gitrepo']['gitprovider']
        # strip mongo-internal ('_'-prefixed) keys
        return {k: doc[k] for k in doc if k[0] != '_'}
    def UpdateGitDeploy(self, app, name, doc):
        '''
        Update gitdeploy object with the data in doc (dict merge or patch sequence).
        Returns False if applying a patch sequence fails, True otherwise.
        '''
        assert app and name and doc
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert app in self.root['app']
        assert name in self.root['app'][app]['gitdeploys']
        if elita.util.type_check.is_dictlike(doc):
            #clean up any actions (mongo cannot store dotted keys)
            if 'actions' in doc:
                elita.util.change_dict_keys(doc['actions'], '.', EMBEDDED_YAML_DOT_REPLACEMENT)
            #replace gitrepo with DBRef if necessary
            if 'location' in doc and 'gitrepo' in doc['location']:
                grd = self.mongo_service.get('gitrepos', {'name': doc['location']['gitrepo'], 'application': app})
                assert grd
                doc['location']['gitrepo'] = bson.DBRef('gitrepos', grd['_id'])
            self.UpdateObject('gitdeploys', {'name': name, 'application': app}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('gitdeploys', {'name': name, 'application': app}, doc)
            except:
                return False
        return True
    def DeleteGitDeploy(self, app, name):
        '''
        Delete a gitdeploy object and the root_tree reference
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert app in self.root['app']
        assert name in self.root['app'][app]['gitdeploys']
        self.mongo_service.rm_roottree(('app', app, 'gitdeploys', name))
        self.RmThreadLocalRootTree(('app', app, 'gitdeploys', name))
        self.mongo_service.delete('gitdeploys', {'name': name, 'application': app})
    def GetGitProviders(self):
        '''
        Get all gitproviders
        @rtype: list(str) | None
        '''
        return [k for k in self.root['global']['gitproviders'] if k[0] != '_']
    def GetGitProvider(self, name):
        '''
        Get gitprovider document
        @rtype: dict | None
        '''
        assert name
        assert elita.util.type_check.is_string(name)
        assert name in self.root['global']['gitproviders']
        doc = self.mongo_service.get('gitproviders', {'name': name})
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'}
    def NewGitProvider(self, name, provider_type, auth):
        '''
        Create new gitprovider object
        '''
        # bugfix: the original asserted the *builtin* 'type' (always truthy) instead of the
        # provider_type parameter, so an empty provider_type slipped past validation
        assert name and provider_type and auth
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_string(provider_type)
        assert provider_type in ('bitbucket', 'github')
        assert elita.util.type_check.is_dictlike(auth)
        assert 'username' in auth and 'password' in auth
        assert elita.util.type_check.is_string(auth['username'])
        assert elita.util.type_check.is_string(auth['password'])
        gpo = models.GitProvider({
            'name': name,
            'type': provider_type,
            'auth': auth
        })
        gpid = self.mongo_service.create_new('gitproviders', {'name': name}, 'GitProvider', gpo.get_doc())
        self.mongo_service.update_roottree(('global', 'gitproviders', name), 'gitproviders', gpid)
        self.AddThreadLocalRootTree(('global', 'gitproviders', name))
    def UpdateGitProvider(self, name, doc):
        '''
        Modify gitprovider with the data in doc (dict merge or patch sequence).
        Returns False if applying a patch sequence fails, True otherwise.
        '''
        assert name and doc
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert name in self.root['global']['gitproviders']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('gitproviders', {'name': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('gitproviders', {'name': name}, doc)
            except:
                return False
        return True
    def DeleteGitProvider(self, name):
        '''
        Delete gitprovider object and root_tree reference
        '''
        assert name
        assert elita.util.type_check.is_string(name)
        assert name in self.root['global']['gitproviders']
        self.mongo_service.rm_roottree(('global', 'gitproviders', name))
        self.RmThreadLocalRootTree(('global', 'gitproviders', name))
        self.mongo_service.delete('gitproviders', {'name': name})
    def GetGitRepos(self, app):
        '''
        Get a list of all gitrepos associated with application
        @rtype: list(str)
        '''
        assert app
        assert elita.util.type_check.is_string(app)
        assert app in self.root['app']
        return [k for k in self.root['app'][app]['gitrepos'] if k[0] != '_']
    def GetGitRepo(self, app, name):
        '''
        Get gitrepo document with gitprovider and keypair DBRefs dereferenced
        @rtype: dict
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert app in self.root['app']
        assert name in self.root['app'][app]['gitrepos']
        doc = self.mongo_service.get('gitrepos', {'name': name, 'application': app})
        assert doc and 'gitprovider' in doc and 'keypair' in doc
        doc['created_datetime'] = doc['_id'].generation_time
        gitprovider = self.mongo_service.dereference(doc['gitprovider'])
        doc['gitprovider'] = {k: gitprovider[k] for k in gitprovider if k[0] != '_'}
        keypair = self.mongo_service.dereference(doc['keypair'])
        doc['keypair'] = {k: keypair[k] for k in keypair if k[0] != '_'}
        return {k: doc[k] for k in doc if k[0] != '_'}
    def NewGitRepo(self, app, name, keypair, gitprovider, uri):
        '''
        Create new gitrepo object
        @rtype: dict
        '''
        assert app and name and keypair and gitprovider
        assert all([elita.util.type_check.is_string(p) for p in (app, name, keypair, gitprovider)])
        assert elita.util.type_check.is_optional_str(uri)
        assert app in self.root['app']
        assert keypair in self.root['global']['keypairs']
        assert gitprovider in self.root['global']['gitproviders']
        #get docs so we can generate DBRefs
        kp_doc = self.mongo_service.get('keypairs', {'name': keypair})
        gp_doc = self.mongo_service.get('gitproviders', {'name': gitprovider})
        if not gp_doc:
            return {'NewGitRepo': "gitprovider '{}' is unknown".format(gitprovider)}
        if not kp_doc:
            return {'NewGitRepo': "keypair '{}' is unknown".format(keypair)}
        gro = models.GitRepo({
            'name': name,
            'application': app,
            'keypair': bson.DBRef("keypairs", kp_doc['_id']),
            'gitprovider': bson.DBRef("gitproviders", gp_doc['_id']),
            'uri': uri,
            'last_build': None
        })
        grid = self.mongo_service.create_new('gitrepos', {'name': name, 'application': app}, 'GitRepo', gro.get_doc())
        self.mongo_service.update_roottree(('app', app, 'gitrepos', name), 'gitrepos', grid)
        self.AddThreadLocalRootTree(('app', app, 'gitrepos', name))
        return {'NewGitRepo': 'ok'}
    def UpdateGitRepo(self, app, name, doc):
        '''
        Update gitrepo with the data in doc (dict merge or patch sequence).
        Returns False if applying a patch sequence fails, True otherwise.
        '''
        assert app and name and doc
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert app in self.root['app']
        # consistency fix: every sibling Update* method verifies the object exists in root_tree;
        # this one was missing the check
        assert name in self.root['app'][app]['gitrepos']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('gitrepos', {'name': name, 'application': app}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('gitrepos', {'name': name, 'application': app}, doc)
            except:
                return False
        return True
    def DeleteGitRepo(self, app, name):
        '''
        Delete gitrepo object and root_tree reference
        '''
        assert app and name
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert app in self.root['app']
        assert name in self.root['app'][app]['gitrepos']
        self.mongo_service.rm_roottree(('app', app, 'gitrepos', name))
        self.RmThreadLocalRootTree(('app', app, 'gitrepos', name))
        self.mongo_service.delete('gitrepos', {'name': name, 'application': app})
class DeploymentDataService(GenericChildDataService):
    '''
    Data layer for deployment objects and their nested two-phase progress structure.
    '''
    def NewDeployment(self, app, build_name, environments, groups, servers, gitdeploys, username, options):
        '''
        Create new deployment object.
        Targets are specified either as (environments + groups) or as (servers + gitdeploys).
        @rtype: dict
        '''
        assert app and build_name and username and options and ((environments and groups) or (servers and gitdeploys))
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(build_name)
        assert elita.util.type_check.is_string(username)
        assert elita.util.type_check.is_dictlike(options)
        assert app in self.root['app']
        assert build_name in self.root['app'][app]['builds']
        assert username in self.root['global']['users']
        # the two targeting pairs must each be both-present (as sequences) or both-absent
        assert all([not p for p in (environments, groups)]) or all([elita.util.type_check.is_seq(p) for p in (environments, groups)])
        assert all([not p for p in (servers, gitdeploys)]) or all([elita.util.type_check.is_seq(p) for p in (servers, gitdeploys)])
        assert not environments or set(environments).issubset(set(self.deps['ServerDataService'].GetEnvironments()))
        assert not groups or all([g in self.root['app'][app]['groups'] for g in groups])
        assert not servers or all([s in self.root['server'] for s in servers])
        assert not gitdeploys or all([gd in self.root['app'][app]['gitdeploys'] for gd in gitdeploys])
        dpo = models.Deployment({
            'name': "",
            'application': app,
            'build_name': build_name,
            'environments': environments,
            'groups': groups,
            'servers': servers,
            'gitdeploys': gitdeploys,
            'username': username,
            'options': options,
            'status': 'created',
            'job_id': ''
        })
        did = self.mongo_service.create_new('deployments', {}, 'Deployment', dpo.get_doc(), remove_existing=False)
        # we don't know the full deployment 'name' until it's inserted
        name = "{}_{}".format(build_name, str(did))
        self.mongo_service.modify('deployments', {'_id': did}, ('name',), name)
        self.mongo_service.update_roottree(('app', app, 'deployments', name), 'deployments', did)
        self.AddThreadLocalRootTree(('app', app, 'deployments', name))
        return {
            'NewDeployment': {
                'application': app,
                'id': name
            }
        }
    def GetDeployments(self, app, sort=False, with_details=False):
        '''
        Get a list of all deployments for application.
        sort can be either "asc" or "desc" (False indicates no sorting). Sorting always done by created_datetime
        @rtype: list(str)
        '''
        assert app
        assert elita.util.type_check.is_string(app)
        assert app in self.root['app']
        #query from mongo instead of root_tree so we can sort and get datetimes
        deployments = self.mongo_service.get('deployments', {'application': app}, multi=True, empty=True)
        #pymongo does not easily let you sort by generation_time internally, so we have to hack it here
        if sort:
            deployments = sorted(deployments, key=lambda d: d['_id'].generation_time, reverse=(sort == "desc"))
        if with_details:
            for d in deployments:
                d['created'] = d['_id'].generation_time.isoformat(' ')
            # strip mongo-internal ('_'-prefixed) keys from each doc
            return [{k: doc[k] for k in doc if k[0] != '_'} for doc in deployments]
        else:
            return [doc['name'] for doc in deployments]
    def GetDeployment(self, app, name):
        '''
        Get a specific deployment document (without mongo-internal keys)
        '''
        assert app and name
        assert app in self.root['app']
        assert name in self.root['app'][app]['deployments']
        doc = self.mongo_service.get('deployments', {'application': app, 'name': name})
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'}
    def UpdateDeployment(self, app, name, doc):
        '''
        Modify deployment object with the data in doc (dict merge or patch sequence).
        Returns False if applying a patch sequence fails, True otherwise.
        '''
        assert app and name and doc
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert app in self.root['app']
        assert name in self.root['app'][app]['deployments']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('deployments', {'application': app, 'name': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('deployments', {'application': app, 'name': name}, doc)
            except:
                return False
        return True
    def InitializeDeploymentPlan(self, app, name, batches, gitrepos):
        '''
        Create the appropriate structure in the "progress" field of the deployment.
        @type app: str
        @type name: str
        @type batches: list(dict)
        @type gitrepos: list(str)
        '''
        assert app and name and batches and gitrepos
        assert elita.util.type_check.is_string(app)
        assert elita.util.type_check.is_seq(batches)
        assert all([elita.util.type_check.is_dictlike(b) for b in batches])
        assert elita.util.type_check.is_seq(gitrepos)
        # phase 1: one progress record per gitrepo, merged in one at a time
        for gr in gitrepos:
            doc = {
                'progress': {
                    'phase1': {
                        'gitrepos': {
                            gr: {
                                'progress': 0,
                                'step': 'not started',
                                'changed_files': []
                            }
                        }
                    }
                }
            }
            self.UpdateDeployment(app, name, doc)
        # at a low level, deployment operates in a gitdeploy-centric way
        # but on a human level, a server-centric view is more intuitive, so we generate a list of servers per batch,
        # then the gitdeploys to be deployed on each server:
        #   batch 0:
        #       server0:
        #           - gitdeployA
        #               * path: C:\foo\bar
        #               * package: webapplication
        #               * progress: 0%
        #               * state: not started
        #           - gitdeployB
        #               * progress: 10%
        #               * package: webapplication
        #               * progress: 0%
        #               * state: checking out default branch
        doc = {
            'progress': {
                'phase2': {}
            }
        }
        for i, batch in enumerate(batches):
            batch_name = 'batch{}'.format(i)
            doc['progress']['phase2'][batch_name] = {}
            for server in batch['servers']:
                doc['progress']['phase2'][batch_name][server] = {}
                for gitdeploy in batch['gitdeploys']:
                    gddoc = self.deps['GitDataService'].GetGitDeploy(app, gitdeploy)
                    # only record the gitdeploy for servers it is actually deployable to
                    if server in elita.deployment.deploy.determine_deployabe_servers(gddoc['servers'], [server]):
                        doc['progress']['phase2'][batch_name][server][gitdeploy] = {
                            'path': gddoc['location']['path'],
                            'package': gddoc['package'],
                            'progress': 0,
                            'state': 'not started'
                        }
        logging.debug('InitializeDeploymentPlan: doc: {}'.format(doc))
        self.UpdateDeployment(app, name, doc)
    def StartDeployment_Phase(self, app, name, phase):
        '''
        Mark the progress field currently_on as phaseN
        '''
        assert app and name and phase
        assert all([elita.util.type_check.is_string(p) for p in (app, name)])
        assert isinstance(phase, int) and (phase == 1 or phase == 2)
        assert app in self.root['app']
        assert name in self.root['app'][app]['deployments']
        self.UpdateDeployment(app, name, {'progress': {'currently_on': 'phase{}'.format(phase)}})
    def FailDeployment(self, app, name):
        '''
        Mark deployment as failed in event of errors
        '''
        assert app and name
        assert all([elita.util.type_check.is_string(p) for p in (app, name)])
        assert app in self.root['app']
        assert name in self.root['app'][app]['deployments']
        self.UpdateDeployment(app, name, {'progress': {'currently_on': 'failure'}})
        self.UpdateDeployment(app, name, {'status': 'error'})
    def CompleteDeployment(self, app, name):
        '''
        Mark deployment as done
        '''
        assert app and name
        assert all([elita.util.type_check.is_string(p) for p in (app, name)])
        assert app in self.root['app']
        assert name in self.root['app'][app]['deployments']
        self.UpdateDeployment(app, name, {'progress': {'currently_on': 'completed'}})
    def UpdateDeployment_Phase1(self, app, name, gitrepo, progress=None, step=None, changed_files=None):
        '''
        Phase 1 is gitrepo processing: decompressing package to local gitrepo, computing changes, committing/pushing.
        Only the keyword arguments that are passed (and truthy) are merged into the progress record.
        '''
        assert app and name and gitrepo
        assert all([elita.util.type_check.is_string(p) for p in (app, name, gitrepo)])
        assert (isinstance(progress, int) and 0 <= progress <= 100) or not progress
        assert elita.util.type_check.is_string(step) or elita.util.type_check.is_seq(step) or not step
        assert elita.util.type_check.is_optional_seq(changed_files)
        assert app in self.root['app']
        assert name in self.root['app'][app]['deployments']
        progress_dict = dict()
        # NOTE(review): falsy values (progress=0, empty step/changed_files) are treated as
        # "no update" by these truthiness tests -- confirm this is intended
        if progress:
            progress_dict['progress'] = progress
        if step:
            progress_dict['step'] = step
        if changed_files:
            progress_dict['changed_files'] = changed_files
        self.UpdateDeployment(app, name, {'progress': {'phase1': {'gitrepos': {gitrepo: progress_dict}}}})
    def UpdateDeployment_Phase2(self, app, name, gitdeploy, servers, batch, progress=None, state=None):
        '''
        Phase 2 is performing git pulls across all remote servers. Progress is presented per server, but the backend
        deployment is done per gitdeploy (to multiple servers)
        '''
        assert app and name and gitdeploy and servers
        assert all([elita.util.type_check.is_string(p) for p in (app, name, gitdeploy)])
        assert elita.util.type_check.is_seq(servers)
        assert isinstance(batch, int) and batch >= 0
        assert app in self.root['app']
        assert name in self.root['app'][app]['deployments']
        # build {batchN: {server: {gitdeploy: {...}}}} for every server in this batch
        progress_dict = {'batch{}'.format(batch): {}}
        for s in servers:
            progress_dict['batch{}'.format(batch)][s] = dict()
            progress_dict['batch{}'.format(batch)][s][gitdeploy] = dict()
            if progress:
                progress_dict['batch{}'.format(batch)][s][gitdeploy]['progress'] = progress
            if state:
                progress_dict['batch{}'.format(batch)][s][gitdeploy]['state'] = state
        self.UpdateDeployment(app, name, {'progress': {'phase2': progress_dict}})
class KeyDataService(GenericChildDataService):
    '''
    Data layer for keypair objects.
    '''
    def GetKeyPairs(self):
        '''
        Get all extant keypair names
        '''
        return [k for k in self.root['global']['keypairs'] if k[0] != '_']
    def GetKeyPair(self, name):
        '''
        Get keypair doc (without mongo-internal keys)
        '''
        assert name
        assert elita.util.type_check.is_string(name)
        assert name in self.root['global']['keypairs']
        doc = self.mongo_service.get('keypairs', {'name': name})
        doc['created_datetime'] = doc['_id'].generation_time
        return {k: doc[k] for k in doc if k[0] != '_'} if doc else None
    def NewKeyPair(self, name, attribs, key_type, private_key, public_key):
        '''
        Create new keypair object and root_tree reference.
        Returns a status dict; model validation errors are mapped to readable messages.
        '''
        assert name and key_type and private_key and public_key
        assert all([elita.util.type_check.is_string(p) for p in (name, key_type, private_key, public_key)])
        assert elita.util.type_check.is_optional_dict(attribs)
        try:
            kp_obj = models.KeyPair({
                "name": name,
                "attributes": attribs,
                "key_type": key_type,
                "private_key": private_key,
                "public_key": public_key
            })
        except:
            exc_type, exc_obj, tb = sys.exc_info()
            logging.debug("exception: {}, {}".format(exc_type, exc_obj))
            # bugfix: these were independent 'if' statements with an 'else' on only the last one,
            # so InvalidPrivateKey/InvalidPublicKey messages were always clobbered by the trailing
            # else branch ("unknown key error"); an elif chain keeps the first matching message
            if exc_type == elita.elita_exceptions.InvalidPrivateKey:
                err = "Invalid private key"
            elif exc_type == elita.elita_exceptions.InvalidPublicKey:
                err = "Invalid public key"
            elif exc_type == elita.elita_exceptions.InvalidKeyPairType:
                err = "Invalid key type"
            else:
                err = "unknown key error"
            return {
                'NewKeyPair': {
                    'status': "error",
                    'message': err
                }
            }
        kpid = self.mongo_service.create_new('keypairs', {'name': name}, 'KeyPair', kp_obj.get_doc())
        self.mongo_service.update_roottree(('global', 'keypairs', name), 'keypairs', kpid)
        self.AddThreadLocalRootTree(('global', 'keypairs', name))
        return {
            'NewKeyPair': {
                'status': 'ok'
            }
        }
    def UpdateKeyPair(self, name, doc):
        '''
        Update key pair object with data in doc (dict merge or patch sequence).
        Returns False if applying a patch sequence fails, True otherwise.
        '''
        assert name and doc
        assert elita.util.type_check.is_string(name)
        assert elita.util.type_check.is_dictlike(doc) or elita.util.type_check.is_seq(doc)
        assert name in self.root['global']['keypairs']
        if elita.util.type_check.is_dictlike(doc):
            self.UpdateObject('keypairs', {'name': name}, doc)
        else:
            try:
                self.UpdateObjectFromPatch('keypairs', {'name': name}, doc)
            except:
                return False
        return True
    def DeleteKeyPair(self, name):
        '''
        Delete keypair object and root_tree reference
        '''
        assert name
        assert elita.util.type_check.is_string(name)
        assert name in self.root['global']['keypairs']
        self.mongo_service.rm_roottree(('global', 'keypairs', name))
        self.RmThreadLocalRootTree(('global', 'keypairs', name))
        self.mongo_service.delete('keypairs', {'name': name})
class DataService:
'''
DataService is an object that holds all the data-layer handling objects. A DataService instance is part of the request
object and also passed to async jobs, etc. It is the main internal API for data handling.
'''
__metaclass__ = elita.util.LoggingMetaClass
def __init__(self, settings, db, root, job_id=None):
'''
@type root: RootTree
@type db: pymongo.database.Database
@type settings: pyramid.registry.Registry
@type job_id: str | None
'''
self.settings = settings
self.db = db
self.root = root
self.mongo_service = MongoService(db)
self.buildsvc = BuildDataService(self.mongo_service, root, settings, job_id=job_id)
self.usersvc = UserDataService(self.mongo_service, root, settings, job_id=job_id)
self.appsvc = ApplicationDataService(self.mongo_service, root, settings, job_id=job_id)
self.jobsvc = JobDataService(self.mongo_service, root, settings, job_id=job_id)
self.serversvc = ServerDataService(self.mongo_service, root, settings, job_id=job_id)
self.gitsvc = GitDataService(self.mongo_service, root, settings, job_id=job_id)
self.keysvc = KeyDataService(self.mongo_service, root, settings, job_id=job_id)
self.deploysvc = DeploymentDataService(self.mongo_service, root, settings, job_id=job_id)
self.actionsvc = ActionService(self)
self.groupsvc = GroupDataService(self.mongo_service, root, settings, job_id=job_id)
self.pmsvc = PackageMapDataService(self.mongo_service, root, settings, job_id=job_id)
#cross-dependencies between child dataservice objects above
self.appsvc.populate_dependencies({
'ServerDataService': self.serversvc,
'GroupDataService': self.groupsvc,
'GitDataService': self.gitsvc
})
self.groupsvc.populate_dependencies({
'ServerDataService': self.serversvc,
'GitDataService': self.gitsvc
})
self.jobsvc.populate_dependencies({
'ActionService': self.actionsvc,
'ApplicationDataService': self.appsvc
})
self.deploysvc.populate_dependencies({
'ServerDataService': self.serversvc,
'GroupDataService': self.groupsvc,
'GitDataService': self.gitsvc
})
#load all plugins and register actions/hooks
self.actionsvc.register()
#passed in if this is part of an async job
self.job_id = job_id
#super ugly below - only exists for plugin access
if job_id is not None:
self.salt_controller = salt_control.SaltController(self)
self.remote_controller = salt_control.RemoteCommands(self.salt_controller)
def GetAppKeys(self, app):
return [k for k in self.root['app'][app] if k[0] != '_']
def GetGlobalKeys(self):
    '''Return the names of all non-hidden keys under the global tree node
    (keys whose first character is '_' are internal and excluded).'''
    return [node_name for node_name in self.root['global'] if node_name[0] != '_']
| {
"repo_name": "bkeroack/elita",
"path": "elita/dataservice/__init__.py",
"copies": "1",
"size": "69752",
"license": "apache-2.0",
"hash": 7504837548507160000,
"line_mean": 41.2483343428,
"line_max": 289,
"alpha_frac": 0.5893164354,
"autogenerated": false,
"ratio": 3.9407909604519773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026308886690436426,
"num_lines": 1651
} |
__author__ = 'bkeroack'
import os
import pkg_resources
from clint.arguments import Args
from clint.textui import puts, colored
import subcommands
# Client configuration, overridable via environment variables.
# BUG FIX: ELITA_PORT from the environment is a string, while the default
# was the int 2718 -- cast so 'port' always has a consistent type.
config = {
    'host': os.environ.get('ELITA_HOST', 'localhost'),
    'port': int(os.environ.get('ELITA_PORT', 2718)),
    'secure': 'ELITA_SECURE' in os.environ,
    'ignore_cert': 'ELITA_IGNORE_CERT' in os.environ
}

# Mapping of CLI subcommand name -> handler class (invoked with config)
sub_commands = {
    'about': subcommands.About
}

# Top-level usage text shown when no (or an unknown) subcommand is given
help_text = """
{title} {version}
=================
USAGE: elita [SUBCOMMAND] [OPTIONS]
SUBCOMMANDS:
{subcommands}
""".format(title=colored.green('elita'), version=colored.magenta(pkg_resources.require("elita")[0].version),
           subcommands="\n".join([k for k in sub_commands]))
def Command_Line_Client():
    '''
    CLI entry point: parse the grouped command-line arguments and dispatch to
    the matching subcommand handler (called with the module-level config), or
    print the usage text if the subcommand is missing or unknown.
    '''
    args = Args().grouped
    for item in args:
        # BUG FIX: the original used "item is '_'", which relies on CPython
        # string interning for identity; compare by value instead.
        if item == '_':
            sc = args[item].all
            if not sc or len(sc) != 1 or sc[0] not in sub_commands:
                puts(help_text)
            else:
                sub_commands[sc[0]](config)
"repo_name": "bkeroack/elita",
"path": "elita_cli/__init__.py",
"copies": "1",
"size": "1066",
"license": "apache-2.0",
"hash": -6089715625281243000,
"line_mean": 23.25,
"line_max": 108,
"alpha_frac": 0.6181988743,
"autogenerated": false,
"ratio": 3.182089552238806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4300288426538806,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import os
import platform
from clint.textui import puts, indent, colored
import linux
import darwin
# Autodetect platform, perform correct installation routine
def Install():
    '''
    Detect the host OS and run the matching installation routine.

    Returns the platform installer's result on supported platforms
    (Linux/Darwin), otherwise prints an error and returns 1.
    '''
    os_name = os.name
    p_name = platform.system()
    puts("Installing Elita")
    with indent(4, quote=colored.blue('> ')):
        if os_name == 'posix':
            if p_name == 'Linux':
                puts("OS: {}".format(colored.green("Linux")))
                return linux.InstallLinux()
            elif p_name == 'Darwin':
                puts("OS: {}".format(colored.green("Darwin")))
                return darwin.InstallOSX()
            else:
                puts("{}: Unknown/unsupported POSIX-like OS: {}".format(colored.red("ERROR"), p_name))
        # BUG FIX: this was a separate "if", so an unknown POSIX OS also fell
        # into the final "else" and printed a second, generic error message.
        elif os_name == 'nt':
            puts(colored.magenta("LOL Windows."))
            puts(colored.red("Only Linux/POSIX-like OSes are supported for Elita server installation."))
        else:
            puts("{}: Unsupported OS: {}".format(colored.red("ERROR"), p_name))
    return 1
"repo_name": "bkeroack/elita",
"path": "elita_install/__init__.py",
"copies": "1",
"size": "1058",
"license": "apache-2.0",
"hash": 2948814902095085000,
"line_mean": 30.1470588235,
"line_max": 104,
"alpha_frac": 0.5746691871,
"autogenerated": false,
"ratio": 3.9185185185185185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4993187705618518,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import os
import shutil
import requests
import zipfile
import tarfile
import tempfile
import logging
import glob2
import billiard
import elita.util
class SupportedFileType:
    '''Enumeration of archive formats accepted for build packages.'''
    TarGz = 'tar.gz'
    TarBz2 = 'tar.bz2'
    Zip = 'zip'
    # All supported type strings, used to validate user-supplied file_type values
    types = [TarBz2, TarGz, Zip]
class UnsupportedFileType(Exception):
    '''Raised when an archive format is not one of SupportedFileType.types.'''
    pass
class PackagingError(Exception):
    '''Raised when a package-map subprocess times out or exits abnormally.'''
    pass
#async callables
def store_indirect_build(datasvc, app, build, file_type, uri, verify, package_map):
    '''
    Async callable: download a build file from a remote URI into a temp file,
    then delegate to store_uploaded_build for validation and storage.

    @param verify: passed through to requests.get for TLS cert verification
    @param package_map: optional package map applied after storage (or falsy)
    '''
    logging.debug("indirect_upload: downloading from {}".format(uri))
    datasvc.jobsvc.NewJobData({'status': 'Downloading build file from {}'.format(uri)})
    r = requests.get(uri, verify=verify)
    fd, temp_file = tempfile.mkstemp()
    # BUG FIX: write through the descriptor returned by mkstemp instead of
    # reopening the file by name, which leaked the original descriptor.
    with os.fdopen(fd, 'wb') as f:
        f.write(r.content)
    logging.debug("download and file write complete")
    datasvc.jobsvc.NewJobData({'status': "download and file write complete"})
    return store_uploaded_build(datasvc, app, build, file_type, temp_file, package_map)
def store_uploaded_build(datasvc, app, build, file_type, temp_file, package_map):
    '''
    Async callable: validate temp_file (size + archive integrity), move it into
    the builds directory, update the build document (master file, per-package
    entries, files list), optionally apply a package map, and fire the
    BUILD_UPLOAD_SUCCESS hook. Returns a summary dict, or an error dict if
    validation fails.
    '''
    builds_dir = datasvc.settings['elita.builds.dir']
    minimum_build_size = int(datasvc.settings['elita.builds.minimum_size'])
    bs_obj = BuildStorage(builds_dir, app, build, file_type=file_type, input_file=temp_file,
                          size_cutoff=minimum_build_size)
    datasvc.jobsvc.NewJobData({'status': 'validating file size and type'})
    if not bs_obj.validate():
        return {'error': "Invalid file type or corrupted file--check log"}
    datasvc.jobsvc.NewJobData({'status': 'storing file in builds dir'})
    # store() moves the temp file into permanent storage and returns its path
    fname = bs_obj.store()
    logging.debug("bs_results: {}".format(fname))
    datasvc.jobsvc.NewJobData({'status': 'updating build packages'})
    build_doc = datasvc.buildsvc.GetBuild(app, build)
    build_doc['master_file'] = fname
    build_doc['packages']['master'] = {'filename': fname, 'file_type': file_type}
    if package_map:
        datasvc.jobsvc.NewJobData({'status': 'applying package map',
                                   'package_map': package_map})
        # generated packages land next to the master file (same build dir)
        pm = PackageMapper(fname, file_type, file_type, os.path.dirname(fname), package_map)
        packages = pm.apply()
        pm.cleanup()
        for pkg in packages:
            build_doc['packages'][pkg] = packages[pkg]
    # keep the flat 'files' list in sync with the packages mapping
    # (note: fname/ftype are intentionally reused as loop-local names here)
    for k in build_doc['packages']:
        fname = build_doc['packages'][k]['filename']
        ftype = build_doc['packages'][k]['file_type']
        found = False
        for f in build_doc['files']:
            if f['path'] == fname:
                found = True
        if not found:
            build_doc['files'].append({"file_type": ftype, "path": fname})
    build_doc['stored'] = True
    logging.debug("packages: {}".format(build_doc['packages'].keys()))
    datasvc.buildsvc.UpdateBuild(app, build, build_doc)
    datasvc.jobsvc.NewJobData({'status': 'running hook BUILD_UPLOAD_SUCCESS'})
    args = {
        'hook_parameters':
            {
                'build_name': build,
                'build_storage_info':
                    {
                        'storage_dir': bs_obj.storage_dir,
                        'filename': build_doc['master_file'],
                        'file_type': file_type
                    }
            }
    }
    res = datasvc.actionsvc.hooks.run_hook(app, 'BUILD_UPLOAD_SUCCESS', args)
    return {
        "build_stored": {
            "application": app,
            "build_name": build,
            "actions_result": res
        }
    }
class BuildError(Exception):
    '''Raised when a build package cannot be decompressed (unknown file type).'''
    pass
class BuildStorage:
    '''
    Validates an uploaded build archive (minimum size + archive integrity for
    the declared type) and copies it into the per-application builds directory.
    '''
    __metaclass__ = elita.util.LoggingMetaClass

    def __init__(self, builds_toplevel_dir=None, application=None, name=None, file_type=None, input_file=None,
                 size_cutoff=1000000):
        '''
        @param builds_toplevel_dir: root dir for stored builds (used as a string
               prefix, so it is expected to end with a path separator)
        @param application: application name (becomes a subdirectory)
        @param name: build name (becomes a subdirectory and the stored filename)
        @param file_type: one of SupportedFileType.types
        @param input_file: path to the uploaded temp file
        @param size_cutoff: minimum acceptable file size in bytes
        '''
        assert isinstance(size_cutoff, int)
        assert size_cutoff > 0
        self.builds_toplevel_dir = builds_toplevel_dir
        self.name = name
        self.application = application
        self.file_type = file_type
        self.temp_file_name = input_file
        self.size_cutoff = size_cutoff

    def create_storage_dir(self):
        '''Create (if needed) and remember the storage dir for this build.'''
        build_dir = "{}{}/{}".format(self.builds_toplevel_dir, self.application, self.name)
        if not os.path.exists(build_dir):
            os.makedirs(build_dir)
        self.storage_dir = build_dir

    def store(self):
        '''Copy the temp file into the storage dir, delete the temp file, and
        return the stored filename.'''
        self.create_storage_dir()
        fname = "{}/{}.{}".format(self.storage_dir, self.name, self.file_type)
        shutil.copy(self.temp_file_name, fname)
        os.remove(self.temp_file_name)
        return fname

    def validate_file_size(self):
        '''Return True if the temp file meets the minimum size cutoff.'''
        filesize = os.path.getsize(self.temp_file_name)
        logging.debug("validate_file_size: size_cutoff: {}".format(self.size_cutoff))
        logging.debug("validate_file_size: temp size: {}".format(filesize))
        return filesize >= self.size_cutoff

    def validate(self):
        '''Return True iff the file passes the size check and is a readable
        archive of the declared file_type.'''
        if not self.validate_file_size():
            return False
        if self.file_type == SupportedFileType.TarGz:
            return self.validate_tgz()
        elif self.file_type == SupportedFileType.TarBz2:
            return self.validate_tbz2()
        elif self.file_type == SupportedFileType.Zip:
            return self.validate_zip()
        # BUG FIX: previously fell through and implicitly returned None for an
        # unsupported file_type; make the rejection explicit.
        return False

    def validate_tgz(self):
        return self.validate_tar('gz')

    def validate_tbz2(self):
        return self.validate_tar('bz2')

    def validate_tar(self, compression):
        '''Return True if the temp file opens cleanly as a tarball with the
        given compression ('gz' or 'bz2').'''
        try:
            with tarfile.open(name=self.temp_file_name, mode='r:{}'.format(compression)) as tf:
                logging.debug("tar.{}: {}, {}, {} members".format(compression, self.application, self.name,
                                                                  len(tf.getnames())))
        except tarfile.ReadError:
            logging.debug("tar.{}: invalid tar file!".format(compression))
            return False
        return True

    def validate_zip(self):
        '''Return True if the temp file opens cleanly as a zip archive.'''
        try:
            with zipfile.ZipFile(self.temp_file_name, mode='r') as zf:
                logging.debug("zip: {}, {}, {} members".format(self.application, self.name, len(zf.namelist())))
        except zipfile.BadZipfile:
            logging.debug("zip: invalid zip file!")
            return False
        return True
class BuildFile:
    '''Wraps a stored package document ('filename' + 'file_type') and knows how
    to decompress the archive it points at.'''

    def __init__(self, package_doc):
        self.file_type = package_doc['file_type']
        self.filename = package_doc['filename']

    def decompress(self, target_path):
        '''Extract the archive into target_path, dispatching on file_type.
        Raises BuildError for an unrecognized type.'''
        handlers = {
            SupportedFileType.Zip: self.decompress_zip,
            SupportedFileType.TarBz2: self.decompress_tbz2,
            SupportedFileType.TarGz: self.decompress_tgz
        }
        if self.file_type not in handlers:
            raise BuildError
        handlers[self.file_type](target_path)

    def decompress_tar(self, target_path, ext):
        '''Extract a tarball with the given compression ext ('gz' or 'bz2').'''
        with tarfile.open(name=self.filename, mode='r:{}'.format(ext)) as tf:
            tf.extractall(target_path)

    def decompress_tbz2(self, target_path):
        self.decompress_tar(target_path, 'bz2')

    def decompress_tgz(self, target_path):
        self.decompress_tar(target_path, 'gz')

    def decompress_zip(self, target_path):
        with zipfile.ZipFile(self.filename, 'r') as zf:
            zf.extractall(target_path)
def _threadsafe_apply_package(output_dir, package_name, package, target_type, cwd, q):
    '''
    Executed concurrently in a separate process. Applies a set of patterns to an already-decompressed master package,
    creates package and pushes to output directory

    @param output_dir: directory the finished package archive is moved into
    @param package_name: name of the package (used as the archive base name)
    @param package: dict with 'patterns' (glob list) and optional 'prefix'/'remove_prefix'
    @param target_type: one of SupportedFileType.types for the output archive
    @param cwd: directory containing the decompressed master package; this
                process chdirs into it so the globs are relative to it
    @type q: billiard.Queue
    '''
    assert elita.util.type_check.is_dictlike(package)
    assert 'patterns' in package
    assert elita.util.type_check.is_seq(package['patterns'])
    patterns = package['patterns']
    # optional archive-path rewriting: strip remove_prefix, then prepend prefix
    prefix = package['prefix'] if 'prefix' in package else None
    remove_prefix = package['remove_prefix'] if 'remove_prefix' in package else None

    def create_new_pkg():
        # Create an empty archive of the requested type in the current dir;
        # returns (archive object, archive filename).
        logging.debug("cwd: {}".format(os.getcwd()))
        if target_type == SupportedFileType.Zip:
            package_fname = "{}.zip".format(package_name)
            package_obj = zipfile.ZipFile(package_fname, 'w')
        elif target_type == SupportedFileType.TarBz2:
            package_fname = "{}.tar.bz2".format(package_name)
            package_obj = tarfile.open(package_fname, mode='w:bz2')
        elif target_type == SupportedFileType.TarGz:
            package_fname = "{}.tar.gz".format(package_name)
            package_obj = tarfile.open(package_fname, mode='w:gz')
        else:
            raise UnsupportedFileType
        return package_obj, package_fname

    def add_file_to_pkg(filename, package_obj):
        # Add one file to the archive under its (possibly rewritten) arc name.
        assert filename
        arcname = str(filename).replace(remove_prefix, "", 1) if remove_prefix else filename
        arcname = "{}{}".format(prefix, arcname) if prefix else arcname
        if target_type == SupportedFileType.Zip:
            package_obj.write(filename, arcname, zipfile.ZIP_DEFLATED)
        elif target_type == SupportedFileType.TarBz2 or target_type == SupportedFileType.TarGz:
            package_obj.add(filename, arcname=arcname)
        else:
            raise UnsupportedFileType

    def apply_pattern(pattern, package_obj):
        # Glob the pattern (relative to cwd) and add every match to the archive.
        assert pattern and package_obj
        assert elita.util.type_check.is_string(pattern)
        assert package_obj
        logging.debug("applying pattern: {} ({})".format(pattern, package_name))
        files = glob2.glob(pattern)
        if files:
            logging.debug("adding files")
            for f in files:
                add_file_to_pkg(f, package_obj)
        else:
            logging.debug("no files for pattern!")

    # chdir is safe here because this runs in its own process
    os.chdir(cwd)
    po, pfn = create_new_pkg()
    for p in patterns:
        apply_pattern(p, po)
    po.close()
    shutil.move(pfn, "{}/{}".format(output_dir, pfn))
    # report the finished package back to the parent via the queue
    q.put({package_name: {'file_type': target_type, 'filename': "{}/{}".format(output_dir, pfn)}})
class PackageMapper:
    '''
    Applies supplied package map to the build. Assumes pre-validated package map and a build_dir that exists
    This puts the generated package files directly in the build storage directory and returns a mapping of package
    names to filenames/types (but doesn't update the build object)
    '''

    def __init__(self, master_package_filename, master_file_type, target_file_type, build_dir, package_map):
        '''
        @param master_package_filename: path to the stored master archive
        @param master_file_type: archive type of the master (SupportedFileType)
        @param target_file_type: archive type for the generated packages
        @param build_dir: build storage directory the packages are written into
        @param package_map: dict of package name -> {'patterns', optional 'prefix'/'remove_prefix'}
        '''
        assert master_package_filename and master_file_type and target_file_type and build_dir and package_map
        assert elita.util.type_check.is_string(master_package_filename)
        assert elita.util.type_check.is_string(master_file_type)
        assert elita.util.type_check.is_string(target_file_type)
        assert elita.util.type_check.is_string(build_dir)
        assert master_file_type in SupportedFileType.types and target_file_type in SupportedFileType.types
        assert elita.util.type_check.is_dictlike(package_map)
        self.master_pkg = master_package_filename
        self.master_ftype = master_file_type
        self.target_type = target_file_type
        self.build_dir = build_dir
        self.package_map = package_map
        self.temp_dir = tempfile.mkdtemp()
        self.old_cwd = os.getcwd()
        self.package_fname = None  # container for filename
        self.package_obj = None  # container for ZipFile/TarFile objs

    def cleanup(self):
        '''Remove the temp dir the master package was decompressed into.'''
        shutil.rmtree(self.temp_dir)

    def unpack_master_pkg(self):
        '''
        Decompress master package filename to temp location
        '''
        bf = BuildFile({'filename': self.master_pkg, 'file_type': self.master_ftype})
        bf.decompress(self.temp_dir)

    def apply(self):
        '''
        Apply the package map and return a dict of package_names to { 'file_type', 'filename' }

        Spawns one subprocess per package; raises PackagingError if any
        subprocess times out or exits abnormally.
        '''
        self.unpack_master_pkg()
        q = billiard.Queue()
        procs = list()
        for pkg in self.package_map:
            p = billiard.Process(target=_threadsafe_apply_package, name=pkg,
                                 args=(self.build_dir, pkg, self.package_map[pkg], self.target_type, self.temp_dir, q))
            p.start()
            procs.append(p)
        i = 0
        packages = dict()
        while i < len(procs):
            # BUG FIX: Queue.get's first positional argument is 'block', not a
            # timeout -- q.get(150) blocked forever; pass the timeout by keyword.
            pkg = q.get(timeout=150)
            assert pkg and len(pkg) == 1
            # list() so this also works where dict.keys() returns a view
            pkg_name = list(pkg.keys())[0]
            packages[pkg_name] = pkg[pkg_name]
            i += 1
        error = False
        for p in procs:
            p.join(150)
            if p.is_alive():
                logging.error("PackageMapper.apply(): timeout waiting for subprocess: {}".format(p.name))
                p.terminate()
                error = True
            if p.exitcode < 0:
                logging.error("PackageMapper.apply(): subprocess killed with signal {}".format(abs(p.exitcode)))
                error = True
            if p.exitcode > 0:
                logging.error("PackageMapper.apply(): subprocess died with exit code {}".format(p.exitcode))
                error = True
        if error:
            raise PackagingError
        return packages
| {
"repo_name": "bkeroack/elita",
"path": "elita/builds.py",
"copies": "1",
"size": "13088",
"license": "apache-2.0",
"hash": 1384370331425454600,
"line_mean": 36.3942857143,
"line_max": 119,
"alpha_frac": 0.6101772616,
"autogenerated": false,
"ratio": 3.86989946777055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.498007672937055,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import pymongo
import bson
import multiprocessing
import copy
import time
import random
import elita.dataservice.mongo_service
def setup_db():
    '''Connect to the local mongod and return (client, test database).'''
    client = pymongo.MongoClient(host='localhost', port=27017)
    return client, client['elita_testing']
def clear_mocks(db):
    '''Remove all documents from the mock_objs collection.'''
    db['mock_objs'].remove()

# Shared client/database handles used by every test in this module
mc, db = setup_db()
def _create_roottree():
    '''Reset root_tree to a minimal tree (one app 'foo', unlocked) and clear
    the mock_objs collection so each test starts from a known state.'''
    root = {
        '_lock': False,
        '_doc': bson.DBRef('dummy', bson.ObjectId()),
        'app': {
            'foo': {}
        }
    }
    db['root_tree'].remove()
    db['root_tree'].insert(root)
    clear_mocks(db)
# Fixture documents inserted into mock_objs by the tests below.
# NOTE: several tests mutate these in place (insert adds '_id', which the
# tests then delete), so tests that need a pristine copy deepcopy them first.
test_obj = {
    'name': 'foobar',
    'attributes': {
        'a': 0,
        'b': 1
    }
}
test_obj2 = {
    'name': 'baz',
    'attributes': {
        'a': 2,
        'b': 3
    }
}
def _threadsafe_direct_roottree_update(q, doc, path, collection, pause=0):
    '''
    Process-safe helper: insert doc into mock_objs via a private connection,
    then (after an optional pause) $set a DBRef to it at `path` in root_tree.
    Pushes a success boolean onto q.
    '''
    client = pymongo.MongoClient(host='localhost', port=27017)
    local_db = client['elita_testing']
    doc_id = local_db['mock_objs'].insert(doc)
    # insert() mutates doc with the generated _id; strip it again
    del doc['_id']
    dotted_path = '.'.join(path)
    ref_doc = {
        '_doc': bson.DBRef(collection, doc_id)
    }
    time.sleep(pause)
    result = local_db['root_tree'].update({}, {'$set': {dotted_path: ref_doc}})
    succeeded = result['n'] == 1 and result['updatedExisting'] and not result['err']
    q.put(succeeded, block=False)
def test_get_document():
    '''
    Test that retrieving a document works
    '''
    clear_mocks(db)
    insert_doc = copy.deepcopy(test_obj)
    insert_doc['name'] = 'tuna'
    db['mock_objs'].insert(insert_doc)
    # insert() mutates the dict with the generated _id; strip it again
    del insert_doc['_id']
    ms = elita.dataservice.mongo_service.MongoService(db)
    doc = ms.get('mock_objs', {'name': 'tuna'})
    assert doc
    assert 'name' in doc
    assert doc['name'] == 'tuna'
    assert 'attributes' in doc
def test_roottree_update():
    '''
    Test that a single root_tree update does what it should
    '''
    _create_roottree()
    # NOTE(review): 'id' shadows the builtin; kept as-is for fidelity
    id = db['mock_objs'].insert(test_obj)
    del test_obj['_id']
    ms = elita.dataservice.mongo_service.MongoService(db)
    ms.update_roottree(('app', 'foo', 'mocks', 'foobar'), 'mock_objs', id)
    rt_list = [d for d in db['root_tree'].find()]
    assert len(rt_list) == 1
    assert '_lock' in rt_list[0]
    assert not rt_list[0]['_lock']
    assert 'mocks' in rt_list[0]['app']['foo']
    assert 'foobar' in rt_list[0]['app']['foo']['mocks']
    assert '_doc' in rt_list[0]['app']['foo']['mocks']['foobar']
    # the stored reference must be a DBRef pointing back at the inserted doc
    assert rt_list[0]['app']['foo']['mocks']['foobar']['_doc'].__class__.__name__ == 'DBRef'
    assert rt_list[0]['app']['foo']['mocks']['foobar']['_doc'].collection == 'mock_objs'
    assert rt_list[0]['app']['foo']['mocks']['foobar']['_doc'].id == id
def test_roottree_direct_update():
    '''
    Test that direct modification of root_tree works
    '''
    _create_roottree()
    q = multiprocessing.Queue()
    # called synchronously here; the helper still reports success via the queue
    _threadsafe_direct_roottree_update(q, test_obj, ('app', 'foo', 'mocks', 'ashley'), 'mock_objs')
    result = q.get()
    assert result
    root = [d for d in db['root_tree'].find()]
    assert len(root) == 1
    root = root[0]
    assert 'ashley' in root['app']['foo']['mocks']
    assert '_doc' in root['app']['foo']['mocks']['ashley']
def test_roottree_multiple_simultaneous_direct_updates():
    '''
    Test that multiple simultaneous root_tree updates all succeed with no data loss
    '''
    _create_roottree()
    p_list = list()
    q = multiprocessing.Queue()
    # arbitrary distinct names, one per concurrent update
    names = (
        'lecherousness',
        'undefinable',
        'agatizing',
        'jamesburg',
        'falling',
        'subtentacular',
        'acronymized',
        'chandleries',
        'croupiness',
        'rebleach',
        'unstartled',
        'unherbaceous'
    )
    doc_list = [copy.deepcopy(test_obj) for n in names]
    for n, d in zip(names, doc_list):
        d['name'] = n
        path = ('app', 'foo', 'mocks', n)
        # random pause staggers the writes to exercise真 concurrent interleaving
        p = multiprocessing.Process(target=_threadsafe_direct_roottree_update,
                                    args=[q], kwargs={'doc': d, 'path': path, 'collection': 'mock_objs',
                                                      'pause': random.random()/4})
        p_list.append(p)
    for p in p_list:
        p.start()
    for p in p_list:
        p.join(600)
    results = [q.get() for p in p_list]
    assert all(results)
    root = [d for d in db['root_tree'].find()]
    assert len(root) == 1
    root = root[0]
    # every concurrent update must be present -- none may clobber another
    assert all([n in root['app']['foo']['mocks'] for n in names])
    assert not root['_lock']
def test_roottree_delete_reference():
    '''
    Test that deleting a reference from root_tree works
    '''
    _create_roottree()
    insert_doc = copy.deepcopy(test_obj)
    insert_doc['name'] = 'salamander'
    ms = elita.dataservice.mongo_service.MongoService(db)
    q = multiprocessing.Queue()
    _threadsafe_direct_roottree_update(q, insert_doc, ('app', 'foo', 'mocks', 'salamander'), 'mock_objs')
    # verify the reference exists before attempting the delete
    rt = [d for d in db['root_tree'].find()]
    assert rt
    assert len(rt) == 1
    root = rt[0]
    assert 'app' in root
    assert 'foo' in root['app']
    assert 'mocks' in root['app']['foo']
    assert 'salamander' in root['app']['foo']['mocks']
    ms.rm_roottree(('app', 'foo', 'mocks', 'salamander'))
    # the reference must be gone but the surrounding tree intact
    rt = [d for d in db['root_tree'].find()]
    assert rt
    assert len(rt) == 1
    root = rt[0]
    assert 'app' in root
    assert 'foo' in root['app']
    assert 'mocks' in root['app']['foo']
    assert 'salamander' not in root['app']['foo']['mocks']
def test_create_new_document():
    '''
    Test that creating a new document works
    '''
    insert_doc = copy.deepcopy(test_obj)
    ms = elita.dataservice.mongo_service.MongoService(db)
    res = ms.create_new('mock_objs', {'name': 'swimming'}, 'Mock', insert_doc)
    assert res
    # exactly one matching doc should exist afterwards
    dlist = [d for d in db['mock_objs'].find({'name': 'swimming'})]
    assert len(dlist) == 1
def test_modify_existing_document():
    '''
    Test that modifying an existing document works
    '''
    insert_doc = copy.deepcopy(test_obj)
    ms = elita.dataservice.mongo_service.MongoService(db)
    res = ms.create_new('mock_objs', {'name': 'soccer'}, 'Mock', insert_doc)
    assert res
    doc = ms.get('mock_objs', {'name': 'soccer'})
    assert 'name' in doc
    assert doc['name'] == 'soccer'
    # modify a nested key via a path tuple, then re-read and verify
    ms.modify('mock_objs', {'name': 'soccer'}, ('attributes', 'b'), 99)
    doc = ms.get('mock_objs', {'name': 'soccer'})
    assert 'name' in doc
    assert doc['name'] == 'soccer'
    assert 'attributes' in doc
    assert 'b' in doc['attributes']
    assert doc['attributes']['b'] == 99
if __name__ == '__main__':
    # Allow running this module directly without a test runner. Tests share the
    # module-level mc/db handles and clean up their own state as needed.
    test_get_document()
    test_roottree_update()
    test_roottree_direct_update()
    test_roottree_multiple_simultaneous_direct_updates()
    test_roottree_delete_reference()
    test_create_new_document()
    test_modify_existing_document()
| {
"repo_name": "bkeroack/elita",
"path": "test/test_mongo.py",
"copies": "1",
"size": "6847",
"license": "apache-2.0",
"hash": -6909328713277020000,
"line_mean": 26.0632411067,
"line_max": 105,
"alpha_frac": 0.5764568424,
"autogenerated": false,
"ratio": 3.2918269230769233,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4368283765476923,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import shutil
import os
import os.path
import sys
import stat
import sh
from sh import useradd, chown, chmod
import logging
from clint.textui import puts, colored, indent
ELITA_HOME = "/home/elita"
ELITA_LOG_DIR = "/var/log/elita"
ELITA_ETC = "/etc/elita"
ELITA_INITD = "/etc/init.d/elita"
ELITA_DEFAULTS = "/etc/default/elita"
ELITA_DATADIR = "/var/lib/elita"
ELITA_GITDEPLOY = "{}/gitdeploy".format(ELITA_DATADIR)
ELITA_BUILDS = "{}/builds".format(ELITA_DATADIR)
LOGROTATE_DIR = "/etc/logrotate.d"
def get_root_dir():
return os.path.abspath(os.path.dirname(__file__))
def cp_file_checkperms(src, dest):
logging.debug("copying {} to {}".format(src, dest))
try:
shutil.copyfile(src, dest)
except IOError:
puts(colored.red("IO error (Insufficient permissions?)"))
sys.exit(1)
def mk_dir(dirname):
if not os.path.isdir(dirname):
try:
os.mkdir(dirname)
except IOError:
puts(colored.red("IO error (Insufficient permissions?)"))
sys.exit(1)
def cp_prod_ini_posix():
ini_location = os.path.join(get_root_dir(), "util", "elita.ini")
cp_file_checkperms(ini_location, os.path.join(ELITA_ETC, "elita.ini"))
def cp_initd_defaults():
defaults_location = os.path.join(get_root_dir(), "util", "init.d-defaults")
cp_file_checkperms(defaults_location, ELITA_DEFAULTS)
def cp_logrotate():
if os.path.isdir(LOGROTATE_DIR):
logrotate_location = os.path.join(get_root_dir(), "util", "logrotate")
cp_file_checkperms(logrotate_location, os.path.join(LOGROTATE_DIR, "elita"))
else:
puts(colored.yellow("Logrotate directory not found!"))
def chmod_ax_initd():
os.chmod(ELITA_INITD, stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def chown_dir(dirname):
chown("elita:elita", dirname, R=True) # use shell chown so we don't have to get the numeric UID/GID for 'elita'
def create_user_and_group():
try:
useradd("elita", s="/bin/false", d=ELITA_HOME, G='root') # root group so elita can run salt
except:
puts(colored.red("Error creating user/group elita!"))
def setup_nginx():
nginx_path = '/etc/nginx/sites-available'
elita_nginx_location = os.path.join(nginx_path, 'elita')
puts('\n')
with indent(4):
if os.path.isdir(nginx_path):
nginx_conf_location = os.path.join(get_root_dir(), "util", "nginx.conf")
cp_file_checkperms(nginx_conf_location, elita_nginx_location)
if not os.path.isdir('/etc/nginx/ssl'):
mk_dir('/etc/nginx/ssl')
puts(colored.magenta('Example nginx configuration copied to: {}'.format(elita_nginx_location)))
puts(colored.magenta('To use:'))
with indent(2, quote=colored.magenta('* ')):
puts(colored.magenta('Add a symlink in /etc/nginx/sites-enabled'))
puts(colored.magenta('Put your SSL certificate and key in /etc/nginx/ssl'))
puts(colored.magenta('Restart nginx'))
puts(colored.magenta('Elita will then be listening on port 2719 via SSL'))
else:
puts(colored.yellow('nginx not found. Install nginx and re-run'))
puts('\n')
def create_salt_dirs():
mk_dir('/srv')
mk_dir('/srv/salt')
mk_dir('/srv/salt/elita')
mk_dir('/srv/salt/elita/files')
mk_dir('/srv/salt/elita/files/win')
mk_dir('/srv/salt/elita/files/linux')
mk_dir('/srv/pillar')
chown("elita:elita", "/srv/pillar", R=True)
chown("elita:elita", "/srv/salt", R=True)
def copy_salt_files():
git_setup_location = os.path.join(get_root_dir(), "util", "git_wrapper_setup.ps1")
cp_file_checkperms(git_setup_location, '/srv/salt/elita/files/win/git_wrapper_setup.ps1')
def create_data_dirs():
mk_dir(ELITA_DATADIR)
mk_dir(ELITA_GITDEPLOY)
mk_dir(ELITA_BUILDS)
chown("elita:elita", ELITA_DATADIR, R=True)
def do_step(msg, func, params=[]):
puts(msg + " ... ", newline=False)
func(*params)
puts(colored.green("DONE"))
def add_salt_client_acl():
#find salt config
#if exists, load it
#deserialize the yaml
#add/modify client_acl
#serialize
#save
puts(colored.red("STUB"))
def InstallUbuntu():
puts("OS Flavor: Ubuntu")
do_step("Creating user and group 'elita'", create_user_and_group)
do_step("Creating log directory: {}".format(ELITA_LOG_DIR), mk_dir, [ELITA_LOG_DIR])
do_step("Setting ownership on log directory", chown_dir, [ELITA_LOG_DIR])
do_step("Creating data directories", create_data_dirs)
do_step("Copying logrotate script", cp_logrotate)
do_step("Creating config directory: {}".format(ELITA_ETC), mk_dir, [ELITA_ETC])
do_step("Setting ownership on config directory", chown_dir, [ELITA_ETC])
do_step("Creating running directory: {}".format(ELITA_HOME), mk_dir, [ELITA_HOME])
do_step("Creating Python egg cache directory", mk_dir, [os.path.join(ELITA_HOME, ".python-eggs")])
do_step("Setting ownership on running directory", chown_dir, [ELITA_HOME])
do_step("Copying ini", cp_prod_ini_posix)
do_step("Copying init.d defaults", cp_initd_defaults)
initd_location = os.path.join(get_root_dir(), "util", "init.d-elita")
do_step("Copying init.d script", cp_file_checkperms, [initd_location, ELITA_INITD])
do_step("Making init.d script executable", chmod_ax_initd)
do_step("Creating salt base dirs if necessary", create_salt_dirs)
do_step("Copying salt distributed files", copy_salt_files)
do_step("Adding elita to salt client_acl", add_salt_client_acl)
do_step("Setting up example nginx config", setup_nginx)
puts("Starting service...")
init_d = sh.Command(ELITA_INITD)
init_d("start")
puts(colored.yellow('Elita started and listening on http://localhost:2718/'))
puts(colored.green("Done!"))
def InstallLinux():
return InstallUbuntu() | {
"repo_name": "bkeroack/elita",
"path": "elita_install/linux.py",
"copies": "1",
"size": "5930",
"license": "apache-2.0",
"hash": 3686449027852783600,
"line_mean": 32.1340782123,
"line_max": 116,
"alpha_frac": 0.6507588533,
"autogenerated": false,
"ratio": 3.028600612870276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4179359466170276,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkeroack'
import sys
import logging
import traceback
import time
import billiard
import itertools
import pprint
pp = pprint.PrettyPrinter(indent=4)
import elita.util
import elita.util.type_check
import gitservice
import salt_control
from elita.actions.action import regen_datasvc
class FatalDeploymentError(Exception):
    '''Raised for unrecoverable errors during a deployment.'''
    pass
#async callable
def run_deploy(datasvc, application, build_name, target, rolling_divisor, rolling_pause, ordered_pause, deployment_id):
    '''
    Asynchronous entry point for deployments

    Chooses a rolling deployment (when target['groups'] is non-empty) or a
    manual deployment (target['servers'] / target['gitdeploys']), records the
    final status on the deployment object, and returns a status dict.
    '''
    # normally there's a higher level try/except block for all async actions
    # we want to make sure the error is saved in the deployment object as well, not just the job
    # so we duplicate the functionality here
    try:
        if target['groups']:
            logging.debug("run_deploy: Doing rolling deployment")
            dc = DeployController(datasvc, deployment_id)
            rdc = RollingDeployController(datasvc, dc, deployment_id)
            ret = rdc.run(application, build_name, target, rolling_divisor, rolling_pause, ordered_pause)
        else:
            logging.debug("run_deploy: Doing manual deployment")
            dc = DeployController(datasvc, deployment_id)
            # NOTE(review): 'data' is unused here; only ret drives the result
            ret, data = dc.run(application, build_name, target['servers'], target['gitdeploys'])
    except:
        # deliberate bare except (see comment above): any failure must be
        # captured on the deployment object, then reported to the caller
        exc_type, exc_obj, tb = sys.exc_info()
        f_exc = traceback.format_exception(exc_type, exc_obj, tb)
        results = {
            "error": "unhandled exception during callable!",
            "exception": f_exc
        }
        logging.debug("run_deploy: EXCEPTION: {}".format(f_exc))
        datasvc.deploysvc.UpdateDeployment(application, deployment_id, {"status": "error"})
        return {"deploy_status": "error", "details": results}
    datasvc.deploysvc.CompleteDeployment(application, deployment_id)
    datasvc.deploysvc.UpdateDeployment(application, deployment_id, {"status": "complete" if ret else "error"})
    return {"deploy_status": "done" if ret else "error"}
class BatchCompute:
'''
Given a list of application groups that require rolling deployment and an (optional) list that do not,
compute the optimal batches of server/gitdeploy pairs. All non-rolling groups are added to the first batch.
Splitting algorithm is tolerant of outrageously large rolling_divisors.
Written in a functional style to facilitate testing.
'''
@staticmethod
def add_nonrolling_groups(batches, nonrolling_docs):
'''
Add servers and gitdeploys from nonrolling groups to the first batch
Not written in a functional style because that was totally unreadable
'''
if nonrolling_docs and len(nonrolling_docs) > 0:
assert all(map(lambda x: 'servers' in x and 'gitdeploys' in x, batches)) or not batches
assert all(map(lambda x: 'servers' in x[1] and 'gitdeploys' in x[1], nonrolling_docs.iteritems()))
non_rolling_batches = list()
for g in nonrolling_docs:
servers = nonrolling_docs[g]['servers']
gitdeploys = nonrolling_docs[g]['gitdeploys']
ordered = isinstance(nonrolling_docs[g]['gitdeploys'][0], list)
if ordered:
for i, gdb in enumerate(nonrolling_docs[g]['gitdeploys']):
if i > len(non_rolling_batches)-1:
non_rolling_batches.append({'gitdeploys': gdb, 'servers': servers})
else:
non_rolling_batches[i]['servers'] = list(set(servers + non_rolling_batches[i]['servers']))
non_rolling_batches[i]['gitdeploys'] = list(set(gdb + non_rolling_batches[i]['gitdeploys']))
if i == len(nonrolling_docs[g]['gitdeploys'])-1:
non_rolling_batches[i]['ordered_gitdeploy'] = False
else:
non_rolling_batches[i]['ordered_gitdeploy'] = True
else:
non_rolling_batches.append({'gitdeploys': gitdeploys, 'servers': servers, 'ordered_gitdeploy': False})
for i, nrb in enumerate(non_rolling_batches):
if i > len(batches)-1:
batches.append(nrb)
else:
batches[i]['servers'] = list(set(nrb['servers'] + batches[i]['servers']))
batches[i]['gitdeploys'] = list(set(nrb['gitdeploys'] + batches[i]['gitdeploys']))
batches[i]['ordered_gitdeploy'] = nrb['ordered_gitdeploy']
return batches
@staticmethod
def dedupe_batches(batches):
'''
Dedupe servers and gitdeploys list in the combined batches list:
[
{ "servers": [ "server1", "server1", ...], "gitdeploys": [ "gd1", "gd1", ...] }, #batch 0 (all groups)
{ "servers": [ "server1", "server1", ...], "gitdeploys": [ "gd1", "gd1", ...] }, #batch 1 (all groups)
...
]
'''
assert len(batches) > 0
assert all(map(lambda x: 'servers' in x and 'gitdeploys' in x, batches))
return map(lambda x: {"servers": list(set(x['servers'])),
"gitdeploys": list(set(elita.util.flatten_list(x['gitdeploys']))),
"ordered_gitdeploy": x['ordered_gitdeploy']}, batches)
@staticmethod
def reduce_group_batches(accumulated, update):
assert 'servers' in accumulated and 'servers' in update
assert 'gitdeploys' in accumulated and 'gitdeploys' in update
return {
"servers": accumulated['servers'] + update['servers'],
"gitdeploys": accumulated['gitdeploys'] + update['gitdeploys']
}
@staticmethod
def coalesce_batches(batches):
'''
Combine the big list of batches into a single list.
Function is passed a list of lists:
[
[ { "servers": [...], "gitdeploys": [...] }, ... ], # batches 0-n for group A
[ { "servers": [...], "gitdeploys": [...] }, ... ], # batches 0-n for broup B
...
]
Each nested list represents the computed batches for an individual group. All nested lists are expected to be
the same length.
'''
if not batches:
return list()
return map(
lambda batch_aggregate: reduce(
lambda acc, upd:
{
'servers': acc['servers'] + upd['servers'],
'gitdeploys': acc['gitdeploys'] + upd['gitdeploys'],
'ordered_gitdeploy': acc['ordered_gitdeploy'] and upd['ordered_gitdeploy']
}, batch_aggregate
), itertools.izip_longest(*batches, fillvalue={"servers": [], "gitdeploys": [], "ordered_gitdeploy": False}))
@staticmethod
def compute_group_batches(divisor, group):
    '''
    Compute deployment batches for a single group.

    group is an iteritems() tuple from the group-docs dict: group[0] is the
    group name, group[1] is a dict with 'servers' and 'gitdeploys' keys.
    divisor is the rolling divisor (target number of server batches).

    Returns a list of dicts:
        [ { 'servers': [...], 'gitdeploys': [...], 'ordered_gitdeploy': bool }, ... ]

    If 'gitdeploys' is a list of lists, the gitdeploys are "ordered": each
    server batch is repeated once per inner list so the inner lists deploy
    sequentially, and 'ordered_gitdeploy' is True for every batch of a
    sequence except the last (so the caller can apply the ordered pause).
    '''
    assert len(group) == 2
    assert 'servers' in group[1]
    assert 'gitdeploys' in group[1]
    servers = group[1]['servers']
    gitdeploys = group[1]['gitdeploys']
    server_batches = elita.util.split_seq(servers, divisor)
    gd_multiplier = len(server_batches)  # gitdeploy_batches multiplier
    # BUGFIX: guard against an empty gitdeploy list; the previous unguarded
    # gitdeploys[0] access raised IndexError for groups with no gitdeploys
    ordered = bool(gitdeploys) and isinstance(gitdeploys[0], list)
    if ordered:
        # duplicate all server batches by the length of the gitdeploy list-of-lists
        server_batches = [x for item in server_batches for x in itertools.repeat(item, len(gitdeploys))]
        gitdeploy_batches = list(gitdeploys) * gd_multiplier
        # all ordered sub-batches except the last in each sequence keep the flag set
        ordered_flags = [True] * (len(gitdeploys) - 1)
        ordered_flags.append(False)
        ordered_flags = ordered_flags * gd_multiplier
    else:
        gitdeploy_batches = [gitdeploys] * gd_multiplier
        ordered_flags = [False] * gd_multiplier
    assert len(gitdeploy_batches) == len(server_batches)
    batches = [{'servers': sb, 'gitdeploys': gd, 'ordered_gitdeploy': of}
               for sb, gd, of in zip(server_batches, gitdeploy_batches, ordered_flags)]
    return batches
@staticmethod
def compute_rolling_batches(divisor, rolling_group_docs, nonrolling_group_docs):
    '''
    Top-level batch computation: compute the per-group batches for every
    rolling group, coalesce them into one batch list, fold the non-rolling
    groups into the first batch, and dedupe the result.
    '''
    assert isinstance(divisor, int)
    assert elita.util.type_check.is_optional_dict(rolling_group_docs)
    assert not rolling_group_docs or all(map(lambda x: 'servers' in x[1] and 'gitdeploys' in x[1], rolling_group_docs.iteritems()))
    group_items = rolling_group_docs.iteritems() if rolling_group_docs else tuple()
    per_group_batches = map(lambda g: BatchCompute.compute_group_batches(divisor, g), group_items)
    coalesced = BatchCompute.coalesce_batches(per_group_batches)
    with_nonrolling = BatchCompute.add_nonrolling_groups(coalesced, nonrolling_group_docs)
    return BatchCompute.dedupe_batches(with_nonrolling)
class RollingDeployController:
    '''
    Break deployment up into server/gitdeploy batches, then invoke DeployController with each batch sequentially
    '''
    # Python 2 metaclass hook: LoggingMetaClass wraps every method with debug-level call logging
    __metaclass__ = elita.util.LoggingMetaClass

    def __init__(self, datasvc, deploy_controller, deployment_id):
        '''
        @type datasvc: models.DataService
        '''
        self.datasvc = datasvc
        self.dc = deploy_controller
        self.deployment_id = deployment_id

    def get_nonrolling_groups(self, rolling_groups, all_groups):
        # set difference: the groups NOT doing a rolling deploy (order not preserved)
        return list(set(all_groups) - set(rolling_groups))

    def compute_batches(self, rolling_group_docs, nonrolling_group_docs, rolling_divisor):
        # delegate batch computation to the BatchCompute static helpers
        return BatchCompute.compute_rolling_batches(rolling_divisor, rolling_group_docs, nonrolling_group_docs)

    def run_hook(self, name, application, build_name, batches, batch_number=None, target=None):
        '''
        Fire the named deployment hook, building the hook_parameters payload
        expected by each hook type:
          - AUTO_DEPLOYMENT_START: adds target and the computed batches
          - AUTO_DEPLOYMENT_COMPLETE / AUTO_DEPLOYMENT_FAILED: adds the current deployment doc and batches
          - AUTO_DEPLOYMENT_BATCH_* (BEGIN/DONE): adds batch_number, batch_count and that batch
        '''
        args = {
            "hook_parameters": {
                "deployment_id": self.deployment_id,
                "build": build_name
            }
        }
        if name == "AUTO_DEPLOYMENT_START":
            args['hook_parameters']['target'] = target
            args['hook_parameters']['batches'] = batches
        if name == "AUTO_DEPLOYMENT_COMPLETE" or name == "AUTO_DEPLOYMENT_FAILED":
            args['hook_parameters']['deployment'] = self.datasvc.deploysvc.GetDeployment(application,
                                                                                         self.deployment_id)
            args['hook_parameters']['batches'] = batches
        if "AUTO_DEPLOYMENT_BATCH" in name:
            # substring match covers both _BATCH_BEGIN and _BATCH_DONE
            args['hook_parameters']['batch_number'] = batch_number
            args['hook_parameters']['batch_count'] = len(batches)
            args['hook_parameters']['batch'] = batches[batch_number]
        self.datasvc.actionsvc.hooks.run_hook(application, name, args)

    def run(self, application, build_name, target, rolling_divisor, rolling_pause, ordered_pause, parallel=True):
        '''
        Run rolling deployment. This should be called iff the deployment is called via groups/environments
        '''
        groups = target['groups']
        # partition the requested groups into rolling and non-rolling sets
        rolling_groups = [g for g in groups if self.datasvc.groupsvc.GetGroup(application, g)['rolling_deploy']]
        rolling_group_docs = {g: self.datasvc.groupsvc.GetGroup(application, g) for g in rolling_groups}
        nonrolling_groups = self.get_nonrolling_groups(rolling_groups, groups)
        nonrolling_group_docs = {g: self.datasvc.groupsvc.GetGroup(application, g) for g in nonrolling_groups}
        gd_docs = [self.datasvc.gitsvc.GetGitDeploy(application, gd) for gd in target['gitdeploys']]
        gitrepos = [gd['location']['gitrepo']['name'] for gd in gd_docs]
        batches = self.compute_batches(rolling_group_docs, nonrolling_group_docs, rolling_divisor)
        logging.debug("computed batches: {}".format(batches))
        #run pre hook
        self.run_hook("AUTO_DEPLOYMENT_START", application, build_name, batches, target=target)
        self.datasvc.deploysvc.InitializeDeploymentPlan(application, self.deployment_id, batches, gitrepos)
        self.datasvc.jobsvc.NewJobData({
            "RollingDeployment": {
                "batches": len(batches),
                "batch_data": batches
            }
        })
        # execute each batch sequentially; abort on the first failure
        for i, b in enumerate(batches):
            logging.debug("doing DeployController.run: deploy_gds: {}".format(b['gitdeploys']))
            #run start hook
            self.run_hook("AUTO_DEPLOYMENT_BATCH_BEGIN", application, build_name, batches, batch_number=i)
            ok, results = self.dc.run(application, build_name, b['servers'], b['gitdeploys'],
                                      parallel=parallel, batch_number=i)
            if not ok:
                self.datasvc.jobsvc.NewJobData({"RollingDeployment": "error"})
                self.run_hook("AUTO_DEPLOYMENT_FAILED", application, build_name, batches)
                return False
            #run batch done hook
            self.run_hook("AUTO_DEPLOYMENT_BATCH_DONE", application, build_name, batches, batch_number=i)
            # a child worker may have flagged the deployment as failed even though dc.run returned ok
            deploy_doc = self.datasvc.deploysvc.GetDeployment(application, self.deployment_id)
            assert deploy_doc
            if deploy_doc['status'] == 'error':
                self.datasvc.jobsvc.NewJobData({"message": "detected failed deployment so aborting further batches"})
                self.datasvc.jobsvc.NewJobData({"RollingDeployment": "error"})
                self.run_hook("AUTO_DEPLOYMENT_FAILED", application, build_name, batches)
                return False
            # pause between batches (skipped after the final batch); ordered
            # sub-batches use the (typically shorter) ordered_pause
            if i != (len(batches)-1):
                pause = ordered_pause if b['ordered_gitdeploy'] else rolling_pause
                msg = "pausing for {} seconds between batches ({})".format(pause,
                                                                           "ordered" if b['ordered_gitdeploy']
                                                                           else "batch complete")
                self.datasvc.jobsvc.NewJobData({"RollingDeployment": msg})
                logging.debug("RollingDeployController: {}".format(msg))
                time.sleep(pause)
        #run post hook
        self.run_hook("AUTO_DEPLOYMENT_COMPLETE", application, build_name, batches, target=target)
        return True
def determine_deployabe_servers(all_gd_servers, specified_servers):
    '''
    Return the servers present in both lists (set intersection, so ordering
    and duplicates are not preserved).
    NB: the "deployabe" typo in the name is kept for caller compatibility.
    '''
    return list(set(all_gd_servers) & set(specified_servers))
def _threadsafe_process_gitdeploy(gddoc, build_doc, settings, job_id, deployment_id):
    '''
    Threadsafe function for processing a single gitdeploy during a deployment.
    Creates own instance of datasvc, etc.

    Phase 1 of a deployment: decompress the build package into the local
    gitdeploy repository, then add/commit/push the result to the gitprovider.
    Progress is reported through UpdateDeployment_Phase1; on any error the
    'step' field is set to the exception text and the function returns early
    (nothing is raised to the caller).
    '''
    package = gddoc['package']
    package_doc = build_doc['packages'][package]
    # each worker process needs its own client/DataService instance
    client, datasvc = regen_datasvc(settings, job_id)
    gdm = gitservice.GitDeployManager(gddoc, datasvc)
    gitrepo_name = gddoc['location']['gitrepo']['name']
    datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                              progress=10,
                                              step='Checking out default branch')
    try:
        res = gdm.checkout_default_branch()
    except:
        # record the exception text in the deployment doc and bail out
        exc_msg = str(sys.exc_info()[1]).split('\n')
        exc_msg.insert(0, "ERROR: checkout_default_branch")
        datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                  step=exc_msg)
        return
    logging.debug("_threadsafe_process_gitdeploy: git checkout output: {}".format(str(res)))
    if gdm.last_build == build_doc['build_name']:
        # this build was already decompressed/committed to this gitrepo; skip all the work
        datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gitrepo_name: "already processed"}})
        datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                  progress=100,
                                                  step='Complete (already processed)')
    else:
        datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                  progress=25,
                                                  step='Decompressing package to repository')
        datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "processing"}})
        try:
            gdm.decompress_to_repo(package_doc)
        except:
            exc_msg = str(sys.exc_info()[1]).split('\n')
            exc_msg.insert(0, "ERROR: decompress_to_repo")
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      step=exc_msg)
            return
        datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                  progress=50,
                                                  step='Checking for changes')
        logging.debug("_threadsafe_process_gitdeploy: Checking for changes")
        try:
            res = gdm.check_repo_status()
        except:
            exc_msg = str(sys.exc_info()[1]).split('\n')
            exc_msg.insert(0, "ERROR: check_repo_status")
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      step=exc_msg)
            return
        logging.debug("_threadsafe_process_gitdeploy: git status results: {}".format(str(res)))
        # string match on the `git status` output to detect a no-op package
        if "nothing to commit" in res:
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      progress=100,
                                                      step='Complete (no changes found)')
            datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "no changes"}})
        else:
            # changes found: stage, commit, record the diff, then push
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      progress=60,
                                                      step='Adding changes to repository')
            datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "adding to repository"}})
            try:
                res = gdm.add_files_to_repo()
            except:
                exc_msg = str(sys.exc_info()[1]).split('\n')
                exc_msg.insert(0, "ERROR: add_files_to_repo")
                datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                          step=exc_msg)
                return
            logging.debug("_threadsafe_process_gitdeploy: git add result: {}".format(str(res)))
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      progress=70,
                                                      step='Committing changes to repository')
            datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "committing"}})
            try:
                res = gdm.commit_to_repo(build_doc['build_name'])
            except:
                exc_msg = str(sys.exc_info()[1]).split('\n')
                exc_msg.insert(0, "ERROR: commit_to_repo")
                datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                          step=exc_msg)
                return
            logging.debug("_threadsafe_process_gitdeploy: git commit result: {}".format(str(res)))
            try:
                commit_hash = gdm.get_latest_commit_hash()
                datasvc.deploysvc.UpdateDeployment(gddoc['application'], deployment_id,
                                                   {'commits': {gitrepo_name: str(commit_hash)}})
                logging.debug("_threadsafe_process_gitdeploy: git commit hash: {}".format(str(commit_hash)))
            except:
                # NB: deliberately no early return here -- the commit hash is
                # informational, so the deployment continues on failure
                exc_msg = str(sys.exc_info()[1]).split('\n')
                exc_msg.insert(0, "ERROR: get_commit_hash")
                datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                          step=exc_msg)
            datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "checking diff"}})
            try:
                res = gdm.inspect_latest_diff()
            except:
                exc_msg = str(sys.exc_info()[1]).split('\n')
                exc_msg.insert(0, "ERROR: inspect_latest_diff")
                datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                          step=exc_msg)
                return
            logging.debug("_threadsafe_process_gitdeploy: inspect diff result: {}".format(str(res)))
            # change to a list of dicts without filenames as keys to keep mongo happy
            changed_files = [{
                'filename': k,
                'deletions': res[k]['deletions'],
                'lines': res[k]['lines'],
                'insertions': res[k]['insertions']
            } for k in res]
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      changed_files=changed_files)
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      progress=90,
                                                      step='Pushing changes to gitprovider')
            datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "pushing"}})
            try:
                res = gdm.push_repo()
            except:
                exc_msg = str(sys.exc_info()[1]).split('\n')
                exc_msg.insert(0, "ERROR: push_repo")
                datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                          step=exc_msg)
                return
            logging.debug("_threadsafe_process_gitdeploy: git push result: {}".format(str(res)))
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      progress=100,
                                                      step='Complete')
        # remember which build the repo now holds so the next deployment of the
        # same build can take the "already processed" fast path above
        try:
            gdm.update_repo_last_build(build_doc['build_name'])
        except:
            exc_msg = str(sys.exc_info()[1]).split('\n')
            exc_msg.insert(0, "ERROR: update_repo_last_build")
            datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
                                                      step=exc_msg)
            return
def _threadsafe_pull_callback(results, tag, **kwargs):
    '''
    Passed to run_slses_async and is used to provide realtime updates to users polling the deploy job object

    kwargs must contain: datasvc, application, deployment_id, batch_number,
    gitdeploy. Any exception inside the body is swallowed (recorded on the job
    object) so it never propagates back into the salt client.
    '''
    try:
        assert all([arg in kwargs for arg in ('datasvc', 'application', 'deployment_id', 'batch_number', 'gitdeploy')])
    except AssertionError:
        #can't log anything to the job object because we may not have a valid DataService instance
        logging.error("***************** _threadsafe_pull_callback: AssertionError: incorrect kwargs ****************")
        return
    datasvc = kwargs['datasvc']
    app = kwargs['application']
    deployment_id = kwargs['deployment_id']
    batch_number = kwargs['batch_number']
    gitdeploy = kwargs['gitdeploy']
    try:
        datasvc.jobsvc.NewJobData({"DeployServers": {"results": results, "tag": tag}})
        for r in results:
            #callback results always have a 'ret' key but underneath it may be a simple string or a big complex object
            #for state call results. We have to unpack the state results if necessary
            this_result = results[r]['ret']
            datasvc.jobsvc.NewJobData(this_result)
            if elita.util.type_check.is_dictlike(this_result):  # state result
                for state_res in this_result:
                    if "result" in this_result[state_res]:
                        # unpack comment/stdout/stderr when present, with "(none)" fallbacks
                        state_comment = this_result[state_res]['comment'] if 'comment' in this_result[state_res] else state_res
                        stdout = this_result[state_res]["changes"]["stdout"] if 'changes' in this_result[state_res] and 'stdout' in this_result[state_res]['changes'] else "(none)"
                        stderr = this_result[state_res]["changes"]["stderr"] if 'changes' in this_result[state_res] and 'stderr' in this_result[state_res]['changes'] else "(none)"
                        if not this_result[state_res]["result"]:  #error
                            logging.debug("_threadsafe_pull_callback: got error result ({}; {})".format(gitdeploy, r))
                            datasvc.jobsvc.NewJobData({'status': 'error', 'message': 'failing deployment due to detected error'})
                            datasvc.deploysvc.UpdateDeployment_Phase2(app, deployment_id, gitdeploy, [r], batch_number,
                                                                      state="FAILURE: {}; stderr: {}; stdout: {}".format(state_comment, stderr, stdout))
                            datasvc.deploysvc.FailDeployment(app, deployment_id)
                        else:
                            logging.debug("_threadsafe_pull_callback: got successful result ({}; {}): {}".format(gitdeploy, r, state_comment))
                            datasvc.deploysvc.UpdateDeployment_Phase2(app, deployment_id, gitdeploy, [r], batch_number,
                                                                      state=state_comment,
                                                                      progress=66)
            else:  # simple result
                logging.debug("_threadsafe_pull_callback: got simple return instead of results ({}; {})".format(gitdeploy, r))
                datasvc.deploysvc.UpdateDeployment_Phase2(app, deployment_id, gitdeploy, [r], batch_number,
                                                          state="simple return: {}".format(results[r]['ret']))
    except:
        # record the traceback on the job object and fail the deployment; never re-raise
        exc_type, exc_obj, tb = sys.exc_info()
        datasvc.jobsvc.NewJobData({"_threadsafe_pull_callback EXCEPTION": traceback.format_exception(exc_type, exc_obj, tb)})
        datasvc.deploysvc.FailDeployment(app, deployment_id)
def _threadsafe_pull_gitdeploy(application, gitdeploy_struct, queue, settings, job_id, deployment_id, batch_number):
    '''
    Thread-safe way of performing a deployment SLS call for one specific gitdeploy on a group of servers
    gitdeploy_struct: { "gitdeploy_name": [ list_of_servers_to_deploy_to ] }

    Puts a per-gitdeploy result summary dict on queue when the SLS run
    completes. Returns True/False where an outcome is determinable, or None
    after an error has been recorded against the deployment.
    '''
    # Wrap in a big try/except so we can log any failures in phase2 progress and fail the deployment
    try:
        assert settings
        assert job_id
        assert gitdeploy_struct
        client, datasvc = regen_datasvc(settings, job_id)
        gd_name = gitdeploy_struct.keys()[0]
        servers = gitdeploy_struct[gd_name]
    except Exception:
        # we may not have a valid DataService yet, so only log locally
        exc_msg = str(sys.exc_info()[1]).split('\n')
        logging.error("************* _threadsafe_pull_gitdeploy: preamble: {} *********************".format(exc_msg))
        return
    try:
        assert application
        assert queue
        assert deployment_id
        assert all([elita.util.type_check.is_string(gd) for gd in gitdeploy_struct])
        assert all([elita.util.type_check.is_seq(gitdeploy_struct[gd]) for gd in gitdeploy_struct])
        assert isinstance(batch_number, int) and batch_number >= 0
        sc = salt_control.SaltController(datasvc)
        rc = salt_control.RemoteCommands(sc)
        assert len(gitdeploy_struct) == 1
        datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
                                                  progress=10,
                                                  state="Beginning deployment")
        #until salt Helium is released, we can only execute an SLS *file* as opposed to a single module call
        sls_map = {sc.get_gitdeploy_entry_name(application, gd_name): servers}
        if len(servers) == 0:
            datasvc.jobsvc.NewJobData({"DeployServers": {gd_name: "no servers"}})
            return True
        gd_doc = datasvc.gitsvc.GetGitDeploy(application, gd_name)
        branch = gd_doc['location']['default_branch']
        path = gd_doc['location']['path']
        #verify that we have salt connectivity to the target. Do three consecutive test.pings with 10 second timeouts
        #if all target servers don't respond by the last attempt, fail deployment
        i = 1
        while True:
            datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
                                                      progress=15,
                                                      state="Verifying salt connectivity (try: {})".format(i))
            res = rc.ping(servers)
            if all([s in res for s in servers]):
                logging.debug("_threadsafe_process_gitdeploy: verify salt: all servers returned (try: {})".format(i))
                break
            else:
                missing_servers = list(set(servers) - set(res.keys()))
                logging.debug("_threadsafe_process_gitdeploy: verify salt: error: servers missing: {} (try {})".format(missing_servers, i))
                if i >= 3:
                    # BUGFIX: the previous message had no format placeholder, so
                    # .format(i) silently dropped the attempt count
                    datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, missing_servers, batch_number,
                                                              progress=15,
                                                              state="ERROR: no salt connectivity! (try {})".format(i))
                    datasvc.deploysvc.FailDeployment(application, deployment_id)
                    logging.error("No salt connectivity to servers: {} (after {} tries)".format(missing_servers, i))
                    return False
            i += 1
        #delete stale git index lock if it exists
        datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
                                                  progress=25,
                                                  state="Removing git index lock if it exists")
        res = rc.rm_file_if_exists(servers, "{}/.git/index.lock".format(path))
        logging.debug("_threadsafe_process_gitdeploy: delete git index lock results: {}".format(str(res)))
        #clear uncommitted changes on targets
        datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
                                                  progress=33,
                                                  state="Clearing uncommitted changes")
        res = rc.discard_git_changes(servers, path)
        logging.debug("_threadsafe_process_gitdeploy: discard git changes result: {}".format(str(res)))
        res = rc.checkout_branch(servers, path, branch)
        logging.debug("_threadsafe_process_gitdeploy: git checkout result: {}".format(str(res)))
        datasvc.jobsvc.NewJobData({"DeployServers": {gd_name: "deploying", "servers": servers}})
        logging.debug("_threadsafe_pull_gitdeploy: sls_map: {}".format(sls_map))
        datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
                                                  progress=50,
                                                  state="Issuing state commands (git pull, etc)")
        # the callback streams per-server progress to the job object as results arrive
        res = rc.run_slses_async(_threadsafe_pull_callback, sls_map, args={'datasvc': datasvc, 'application': application,
                                                                           'deployment_id': deployment_id,
                                                                           'batch_number': batch_number,
                                                                           'gitdeploy': gd_name})
        logging.debug("_threadsafe_pull_gitdeploy: results: {}".format(res))
        # partition the raw salt results into per-host errors and successes
        errors = dict()
        successes = dict()
        for r in res:
            for host in r:
                for cmd in r[host]['ret']:
                    if "gitdeploy" in cmd:
                        if "result" in r[host]['ret'][cmd]:
                            if not r[host]['ret'][cmd]["result"]:
                                errors[host] = r[host]['ret'][cmd]["changes"] if "changes" in r[host]['ret'][cmd] else r[host]['ret'][cmd]
                            else:
                                if host not in successes:
                                    successes[host] = dict()
                                # salt state keys look like "module|state|command|subcommand"
                                module, state, command, subcommand = str(cmd).split('|')
                                if state not in successes[host]:
                                    successes[host][state] = dict()
                                successes[host][state][command] = {
                                    "stdout": r[host]['ret'][cmd]["changes"]["stdout"],
                                    "stderr": r[host]['ret'][cmd]["changes"]["stderr"],
                                    "retcode": r[host]['ret'][cmd]["changes"]["retcode"],
                                }
        if len(errors) > 0:
            for e in errors:
                datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, [e], batch_number,
                                                          state="ERROR: {}".format(errors[e]))
            logging.debug("_threadsafe_pull_gitdeploy: SLS error servers: {}".format(errors.keys()))
            logging.debug("_threadsafe_pull_gitdeploy: SLS error responses: {}".format(errors))
        if len(successes) > 0:
            datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, successes.keys(), batch_number,
                                                      progress=100, state="Complete")
        # BUGFIX: detect servers that returned no result at all (salt timeout).
        # The previous code computed responded-minus-expected, which is always
        # empty when only expected servers respond, so timeouts went undetected;
        # the correct direction is expected-minus-responded.
        responded = set([host for r in res for host in r])
        missing = list(set(servers) - responded)
        if missing:
            datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, missing, batch_number,
                                                      state="ERROR: no results (timed out waiting for salt?)")
            logging.debug("_threadsafe_pull_gitdeploy: error: empty results for: {}; possible salt timeout".format(missing))
            datasvc.jobsvc.NewJobData({"_threadsafe_pull_gitdeploy": "empty results for {}".format(missing)})
            datasvc.deploysvc.FailDeployment(application, deployment_id)
        deploy_results = {
            gd_name: {
                "raw_results": res,
                "errors": len(errors) > 0,
                "error_results": errors,
                "successes": len(successes) > 0,
                "success_results": successes
            }
        }
        # hand the summary back to DeployController via the shared queue
        queue.put_nowait(deploy_results)
        datasvc.jobsvc.NewJobData({
            "DeployServers": deploy_results
        })
        logging.debug("_threadsafe_pull_gitdeploy: finished ({})".format(gitdeploy_struct))
    except Exception:
        exc_type, exc_obj, tb = sys.exc_info()
        exc_msg = "ERROR: Exception in _threadsafe_pull_gitdeploy: {}".format(traceback.format_exception(exc_type, exc_obj, tb))
        datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
                                                  state=exc_msg)
        datasvc.deploysvc.FailDeployment(application, deployment_id)
class DeployController:
    '''
    Class that runs deploys. Only knows about server/gitdeploy pairs, so is used for both manual-style deployments
    and group/environment deployments.
    '''
    # Python 2 metaclass hook: LoggingMetaClass wraps every method with debug-level call logging
    __metaclass__ = elita.util.LoggingMetaClass

    def __init__(self, datasvc, deployment_id):
        self.deployment_id = deployment_id
        self.datasvc = datasvc

    def run(self, app_name, build_name, servers, gitdeploys, parallel=True, batch_number=0):
        '''
        1. Decompress build to gitdeploy dir and push
            a. Attempts to optimize by determining if build has already been decompressed to gitdeploy and skips if so
        2. Determine which gitdeploys have changes (if any)
            a. Build a mapping of gitdeploys_with_changes -> [ servers_to_deploy_it_to ]
            b. Perform the state calls only to the server/gitdeploy pairs that have changes

        @type app_name: str
        @type build_name: str
        @type servers: list(str)
        @type gitdeploys: list(str)

        Returns a (ok, results) tuple; results is None when failure occurs
        before phase-2 results were collected.
        '''
        assert app_name and build_name and servers and gitdeploys
        assert elita.util.type_check.is_string(app_name)
        assert elita.util.type_check.is_string(build_name)
        assert elita.util.type_check.is_seq(servers)
        assert elita.util.type_check.is_seq(gitdeploys)
        assert isinstance(batch_number, int) and batch_number >= 0
        build_doc = self.datasvc.buildsvc.GetBuild(app_name, build_name)
        gitdeploy_docs = {gd: self.datasvc.gitsvc.GetGitDeploy(app_name, gd) for gd in gitdeploys}
        # NOTE(review): this queue is never used -- the name is rebound to a
        # fresh Queue before phase 2 enqueues anything
        queue = billiard.Queue()
        procs = list()
        #we need to get a list of gitdeploys with unique gitrepos, so build a reverse mapping
        gitrepo_gitdeploy_mapping = {gitdeploy_docs[gd]['location']['gitrepo']['name']: gd for gd in gitdeploys}
        self.datasvc.deploysvc.StartDeployment_Phase(app_name, self.deployment_id, 1)
        # phase 1: decompress/commit/push each unique gitrepo (one child process per repo when parallel)
        for gr in gitrepo_gitdeploy_mapping:
            gd = gitrepo_gitdeploy_mapping[gr]
            gddoc = gitdeploy_docs[gd]
            if parallel:
                p = billiard.Process(target=_threadsafe_process_gitdeploy, name=gd,
                                     args=(gddoc, build_doc, self.datasvc.settings,
                                           self.datasvc.job_id, self.deployment_id))
                p.start()
                procs.append(p)
            else:
                _threadsafe_process_gitdeploy(gddoc, build_doc, self.datasvc.settings,
                                              self.datasvc.job_id, self.deployment_id)
        if parallel:
            # reap phase-1 children; any timeout or non-zero exit fails the deployment
            error = False
            for p in procs:
                p.join(150)
                if p.is_alive():
                    p.terminate()
                    logging.error("ERROR: _threadsafe_process_gitdeploy: timeout waiting for child process ({})!".
                                  format(p.name))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': 'timeout waiting for child process (process_gitdeploy: {}'.format(p.name)})
                    self.datasvc.deploysvc.UpdateDeployment_Phase1(app_name, self.deployment_id, p.name,
                                                                   step="ERROR: timed out waiting for child process")
                    error = True
                if p.exitcode < 0 or p.exitcode > 0:
                    msg = "process killed by signal {}!".format(abs(p.exitcode)) if p.exitcode < 0 \
                        else "process died with exit code {}".format(p.exitcode)
                    logging.error("_threadsafe_process_gitdeploy: {}".format(msg))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': '{} (process_gitdeploy: {}'.format(msg, p.name)})
                    self.datasvc.deploysvc.UpdateDeployment_Phase1(app_name, self.deployment_id, p.name,
                                                                   step="ERROR: {}".format(msg))
                    error = True
            if error:
                self.datasvc.deploysvc.FailDeployment(app_name, self.deployment_id)
                return False, None
        # phase 2: run the SLS pull on each gitdeploy's deployable servers
        servers_by_gitdeploy = {gd: determine_deployabe_servers(gitdeploy_docs[gd]['servers'], servers) for gd in gitdeploy_docs}
        queue = billiard.Queue()
        procs = list()
        self.datasvc.deploysvc.StartDeployment_Phase(app_name, self.deployment_id, 2)
        for gd in servers_by_gitdeploy:
            if parallel:
                p = billiard.Process(target=_threadsafe_pull_gitdeploy, name=gd,
                                     args=(app_name, {gd: servers_by_gitdeploy[gd]}, queue, self.datasvc.settings,
                                           self.datasvc.job_id, self.deployment_id, batch_number))
                p.start()
                procs.append(p)
            else:
                _threadsafe_pull_gitdeploy(app_name, {gd: servers_by_gitdeploy[gd]}, queue, self.datasvc.settings,
                                           self.datasvc.job_id, self.deployment_id, batch_number)
        # pull from queue prior to joining to avoid deadlock
        # NOTE(review): Queue.get's first positional parameter is 'block', not
        # 'timeout' -- get(150) blocks forever; get(timeout=150) was likely
        # intended. Also, in non-parallel mode procs is empty, so nothing is
        # pulled here and the 'if not results' check below always fails the
        # deployment -- confirm whether serial phase 2 is expected to work.
        results = list()
        i = 0
        while i < len(procs):
            results.append(queue.get(150))
            i += 1
        if parallel:
            # reap phase-2 children; mirror of the phase-1 error handling above
            error = False
            for p in procs:
                p.join(150)
                if p.is_alive():
                    p.terminate()
                    logging.error("_threadsafe_pull_gitdeploy: timeout waiting for child process ({})!".
                                  format(p.name))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': 'timeout waiting for child process (pull_gitdeploy: {}'.format(p.name)})
                    self.datasvc.deploysvc.UpdateDeployment_Phase2(app_name, self.deployment_id, p.name,
                                                                   servers_by_gitdeploy[p.name], batch_number,
                                                                   state="ERROR: timeout waiting for child process")
                    error = True
                if p.exitcode < 0 or p.exitcode > 0:
                    msg = "process killed by signal {}!".format(abs(p.exitcode)) if p.exitcode < 0 \
                        else "process died with exit code {}".format(p.exitcode)
                    logging.error("_threadsafe_pull_gitdeploy: {}".format(msg))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': '{} (pull_gitdeploy: {}'.format(msg, p.name)})
                    self.datasvc.deploysvc.UpdateDeployment_Phase2(app_name, self.deployment_id, p.name,
                                                                   servers_by_gitdeploy[p.name], batch_number,
                                                                   state="ERROR: {}".format(msg))
                    error = True
            if error:
                self.datasvc.deploysvc.FailDeployment(app_name, self.deployment_id)
                return False, None
        if not results:
            return False, results
        # any per-gitdeploy SLS error fails the whole run
        for r in results:
            for gd in r:
                if r[gd]['errors']:
                    return False, results
        #update deployed_build
        for gd in gitdeploys:
            gdm = gitservice.GitDeployManager(gitdeploy_docs[gd], self.datasvc)
            gdm.update_last_deployed(build_name)
        return True, results
| {
"repo_name": "bkeroack/elita",
"path": "elita/deployment/deploy.py",
"copies": "1",
"size": "43499",
"license": "apache-2.0",
"hash": 4793380084265116000,
"line_mean": 51.9184914842,
"line_max": 179,
"alpha_frac": 0.5557139245,
"autogenerated": false,
"ratio": 4.2554294658579535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006227843683667623,
"num_lines": 822
} |
__author__ = 'bk'
from typecheck import typecheck
import typecheck as tc
import collections.abc as abc
import collections
@typecheck
def flatten(list_obj: abc.MutableSequence):
    '''
    Iterate through an n-dimensional list, yielding leaf items depth-first.

    Strings count as leaves (they are not iterated character-by-character).
    '''
    for item in list_obj:
        # BUGFIX: collections.Iterable (an alias for collections.abc.Iterable)
        # was removed in Python 3.10; use the collections.abc import directly.
        if isinstance(item, abc.Iterable) and not isinstance(item, str):
            yield from flatten(item)
        else:
            yield item
@typecheck
def flatten_list(list_obj: abc.MutableSequence):
    '''
    Return a new flat (1-D) list containing the leaves of an n-dimensional list.
    '''
    return list(flatten(list_obj))
@typecheck
def merge_sort(list_obj: list) -> tc.optional(list):
    '''
    Standard recursive merge sort. Sorts in place, mutating the input
    argument, and returns None (matching the list.sort() convention).

    BUGFIX: removed the leftover "Splitting"/"Merging" debug print statements,
    which polluted stdout on every (recursive) call in library code.
    '''
    if len(list_obj) > 1:
        mid = len(list_obj)//2
        left = list_obj[:mid]
        right = list_obj[mid:]
        merge_sort(left)
        merge_sort(right)
        # merge the two sorted halves back into list_obj
        i, j, k = 0, 0, 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                list_obj[k] = left[i]
                i += 1
            else:
                list_obj[k] = right[j]
                j += 1
            k += 1
        # copy whichever half still has a tail remaining
        while i < len(left):
            list_obj[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            list_obj[k] = right[j]
            j += 1
            k += 1
@typecheck
def quick_sort(list_obj: list) -> list:
    '''
    Return a new sorted list (the input is not modified).
    The first element is used as the pivot.
    '''
    if not list_obj:
        return list_obj
    pivot, rest = list_obj[0], list_obj[1:]
    lesser = [x for x in rest if x < pivot]
    greater = [x for x in rest if x >= pivot]
    return quick_sort(lesser) + [pivot] + quick_sort(greater)
| {
"repo_name": "bkeroack/py-extlib",
"path": "extlib/seq_tools/__init__.py",
"copies": "1",
"size": "1796",
"license": "apache-2.0",
"hash": 4003092329899896000,
"line_mean": 23.2702702703,
"line_max": 80,
"alpha_frac": 0.5189309577,
"autogenerated": false,
"ratio": 3.5776892430278884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45966202007278884,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bk'
from typecheck import typecheck
import typecheck as tc
import collections.abc as abc
@typecheck
def filter_dict_keys(obj: abc.MutableMapping, char: str, rep: str):
    '''
    Recursively replaces char in nested dict keys with rep (for sanitizing input to mongo, for example)

    Mutates obj in place; returns None.
    '''
    # BUGFIX: iterate over a snapshot of the keys. The loop body adds and
    # deletes keys, and mutating a dict while iterating its live view raises
    # RuntimeError on Python 3.
    for k in list(obj):
        value = obj[k]
        if isinstance(value, dict):
            filter_dict_keys(value, char, rep)
        elif isinstance(value, list):
            # Recurse into dicts nested inside list values. (The previous code
            # tested isinstance(k, list), which can never be true for a dict
            # key -- lists are unhashable -- so that branch was dead code;
            # checking the value matches the documented intent.)
            for item in value:
                if isinstance(item, dict):
                    filter_dict_keys(item, char, rep)
        if char in k:  # assumes keys are strings -- TODO confirm for non-str keys
            obj[k.replace(char, rep)] = obj[k]
            del obj[k]
@typecheck
def insert_node(obj: abc.MutableMapping, path: abc.Sequence, value):
    '''
    Walk path through the nested dict obj (creating intermediate dicts as
    needed) and assign value at the final path element. Mutates obj in place.
    '''
    node = obj
    for key in path[:-1]:
        node = node.setdefault(key, {})
    node[path[-1]] = value
@typecheck
def get_paths(obj: abc.MutableMapping, path: tc.optional(abc.MutableSequence)=None):
    '''
    Given an arbitrarily-nested dict-like object, generate a list of unique
    tree path tuples. The last element of each tuple is the leaf value for
    that path.
    '''
    prefix = list(path) if path else list()
    unique_paths = list()
    for key, value in obj.items():
        if isinstance(value, abc.Mapping):
            unique_paths.extend(get_paths(value, path=prefix + [key]))
        else:
            unique_paths.append(tuple(prefix + [key, value]))
    return unique_paths
@typecheck
def bfs(tree: dict, start: str, end: str) -> tc.optional(tc.seq_of(str)):
    '''
    Breadth-first search.
    Requires tree in adjacency list representation. Assumes no cycles
    (a cycle would make the search loop forever).

    Returns the first path found from start to end as a list of node names,
    or None if no path exists.

    BUGFIX: removed the leftover debug print statements that dumped the queue
    and path on every iteration of library code.
    '''
    q = [[start]]
    while q:
        # NB: list.pop(0) is O(n); acceptable for small graphs
        p = q.pop(0)
        node = p[-1]
        if node == end:
            return p
        for adj in tree.get(node, []):
            np = list(p)
            np.append(adj)
            q.append(np)
@typecheck
def dfs(tree: dict, start: str, end: str) -> tc.optional(tc.seq_of(str)):
    '''
    Similar to bfs above except depth-first search (the frontier is used as a
    stack instead of a queue). Returns the first path found from start to end,
    or None if no path exists. Assumes no cycles.

    BUGFIX: removed the leftover debug print statements that dumped the stack
    and path on every iteration of library code.
    '''
    s = [[start]]
    while s:
        p = s.pop()
        node = p[-1]
        if node == end:
            return p
        for adj in tree.get(node, []):
            np = list(p)
            np.append(adj)
            s.append(np)
@typecheck
def adjacency_list(tree: dict) -> dict:
    '''
    Given a nested dictionary, return its nodes in adjacency-list format:
    each node maps to the list of its child keys (or its leaf value).
    '''
    edges = {}

    def _extend(node, adjacent):
        # append to an existing adjacency list, or create one
        edges[node] = edges[node] + adjacent if node in edges else adjacent

    for key, value in tree.items():
        if isinstance(value, dict):
            _extend(key, [k for k in value.keys()])
            # fold in the edges contributed by the nested sub-tree
            for nested_node, nested_adj in adjacency_list(value).items():
                _extend(nested_node, nested_adj)
        else:
            _extend(key, [value])
    return edges
"repo_name": "bkeroack/py-extlib",
"path": "extlib/tree_tools/__init__.py",
"copies": "1",
"size": "3312",
"license": "apache-2.0",
"hash": -1787854337991367700,
"line_mean": 30.8557692308,
"line_max": 103,
"alpha_frac": 0.5495169082,
"autogenerated": false,
"ratio": 3.576673866090713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9597925374586431,
"avg_score": 0.005653079940856281,
"num_lines": 104
} |
__author__ = 'bk'
from typecheck import typecheck
import typecheck as tc
import zipfile
import os
import logging
@typecheck
def zipfolder(path: str, zipname: str, subpath: tc.optional(str)=""):
    '''
    Create a zip archive zipname from the directory tree rooted at path.

    Every archive entry is prefixed with subpath when supplied. Empty
    directories are preserved as explicit directory entries (zipfile would
    otherwise drop them). An existing zipname is deleted first.
    '''
    assert os.path.isdir(path)  # NOTE: stripped under -O; callers see AssertionError
    if os.path.exists(zipname):  # delete if exists
        logging.warning("zipfolder: warning: zipfile exists, deleting")
        os.remove(zipname)
    root_len = len(os.path.abspath(path))
    zf = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
    for dirpath, dirs, files in os.walk(path):
        # Path of the current directory relative to the archive root.
        archive_root = os.path.abspath(dirpath)[root_len+1:]
        # Write explicit entries for empty subdirectories. (The old code
        # accumulated them in a list that was reset each iteration and
        # shadowed the builtin 'dir'; a direct per-directory pass is the same.)
        for sub in dirs:
            if not os.listdir(os.path.join(dirpath, sub)):
                zif = zipfile.ZipInfo(os.path.join(subpath, archive_root, sub) + "/")
                zf.writestr(zif, "")
        for f in files:
            archive_name = os.path.join(subpath, archive_root, f)
            zf.write(os.path.join(dirpath, f), archive_name)
zf.close() | {
"repo_name": "bkeroack/py-extlib",
"path": "extlib/zip/__init__.py",
"copies": "1",
"size": "1141",
"license": "apache-2.0",
"hash": 1253618606030430500,
"line_mean": 35.8387096774,
"line_max": 96,
"alpha_frac": 0.6354075372,
"autogenerated": false,
"ratio": 3.4264264264264264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45618339636264266,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bkovacev'
import os
import json
import urllib
from uuid import uuid1
from collections import OrderedDict
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from handlers import user_required, BaseHandler
from models import *
'''
Dashboard Handler
'''
class Dashboard(BaseHandler):
    """Render the dashboard with the user's notes, tasks and documents."""

    @user_required
    def get(self):
        current_uuid = self.user_info.get('uuid')
        current_user = User.query(User.uuid == current_uuid).get()
        template_values = {
            'user': current_user,
            'notes': Note.query(Note.user_uuid == current_uuid).fetch(),
            'tasks': Task.query(Task.user_uuid == current_uuid).fetch(),
            'documents': Document.query(Document.user_uuid == current_uuid).fetch(),
        }
        tpl = os.path.join(os.path.dirname(__file__), 'templates/dashboard.html')
        self.response.write(template.render(tpl, template_values))
'''
Drive - Upload File, Download File, Delete File, List File
'''
class Upload(blobstore_handlers.BlobstoreUploadHandler, BaseHandler):
    """Serve the upload form (GET) and persist an uploaded blob as a Document (POST)."""

    @user_required
    def get(self):
        user_info = self.user_info
        uuid = user_info.get('uuid')
        user = User.query(User.uuid == uuid).get()
        upload_url = blobstore.create_upload_url('/upload')
        template_values = {
            'user': user,
            'upload_url': upload_url,
        }
        path = os.path.join(os.path.dirname(__file__), 'templates/upload.html')
        self.response.write(template.render(path, template_values))

    @user_required
    def post(self):
        user_info = self.user_info
        uuid = user_info.get('uuid')
        user_id = user_info.get('user_id')
        document = Document()
        document.user_uuid = uuid
        document.user = ndb.Key(User, user_id)
        document.name = self.request.get('name')
        upload_files = self.get_uploads('file')
        blob_info = upload_files[0]
        document.blob = blob_info.key()
        # BUG FIX: the old Python 2 integer division reported 0 MB for any
        # file smaller than 1 MiB; use float division and round for display.
        size = blob_info.size / 1048576.0
        document.size = str(round(size, 2))
        # NOTE(review): this stores the whole filename, not just the
        # extension -- confirm the templates expect that.
        document.extension = blob_info.filename
        time.sleep(2)  # crude wait for datastore eventual consistency
        document.put()
        self.redirect("/drive")
class DeleteDocument(blobstore_handlers.BlobstoreDownloadHandler, BaseHandler):
    """Delete a blob and its Document record, then return to the drive page."""

    @user_required
    def get(self, key):
        blob_key = blobstore.BlobKey(key)
        doc = Document.query(Document.blob == blob_key).get()
        blobstore.delete(key)
        doc.key.delete()
        self.redirect('/drive')
class DownloadDocument(blobstore_handlers.BlobstoreDownloadHandler, BaseHandler):
    """Stream a stored blob back to the browser as a file download."""

    @user_required
    def get(self, key):
        info = blobstore.BlobInfo.get(key)
        self.send_blob(info, save_as=info.filename, content_type=info.content_type)
class Drive(BaseHandler):
    """List the user's documents plus a JSON summary for the client side."""

    @user_required
    def get(self):
        uuid = self.user_info.get('uuid')
        user = User.query(User.uuid == uuid).get()
        docs = Document.query(Document.user_uuid == user.uuid).fetch()
        # Ordered dicts keep the name/ext/size column order stable in the JSON.
        summaries = [
            OrderedDict([('name', str(d.name)),
                         ('ext', str(d.extension)),
                         ('size', str(d.size))])
            for d in docs
        ]
        template_values = {
            'user': user,
            'documents': docs,
            'files': json.dumps(summaries),
        }
        tpl = os.path.join(os.path.dirname(__file__), 'templates/drive.html')
        self.response.write(template.render(tpl, template_values))
'''
Settings - User Profile, Notifications
'''
class ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serve a raw blob addressed by its (url-quoted) blob key."""

    def get(self, resource):
        blob_key = str(urllib.unquote(resource))
        self.send_blob(blobstore.BlobInfo.get(blob_key))
class Settings(blobstore_handlers.BlobstoreUploadHandler, BaseHandler):
    """User settings page: shows recent activity (GET) and saves profile edits (POST)."""

    # One-to-one mapping between form field names and User properties,
    # in the same order the original code read them.
    _PROFILE_FIELDS = ('name', 'email', 'personal_email', 'phone_number',
                       'facebook_handle', 'skype_handle', 'github_handle',
                       'linkedin_handle', 'google_handle', 'twitter_handle',
                       'interests', 'job', 'sports', 'type', 'positions')

    @user_required
    def get(self):
        uuid = self.user_info.get('uuid')
        upload_url = blobstore.create_upload_url('/settings')
        user = User.query(User.uuid == uuid).get()
        recent = Activity.query(Activity.user == user.key).order(-Activity.date).fetch(10)
        ctx = {'user': user, 'activities': recent, 'upload_url': upload_url}
        tpl = os.path.join(os.path.dirname(__file__), 'templates/settings.html')
        self.response.write(template.render(tpl, ctx))

    @user_required
    def post(self):
        uuid = self.user_info.get('uuid')
        user = User.query(User.uuid == uuid).get()
        uploads = self.get_uploads('file')
        if uploads:  # a new avatar is optional
            user.avatar = uploads[0].key()
        for field in self._PROFILE_FIELDS:
            setattr(user, field, self.request.get(field))
        user.put()
        self.redirect('/settings')
'''
Messaging
'''
class Inbox(BaseHandler):
    """Render the message inbox page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/inbox.html')
        self.response.write(template.render(tpl, {'user': current}))
class SendMessage(BaseHandler):
    """Compose (GET) and deliver (POST) a message to the user with user_uuid."""

    @user_required
    def get(self, user_uuid):
        uuid = self.user_info.get('uuid')
        user_one = User.query(User.uuid == uuid).get()
        user_two = User.query(User.uuid == user_uuid).get()
        template_values = {
            'user': user_one,
            'user_two': user_two,
        }
        path = os.path.join(os.path.dirname(__file__), 'templates/send.html')
        self.response.write(template.render(path, template_values))

    @user_required
    def post(self, user_uuid):
        uuid = self.user_info.get('uuid')
        user_one = User.query(User.uuid == uuid).get()
        user_two = User.query(User.uuid == user_uuid).get()
        # BUG FIX: the old filter joined the two conditions with Python's
        # 'and', which evaluates to only the second FilterNode -- the
        # user_one condition was never applied. ndb ANDs filters passed
        # as separate query() arguments.
        real_conversation = Conversation.query(
            Conversation.users == user_one.key,
            Conversation.users == user_two.key).get()
        if real_conversation:
            # Existing thread: append the new message to it.
            message = Message()
            message.sender = user_one.key
            message.conversation_key = real_conversation.key
            message.message = self.request.get('message')
            message.uuid = uuid1().get_hex()
            message_key = message.put()
            real_conversation.messages.append(ndb.Key(Message, message_key.id()))
            real_conversation.put()
            self.redirect('/inbox')
        else:
            # First contact: create the conversation, then the message.
            conversation = Conversation()
            conversation.users.append(user_one.key)
            conversation.users.append(user_two.key)
            conversation.uuid = uuid1().get_hex()
            conversation_key = conversation.put()
            time.sleep(1)  # crude wait for datastore eventual consistency
            message = Message()
            message.sender = user_one.key
            message.conversation_key = ndb.Key(Conversation, conversation_key.id())
            message.message = self.request.get('message')
            message.uuid = uuid1().get_hex()
            message_key = message.put()
            time.sleep(1)
            conversation = Conversation.query(Conversation.key == conversation_key).get()
            conversation.messages.append(ndb.Key(Message, message_key.id()))
            conversation.put()
            self.redirect('/inbox')
class UserConversation(BaseHandler):
    """Show the conversation between the signed-in user and user_uuid,
    marking the other party's messages as read."""

    @user_required
    def get(self, user_uuid):
        uuid = self.user_info.get('uuid')
        user_one = User.query(User.uuid == uuid).get()
        serve_me_1 = get_avatar_from_uuid(uuid)
        serve_me_2 = get_avatar_from_uuid(user_uuid)
        user_two = User.query(User.uuid == user_uuid).get()
        # BUG FIX: Python 'and' between two ndb FilterNodes keeps only the
        # second one; pass both filters to query() so both must match.
        conversation = Conversation.query(
            Conversation.users == user_one.key,
            Conversation.users == user_two.key).get()
        if conversation is not None:
            messages = Message.query(Message.conversation_key == conversation.key).order(Message.time).fetch()
            # Mark everything the other user sent as read.
            sender = ndb.Key(User, user_two.key.id())
            for message in messages:
                if message.sender == sender:
                    message.read = True
                    message.put()
        else:
            messages = None
        # Build the context once and add the avatar URLs only when present
        # (the old code duplicated this dict across four branches).
        template_values = {
            'user': user_one,
            'user_one_key': user_one.key,
            'user_one': user_one,
            'user_two_key': user_two.key,
            'user_two': user_two,
            'messages': messages,
        }
        if serve_me_1 is not None:
            template_values['serve_me_1'] = serve_me_1
        if serve_me_2 is not None:
            template_values['serve_me_2'] = serve_me_2
        path = os.path.join(os.path.dirname(__file__), 'templates/conversation.html')
        self.response.write(template.render(path, template_values))
'''
Student Life
'''
class StudentLife(BaseHandler):
    """Render the student-life page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/studentlife.html')
        self.response.write(template.render(tpl, {'user': current}))
'''
Organizer
'''
class Organizer(BaseHandler):
    """Render the organizer overview: notes, tasks and documents."""

    @user_required
    def get(self):
        uuid = self.user_info.get('uuid')
        ctx = {
            'user': User.query(User.uuid == uuid).get(),
            'documents': Document.query(Document.user_uuid == uuid).fetch(),
            'tasks': Task.query(Task.user_uuid == uuid).fetch(),
            'notes': Note.query(Note.user_uuid == uuid).fetch(),
        }
        tpl = os.path.join(os.path.dirname(__file__), 'templates/organizer.html')
        self.response.write(template.render(tpl, ctx))
class Tasks(BaseHandler):
    """List the user's tasks."""

    @user_required
    def get(self):
        uuid = self.user_info.get('uuid')
        viewer = User.query(User.uuid == uuid).get()
        ctx = {'user': viewer, 'tasks': Task.query(Task.user_uuid == uuid).fetch()}
        tpl = os.path.join(os.path.dirname(__file__), 'templates/tasks.html')
        self.response.write(template.render(tpl, ctx))
class AddTask(BaseHandler):
    """Create a Task from form data and return to the task list."""

    @user_required
    def post(self):
        uuid = self.user_info.get('uuid')
        owner = User.query(User.uuid == uuid).get()
        task = Task(user_uuid=uuid,
                    uuid=uuid1().get_hex(),
                    user=ndb.Key(User, owner.key.id()),
                    priority=self.request.get('priority'),
                    text=self.request.get('description'))
        task.put()
        self.redirect('/tasks')
class EditTask(BaseHandler):
    """Mark the task identified by uuid as completed (via GET)."""

    @user_required
    def get(self, uuid):
        completed = Task.query(Task.uuid == uuid).get()
        completed.status = 'Completed'
        completed.put()
        self.redirect('/tasks')
class RemoveTask(BaseHandler):
    """Delete the task identified by uuid and return to the task list."""

    @user_required
    def get(self, uuid):
        doomed = Task.query(Task.uuid == uuid).get()
        doomed.key.delete()
        self.redirect('/tasks')
class Notes(BaseHandler):
    """List the user's notes."""

    @user_required
    def get(self):
        uuid = self.user_info.get('uuid')
        viewer = User.query(User.uuid == uuid).get()
        ctx = {'user': viewer, 'notes': Note.query(Note.user_uuid == uuid).fetch()}
        tpl = os.path.join(os.path.dirname(__file__), 'templates/notes.html')
        self.response.write(template.render(tpl, ctx))
class AddNote(BaseHandler):
    """Show the note-creation form (GET) and persist a new Note (POST)."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/addnote.html')
        self.response.write(template.render(tpl, {'user': current}))

    @user_required
    def post(self):
        uuid = self.user_info.get('uuid')
        author = User.query(User.uuid == uuid).get()
        note = Note(user_uuid=uuid,
                    uuid=uuid1().get_hex(),
                    user=ndb.Key(User, author.key.id()),
                    name=self.request.get('name'),
                    text=self.request.get('description'))
        note.put()
        self.redirect('/notes')
class EditNote(BaseHandler):
    """Render the edit form for the note identified by uuid."""

    @user_required
    def get(self, uuid):
        viewer = User.query(User.uuid == self.user_info.get('uuid')).get()
        note = Note.query(Note.uuid == uuid).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/editnote.html')
        self.response.write(template.render(tpl, {'user': viewer, 'note': note}))
class RemoveNote(BaseHandler):
    """Delete the note identified by uuid and return to the note list."""

    @user_required
    def get(self, uuid):
        doomed = Note.query(Note.uuid == uuid).get()
        doomed.key.delete()
        self.redirect('/notes')
class Reminders(BaseHandler):
    """Render the reminders page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/reminders.html')
        self.response.write(template.render(tpl, {'user': current}))
class AddReminder(BaseHandler):
    """Create a Reminder from form data and return to the reminders page.

    NOTE(review): this class is re-defined further down in this module; the
    later stub shadows this handler at import time -- confirm which one the
    URL routes bind to.
    """
    @user_required
    def post(self):
        user = self.user_info
        uuid = user.get('uuid')
        real_user = User.query(User.uuid == uuid).get()
        reminder = Reminder()
        reminder.user_uuid = uuid
        reminder.uuid = uuid1().get_hex()
        reminder.user = ndb.Key(User, real_user.key.id())
        reminder.name = self.request.get('name')
        reminder.date = self.request.get('date')
        reminder.put()
        self.redirect('/reminders')
class RemoveReminder(BaseHandler):
    """Delete the reminder identified by uuid.

    NOTE(review): this class is re-defined further down in this module; the
    later stub shadows this handler at import time -- confirm routing.
    """
    @user_required
    def get(self, uuid):
        reminder = Reminder.query(Reminder.uuid == uuid).get()
        reminder.key.delete()
        self.redirect('/reminders')
class AddReminder(BaseHandler):
    """Placeholder handler.

    NOTE(review): duplicate definition -- it shadows the functional
    AddReminder above and only writes placeholder text.
    """
    @user_required
    def post(self, uuid):
        self.response.write("kurcina")
class RemoveReminder(BaseHandler):
    """Placeholder handler.

    NOTE(review): duplicate definition -- it shadows the functional
    RemoveReminder above and only writes placeholder text.
    """
    @user_required
    def post(self, uuid):
        self.response.write("kurcina")
'''
Student Directory
'''
class Directory(BaseHandler):
    """Render the student directory listing every registered user."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        ctx = {'user': current, 'users': User.query().fetch()}
        tpl = os.path.join(os.path.dirname(__file__), 'templates/directory.html')
        self.response.write(template.render(tpl, ctx))
class Profile(BaseHandler):
    """Public-profile page; currently a placeholder that writes debug text."""
    def get(self, uuid):
        self.response.write("kurcina")
'''
Finances
'''
class Finance(BaseHandler):
    """Render the finances page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        ctx = {'user': current, 'users': User.query().fetch()}
        tpl = os.path.join(os.path.dirname(__file__), 'templates/finances.html')
        self.response.write(template.render(tpl, ctx))
'''
Academics
'''
class Academics(BaseHandler):
    """Render the academics page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/academics.html')
        self.response.write(template.render(tpl, {'user': current}))
class Calculator(BaseHandler):
    """Render the GPA/grade calculator page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/calculator.html')
        self.response.write(template.render(tpl, {'user': current}))
class Calendar(BaseHandler):
    """Render the calendar page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/calendar.html')
        self.response.write(template.render(tpl, {'user': current}))
class Schedule(BaseHandler):
    """Render the schedule page."""

    @user_required
    def get(self):
        current = User.query(User.uuid == self.user_info.get('uuid')).get()
        tpl = os.path.join(os.path.dirname(__file__), 'templates/schedule.html')
        self.response.write(template.render(tpl, {'user': current}))
| {
"repo_name": "bkovacev/gae-student-portal",
"path": "views.py",
"copies": "1",
"size": "20149",
"license": "mit",
"hash": 1683300293784673300,
"line_mean": 30.8309636651,
"line_max": 112,
"alpha_frac": 0.5783909871,
"autogenerated": false,
"ratio": 3.7086324314375116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9777043286700676,
"avg_score": 0.001996026367367012,
"num_lines": 633
} |
__author__ = 'bkovacev'
import time
import webapp2_extras.appengine.auth.models
from google.appengine.ext import ndb
from webapp2_extras import security
from google.appengine.api import images
class Activity(ndb.Model):
    """
    Audit-log entry recording a create/delete action on one of a user's entities.
    #TODO Remodel.
    """
    user = ndb.KeyProperty(kind='User', required=True)  # who performed the action
    type_key = ndb.KeyProperty(required=True)           # key of the affected entity
    type = ndb.StringProperty(required=True)            # entity kind name, e.g. "Note"
    date = ndb.DateTimeProperty(required=True, auto_now=True)
    action = ndb.StringProperty(choices=('created', 'deleted'))
    name = ndb.StringProperty()                         # display name of the entity
class User(webapp2_extras.appengine.auth.models.User):
    """
    Application user; extends the webapp2 auth user with profile fields and
    messaging helpers. Debug 'print' statements were removed from the
    message helpers.
    #TODO refactor User Class and add another UserInfo model to hold unimportant info.
    """
    uuid = ndb.StringProperty(required=True)
    user_name = ndb.StringProperty(required=True)
    type = ndb.StringProperty(choices=('Admin', 'Student', 'Caff', 'RD', 'Employee'), default='Student')
    name = ndb.StringProperty(required=True)
    email = ndb.StringProperty(required=True)
    personal_email = ndb.StringProperty(required=True)
    conversations = ndb.KeyProperty(repeated=True)
    phone_number = ndb.StringProperty()
    twitter_handle = ndb.StringProperty()
    facebook_handle = ndb.StringProperty()
    skype_handle = ndb.StringProperty()
    linkedin_handle = ndb.StringProperty()
    github_handle = ndb.StringProperty()
    google_handle = ndb.StringProperty()
    avatar = ndb.BlobKeyProperty()
    interests = ndb.TextProperty()
    sports = ndb.StringProperty()
    positions = ndb.StringProperty()
    job = ndb.StringProperty()

    def get_recent_activities(self):
        """Return this user's three most recent Activity entries."""
        activities = Activity.query(Activity.user == self.key).order(-Activity.date).fetch(3)
        return activities

    def get_recent_messages(self):
        """Summarise the latest message of the three most recent conversations.

        Returns a list of (name, text, uuid, time, avatar, read) tuples.
        """
        conversations = Conversation.query(Conversation.users == self.key).order(Conversation.time).fetch(3)
        from_users = []
        from_users_uuid = []
        conversation_messages = []
        message_times = []
        message_read = []
        avatars = []
        for conversation in conversations:
            # Drop ourselves so the remaining key is the other participant.
            conversation.users.remove(self.key)
            other_key = conversation.users[0]
            from_user = User.query(User.key == other_key).get()
            from_users.append(from_user.name)
            from_users_uuid.append(from_user.uuid)
            avatars.append(get_avatar_from_uuid(from_user.uuid))
            last_message = Message.query(Message.key == conversation.messages[-1]).get()
            message_read.append(last_message.read)
            conversation_messages.append(last_message.message)
            message_times.append(last_message.time)
        totals = zip(from_users, conversation_messages, from_users_uuid, message_times, avatars, message_read)
        return totals

    def get_all_messages(self):
        """Summarise the latest message of every conversation this user is in.

        Returns a list of (conv_time, name, text, uuid, msg_time) tuples.
        """
        conversations = Conversation.query(Conversation.users == self.key).order(Conversation.time).fetch()
        from_users = []
        from_users_uuid = []
        conversation_messages = []
        message_times = []
        conversation_times = []
        for conversation in conversations:
            other_key = conversation.users[0]
            from_user = User.query(User.key == other_key).get()
            from_users.append(from_user.name)
            from_users_uuid.append(from_user.uuid)
            conversation_times.append(conversation.time)
            last_message = Message.query(Message.key == conversation.messages[-1]).get()
            conversation_messages.append(last_message.message)
            message_times.append(last_message.time)
        total = zip(conversation_times, from_users, conversation_messages, from_users_uuid, message_times)
        return total

    def get_avatar(self):
        """Return a serving URL for this user's avatar blob, or None."""
        if self.avatar:
            serve_me = images.get_serving_url(self.avatar, size=None, crop=False, secure_url=None)
            return serve_me
        else:
            return None

    def split_name(self):
        """Return the user's first name (first whitespace-separated token)."""
        full_name = str(self.name)
        first_name = full_name.split()
        return first_name[0]

    def set_password(self, raw_password):
        """Sets the password for the current user
        :param raw_password:
            The raw password which will be hashed and stored
        """
        self.password = security.generate_password_hash(raw_password, length=12)

    @classmethod
    def get_by_auth_token(cls, user_id, token, subject='auth'):
        """Returns a user object based on a user ID and token.
        :param user_id:
            The user_id of the requesting user.
        :param token:
            The token string to be verified.
        :returns:
            A tuple ``(User, timestamp)``, with a user object and
            the token timestamp, or ``(None, None)`` if both were not found.
        """
        token_key = cls.token_model.get_key(user_id, subject, token)
        user_key = ndb.Key(cls, user_id)
        # Use get_multi() to save a RPC call.
        valid_token, user = ndb.get_multi([token_key, user_key])
        if valid_token and user:
            timestamp = int(time.mktime(valid_token.created.timetuple()))
            return user, timestamp
        return None, None
'''
Models related to students - Messages, Tasks, Files (Documents), Overnight Permissions.
'''
class Conversation(ndb.Model):
    """A two-party message thread."""
    users = ndb.KeyProperty(kind='User', repeated=True)        # both participants
    time = ndb.DateTimeProperty(auto_now_add=True)             # creation time
    uuid = ndb.StringProperty(required=True)
    messages = ndb.KeyProperty(kind='Message', repeated=True)  # message keys, in send order
class Message(ndb.Model):
    """A single message inside a Conversation."""
    sender = ndb.KeyProperty(required=True)  # User key of the author
    uuid = ndb.StringProperty(required=True)
    message = ndb.TextProperty(required=True)
    time = ndb.DateTimeProperty(required=True, auto_now_add=True)
    conversation_key = ndb.KeyProperty(required=True, kind=Conversation)
    read = ndb.BooleanProperty(default=False)  # set True when the recipient views it
class Note(ndb.Model):
    """A user note; put/delete hooks mirror changes into the Activity log."""
    name = ndb.StringProperty(required=True)
    uuid = ndb.StringProperty(required=True)
    date = ndb.DateTimeProperty(auto_now=True)
    text = ndb.TextProperty(required=True)
    user = ndb.KeyProperty(required=True)            # owner's User key
    user_uuid = ndb.StringProperty(required=True)

    def _post_put_hook(self, future):
        """Record a 'created' Activity after every put."""
        activity = Activity()
        user_uuid = self.user_uuid
        user = User.query(User.uuid == user_uuid).get()
        activity.user = ndb.Key(User, user.key.id())
        model_id = future.get_result()
        new_model_id = model_id.id()
        activity.action = 'created'
        activity.type_key = ndb.Key(Note, new_model_id)
        activity.type = "Note"
        activity.put()

    @classmethod
    def _pre_delete_hook(cls, key):
        """Record a 'deleted' Activity before the note is removed."""
        activity = Activity()
        model_key = key.id()
        note = Note.get_by_id(model_key)
        # BUG FIX: note.user already IS an ndb.Key; the old code called
        # note.user.key.id(), which raises AttributeError (compare the
        # equivalent Task hook, which uses task.user.id()).
        user_key = note.user.id()
        activity.user = ndb.Key(User, user_key)
        activity.type_key = ndb.Key(Note, model_key)
        activity.type = 'Note'
        activity.name = note.name
        activity.action = 'deleted'
        activity.put()
class Task(ndb.Model):
    """A to-do item; put/delete hooks mirror changes into the Activity log."""
    uuid = ndb.StringProperty(required=True)
    text = ndb.StringProperty(required=True)
    priority = ndb.StringProperty(choices=('Important', 'Low', 'Normal'))
    date = ndb.DateTimeProperty(auto_now=True)
    user = ndb.KeyProperty(kind=User)                # owner's User key
    user_uuid = ndb.StringProperty(required=True)
    status = ndb.StringProperty(choices=('Active', 'Completed'), default='Active')
    def _post_put_hook(self, future):
        # Record a 'created' Activity after every put.
        activity = Activity()
        user_uuid = self.user_uuid
        user = User.query(User.uuid == user_uuid).get()
        activity.user = ndb.Key(User, user.key.id())
        task_id = future.get_result()
        new_task_id = task_id.id()
        activity.action = 'created'
        activity.type_key = ndb.Key(Task, new_task_id)
        activity.type = "Task"
        activity.put()
    @classmethod
    def _pre_delete_hook(cls, key):
        # Record a 'deleted' Activity before the task is removed.
        activity = Activity()
        model_key = key.id()
        task = Task.get_by_id(model_key)
        user_key = task.user.id()
        activity.user = ndb.Key(User, user_key)
        activity.type_key = ndb.Key(Task, model_key)
        activity.type = 'Task'
        activity.name = task.text
        activity.action = 'deleted'
        activity.put()
class Document(ndb.Model):
    """A user-uploaded file backed by a blobstore blob."""
    name = ndb.StringProperty(required=True)
    extension = ndb.StringProperty(required=True)   # holds the uploaded filename
    blob = ndb.BlobKeyProperty(required=True)
    date_uploaded = ndb.DateTimeProperty(auto_now=True)
    user = ndb.KeyProperty(kind=User)
    user_uuid = ndb.StringProperty(required=True)
    size = ndb.StringProperty(required=True)        # size in MB, stored as a string
    def _post_put_hook(self, future):
        # Record a 'created' Activity after every put.
        # NOTE(review): unlike the delete hook, no activity.name is set here.
        activity = Activity()
        user_uuid = self.user_uuid
        user = User.query(User.uuid == user_uuid).get()
        activity.user = ndb.Key(User, user.key.id())
        task_id = future.get_result()
        new_task_id = task_id.id()
        activity.type_key = ndb.Key(Document, new_task_id)
        activity.action = 'created'
        activity.type = "Document"
        activity.put()
    @classmethod
    def _pre_delete_hook(cls, key):
        # Record a 'deleted' Activity before the document is removed.
        # NOTE(review): no activity.user is set here (unlike Task/Note hooks).
        activity = Activity()
        model_key = key.id()
        document = Document.get_by_id(model_key)
        activity.type_key = ndb.Key(Document, model_key)
        activity.type = 'Document'
        activity.name = document.name
        activity.action = 'deleted'
        activity.put()
class Reminder(ndb.Model):
    """A dated reminder; put/delete hooks mirror changes into the Activity log."""
    name = ndb.StringProperty(required=True)
    date = ndb.StringProperty(required=True)        # stored as the raw form string
    user_uuid = ndb.StringProperty(required=True)
    user = ndb.KeyProperty(kind=User, required=True)
    uuid = ndb.StringProperty(required=True)
    def _post_put_hook(self, future):
        # Record a 'created' Activity after every put.
        activity = Activity()
        user_uuid = self.user_uuid
        user = User.query(User.uuid == user_uuid).get()
        activity.user = ndb.Key(User, user.key.id())
        task_id = future.get_result()
        new_task_id = task_id.id()
        activity.type_key = ndb.Key(Reminder, new_task_id)
        activity.action = 'created'
        activity.type = "Reminder"
        activity.put()
    @classmethod
    def _pre_delete_hook(cls, key):
        # Record a 'deleted' Activity before the reminder is removed.
        # NOTE(review): no activity.user is set here (unlike Task/Note hooks).
        activity = Activity()
        model_key = key.id()
        reminder = Reminder.get_by_id(model_key)
        activity.type_key = ndb.Key(Reminder, model_key)
        activity.type = 'Reminder'
        activity.name = reminder.name
        activity.action = 'deleted'
        activity.put()
class OvernightPermission(ndb.Model):
    """An overnight-guest permission slip."""
    host_name = ndb.StringProperty(required=True)
    guest_name = ndb.StringProperty(required=True)
    room_number = ndb.StringProperty(required=True)
    dorm = ndb.StringProperty(required=True)
    overnight_date = ndb.DateTimeProperty(required=True)
    user = ndb.KeyProperty(kind=User)
    def _post_put_hook(self, future):
        # Record an Activity after every put.
        # NOTE(review): unlike the other hooks, neither activity.user nor
        # activity.action is set here -- confirm whether that is intended.
        activity = Activity()
        model_id = future.get_result()
        new_model_id = model_id.id()
        activity.type_key = ndb.Key(OvernightPermission, new_model_id)
        activity.type = "Overnight Permission"
        activity.put()
'''
Models related to employee/admin functions - job, events
'''
class Job(ndb.Model):
    """A posted job listing."""
    name = ndb.StringProperty(required=True)
    description = ndb.TextProperty(required=True)
    date_posted = ndb.DateTimeProperty(auto_now=True)
    date_expires = ndb.DateTimeProperty()
    posted_by = ndb.KeyProperty(required=True)  # User key of the poster
    status = ndb.StringProperty(choices=('Active', 'Interviewing', 'Closed'))
    files = ndb.BlobKeyProperty(repeated=True)  # attached documents
class Event(ndb.Model):
    """A calendar event with a start and end time."""
    name = ndb.StringProperty(required=True)
    description = ndb.TextProperty(required=True)
    date_begin = ndb.DateTimeProperty(required=True)
    date_end = ndb.DateTimeProperty(required=True)
    user = ndb.KeyProperty(kind=User)  # owner/creator
def get_avatar_from_uuid(uuid):
    """Return a serving URL for the avatar of the user with *uuid*, or None.

    Returns None when uuid is falsy, the user does not exist, or the user
    has no avatar blob.
    """
    if not uuid:
        return None
    user = User.query(User.uuid == uuid).get()
    # Guard against unknown uuids: the old code (which also leaked a debug
    # 'print user') raised AttributeError when the query returned None.
    if user is None or user.avatar is None:
        return None
    return images.get_serving_url(user.avatar, size=None, crop=False, secure_url=None)
| {
"repo_name": "bkovacev/gae-student-portal",
"path": "models.py",
"copies": "1",
"size": "12387",
"license": "mit",
"hash": -4557605148718367000,
"line_mean": 34.8005780347,
"line_max": 110,
"alpha_frac": 0.6393799952,
"autogenerated": false,
"ratio": 3.761615548132402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4900995543332402,
"avg_score": null,
"num_lines": null
} |
__author__ = 'blackmesa'
# coding=utf-8
__author__ = 'sudo'
from configparser import ConfigParser
import requests
import bs4
session = requests.Session()
session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:17.0) Gecko/17.0 Firefox/17.0'})
def login():
    '''
    Log the module-level requests session into OGame using the credentials
    stored in settings/settings.ini ([credentials] username/password/universe).
    '''
    LOGIN_URL = "http://de.ogame.gameforge.com/main/login"
    cparser = ConfigParser()
    cparser.read("settings/settings.ini")
    password = cparser.get('credentials', 'password')
    username = cparser.get('credentials', 'username')
    universe = cparser.get('credentials', 'universe')
    data = dict()
    data['kid'] = ""
    data['pass'] = password
    data['login'] = username
    data['uni'] = "s"+universe+"-de.ogame.gameforge.com"
    session.post(LOGIN_URL, data)
    # SECURITY FIX: the old log line echoed the plaintext password to stdout.
    print('Logged in with: [{username}] on universe [{universe}]'.format(**locals()))
login()
# Fetch the resources page of universe 125 with the authenticated session.
page = session.get("http://s125-de.ogame.gameforge.com/game/index.php?page=resources").text
soup = bs4.BeautifulSoup(page)
# ugly voodoo stuff: only buildable building tiles carry an onclick attribute;
# collect those onclick strings.
temp_buildings_parse = soup.find_all('div', attrs={"class": "buildingimg"})
buildable = [element.a["onclick"] for element in temp_buildings_parse if "onclick" in element.a.attrs]
# end of uvs
print("%s possible buildings." % len(buildable))
## Just build the first possible building
# The onclick value embeds the build URL between single quotes; extract and GET it.
print(buildable[0].split("'")[1])
session.get(buildable[0].split("'")[1])
| {
"repo_name": "erstis-go-botting/sexy-bot",
"path": "examples/easy_build_something.py",
"copies": "1",
"size": "1385",
"license": "mit",
"hash": -3293300163620382000,
"line_mean": 25.1320754717,
"line_max": 110,
"alpha_frac": 0.6758122744,
"autogenerated": false,
"ratio": 3.1986143187066975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9262304154804715,
"avg_score": 0.02242448766039645,
"num_lines": 53
} |
__author__ = 'blackthorne'
import call
import logging
import cStringIO
import unittest
import datetime
def hexdump(src, length=16, sep='.'):
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or sep for x in range(256)])
lines = []
for c in xrange(0, len(src), length):
chars = src[c:c+length]
hex = ' '.join(["%02x" % ord(x) for x in chars])
if len(hex) > 24:
hex = "%s %s" % (hex[:24], hex[24:])
printable = ''.join(["%s" % ((ord(x) <= 127 and FILTER[ord(x)]) or sep) for x in chars])
lines.append("%08x: %-*s |%s|\n" % (c, length*3, hex, printable))
print ''.join(lines)
# Root logger configuration for the test run.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M:%S',)
logger = logging.getLogger(__name__)
logger.debug('tests beginning..')
# NOTE(review): the StringIO scratch below looks like leftover
# experimentation -- 'output' is never read back anywhere visible.
output = cStringIO.StringIO()
output.write('First line.\n')
print >>output, 'Second line.'
class LibcallTest(unittest.TestCase):
    """Integration tests for call.Command in its various execution modes.

    NOTE(review): these tests shell out to 'ls', 'sleep', 'uname', etc. and
    assume a Unix environment; the expected stderr text is platform dependent
    (BSD vs GNU ls wording) -- confirm on the target OS.
    """
    # calls subprocess without timeout
    def testSubprocessWithoutTimeout(self):
        # 'ls -l /' is expected to exit 0, produce no stderr, and list many entries.
        a = call.Command(['ls','-l','/'],'subprocess', logger=logger)
        b = a.start()
        num_lines = sum(1 for line in a.stdout)
        self.assertEqual(b, 0)
        self.assertEqual(a.stderr, "")
        self.assertTrue(num_lines > 50)
    # calls subprocess without timeout (fail)
    def testSubprocessWithoutTimeoutFail(self):
        # An invalid ls option: expect a non-zero exit code and an error message.
        a = call.Command(['ls','2222-l','/'],'subprocess', logger=logger)
        b = a.start()
        num_lines = sum(1 for line in a.stdout)
        self.assertIn(b, [1,2])
        self.assertEqual(a.stderr, "ls: 2222-l: No such file or directory\n")
        # NOTE(review): expecting >50 stdout lines from a FAILED ls looks copied
        # from the success test above -- verify the intended assertion.
        self.assertTrue(num_lines > 50)
    #
    # c = call.Command(['ls23','-l','/'],'subprocess', logger=logger, )
    # c.start()
    # c.stop()
    # print c.return_code
    # # call subprocess with timeout
    # print '# call subprocess with timeout'
    # d = call.Command(['sleep','40'],'subprocess', logger=logger, timeout=5)
    # print d.status
    # print d.start()
    # print d.stop()
    #
    # python basic call without timeout
    def testPythonCallWithoutTimeout(self):
        print '# python call without timeout'
        d = call.Command('print 2+2','python-basic', logger=logger)
        d.start()
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(d.stdout, "4\n")
    #
    # code = """
    # print 'hello'
    # print 'world'
    # """
    # d = call.Command(code,'python-basic', logger=logger)
    # print d.status
    # print d.start()
    # print d.stop()
    #
    # d = call.Command('aasdprint "hello"','python-basic', logger=logger)
    # print d.status
    # print d.start()
    # print d.stop()
    #
    #
    # python call with timeout
    # def testPythonCallWithTimeout(self):
    #     print '# python call with timeout' # known limitation
    #     code = """
    # import time
    # time.sleep(10)
    # print "hello"
    # """
    #     d = call.Command(code,'python-basic', logger=logger, timeout=2)
    #     time_start = datetime.datetime.now()
    #     d.start()
    #     time_end = datetime.datetime.now()
    #     time_diff = time_end - time_start
    #     self.assertGreaterEqual(time_diff.seconds, 2)
    #     self.assertLess(time_diff.seconds, 9)
    #     self.assertNotEqual(d.stdout, "hello\n")
    def testPythonCallBreakLoopWithTimeout(self):
        # An infinite loop must be cut off by the 2 s timeout; 'end' is never printed.
        code = """
import time
while True:
    time.sleep(0.1)
print 'end'
"""
        d = call.Command(code,'python-basic', logger=logger, timeout=2)
        time_start = datetime.datetime.now()
        d.start()
        time_end = datetime.datetime.now()
        time_diff = time_end - time_start
        self.assertGreaterEqual(time_diff.seconds, 2)
        self.assertNotEqual(d.stdout, "end\n")
    #
    # shell call without timeout
    # NOTE(review): the next two statements sit at CLASS level, so they execute
    # at class-definition time (leftover from a commented-out test) -- verify
    # they can be removed.
    d = call.Command('uname -a','shell-env', logger=logger)
    time_start = datetime.datetime.now()
    def testShellCallBreakSleepWithTimeout(self):
        # shell call with timeout: 'sleep 50' must be killed after ~2 s.
        print '# shell call with timeout'
        d = call.Command('sleep 50','shell-env', logger=logger, timeout=2)
        time_start = datetime.datetime.now()
        d.start()
        time_end = datetime.datetime.now()
        time_diff = time_end - time_start
        self.assertGreaterEqual(time_diff.seconds, 2)
        self.assertLess(time_diff.seconds, 5)
if __name__ == '__main__':
    # Run the whole suite when executed directly (unittest.main() calls sys.exit).
    unittest.main()
print 'END OF TESTS' | {
"repo_name": "blackthorne/Libcall",
"path": "libcall/tests/tests.py",
"copies": "1",
"size": "4441",
"license": "apache-2.0",
"hash": 7139044174125079000,
"line_mean": 30.0629370629,
"line_max": 96,
"alpha_frac": 0.5904075659,
"autogenerated": false,
"ratio": 3.4968503937007873,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9575446909680393,
"avg_score": 0.00236220998407887,
"num_lines": 143
} |
__author__ = 'akoziol'
# Import the necessary modules
# OS is used for file/folder manipulations
import os
# Subprocess->call is used for making system calls
import subprocess
# Errno is used in the file creation command - I think it's similar to the $! variable in Perl
import errno
# Glob finds all the path names matching a specified pattern according to the rules used by the Unix shell
import glob
# Shutil is useful for file moving/copying
import shutil
# prints variables in a form which can be used as input to the interpreter - similar to Data::Dumper?
import pprint
# Regex
import re
import sys
import time
import sys
from multiprocessing import Pool
import numpy
import math
# Define the variables for the read length and fold coverage, respectively
readLength = [21]
#readLength = [30, 35, 40, 45, 50, 55, 60, 75, 80, 100, 150, 250]
#foldCoverage = [50]
foldCoverage = [1, 2, 5, 10, 15, 20, 25, 30, 35, 40, 50, 75, 100]
from collections import defaultdict
# Initialize the required dictionaries
#vcfData = {}
def make_dict():
    """Return an autovivifying dict: any missing key creates another nested dict."""
    return defaultdict(make_dict)
vcfData = defaultdict(make_dict)
# Define the range of k-mer sizes for indexing of targets
kmer = [5, 7, 9, 11, 13, 15, 17, 19, 20]
# kmer = [20]
# The path is still hardcoded as, most of the time, this script is run from within Pycharm.
# NOTE(review): these chdir/glob calls run at import time and require the NAS mount.
os.chdir("/media/nas1/akoziol/Pipeline_development/SipprModelling/21merMapping")
path = os.getcwd()
os.chdir("%s/reference" % path)
referenceFile = glob.glob("*.fa*")
references = ["%s/reference/" % path + fastaFile for fastaFile in referenceFile]
#reference = "Escherichia_coli_O157_H7_str_Sakai.fas"
os.chdir("%s/targets" % path)
targets = glob.glob("*.fa")
outPath = "%s/outputs" % path
def make_path(inPath):
    """Create the directory *inPath* (including parents) if it is missing.

    Based on http://stackoverflow.com/questions/273192/check-if-a-directory-exists-and-create-it-if-necessary
    Safe to call repeatedly: an already-existing directory is not an error.
    """
    try:
        os.makedirs(inPath)
    except OSError as err:
        # Swallow only "already exists"; propagate real failures
        # (permission denied, invalid path, ...).
        if err.errno == errno.EEXIST:
            pass
        else:
            raise
def createSimulatedFilesProcesses(reference, strain):
    """Creates a pool of processes, and maps data in a parallel fashion to createSimulatedFiles"""
    print "Creating simulated files"
    # Initialise the args list
    simulatedArgs = []
    # Every Python module has it's __name__ defined and if this is '__main__',
    # it implies that the module is being run standalone by the user and we can do corresponding appropriate actions.
    # http://ibiblio.org/g2swap/byteofpython/read/module-name.html
    if __name__ == '__main__':
        # Initialise the pool of processes - it defaults to the number of processors
        simulatedFilepool = Pool()
        # Create a tuple of the appropriate read lengths and fold coverages
        # eg. (30, 1), (30, 2), ... (30, 100), (35, 1), (35, 2), ... (150, 100)
        for rLength in readLength:
            for fCov in foldCoverage:
                simulatedArgs.append((rLength, fCov, reference, strain))
        # Use the map function and the tuple created above to process the data rapidly
        # NOTE(review): the pool is never close()d/join()ed, unlike the sibling helpers.
        simulatedFilepool.map(createSimulatedFiles, simulatedArgs)
def createSimulatedFiles((rLength, fCov, reference, strain)):
    """Iterates over the readLength and foldCoverage lists to create folders (if necessary)\
    and perform analyses"""
    os.chdir(path)
    # Create a new folder(if necessary) at the appropriate location
    newPath = "%s/tmp/%s/rL%s/rL%s_fC%s" % (path, strain, rLength, rLength, fCov)
    newFile = "%s/%s_%s" % (newPath, rLength, fCov)
    # Scale coverage to the read length relative to a 250 bp baseline.
    adjCov = float(fCov) * float(rLength)/250
    #print fCov, rLength, adjCov
    artIlluminaCall = "art_illumina -i %s -l %s -f %s -o %s" % (reference, rLength, adjCov, newFile)
    make_path(newPath)
    #Call art_illumina to simulate the reads into the appropriate folders
    #art_illumina -i /path-to-file/Escherichia_coli_O157_H7_str_Sakai.fas -l "readLength" -f "foldCoverage" \
    #-m 225 -s 60 -o /path-to-folder/Appropriate_name
    if not os.path.isfile("%s.fq" % newFile):
        sys.stdout.write('.')
        # Subprocess.call requires that the command be finished before the loop can continue
        # this ensures that processes will not be started, and continue running, while the
        # script believes that it is "safe" to start more processes, eventually leading to problems
        subprocess.call(artIlluminaCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
    else:
        # NOTE(review): 'print sys.stdout.write(...)' prints the write() return
        # value (None) -- presumably only sys.stdout.write('.') was intended.
        print sys.stdout.write('.')
def faidxTargetsProcesses():
    """Faidx multiprocessing helper function"""
    print '\nProcessing targets with faidx'
    # Initialise the args list
    if __name__ == '__main__':
        # Initialise the pool of processes - it defaults to the number of processors
        faidxPool = Pool()
        # map() blocks until all targets are processed.
        faidxPool.map(faidxTargets, targets)
        faidxPool.terminate()
        faidxPool.join()
def faidxTargets(file):
    """Creates .fai index files of the targets, which are necessary for the conversion
    of sorted BAM files to fastq files."""
    # NOTE(review): parameter 'file' shadows the Python 2 builtin of the same name.
    faidxFile = "%s.fai" % file
    faidxPath = "%s/targets/faidxFiles" % path
    make_path(faidxPath)
    # Skip targets that already have an index in faidxPath.
    if not os.path.isfile("%s/%s" % (faidxPath, faidxFile)):
        faidxCommand = "samtools faidx %s" % file
        subprocess.call(faidxCommand, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        # Move the file and faidx-processed file to the appropriate folder for further processing
        shutil.move(faidxFile, faidxPath)
        shutil.copy(file, faidxPath)
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
# def indexTargetsProcesses():
# print '\nIndexing targets'
# indexTargetArgs = []
# if __name__ == '__main__':
# indexTargetsPool = Pool()
#
# indexTargetArgs.append((target, size))
# # Initialise the pool of processes - it defaults to the number of processors
# indexTargetsPool.map(indexTargets, indexTargetArgs)
def indexTargets():
    """Performs smalt index on the targets using the range of k-mers stored in the variable kmer"""
    print '\nIndexing targets'
    # One index folder per (target, k-mer size) combination.
    for target in targets:
        for size in kmer:
            filename = target.split('.')[0]
            # Create a new path to be created (if necessary) for the generation of the range of k-mers
            indexPath = "%s/targets/%s/%s_%s" % (path, filename, filename, size)
            # Call the make_path function to make folders as necessary
            make_path(indexPath)
            indexFileSMI = "%s.smi" % filename
            indexFileSMA = "%s.sma" % filename
            # Skip combinations that were indexed on a previous run.
            if not os.path.isfile("%s/%s" % (indexPath, indexFileSMI)):
                indexCommand = "smalt index -k %s -s 1 %s %s/targets/%s" % (size, filename, path, target)
                subprocess.call(indexCommand, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
                # smalt writes the index next to the cwd; move it into indexPath.
                shutil.move(indexFileSMI, indexPath)
                shutil.move(indexFileSMA, indexPath)
                sys.stdout.write('.')
            else:
                sys.stdout.write('.')
def mappingProcesses(reference):
    """Parallelises mapping() over every (readLength, foldCoverage, target, kmer) combination."""
    os.chdir(path)
    print '\nPerforming reference mapping'
    mappingProcessesArgs = []
    if __name__ == '__main__':
        mappingProcessesPool = Pool()
        # uses kmer, targets, readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        mappingProcessesArgs.append((reference, rLength, fCov, target, size))
        mappingProcessesPool.map(mapping, mappingProcessesArgs)
        mappingProcessesPool.terminate()
        mappingProcessesPool.join()
def mapping((reference, rLength, fCov, target, size)):
    """Performs the mapping of the simulated reads to the targets"""
    filename = target.split('.')[0]
    # Unique name encoding the full parameter combination for this run.
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, filename, size)
    filePath = "%s/tmp/%s/rL%s/rL%s_fC%s" % (path, reference, rLength, rLength, fCov)
    newPath = "%s/%s" % (filePath, megaName)
    make_path(newPath)
    targetPath = "%s/targets/%s/%s_%s" % (path, filename, filename, size)
    # Skip combinations already mapped on a previous run.
    if not os.path.isfile("%s/%s.bam" % (newPath, megaName)):
        smaltMap = "smalt map -o %s/%s.bam -f bam -x %s/%s %s/%s_%s.fq" \
                   % (newPath, megaName, targetPath, filename, filePath, rLength, fCov)
        subprocess.call(smaltMap, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def sortingProcesses(reference):
    """Parallelises sorting() over every (readLength, foldCoverage, target, kmer) combination."""
    print "\nSorting bam files"
    sortingProcessesArgs = []
    if __name__ == '__main__':
        sortingProcessesPool = Pool()
        # uses kmer, targets, readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        sortingProcessesArgs.append((reference, rLength, fCov, target, size))
        sortingProcessesPool.map(sorting, sortingProcessesArgs)
        # NOTE(review): no join() after terminate() here, unlike the sibling helpers.
        sortingProcessesPool.terminate()
def sorting((reference, rLength, fCov, target, size)):
    """Performs samtools sort to return a sorted bam file"""
    filename = target.split('.')[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, filename, size)
    # NOTE(review): 'sorted' shadows the builtin of the same name.
    sorted = megaName + "_sorted"
    sortedMegaName = megaName + "_sorted.bam"
    filePath = "%s/tmp/%s/rL%s/rL%s_fC%s" % (path, reference, rLength, rLength, fCov)
    newPath = "%s/%s" % (filePath, megaName)
    #Sort the BAM file
    if not os.path.isfile("%s/%s" % (newPath, sortedMegaName)):
        bamSort = "samtools sort %s/%s.bam %s/%s" % (newPath, megaName, newPath, sorted)
        subprocess.call(bamSort, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def bamIndexingProcesses(reference):
    """Parallelises bamIndexing() over every (readLength, foldCoverage, target, kmer) combination."""
    print '\nIndexing bam files'
    bamIndexingArgs = []
    if __name__ == '__main__':
        bamIndexingPool = Pool()
        # uses kmer, targets, readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        bamIndexingArgs.append((reference, rLength, fCov, target, size))
        bamIndexingPool.map(bamIndexing, bamIndexingArgs)
        bamIndexingPool.terminate()
        bamIndexingPool.join()
def bamIndexing((reference, rLength, fCov, target, size)):
    """Indexes the sorted bam files in order to visualize the assemblies with tablet - note this is OPTIONAL"""
    filename = target.split('.')[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, filename, size)
    sortedMegaName = megaName + "_sorted.bam"
    filePath = "%s/tmp/%s/rL%s/rL%s_fC%s" % (path, reference, rLength, rLength, fCov)
    newPath = "%s/%s" % (filePath, megaName)
    indexedName = megaName + "_sorted.bai"
    # Skip combinations already indexed on a previous run.
    if not os.path.isfile("%s/%s" % (newPath, indexedName)):
        bamIndex = "samtools index %s/%s %s/%s" % (newPath, sortedMegaName, newPath, indexedName)
        subprocess.call(bamIndex, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def createVCFProcesses(reference):
    """Parallelises createVCF() over every (readLength, foldCoverage, target, kmer) combination."""
    print '\nCreating vcf files'
    createVCFArgs = []
    if __name__ == '__main__':
        createVCFPool = Pool()
        # uses kmer, targets, readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        createVCFArgs.append((reference, rLength, fCov, target, size))
        createVCFPool.map(createVCF, createVCFArgs)
        createVCFPool.terminate()
        createVCFPool.join()
def createVCF((reference, rLength, fCov, target, size)):
    """Creates the variant calling format files from which all relevant data can be pulled"""
    filename = target.split('.')[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, filename, size)
    sortedMegaName = megaName + "_sorted.bam"
    filePath = "%s/tmp/%s/rL%s/rL%s_fC%s" % (path, reference, rLength, rLength, fCov)
    vcfFile = megaName + "_sorted.vcf"
    newPath = "%s/%s" % (filePath, megaName)
    faidxTarget = "%s/targets/faidxFiles/%s" % (path, target)
    # Read this to understand why certain flags were used
    # http://samtools.sourceforge.net/mpileup.shtml
    if not os.path.isfile("%s/%s" % (newPath, vcfFile)):
        # mpileup output is piped through bcftools; the whole pipeline runs in a shell.
        vcfPipe = "samtools mpileup -A -BQ0 -d 1000000 -uf %s %s/%s | bcftools view -cg - > %s/%s" \
                  % (faidxTarget, newPath, sortedMegaName, newPath, vcfFile)
        subprocess.call(vcfPipe, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def createOutputFiles(reference):
    """Parse every per-combination vcf file and write one summary row per
    (readLength, foldCoverage, target, kmer) combination to a tab-separated
    csv in outPath.  Also stores the percent identity per target in the
    global vcfData for modifiedOutputs().
    """
    print "\nCreating outputs"
    make_path(outPath)
    os.chdir(outPath)
    # 'start' is the module-level run timestamp set at the bottom of the file.
    outFile = open("SipprModelling_%s_%s.csv" % (start, reference), "wb")
    outFile.write("readLength\tfoldCoverage\ttarget\tkmerLength\tMedianQualityScore\t"
                  "QualityScoreSD\tMedianFoldCoverage\tFoldCoverageSD\tMedianPercentID\tqualityMetric\n")
    for rLength in readLength:
        for fCov in foldCoverage:
            for target in targets:
                for size in kmer:
                    total1 = 0
                    sys.stdout.write('.')
                    filename = target.split('.')[0]
                    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, filename, size)
                    filePath = "%s/tmp/%s/rL%s/rL%s_fC%s" % (path, reference, rLength, rLength, fCov)
                    vcfFile = megaName + "_sorted.vcf"
                    newPath = "%s/%s" % (filePath, megaName)
                    outputFile = "%s/%s" % (newPath, vcfFile)
                    fileName = outputFile.split(".")[0]
                    #fileName = fileName.split("_sorted")[0]
                    #nameData = fileName.split("_")
                    #rL = nameData[0].split("rL")[1]
                    #fC = nameData[1].split("fC")[1]
                    #target = nameData[2]
                    #size = nameData[3].split("kmer")[1]
                    # Initialise the counter, which will be used to track lines in the vcf file - if positions in the
                    # target are not mapped, then the position field will jump ahead of the counter
                    count = 1
                    # Initialise the arrays, which will keep track of the appropriate values for each dataset
                    arrQual = []
                    arrCov = []
                    arrSum = []
                    output = open(outputFile, "r")
                    for line in output:
                        # vcf files have 36 commented out lines at the top of each file - these are not necessary
                        if re.search('#', line):
                            pass
                        else:
                            total1 += 1
                            # Format of file
                            # CHROM POS ID REF ALT QUAL FILTER INFO FORMAT
                            # adk-12 8 . G . 32.7 . DP=1;AF1=0;AC1=0;DP4=0,1,0,0;MQ=29;FQ=-30.3 PL 0
                            # data[0] [1] [2] [3] [4] [5] [6] [7]
                            data = line.split("\t")
                            #target = data[0]
                            pos = data[1]
                            refSeq = data[3]
                            mapSeq = data[4]
                            qual = data[5]
                            # Depth of coverage is reported prior to the first ";"
                            dpLine = data[7].split(";")[0]
                            # For now, I'm skipping lines that indicated the presence of a possible indel
                            # - I may return to this later
                            if re.search("INDEL", dpLine):
                                pass
                            else:
                                # If the called base (mapSeq) is identical to the reference base (refSeq)
                                # - denoted by a ".", then set seq to equal refSeq, otherwise, pull the
                                # value of mapSeq for seq
                                # NOTE(review): this avgQual is overwritten after the loop and never
                                # read in between -- presumably dead code; verify before removing.
                                avgQual = sum(arrQual)/total1
                                if mapSeq == ".":
                                    seq = refSeq
                                    match = 1
                                # This section corrects for the fact that during the conversion of bam files to vcf
                                # files, SNP calls and ambiguous calls look identical, except for the fact that for
                                # SNPs, the qualityScore (qual) tends to be higher than the surrounding bases,
                                # while ambiguous calls have a lower qualityScore - this loop screens for quality
                                # scores that are at least 10 lower than the score of the previous base
                                else:
                                    # NOTE(review): arrQual[-1] raises IndexError if the very first
                                    # data line is a non-reference call -- confirm this cannot occur.
                                    if float(arrQual[-1] - 10) >= 0:
                                        prevValue = float(arrQual[-1] - 10)
                                    else:
                                        prevValue = 0
                                    if float(qual) <= prevValue:
                                        seq = refSeq
                                        match = 1
                                    else:
                                        # This attempts to catch if there are two ambiguous bases in a row;
                                        # they will hopefully have the same value
                                        if float(qual) == prevValue:
                                            seq = refSeq
                                            match = 1
                                        else:
                                            # "True" SNPs seem to have increased qualityScore compared to the
                                            # surrounding values, this will catch that
                                            if float(qual) > prevValue:
                                                seq = mapSeq
                                                match = 0
                                # Strip the "DP=" from dpLine
                                DP = dpLine.split("=")[1]
                                #vcfData[pos] = (fileName, target, refSeq, mapSeq, DP)
                                # If pos > count, then there is a gap in the mapping (or a deletion, but ignoring
                                # this possibility for now). For my purposes, I want to have data reported for
                                # every position, whether it is present in the vcf file or not, so I will use count
                                # as the position, "-" as the seq, and 0 as the quality and depth of coverage
                                if int(pos) > count:
                                    #print int(pos) - count, pos, count, range(count, int(pos))
                                    # the number of skipped positions is equal to the value for pos - count
                                    # For each skipped position (i), set appropriate variables to appropriate values
                                    for i in range(count, int(pos)):
                                        posAdj = count
                                        seqAdj = "-"
                                        matchAdj = 0
                                        qualAdj = 0
                                        DPAdj = 0
                                        #vcfData[fileName][rL][fC][target][size][int(posAdj)][seqAdj][matchAdj][qualAdj] = DP
                                        arrQual.append(float(qualAdj))
                                        arrCov.append(float(DPAdj))
                                        arrSum.append(float(matchAdj))
                                        count += 1
                                # After gap-filling, pos should equal count; the else branch
                                # handles pos < count (e.g. duplicate positions) identically.
                                if int(pos) == count:
                                    #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP
                                    arrQual.append(float(qual))
                                    arrCov.append(float(DP))
                                    arrSum.append(float(match))
                                    count += 1
                                else:
                                    #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP
                                    arrQual.append(float(qual))
                                    arrCov.append(float(DP))
                                    arrSum.append(float(match))
                                    count += 1
                    # In the case of no data being present in a file,
                    total = count - 1
                    if total == 0:
                        avgQual = 0
                        stdQual = 0
                        avgCov = 0
                        stdCov = 0
                        avgID = 0
                        qualMet = 0
                    else:
                        avgQual = sum(arrQual)/total
                        stdQual = numpy.std(arrQual)
                        avgCov = sum(arrCov)/total
                        stdCov = numpy.std(arrCov)
                        avgID = sum(arrSum)/total * 100
                        qualMet = avgQual * avgCov
                    vcfData[reference][filename] = avgID
                    outFile.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                                  % (rLength, fCov, filename, size, avgQual, stdQual, avgCov, stdCov, avgID, qualMet))
                    output.close()
    outFile.close()
def modifiedOutputs():
    """Write a strain-by-target matrix of the percent-identity values collected
    in the global vcfData.  Assumes the cwd is outPath (set by createOutputFiles).
    """
    outFile = open("SipprModelling_%s.csv" % (start), "wb")
    outFile.write("Strain\t")
    # Header row: one column per target gene.
    for target in sorted(targets):
        filename = target.split('.')[0]
        outFile.write("%s\t" % filename)
    for strain in vcfData:
        outFile.write("\n%s\t" % strain)
        for gene, ID in sorted(vcfData[strain].items()):
            outFile.write("%s\t" % ID)
        # Terminate the per-strain progress line on the console.
        print ""
    outFile.close()
def pipeline(reference, strain):
    """Calls all the functions in a way that they can be multi-processed"""
    # Simulation and target preparation.
    createSimulatedFilesProcesses(reference, strain)
    faidxTargetsProcesses()
    #indexTargetsProcesses()
    indexTargets()
    #Start the mapping operations
    mappingProcesses(strain)
    sortingProcesses(strain)
    bamIndexingProcesses(strain)
    createVCFProcesses(strain)
    # Summarise the run: per-combination csv, then the strain-by-target matrix.
    createOutputFiles(strain)
    modifiedOutputs()
def callPipeline():
    """Depending on whether there are multiple references to process, there are different requirements
    for the output file - one reference file is likely for modelling multiple parameters, while multiple
    files are likely used to compare the results from a single set of parameters on multiple genomes"""
    count = 0
    if len(references) == 1:
        print "Please use the SipprModelling program for a single reference genome"
    else:
        for reference in references:
            count += 1
            # Extract the strain name from ".../reference/<strain>.fas".
            refRegex = re.search(".+\/reference\/(.+).fas", reference)
            strain = refRegex.group(1)
            print "Processing file %s of %s" % (count, len(references))
            pipeline(reference, strain)
# Time the whole run; 'start' is also read by the output functions above.
start = time.time()
callPipeline()
print "\nElapsed Time: %s seconds" % (time.time() - start)
| {
"repo_name": "adamkoziol/SipprModeling",
"path": "SipprModellingMultiprocessingMultifile.py",
"copies": "1",
"size": "23959",
"license": "mit",
"hash": -3824011015604236300,
"line_mean": 45.4321705426,
"line_max": 125,
"alpha_frac": 0.547143036,
"autogenerated": false,
"ratio": 3.9911710811261036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0050508300980328874,
"num_lines": 516
} |
__author__ = 'blais'
__author__ = 'akoziol'
# Import the necessary modules
# OS is used for file/folder manipulations
import os
# Subprocess->call is used for making system calls
import subprocess
# Errno is used in the file creation command - I think it's similar to the $! variable in Perl
import errno
# Glob finds all the path names matching a specified pattern according to the rules used by the Unix shell
import glob
# Shutil is useful for file moving/copying
import shutil
# prints variables in a form which can be used as input to the interpreter - similar to Data::Dumper?
import pprint
# Regex
import re
import time
import sys
from threading import Thread
from Queue import Queue
import shlex
import argparse
# Define the variables for the read length and fold coverage, respectively
#readLength = [30, 35, 40, 45, 50, 55, 60, 75, 80, 100, 150]
readLength = [54]
#foldCoverage = [1, 2, 5, 10, 15, 20, 25, 30, 35, 40, 50, 75, 100]
foldCoverage = [1, 2, 3]
# Initialize the required dictionaries
vcfData = {}
# Define the range of k-mer sizes for indexing of targets
#kmer = [5, 7, 9, 11, 13, 15]
kmer = [9]
# NOTE(review): hardcoded NAS paths executed at import time.
os.chdir("/media/nas/akoziol/Pipeline_development/SipprModelling")
path = os.getcwd()
reference = "/media/nas/akoziol/Pipeline_development/SipprModelling/reference/Escherichia_coli_O157_H7_str_Sakai.fas"
def make_path(inPath):
    """Make sure the directory *inPath* (and any missing parents) exists.

    From: http://stackoverflow.com/questions/273192/check-if-a-directory-exists-and-create-it-if-necessary
    """
    if os.path.isdir(inPath):
        return  # already present - nothing to do
    try:
        os.makedirs(inPath)
    except OSError as err:
        # A concurrent creator may have won the race; that is fine.
        if err.errno != errno.EEXIST:
            raise
SimulatedFileQueue = Queue()
def createSimulatedFiles(queue):
    """Iterates over the readLength and foldCoverage lists to create folders (if necessary)\
    and perform analyses"""
    # Daemon worker: loops forever, pulling (rLength, fCov) jobs off the queue.
    # NOTE(review): the 'queue' parameter is ignored; the global SimulatedFileQueue
    # is used instead.
    while True:
        os.chdir(path)
        rLength, fCov = SimulatedFileQueue.get()
        # Create a new folder(if necessary) at the appropriate location
        newPath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
        newFile = "%s/%s_%s_" % (newPath, rLength, fCov)
        artIlluminaCall = "art_illumina -i %s -l %s -f %s -m 225 -s 60 -o %s" % (reference, rLength, fCov, newFile)
        make_path(newPath)
        sys.stdout.write('.')
        # Call art_illumina to simulate the reads into the appropriate folders
        # art_illumina -i /path-to-file/Escherichia_coli_O157_H7_str_Sakai.fas -l "readLength" -f "foldCoverage" \
        # -m 225 -s 60 -o /path-to-folder/Appropriate_name
        if not os.path.isfile("%s1.fq" % newFile):
            # NOTE(review): Popen is not waited on, so task_done() below may fire
            # before art_illumina finishes -- verify downstream steps tolerate this.
            subprocess.Popen(shlex.split(artIlluminaCall), stdout=open(os.devnull, 'wb'))
        # signals to queue job is done
        SimulatedFileQueue.task_done()
def createSimulatedFilesThreads():
    """Spawn one daemon thread per (readLength, foldCoverage) combination and
    feed the jobs through SimulatedFileQueue; blocks until all are done."""
    print "Creating simulated files"
    for i in range(len(readLength) * len(foldCoverage)):
        threads = Thread(target=createSimulatedFiles, args=(SimulatedFileQueue,))
        threads.setDaemon(True)
        threads.start()
    for rLength in readLength:
        for fCov in foldCoverage:
            SimulatedFileQueue.put((rLength, fCov))
    #wait on the queue until everything has been processed
    SimulatedFileQueue.join()
faidxQueue = Queue()
def faidxTargets(faidxQueue):
    """Creates .fai index files of the targets, which are necessary for the conversion
    of sorted BAM files to fastq files."""
    # Daemon worker: pulls target filenames off the queue until the process exits.
    while True:
        file = faidxQueue.get()
        faidxFile = "%s.fai" % file
        faidxPath = "%s/targets/faidxFiles" % path
        make_path(faidxPath)
        sys.stdout.write('.')
        if not os.path.isfile("%s/%s" % (faidxPath, faidxFile)):
            faidxCommand = "samtools faidx %s" % file
            # NOTE(review): the moves below run immediately after Popen, possibly
            # before samtools has written the .fai -- verify this race is benign.
            subprocess.Popen(shlex.split(faidxCommand), stdout=open(os.devnull, 'wb'))
            shutil.move(faidxFile, faidxPath)
            shutil.move(file, faidxPath)
        else:
            pass
        # signals to queue job is done
        faidxQueue.task_done()
def faidxTargetsThreads(targets):
    """Spawn one daemon worker per target and faidx-index them all via faidxQueue."""
    print ''
    print 'Processing targets with faidx'
    for i in range(len(targets)):
        threads = Thread(target=faidxTargets, args=(faidxQueue,))
        threads.setDaemon(True)
        threads.start()
    for target in targets:
        faidxQueue.put(target)
    #wait on the queue until everything has been processed
    faidxQueue.join()
indexQueue = Queue()
def indexTargets(indexQueue):
    """Performs smalt index on the targets using the range of k-mers stored in the variable kmer"""
    # Daemon worker: pulls (target, kmer size) pairs off the queue.
    while True:
        target, size = indexQueue.get()
        filename = target.split('.')[0]
        # Create a new path to be created (if necessary) for the generation of the range of k-mers
        indexPath = "%s/targets/%s/%s_%s" % (path, filename, filename, size)
        # Call the make_path function to make folders as necessary
        make_path(indexPath)
        indexFileSMI = "%s.smi" % filename
        indexFileSMA = "%s.sma" % filename
        sys.stdout.write('.')
        if not os.path.isfile("%s/%s" % (indexPath, indexFileSMI)):
            indexCommand = "smalt index -k %s -s 1 %s %s/targets/%s" % (size, filename, path, target)
            # NOTE(review): moves may race with the un-waited Popen, as in faidxTargets.
            subprocess.Popen(shlex.split(indexCommand), stdout=open(os.devnull, 'wb'))
            shutil.move(indexFileSMI, indexPath)
            shutil.move(indexFileSMA, indexPath)
        else:
            pass
        # signals to queue job is done
        indexQueue.task_done()
def indexTargetsThreads(targets):
    """Spawn daemon workers and smalt-index every (target, kmer) combination."""
    #print ''
    print '\nIndexing targets'
    # One worker per target; each worker loops, so the len(targets)*len(kmer)
    # jobs below are still all consumed.
    for i in range(len(targets)):
        threads = Thread(target=indexTargets, args=(indexQueue,))
        threads.setDaemon(True)
        threads.start()
    for target in targets:
        for size in kmer:
            indexQueue.put((target, size))
    #wait on the queue until everything has been processed
    indexQueue.join()
mappingQueue = Queue()
class Mapping(Thread):
    """Worker thread that smalt-maps simulated reads against an indexed target.

    NOTE(review): run() reads the module-level mappingQueue, not the
    self.mappingQueue stored in __init__ -- verify which was intended.
    """
    def __init__(self, mappingQueue):
        Thread.__init__(self)
        self.mappingQueue = mappingQueue
    def run(self):
        while True:
            targets, rLength, fCov, target, size = mappingQueue.get()
            filename = target.split('.')[0]
            megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, filename, size)
            filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
            newPath = "%s/%s" % (filePath, megaName)
            make_path(newPath)
            targetPath = "%s/targets/%s/%s_%s" % (path, filename, filename, size)
            sys.stdout.write('.')
            if not os.path.isfile("%s/%s.bam" % (newPath, megaName)):
                smaltMap = "smalt map -o %s/%s.bam -n 24 -f bam -x -l pe %s/%s %s/%s_%s_1.fq %s/%s_%s_2.fq" \
                           % (newPath, megaName, targetPath, filename, filePath, rLength, fCov, filePath, rLength, fCov)
                # NOTE(review): Popen is not waited on before task_done() below.
                subprocess.Popen(shlex.split(smaltMap), stdout=open(os.devnull, 'wb'))
                print smaltMap
            else:
                pass
            time.sleep(0.01)
            # signals to queue job is done
            mappingQueue.task_done()
def mappingThreads(targets):
    """Mapping threads!"""
    os.chdir(path)
    print '\nPerforming reference mapping'
    # NOTE(review): 'len(foldCoverage * len(targets) * len(kmer))' multiplies the
    # LIST -- numerically it equals the intended product, but the parenthesisation
    # looks accidental; verify and tidy.
    length = len(readLength) * len(foldCoverage * len(targets) * len(kmer))
    for i in range(length):
        threads = Mapping(mappingQueue)
        threads.setDaemon(True)
        threads.start()
    # uses kmer, targets, readLength, foldCoverage
    for rLength in readLength:
        for fCov in foldCoverage:
            for target in targets:
                for size in kmer:
                    mappingQueue.put((targets, rLength, fCov, target, size))
    #wait on the queue until everything has been processed
    mappingQueue.join()
class Sorting(Thread):
    """Worker thread that samtools-sorts the mapped BAM files.

    NOTE(review): run() references a module-level 'sortingQueue' that is never
    defined in this file (unlike mappingQueue), so running this thread raises
    NameError; also __init__ stores the queue as self.mappingQueue, which looks
    copy-pasted from Mapping. Verify before use.
    """
    def __init__(self, sortingQueue):
        Thread.__init__(self)
        self.mappingQueue = sortingQueue
    def run(self):
        while True:
            targets, rLength, fCov, target, size = sortingQueue.get()
            filename = target.split('.')[0]
            megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, filename, size)
            # NOTE(review): 'sorted' shadows the builtin of the same name.
            sorted = megaName + "_sorted"
            sortedMegaName = megaName + "_sorted.bam"
            filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
            newPath = "%s/%s" % (filePath, megaName)
            make_path(newPath)
            #Sort the BAM file
            if not os.path.isfile("%s/%s" % (newPath, sortedMegaName)):
                bamSort = "samtools sort %s/%s.bam %s/%s" % (newPath, megaName, newPath, sorted)
                subprocess.Popen(shlex.split(bamSort), stdout=open(os.devnull, 'wb'))
                #os.system(bamSort)
                sys.stdout.write('.')
            else:
                sys.stdout.write('.')
            # signals to queue job is done
            sortingQueue.task_done()
def pipeline():
    """Calls all the functions in a way that they can be multithreaded"""
    createSimulatedFilesThreads()
    os.chdir("%s/targets" % path)
    # 'targets' is made global so the worker helpers can read it.
    global targets
    targets = glob.glob("*.fa")
    faidxTargetsThreads(targets)
    indexTargetsThreads(targets)
    # Start the mapping operations
    mappingThreads(targets)
pipeline() | {
"repo_name": "adamkoziol/SipprModeling",
"path": "modellingClasses.py",
"copies": "1",
"size": "9269",
"license": "mit",
"hash": 3818036614657197000,
"line_mean": 34.5172413793,
"line_max": 117,
"alpha_frac": 0.6295177473,
"autogenerated": false,
"ratio": 3.5256751616584254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9636839270185591,
"avg_score": 0.003670727754566765,
"num_lines": 261
} |
__author__ = 'blais'
import multiprocessing
import os
import sys
import subprocess
import errno
import glob
from multiprocessing import Pool
# Define the variables for the read length and fold coverage, respectively
readLength = [21, 22, 23, 24]
foldCoverage = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
kmer = [5, 7, 9]
# NOTE(review): hardcoded NAS paths executed at import time.
os.chdir("/media/nas/akoziol/Pipeline_development/SipprModelling")
path = os.getcwd()
reference = "/media/nas/akoziol/Pipeline_development/SipprModelling/reference/Escherichia_coli_O157_H7_str_Sakai.fas"
os.chdir("%s/targets" % path)
targets = glob.glob("*.fa")
def createSimulatedFiles((rLength, fCov)):
    """Iterates over the readLength and foldCoverage lists to create folders (if necessary)\
    and perform analyses"""
    os.chdir(path)
    #for fCov in foldCoverage:
    # Create a new folder(if necessary) at the appropriate location
    newPath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
    newFile = "%s/%s_%s_" % (newPath, rLength, fCov)
    artIlluminaCall = "art_illumina -i %s -l %s -f %s -m 225 -s 60 -o %s" % (reference, rLength, fCov, newFile)
    make_path(newPath)
    # Call art_illumina to simulate the reads into the appropriate folders
    # art_illumina -i /path-to-file/Escherichia_coli_O157_H7_str_Sakai.fas -l "readLength" -f "foldCoverage" \
    # -m 225 -s 60 -o /path-to-folder/Appropriate_name
    if not os.path.isfile("%s1.fq" % newFile):
        #subprocess.Popen(shlex.split(artIlluminaCall), stdout=open(os.devnull, 'wb'))
        sys.stdout.write('.')
        subprocess.call(artIlluminaCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
    else:
        # NOTE(review): prints the None returned by write() -- presumably
        # only sys.stdout.write('.') was intended.
        print sys.stdout.write('.')
def createSimulatedFilesProcesses(readLength, foldCoverage):
    """Fan createSimulatedFiles() out over a process pool, one job per
    (readLength, foldCoverage) combination."""
    args = []
    # Guard so pool workers importing this module do not recurse.
    if __name__ == '__main__':
        pool = Pool()
        for rLength in readLength:
            for fCov in foldCoverage:
                args.append((rLength, fCov))
        pool.map(createSimulatedFiles, args)
def make_path(inPath):
    """Create the directory tree ``inPath``; silently succeed if it already
    exists (equivalent to ``mkdir -p``).

    Adapted from: http://stackoverflow.com/questions/273192/check-if-a-directory-exists-and-create-it-if-necessary
    """
    try:
        os.makedirs(inPath)
    except OSError as err:
        # Only the "already exists" condition is benign; re-raise anything else.
        if err.errno != errno.EEXIST:
            raise
# BUG FIX: the original line called the undefined name ``daemonProcess``,
# which raised NameError at import; the driver defined above is
# createSimulatedFilesProcesses.
createSimulatedFilesProcesses(readLength, foldCoverage)
"repo_name": "adamkoziol/SipprModeling",
"path": "test.py",
"copies": "1",
"size": "2331",
"license": "mit",
"hash": -5624553209837212000,
"line_mean": 34.3333333333,
"line_max": 117,
"alpha_frac": 0.6735306735,
"autogenerated": false,
"ratio": 3.1246648793565686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9282680708091555,
"avg_score": 0.00310296895300252,
"num_lines": 66
} |
__author__ = 'Blake'
import copy
from functools import partial
from synpost.fn.helpers import min_max
class PluginMeta(type):
    """Metaclass for plugin classes.

    Supplies default ``name``/``action``/``priority`` class attributes when a
    plugin does not declare them, and turns "instantiation" of a plugin class
    into a partial application of its ``execute`` staticmethod over a shallow
    copy of the supplied Action.
    """
    def __new__(cls, name, parents, dct):
        # Default plugin name is derived from the class name,
        # e.g. "PrevNextSitePlugin" -> "prevnext".
        defaults = {
            'name': name.lower().replace('plugin', '').replace('site', ''),
            'action': 'build',
            'priority': 100,
        }
        for key, value in defaults.items():
            dct.setdefault(key, value)
        return super(PluginMeta, cls).__new__(cls, name, parents, dct)

    def __call__(cls, Action):
        # Calling the class does NOT build an instance: it yields a no-arg
        # callable that runs ``cls.execute`` on a shallow copy of Action.
        return partial(cls.execute, copy.copy(Action))

    def __str__(cls):
        return 'plugin-%s' % cls.plugin
class PluginCore(object):
    """Abstract base for build plugins.

    Subclasses override ``execute`` -- a staticmethod that takes the Action
    and returns True/False. Under Python 2, the PluginMeta metaclass fills in
    default ``name``/``action``/``priority`` attributes and intercepts calls.
    """
    __metaclass__ = PluginMeta  # Python-2 style metaclass declaration

    # Identifying attributes; the negative priority keeps this abstract base
    # out of normal pipeline ordering.
    plugin = 'core'
    action = 'core'
    priority = -1

    def __init__(self, Action):
        # Bind this plugin's execute() to the supplied Action up front.
        self.go_fn = partial(self.execute, Action)

    @staticmethod
    def execute(Action):
        # Subclasses must implement; expected to return True/False.
        raise NotImplementedError
class SitePluginCore(PluginCore):
    """Abstract base for site-level plugins.

    Same contract as PluginCore, but ``execute`` takes the Site object and is
    expected to return the (possibly modified) Site.
    """
    __metaclass__ = PluginMeta  # Python-2 style metaclass declaration

    plugin = 'site_core'
    action = 'site'
    # negative priority: abstract base, never scheduled directly
    priority = -1

    def __init__(self, Site):
        # Delegate binding of execute() to the PluginCore initializer.
        super(SitePluginCore, self).__init__(Site)

    @staticmethod
    def execute(Site):
        # Subclasses must implement; expected to return the Site.
        raise NotImplementedError
| {
"repo_name": "blakev/synpost",
"path": "synpost/plugins/core.py",
"copies": "1",
"size": "1275",
"license": "mit",
"hash": -3715593717965198000,
"line_mean": 20.9827586207,
"line_max": 80,
"alpha_frac": 0.5796078431,
"autogenerated": false,
"ratio": 3.8871951219512195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9951508205704314,
"avg_score": 0.0030589518693812136,
"num_lines": 58
} |
__author__ = 'Blake'
from synpost.plugins.core import PluginMeta as PluginType
class Action(object):
    """A named pipeline of no-argument callables, with plugin insertion.

    Plugins (classes built by PluginMeta) are spliced into the pipeline by
    priority: a priority of P means "insert once roughly P% of the pipeline
    has been traversed"; leftovers are appended at the end.
    """
    def __init__(self, plugins = None, pipeline = None):
        if not plugins:
            plugins = []
        if not pipeline:
            pipeline = []
        self.description = 'DefaultAction'
        self.go_pipeline = pipeline
        self.plugins = plugins
        self.insert_plugins_to_pipeline()

    def go(self):
        """Run every pipeline step in order and collect their return values."""
        results = []
        for fn in self.go_pipeline:
            results.append(fn())
        return results

    def go_pipeline_names(self):
        # NOTE: a list on Python 2, a lazy map object on Python 3.
        return map(lambda func: func.__name__, self.go_pipeline)

    def insert_plugins_to_pipeline(self):
        """Splice self.plugins into self.go_pipeline ordered by priority.

        Raises ValueError when a plugin is not a PluginMeta-built class.
        """
        if not self.plugins:
            return
        # sorted plugins by priority for pipeline insertion
        sorted_plugins = sorted(self.plugins, key=lambda x: x.priority)
        for index, fn in enumerate(self.go_pipeline):
            if sorted_plugins:
                top_plugin = sorted_plugins[0]
                if not isinstance(top_plugin, PluginType):
                    raise ValueError('%s not of type<PluginType>' % top_plugin)
                # position of this pipeline slot expressed as a 0-100 score
                score = int((float(index) / len(self.go_pipeline)) * 100)
                if top_plugin.priority <= score:
                    # "instantiating" a plugin returns its bound execute callable
                    init_plugin = sorted_plugins.pop(0)(self)
                    self.go_pipeline.insert(index, init_plugin)
        # extend the remaining (>last score and <100) to the pipeline
        self.go_pipeline.extend(map(lambda x: x(self), sorted_plugins))

    def __str__(self):
        return self.description

    def __repr__(self):
        # BUG FIX: the original returned the bound __str__ method object
        # (``return self.__str__``) instead of calling it.
        return self.__str__()
"repo_name": "blakev/synpost",
"path": "synpost/objects/action.py",
"copies": "1",
"size": "1630",
"license": "mit",
"hash": 3046031050051586000,
"line_mean": 28.125,
"line_max": 79,
"alpha_frac": 0.5809815951,
"autogenerated": false,
"ratio": 4.190231362467866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5271212957567866,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Blake'
from synpost.plugins.core import SitePluginCore
class AssetByUuidSitePlugin(SitePluginCore):
    """Site plugin that publishes a uuid -> identifiers map for all articles."""
    priority = 50
    action = 'site'

    @staticmethod
    def execute(Site):
        # Do nothing when the site does not expose the 'uuid_map' slot.
        if not Site.is_available('uuid_map'):
            return Site
        uuid_chain = dict(
            (article.uuid, article.identifiers) for article in Site.ordered_articles)
        Site.set_from_plugin('uuid_map', uuid_chain)
        return Site
class PrevNextSitePlugin(SitePluginCore):
    """Site plugin that links every article to its previous/next neighbours."""
    priority = 100
    action = 'site'

    @staticmethod
    def execute(Site):
        from synpost.fn.helpers import sliding_window
        meta_attr = PrevNextSitePlugin.name  # prevnext
        if not Site.is_available(meta_attr):
            return Site
        page_chain = {}
        # Pad with two Nones so the first real article lands in the middle
        # slot of the first 3-wide window.
        for window in sliding_window([None, None] + Site.ordered_articles, 3):
            upnext, current, previous = window
            if current is None:
                continue
            # create the <- previous / next -> chain
            page_chain[current.uuid] = {
                'prev_article': previous.uuid if previous else None,
                'next_article': upnext.uuid if upnext else None
            }
        Site.set_from_plugin(meta_attr, page_chain)
        return Site
| {
"repo_name": "blakev/synpost",
"path": "synpost/plugins/siteplugin_prevnext.py",
"copies": "1",
"size": "1297",
"license": "mit",
"hash": 7759951492744031000,
"line_mean": 22.1607142857,
"line_max": 81,
"alpha_frac": 0.599074788,
"autogenerated": false,
"ratio": 4.157051282051282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5256126070051281,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Blake'
import os
import re
from collections import namedtuple
from jinja2 import Environment as JinjaEnvironment, FileSystemLoader as JinjaLoader
from synpost.globals import Values as Globals
from synpost.fn.io import generic_collect
# Record produced by generic_collect for every file belonging to a theme.
ThemeFile = namedtuple('ThemeFile', 'filename valiname filepath extension type')
# Reduced (validation name, type) record used when validating theme completeness.
Valifile = namedtuple('ValiFile', 'valiname type')
# default themes folder, look for other themes IN HERE; by name and path
theme_folder = os.path.join(os.getcwd(), 'synpost', 'themes')
class Theme(object):
    """A theme-pack: templates ("pieces"), stylesheets, scripts and images
    collected from a theme folder, plus a jinja2 environment for rendering.

    NOTE(review): several methods treat the result of filter() as a list
    (subscripting, ``in`` tests) -- this assumes Python 2 semantics.
    """
    # validation names every theme must provide, per asset kind
    REQUIRED_PIECES = ['article', 'footer', 'header', 'main', 'sidebar']
    REQUIRED_STYLES = ['main']
    REQUIRED_SCRIPTS = []
    # file-extension matchers for each asset kind (case-insensitive)
    pieces_regex = re.compile(r'[\s\S]+\.(html|htm|jinja|txt)', re.I)
    styles_regex = re.compile(r'[\s\S]+\.(css|less|sass)', re.I)
    scripts_regex = re.compile(r'[\s\S]+\.(js|coffee)', re.I)
    images_regex = re.compile(r'[\s\S]+\.(gif|png|jpg)', re.I)
    def __init__(self, name, at_path, additional_paths = None):
        """Collect all theme assets under ``at_path`` and build the jinja2
        environment; ``additional_paths`` are extra template search paths."""
        self.name = name
        self.path = at_path
        # non-theme folders that can use the jinja2 Environment in Theme object
        if not additional_paths:
            additional_paths = []
        if not isinstance(additional_paths, list):
            additional_paths = [additional_paths]
        self.additional_paths = additional_paths
        # the parts of a theme-pack
        self.collection_points = {
            'pieces': (os.path.join(self.path, 'pieces'), Theme.pieces_regex),
            'styles': (os.path.join(self.path, 'assets', 'css'), Theme.styles_regex),
            'scripts': (os.path.join(self.path, 'assets', 'js'), Theme.scripts_regex),
            'images': (os.path.join(self.path, 'assets', 'img'), Theme.images_regex)
        }
        # asset kind -> list of ThemeFile records found on disk
        self.collected_items = {}
        for ftype, identity in self.collection_points.items():
            self.collected_items[ftype] = generic_collect(identity[0], identity[1], ftype, ThemeFile)
        self.jinja_environment = self.collection_points['pieces'][0] # path the pieces folder
        self.additional_paths.append(self.jinja_environment)
        # jinja_environment is reassigned: first used as the pieces path, then
        # replaced by the actual Environment searching all template paths
        self.jinja_environment = JinjaEnvironment(loader=JinjaLoader(self.additional_paths), cache_size=Globals.JINJA_CACHE_FILES)
    def get(self, object_type):
        """Return the collected ThemeFile list for ``object_type`` (or [])."""
        objects = self.collected_items.get(object_type.lower(), None)
        return [] if not objects else objects
    def asset_by_filename(self, asset_type, filename):
        """Return the first collected asset of ``asset_type`` whose filename
        matches ``filename`` case-insensitively, or None."""
        asset_collection = self.collected_items.get(asset_type.lower(), self.collection_points['pieces'])
        # NOTE(review): subscripting a filter() result requires Python 2
        candidates = filter(lambda theme_file: theme_file.filename.lower() == filename.lower(), asset_collection)
        if not candidates:
            return None
        else: return candidates[0]
    def validated(self):
        """True when this theme provides at least everything DefaultTheme does."""
        our_set = self.validation_set
        default_set = DefaultTheme.validation_set
        return default_set.issubset(our_set)
    @property
    def validation_set(self):
        """Set of (valiname, type) pairs for every collected theme file."""
        theme_file_set = set()
        for theme_file in [item for sublist in self.collected_items.values() for item in sublist]:
            theme_file_set.add(Valifile(theme_file.valiname, theme_file.type))
        return theme_file_set
    @staticmethod
    def find_themes():
        """All subfolder names of the themes directory (candidate theme names)."""
        return filter(lambda x: os.path.isdir(os.path.join(theme_folder, x)), os.listdir(theme_folder))
    @staticmethod
    def theme_from_folder(path, additional_paths = None):
        """Build a Theme from an explicit folder path; raises ValueError when
        the path is not a directory."""
        if not os.path.isdir(path):
            raise ValueError('%s is not a valid path' % path)
        # take the supplied path, split it on the OS seperator; either \ or /
        # then, take the last element which is our starting folder
        name = path.split(os.path.sep)[-1]
        return Theme(name, path, additional_paths)
    @staticmethod
    def theme_from_name(name, additional_paths = None):
        """Build a Theme by name, falling back to 'default' if unknown."""
        if name not in Theme.find_themes():
            name = 'default'
        return Theme.theme_from_folder(os.path.join(theme_folder, name), additional_paths)
# Module-level theme instances, built eagerly at import time (hits the disk).
DefaultTheme = Theme.theme_from_name('default')
MinimalTheme = Theme.theme_from_name('minimal')
| {
"repo_name": "blakev/synpost",
"path": "synpost/objects/theme.py",
"copies": "1",
"size": "4114",
"license": "mit",
"hash": 7507073140990266000,
"line_mean": 37.4485981308,
"line_max": 130,
"alpha_frac": 0.6550802139,
"autogenerated": false,
"ratio": 3.709648331830478,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4864728545730478,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Blake'
import os
import re
from collections import namedtuple
# Default record type produced by generic_collect when no namedtuple is given.
SiteFile = namedtuple('SiteFile', 'filename valiname filepath extension type')
# coerce all files from collect_files_from into objects with properties
def generic_collect(path, regex, ftype, nametuple = None):
    """Walk ``path`` for files matching ``regex`` and wrap each match in a
    record (``nametuple``, defaulting to SiteFile) carrying the filename, the
    validation name (filename without extension), the full path, the file
    extension and the asset type ``ftype``."""
    record = SiteFile if nametuple is None else nametuple
    collected = []
    for filepath in collect_files_from(path, regex):
        filename = filepath.split(os.path.sep)[-1]
        extension = os.path.splitext(filepath)[1]
        # validation name: the filename with its final extension stripped
        valiname = '.'.join(filename.split('.')[:-1])
        collected.append(record(filename, valiname, filepath, extension, ftype))
    return collected
# grab all the files matching with_filter from dest down...
def collect_files_from(dest, with_filter = None):
    """Recursively yield full paths of files under ``dest`` whose basename
    matches the compiled regex ``with_filter`` (defaults to match-all)."""
    if with_filter is None:
        with_filter = re.compile(r'.*', re.I)
    for root, _folders, filenames in os.walk(dest):
        for filename in filenames:
            if with_filter.match(filename):
                yield os.path.join(root, filename)
# creates a folder structure from a given
# root folder_path and d, the dictionary (list, or str)
# containing all the folders to be created
# overwrite allows new folders to be created, if
# it's set to false then no new folders can be made
# and it will skip over them; this MAY throw an error
# later, but at least the contents of the folders it
# does reach will be cleared out
def create_folders_from_dict(root, d, overwrite = True):
    """Create (and empty) the folder tree described by ``d`` under ``root``.

    ``d`` may be a dict (folder name -> subtree), a list of folder names or
    subtrees, or a single folder name. Every folder reached has its plain
    files deleted.
    """
    def make_dat_folder(somepath):
        # Create the folder when allowed, then clear out any plain files in it.
        if not os.path.exists(somepath) and overwrite:
            os.makedirs(somepath)
        for f in os.listdir(somepath):
            file_path = os.path.join(somepath, f)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            # BUG FIX: was ``except IOError, e`` -- Python-2-only syntax, and
            # os.unlink raises OSError (IOError is an alias of OSError only on
            # Python 3), so deletion failures were never actually caught.
            except OSError as e:
                print('Error deleting file %s: %s' % (file_path, e))
    make_dat_folder(root)
    if isinstance(d, dict):
        for folder, subfolders in d.items():
            make_dat_folder(os.path.join(root, folder))
            create_folders_from_dict(os.path.join(root, folder), subfolders, overwrite)
    elif isinstance(d, list):
        for folder in d:
            if isinstance(folder, str):
                make_dat_folder(os.path.join(root, folder))
            else:
                create_folders_from_dict(root, folder, overwrite)
    elif isinstance(d, str):
        make_dat_folder(os.path.join(root, d))
| {
"repo_name": "blakev/synpost",
"path": "synpost/fn/io.py",
"copies": "1",
"size": "2521",
"license": "mit",
"hash": 6480167290787344000,
"line_mean": 24.4646464646,
"line_max": 87,
"alpha_frac": 0.6255454185,
"autogenerated": false,
"ratio": 3.762686567164179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48882319856641787,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Blake'
import os
import re
import json
import shutil
from synpost.globals import Values as Globals
import synpost.objects.content as Content
from synpost.objects.action import Action
from synpost.fn.io import collect_files_from, create_folders_from_dict
from synpost.fn.conversions import path_to_asset
# matches any markdown source file, case-insensitively
MARKDOWN_FILES = re.compile(r'[\s\S]+\.md', re.I)
# folder tree recreated inside the build destination on every build
required_folders = {
    'static': ['imgs', 'js', 'css', 'fonts'],
    'pages': [],
    'articles': []
}
# asset type -> path components (relative to the build destination)
content_locations = {
    'articles': ['articles'],
    'pages': ['pages'],
    'styles': ['static', 'css'],
    'scripts': ['static', 'js'],
    'images': ['static', 'imgs']
}
class Build(Action):
    """Build action: renders the site into the project destination folder.

    Pipeline order: wipe the old output, recreate the folder tree, copy
    static assets, write rendered (dynamic) assets, then ensure an index.html
    exists. Each step returns True on success or the caught exception on
    failure, so Action.go() collects per-step outcomes instead of aborting.
    """
    def __init__(self, site, plugins = None):
        if not plugins:
            plugins = []
        self.site = site
        # run the site's own plugins first so content is final before building
        if self.site.plugins:
            self.site = self.site.go()
        self.description = 'BuildAction'
        self.config = site.config
        self.dest_folder = self.config['project_destination']
        self.source_folder = self.config['project_source']
        pipeline = [
            self.delete_old_folders,
            self.create_new_folders,
            self.copy_static_assets,
            self.build_dynamic_assets,
            self.copy_index
        ]
        super(Build, self).__init__(plugins, pipeline)

    def delete_old_folders(self):
        """Remove previous build output and recreate an empty destination."""
        try:
            shutil.rmtree(self.dest_folder)
            os.makedirs(self.dest_folder)
        # BUG FIX: was Python-2-only ``except Exception, e`` syntax; ``as``
        # works on Python 2.6+ and Python 3.
        except Exception as e:
            return e
        else:
            return True

    def create_new_folders(self):
        """Recreate the required folder tree inside the destination."""
        # create the folder tree
        try:
            create_folders_from_dict(self.dest_folder, required_folders, True)
        # BUG FIX: same Python-2-only except syntax as above.
        except Exception as e:
            return e
        else:
            return True

    def __static_assets(self, is_static = True):
        # Partition the site's items by their is_static flag.
        return filter(lambda x: x.is_static == is_static, self.site.all_items)

    def copy_static_assets(self):
        """Copy every static asset into its type-specific destination folder."""
        for item in self.__static_assets(True):
            # get the new path relative to destination folder
            path_prefix = list(content_locations[item.type])
            # affix the original filename to the end of the path
            path_prefix.append(item.filename)
            # insert the project destination (build dir) to the front
            path_prefix.insert(0, self.dest_folder)
            # join the whole path together
            new_path = os.path.join(*path_prefix)
            # copy the original file into the new path we just created
            shutil.copy2(item.path, new_path)
        return True

    def build_dynamic_assets(self):
        """Write the rendered HTML of every non-static asset to disk."""
        for item in self.__static_assets(False):
            # extract path and new file name
            npath, nname = os.path.split(item.href.strip('/'))
            # append new folder path with project destination (build dir)
            if item.type == 'pages':
                new_path = os.path.join(self.dest_folder, 'pages')
            else:
                new_path = os.path.join(self.dest_folder, npath)
            # create the path to the folder
            if not os.path.exists(new_path):
                os.makedirs(new_path)
            with open(os.path.join(new_path, nname), 'w') as out_file:
                out_file.write(item.finalized_html)
        return True

    def copy_index(self):
        """Ensure the build root has an index.html (copied or the default)."""
        index_path = os.path.join(self.dest_folder, 'pages', 'index.html')
        if not os.path.exists(index_path):
            with open(os.path.join(self.dest_folder, 'index.html'), 'w') as out_file:
                out_file.writelines(Globals.DEFAULT_INDEX_HTML)
        else:
            shutil.copy2(index_path, self.dest_folder)
        return True
| {
"repo_name": "blakev/synpost",
"path": "synpost/build.py",
"copies": "1",
"size": "3715",
"license": "mit",
"hash": 7454657811869437000,
"line_mean": 29.7024793388,
"line_max": 85,
"alpha_frac": 0.5819650067,
"autogenerated": false,
"ratio": 3.861746361746362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4943711368446362,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Blake'
import os
import shutil
from synpost.plugins.core import PluginCore
class MovePagesPlugin(PluginCore):
    """Build plugin that moves rendered page files from <dest>/pages up into
    the build root, optionally leaving theme asset pieces behind."""
    action = 'build'
    priority = 100
    @staticmethod
    def execute(Action):
        # config toggles: move pages to root? treat theme pieces as pages?
        page_files_in_root = Action.config.get('page_files_in_root', True)
        assets_as_pages = Action.config.get('assets_as_pages', True)
        if not page_files_in_root:
            return True
        pages_directory = os.path.join(Action.config['project_destination'], 'pages')
        files_to_move = set()
        theme_assets = set()
        # Action.site.theme.REQUIRED_PIECES + "additional_page_assets" in config
        # NOTE(review): the next line is redundant -- the following line splits
        # the path again, and os.path.split of a bare filename is a no-op.
        conf_pieces = [os.path.split(x)[1] for x in Action.config.get('additional_page_assets', [])]
        conf_pieces = ['.'.join(os.path.split(x)[1].split('.')[:-1]) for x in conf_pieces]
        req_pieces = list(Action.site.theme.REQUIRED_PIECES)
        req_pieces.extend(conf_pieces)
        for f in os.listdir(pages_directory):
            full_file = os.path.join(pages_directory, f)
            if os.path.isfile(full_file):
                if page_files_in_root:
                    files_to_move.add(full_file)
                if not assets_as_pages:
                    # validation name = filename without its final extension
                    valiname = '.'.join(os.path.split(full_file)[1].split('.')[:-1])
                    if valiname in req_pieces:
                        theme_assets.add(full_file)
        # take the theme_assets out of the "all files we're moving" if we don't want them
        # to overwrite files in the actual theme directory
        new_files_full = list(files_to_move.difference(theme_assets))
        new_files_name = [os.path.split(n)[1] for n in new_files_full]
        pages_path = os.path.join(Action.dest_folder, 'pages')
        # NOTE(review): under Python 3 this is a one-shot filter iterator; the
        # repeated ``in`` test below assumes Python 2 list semantics -- verify.
        available_pages = filter(os.path.isfile, [os.path.join(pages_path, o) for o in os.listdir(pages_path)])
        for fullpath, name in zip(new_files_full, new_files_name):
            if fullpath in available_pages:
                shutil.move(fullpath, os.path.join(Action.dest_folder, name))
        return True
| {
"repo_name": "blakev/synpost",
"path": "synpost/plugins/plugin_movepages.py",
"copies": "1",
"size": "2076",
"license": "mit",
"hash": -9083565942894766000,
"line_mean": 35.4210526316,
"line_max": 111,
"alpha_frac": 0.6045279383,
"autogenerated": false,
"ratio": 3.579310344827586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4683838283127586,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Blake'
import os
from synpost.extensions.metaextension import MetaExtension
# root folders/categories for all items to go inside
# (these folder names are never treated as user-defined categories)
STATIC_CATEGORIES = ['articles', 'pages']
class Category(MetaExtension):
    """Meta-extension that derives an asset's categories from its metadata and
    from the folder path leading to the asset's source file."""

    def __init__(self, asset, attribute):
        super(Category, self).__init__(asset, attribute)

    @staticmethod
    def category_from_path(p, base_path = None):
        """Return the lowercase folder names along ``p`` that act as categories.

        e.g. TEST_ENV/articles/cars/red/redcar vroom.md => ['cars', 'red']
        """
        _drive, remainder = os.path.splitdrive(p)
        if base_path:
            # remove the source base directory from the path
            remainder = remainder.split(base_path)[-1]
        # drop the filename at the end; keep only the folder components
        folders = remainder.split(os.path.sep)[:-1]
        # normalize: lowercase, and discard empty pieces left by the separator split
        folders = [piece.lower() for piece in folders if piece != '']
        return [piece for piece in folders if piece not in STATIC_CATEGORIES]

    @staticmethod
    def analyze(asset):
        """Combine metadata categories with folder-derived ones, lowercase
        them, store them back on the asset, and return them."""
        path_categories = []
        # add the folder path leading up to the root of the project
        # as category types to the article/page/asset
        if asset.site.config.get('folder_as_categories', True):
            path_categories = Category.category_from_path(
                asset.path, asset.site.config.get('project_source', '.'))
        # combine the methods of category analysis
        combined = asset.metadata.get('categories', []) + path_categories
        # map() (not a comprehension) preserves the original return type on
        # both Python 2 (list) and Python 3 (map object)
        combined = map(lambda x: x.lower(), combined)
        MetaExtension.overwrite_values('categories', asset, combined)
        return combined
| {
"repo_name": "blakev/synpost",
"path": "synpost/extensions/categories.py",
"copies": "1",
"size": "1717",
"license": "mit",
"hash": -8163497288748322000,
"line_mean": 34.0408163265,
"line_max": 115,
"alpha_frac": 0.6144437973,
"autogenerated": false,
"ratio": 4.068720379146919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5183164176446919,
"avg_score": null,
"num_lines": null
} |
__author__ = 'blastron'
# Snarky messages shown to the user before their audio gets bit-crushed.
sass_lines = [
    "Apologize to your audio software.",
    "I hope you realize that what you are doing is wrong.",
    "Prepare yourself for the consequences.",
    "Every bit you crush is a callous waste of sound.",
    "You are a 1-bit monster.",
    "Octo might forgive you, but I never will."
]
# Check for mandatory components that the user may not have
# NOTE(review): the ``imp`` module is deprecated in modern Python; this whole
# script is Python 2 (print statements throughout).
import imp
try:
    imp.find_module("numpy")
except ImportError:
    print "Chipify requires NumPy: http://www.scipy.org/scipylib/download.html/"
    exit()
import numpy
import sys
import wave
import random
import math
if len(sys.argv) is not 2:
print "USAGE: python chipify.py FILENAME"
exit()
filename = sys.argv[1]
try:
input_file = wave.open(filename, "r")
except IOError:
print "Unable to open file %s." % filename
exit()
# Ensure this is a valid format
if input_file.getnchannels() != 1:
    print "Unsupported number of channels (%i). Must be a mono file." % input_file.getnchannels()
    exit()
# Determine the number of input and output samples
input_framerate = input_file.getframerate()
input_frame_count = input_file.getnframes()
input_frame_width = input_file.getsampwidth()
# largest unsigned value representable at this sample width (0xff per byte)
input_max_value = int("ff" * input_frame_width, 16)
# output is fixed at 4 kHz, 1 bit per sample
output_framerate = 4000
output_frame_count = int(input_frame_count * (output_framerate / float(input_framerate)))
print "Loading from file " + filename
print "%i input samples at %i KHz, %i output samples at %i KHz" % (input_frame_count, input_framerate / 1000,
                                                                   output_frame_count, output_framerate / 1000)
print "Target output size: %i bytes." % (math.ceil(output_frame_count / 8.0))
print "-----"
print random.choice(sass_lines)
print "-----"
print "Reading input file into memory..."
# NOTE(review): numpy.fromstring is deprecated in modern NumPy (frombuffer)
target_data_type = "int%i" % (input_frame_width * 8)
raw_input_data = input_file.readframes(-1)
input_frames = numpy.fromstring(raw_input_data, target_data_type)
print "Building low-pass filter..."
# windowed-sinc low-pass at the output Nyquist, to avoid aliasing when
# downsampling to output_framerate
relative_cutoff_frequency = output_framerate / float(input_framerate)
transition_band = 0.05
# Determine number of samples in our filter.
N = int(math.ceil(4 / transition_band))
if not N % 2: N += 1
n = numpy.arange(N)
# Compute sinC filter
h = numpy.sinc(2 * relative_cutoff_frequency * (n - (N - 1) / 2.))
# Compute Blackman window
w = 0.42 - 0.5 * numpy.cos(2 * numpy.pi * n / (N - 1)) + 0.08 * numpy.cos(4 * numpy.pi * n / (N - 1))
# Multiply sinC filter with the window, then normalize to get unity gain
lowpass_filter = (h * w) / numpy.sum(h)
print "Applying low-pass filter..."
filtered_input_frames = numpy.convolve(input_frames, lowpass_filter).astype(input_frames.dtype)
print "Crushing signal, mercilessly..."
# state for the fractional-weight resampling loop below
input_frames_per_output_frame = input_frame_count / float(output_frame_count)
input_frames_consumed = 0
current_frame = 0
current_frame_index = 0
leftover_frame_weight = 0
output_bits = []
# Downsample to 1 bit per output frame: each output sample averages a
# (generally fractional) window of input samples; an input frame that
# straddles two output frames contributes its leftover weight to the next one.
while input_frames_consumed < input_frame_count:
    input_frames_to_consume = input_frames_per_output_frame
    total_input = 0
    # carry in the partial contribution left over from the previous window
    if leftover_frame_weight > 0:
        input_frames_to_consume -= leftover_frame_weight
        total_input += current_frame * leftover_frame_weight
        leftover_frame_weight = 0
    while input_frames_to_consume > 0:
        current_frame = filtered_input_frames[current_frame_index]
        current_frame_index += 1
        # the final frame of the window may only partially belong to it
        current_frame_weight = min(1, input_frames_to_consume)
        total_input += current_frame * current_frame_weight
        leftover_frame_weight = 1 - current_frame_weight
        input_frames_consumed += 1
        input_frames_to_consume -= 1
    averaged_input = total_input / float(input_frames_per_output_frame)
    # threshold to a single bit
    # NOTE(review): comparing raw signed sample averages against 0.5 looks
    # like it effectively tests sign -- confirm intended threshold
    output_bits.append(1 if averaged_input < 0.5 else 0)
# Pad the output with zeroes if we don't have an even number of bits
if len(output_bits) % 8 != 0:
    output_bits += [0] * (8 - len(output_bits) % 8)
print "Writing crushed wave to disk..."
output_wave = wave.open(filename + ".out.wav", "w")
output_wave.setnchannels(1)
output_wave.setsampwidth(1)
output_wave.setframerate(output_framerate)
output_wave.writeframes("".join(chr(255) if i else chr(0) for i in output_bits))
output_wave.close()
print "Writing Octo-compatible text to disk..."
# Write bytes
output_bytes = []
for i in range(len(output_bits) / 8):
byte = output_bits[i * 8]
for j in range(7):
byte = byte << 1
byte += output_bits[i * 8 + j + 1]
output_bytes.append(hex(byte))
output = open(filename + ".out.txt", "w")
output.write(" ".join(output_bytes))
output.close()
| {
"repo_name": "whoozle/Octo",
"path": "tools/Chipify/chipify.py",
"copies": "4",
"size": "4585",
"license": "mit",
"hash": 85802641558327620,
"line_mean": 30.404109589,
"line_max": 111,
"alpha_frac": 0.6763358779,
"autogenerated": false,
"ratio": 3.2152875175315567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5891623395431558,
"avg_score": null,
"num_lines": null
} |
__author__ = 'blaz'
import sys
import os
import time
import threading
import select
import sys
import pybonjour
import settings
import socket
import serial
import datetime, time
from logger import *
class BrowseService(threading.Thread):
    """
    Background thread that browses for services advertised over Bonjour (mDNS)
    via pybonjour, resolving each one and keeping a name -> endpoint map in
    ``self.services``.
    """
    def __init__(self, regType):
        threading.Thread.__init__(self)
        # service registration type to browse for, e.g. '_VirtualSpeaker._udp'
        self.regType = regType
        # used as a flag list: resolve_callback appends True on success
        self.resolved = []
        # seconds to wait on select() during resolution
        self.timeout = 5
        # service name -> {'fullname', 'hosttarget', 'port'}
        self.services = {}
    def run(self):
        #print "Starting thread"
        logging.info("Starting Bonjour service browsing thread")
        self.resolve_service(self.regType)
    def resolve_callback(self, sdRef, flags, interfaceIndex, errorCode, fullname,
                         hosttarget, port, txtRecord):
        """pybonjour callback: record the resolved service's host/port."""
        if errorCode == pybonjour.kDNSServiceErr_NoError:
            #print 'Resolved service:'
            #print '  fullname   =', fullname
            #print '  hosttarget =', hosttarget
            #print '  port       =', port
            # key by the bare service name (portion before the first dot)
            self.services[fullname.split('.')[0]]={'fullname': fullname, 'hosttarget': hosttarget, 'port': port}
            self.resolved.append(True)
    def browse_callback(self, sdRef, flags, interfaceIndex, errorCode, serviceName,
                        regtype, replyDomain):
        """pybonjour callback: handle a service appearing or disappearing."""
        if errorCode != pybonjour.kDNSServiceErr_NoError:
            return
        if not (flags & pybonjour.kDNSServiceFlagsAdd):
            #print 'Service removed'
            logging.info("Service removed")
            if serviceName in self.services.keys():
                del self.services[serviceName]
            return
        #print 'Service added; resolving'
        logging.info("Service added; resolving")
        resolve_sdRef = pybonjour.DNSServiceResolve(0,
                                                    interfaceIndex,
                                                    serviceName,
                                                    regtype,
                                                    replyDomain,
                                                    self.resolve_callback)
        try:
            # Pump the resolve socket until resolve_callback fires or the
            # select() wait times out. The while/else runs only when the loop
            # exits without break, i.e. the service actually resolved.
            while not self.resolved:
                ready = select.select([resolve_sdRef], [], [], self.timeout)
                if resolve_sdRef not in ready[0]:
                    logging.info('Resolve timed out')
                    break
                pybonjour.DNSServiceProcessResult(resolve_sdRef)
            else:
                self.resolved.pop()
        finally:
            resolve_sdRef.close()
    #python browseServices.py _VirtualSpeaker._udp
    def resolve_service(self, regtype='_VirtualSpeaker._udp'):
        """Browse forever for ``regtype`` services; blocks until interrupted."""
        browse_sdRef = pybonjour.DNSServiceBrowse(regtype = regtype,
                                                  callBack = self.browse_callback)
        try:
            try:
                while True:
                    ready = select.select([browse_sdRef], [], [])
                    if browse_sdRef in ready[0]:
                        pybonjour.DNSServiceProcessResult(browse_sdRef)
            except KeyboardInterrupt:
                pass
        finally:
            browse_sdRef.close()
#####
#MAIN
#####
#Start background Service Resolution (Searching for speakers)
browse = BrowseService(settings.SERVICE_NAME)
logging.info("Starting VirtualDevice named: %s", settings.SPEAKER_NAME)
# daemon thread: dies with the main program
browse.daemon=True
browse.start()
logging.info("Starting service browsing: %s", settings.SERVICE_NAME)
#Initialize UDP SERVER
# create dgram udp socket
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
    logging.error("Error, failed to create socket")
    #print 'Failed to create socket'
    sys.exit()
"""
Loop forever, read arduino input and stream it to speakers
"""
# serial port settings
try:
    ser = serial.Serial(settings.SERIAL_PORT, settings.BAUD_RATE)
except Exception as err:
    logging.fatal("Cant find Serial port for key reading. We will now quit. Details: %s", err)
    exit()
# ``then`` throttles how often the init message is re-broadcast
then = datetime.datetime.now() + datetime.timedelta(seconds=2)
keysPressed = list()
while(1):
    #Get advertised speakers
    #print browse.services
    #Read from Arduino
    # each serial line is tab-separated: timestamp then capacitive readings
    line = ser.readline()
    line = line.split('\t')
    '''print line[0], "ms  ",
    for i in line[1:]:
        if int(i) > 300:
            print 1,'\t',
        else:
            print 0,'\t',
    print '''
    try:
        # threshold each capacitive reading into a pressed/unpressed flag
        lineBool = [True if int(x) > settings.CAPACITIVE_LIMIT else False for x in line[1:]]
    except Exception, err:
        # garbled serial line: treat every key as unpressed
        lineBool = [False for x in line[1:]]
        logging.error("Error: %s", err.message)
    if len(keysPressed) == 0:
        keysPressed = [False for x in range(len(lineBool))]
    #Stream to sockets
    # send the current key state to every discovered speaker endpoint
    for key,value in browse.services.items():
        try :
            '''
            #Set the whole string
            for i in range(min(len(lineBool), len(keysPressed))):
                #print "lineBoolLen:", len(lineBool), " keyPressedLen:", len(keysPressed), " i:",i
                if lineBool[i] != keysPressed[i]:
                    msg = settings.FIRST_NOTE + i
                    #info = "Sending message to ",value['hosttarget'] ,"on port ",value['port'], ": ", str(msg)
                    #logging.debug(info)
                    s.sendto(str(msg), (value['hosttarget'], value['port'])) '''
            s.sendto(str(keysPressed), (value['hosttarget'], value['port']))
            # periodically (re)announce this instrument to the speaker
            if (then < datetime.datetime.now()):
                msg = "init:" + str(settings.INSTRUMENT_ID)
                s.sendto(msg, (value['hosttarget'], value['port']))
                #logging.info("VirtualInstrument initialization sent to: %s", value['hosttarget'] )
        except socket.error, msg:
            error_code = 'Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
            logging.error("Error with socket, code: %s", error_code)
        except Exception, msg:
            logging.error("Error: %s", msg.message)
    # reschedule the next init broadcast and latch the key state
    if (then < datetime.datetime.now()):
        then = datetime.datetime.now() + datetime.timedelta(seconds=10)
        keysPressed = lineBool
| {
"repo_name": "blazdivjak/rzpproject",
"path": "VirtualInstrument.py",
"copies": "1",
"size": "6164",
"license": "mit",
"hash": 3192977577192329000,
"line_mean": 33.2444444444,
"line_max": 112,
"alpha_frac": 0.5663530175,
"autogenerated": false,
"ratio": 4.176151761517615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5242504779017615,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bloomj'
from OPLCollector import *
from Optimizer import *
try:
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, Row, functions
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType
except ImportError as e:
print ("Error importing Spark Modules", e)
sys.exit(1)
class Tableau:
"""
Demonstrates equivalence between OPL and SQL using Spark and the Warehousing example.
Demonstrates equivalence between OPL and SQL using Spark and the Warehousing example.
The context and theory of this class are presented in an IBM DSX sample notebook entitled
Optimization Modeling and Relational Data (which uses the code from this file).
"""
def controller(self):
# Create the warehousing data model
networkDataModel = ADMBuilder() \
.addSchema("warehouses", buildSchema(
("location", StringType()),
("fixedCost", DoubleType()),
("capacityCost", DoubleType()))) \
.addSchema("routes", buildSchema(
("location", StringType()),
("store", StringType()),
("shippingCost", DoubleType()))) \
.addSchema("stores", buildSchema(
("storeId", StringType()))) \
.addSchema("mapCoordinates", buildSchema(
("location", StringType()),
("lon", DoubleType()),
("lat", DoubleType()))) \
.build()
demandDataModel = ADMBuilder() \
.addSchema("demands", buildSchema(
("store", StringType()),
("scenarioId", StringType()),
("amount", DoubleType()))) \
.addSchema("scenarios", buildSchema(
("id", StringType()),
("totalDemand", DoubleType()),
("periods", DoubleType()))) \
.build()
warehousingResultDataModel = ADMBuilder() \
.addSchema("objectives", buildSchema(
("problem", StringType()),
("dExpr", StringType()),
("scenarioId", StringType()),
("iteration", IntegerType()),
("value", DoubleType()))) \
.addSchema("openWarehouses", buildSchema(
("location", StringType()),
("scenarioId", StringType()),
("iteration", IntegerType()),
("open", IntegerType()),
("capacity", DoubleType()))) \
.addSchema("shipments", buildSchema(
("location", StringType()),
("store", StringType()),
("scenarioId", StringType()),
("iteration", IntegerType()),
("amount", DoubleType()))) \
.build()
# Note: the "MapCoordinates table and the "scenarioId" and "iteration" fields are not used in this notebook but are included for use in other contexts. URL
credentials_1= {}
networkDataSource = getFromObjectStorage(credentials_1, filename="Warehousing-data.json")
demandDataSource = getFromObjectStorage(credentials_1, filename="Warehousing-sales_data-nominal_scenario.json")
warehousingData = OPLCollector("warehousingData", networkDataModel).setJsonSource(networkDataSource).fromJSON()
warehousingData.addTables(OPLCollector("demandData", demandDataModel).setJsonSource(demandDataSource).fromJSON())
warehousingData.displayTable("warehouses", sys.stdout)
# Create the tableau data model: a generic MILP representation made of
# columns (variables with bounds), rows (constraints), entries (matrix
# coefficients), and objectives.
tableauData= OPLCollector("tableauData")
tableauADMBuilder= tableauData.buildADM()
tableauADMBuilder.addSchema("integerColumns", buildSchema(
    ("variable", StringType()),
    ("lower", IntegerType()),
    ("upper", IntegerType()),
    ("value", IntegerType())))
# Boolean columns reuse the integer-column field layout.
tableauADMBuilder.addSchema("booleanColumns", SchemaBuilder()\
    .copyFields(tableauADMBuilder.referenceSchema("integerColumns"))\
    .buildSchema())
tableauADMBuilder.addSchema("floatColumns", buildSchema(
    ("variable", StringType()),
    ("lower", DoubleType()),
    ("upper", DoubleType()),
    ("value", DoubleType())))
# NOTE: "cnstraint" (sic) is the field name used consistently throughout
# this file; do not "correct" the spelling in isolation.
tableauADMBuilder.addSchema("rows", buildSchema(
    ("cnstraint", StringType()),
    ("sense", StringType()),
    ("rhs", DoubleType())))
tableauADMBuilder.addSchema("entries", buildSchema(
    ("cnstraint", StringType()),
    ("variable", StringType()),
    ("coefficient", DoubleType())))
tableauADMBuilder.addSchema("objectives", buildSchema(
    ("name", StringType()),
    ("sense", StringType()),
    ("value", DoubleType())))
tableauDataModel = tableauADMBuilder.build()
# Create the data model to transform the warehousing data into the tableau.
tableauTransformations = OPLCollector("tableauTransformations")
tableauTransformationsADMBuilder = tableauTransformations.buildADM()
# The column schemas extend the tableau column schemas with the
# instance-specific warehousing keys (location, store).
tableauTransformationsADMBuilder.addSchema("columns_open", SchemaBuilder()
    .copyFields(tableauData.getADM().get("booleanColumns"))
    .addField("location", StringType())
    .buildSchema())
tableauTransformationsADMBuilder.addSchema("columns_capacity", SchemaBuilder()
    .copyFields(tableauData.getADM().get("floatColumns"))
    .addField("location", StringType())
    .buildSchema())
tableauTransformationsADMBuilder.addSchema("columns_ship", SchemaBuilder()
    .copyFields(tableauData.getADM().get("floatColumns"))
    .addField("location", StringType())
    .addField("store", StringType())
    .buildSchema())
# Every row_* schema is a plain copy of the tableau "rows" schema, and every
# entries_* schema a plain copy of "entries"; register them in a loop instead
# of twelve copy-pasted builder chains (same calls, same order).
for rowSchemaName in ("rows_ctCapacity", "rows_ctDemand", "rows_ctSupply", "rows_dexpr"):
    tableauTransformationsADMBuilder.addSchema(rowSchemaName, SchemaBuilder()
        .copyFields(tableauData.getADM().get("rows"))
        .buildSchema())
for entrySchemaName in ("entries_ctCapacity_capacity", "entries_ctCapacity_ship",
                        "entries_ctDemand_ship", "entries_ctSupply_open",
                        "entries_ctSupply_ship", "entries_dexpr_open",
                        "entries_dexpr_capacity", "entries_dexpr_ship"):
    tableauTransformationsADMBuilder.addSchema(entrySchemaName, SchemaBuilder()
        .copyFields(tableauData.getADM().get("entries"))
        .buildSchema())
tableauTransformationsADMBuilder.build()
tableauTransformer = tableauTransformations.buildData()
# Create input dataframes from the loaded warehousing data.
warehouses = warehousingData.getTable("warehouses")
stores = warehousingData.getTable("stores")
routes = warehousingData.getTable("routes")
demands = warehousingData.getTable("demands")
scenarios = warehousingData.getTable("scenarios")
# This notebook solves a single scenario: take the id of the first one.
scenarioId = scenarios.first()["id"]
# Encode the columns (decision variables): a 0/1 "open" and a continuous
# "capacity" variable per warehouse, and a continuous "ship" fraction per route.
tableauTransformer.addTable("columns_open",
    warehouses.select("location")\
    .withColumn("variable", functions.concat(functions.lit("open_"), warehouses["location"]))\
    .withColumn("upper", functions.lit(1))\
    .withColumn("lower", functions.lit(0))\
    .withColumn("value", functions.lit(0)))
tableauTransformer.addTable("columns_capacity",
    warehouses.select("location")\
    .withColumn("variable", functions.concat(functions.lit("capacity_"), warehouses["location"]))\
    .withColumn("upper", functions.lit(1.0e20))\
    .withColumn("lower", functions.lit(0.0))\
    .withColumn("value", functions.lit(0.0)))
tableauTransformer.addTable("columns_ship",
    routes.select("location", "store")\
    .withColumn("variable", functions.concat(functions.lit("ship_"), routes["location"], functions.lit("_"),
                                             routes["store"]))\
    .withColumn("upper", functions.lit(1.0))\
    .withColumn("lower", functions.lit(0.0))\
    .withColumn("value", functions.lit(0.0)))
# Encode the constraints: one capacity row per warehouse, one demand row per
# store, and one supply row per route.
tableauTransformer.addTable("rows_ctCapacity",
    warehouses.select("location")\
    .withColumn("cnstraint", functions.concat(functions.lit("ctCapacity_"), warehouses["location"]))\
    .withColumn("sense", functions.lit("GE"))\
    .withColumn("rhs", functions.lit(0.0)))
tableauTransformer.addTable("rows_ctDemand",
    stores.select("storeId")\
    .withColumn("cnstraint", functions.concat(functions.lit("ctDemand_"), stores["storeId"]))\
    .withColumn("sense", functions.lit("GE"))\
    .withColumn("rhs", functions.lit(1.0))\
    .withColumnRenamed("storeId", "store"))
tableauTransformer.addTable("rows_ctSupply",
    routes.select("location", "store")\
    .withColumn("cnstraint", functions.concat(functions.lit("ctSupply_"), routes["location"], functions.lit("_"),
                                              routes["store"]))\
    .withColumn("sense", functions.lit("GE"))\
    .withColumn("rhs", functions.lit(0.0)))
# The dExpr rows carry the cost expressions; sense "dexpr" marks them as
# definitions rather than inequality constraints.
tableauTransformer.addTable("rows_dexpr",
    SPARK_SESSION.createDataFrame(
        [ Row(cnstraint= "capitalCost", sense= "dexpr", rhs= 0.0),
          Row(cnstraint= "operatingCost", sense= "dexpr", rhs= 0.0),
          Row(cnstraint= "totalCost", sense= "dexpr", rhs= 0.0)],
        tableauTransformations.getADM().get("rows_dexpr"))\
    .select("cnstraint", "sense", "rhs")) #orders the columns properly
# Reshape the coefficient data into the tableau (constraint-matrix entries).
tableauTransformer.addTable(
    "entries_ctCapacity_capacity",
    tableauTransformer.referenceTable("rows_ctCapacity")\
    .join(tableauTransformer.referenceTable("columns_capacity"), "location")\
    .select("cnstraint", "variable")\
    .withColumn("coefficient", functions.lit(1.0)))
# demand at the store at the end of each route (restricted to the chosen scenario)
demandOnRoute = routes.join(
    demands.where(demands["scenarioId"] == functions.lit(scenarioId)), "store")\
    .select("location", "store", "amount").withColumnRenamed("amount", "demand")
tableauTransformer.addTable(
    "entries_ctCapacity_ship",
    tableauTransformer.referenceTable("rows_ctCapacity")\
    .join(tableauTransformer.referenceTable("columns_ship"), "location")\
    .join(demandOnRoute, ["location", "store"])\
    .withColumn("coefficient", -demandOnRoute["demand"])\
    .select("cnstraint", "variable", "coefficient"))
tableauTransformer.addTable(
    "entries_ctDemand_ship",
    tableauTransformer.referenceTable("rows_ctDemand")\
    .join(tableauTransformer.referenceTable("columns_ship"), "store")\
    .select("cnstraint", "variable")\
    .withColumn("coefficient", functions.lit(1.0)))
tableauTransformer.addTable(
    "entries_ctSupply_open",
    tableauTransformer.referenceTable("rows_ctSupply")\
    .join(tableauTransformer.referenceTable("columns_open"), "location")\
    .select("cnstraint", "variable")\
    .withColumn("coefficient", functions.lit(1.0)))
tableauTransformer.addTable(
    "entries_ctSupply_ship",
    tableauTransformer.referenceTable("rows_ctSupply")\
    .join(tableauTransformer.referenceTable("columns_ship"), ["location", "store"])\
    .select("cnstraint", "variable")\
    .withColumn("coefficient", functions.lit(-1.0)))
# dExpr coefficients: fixed/capacity costs feed capitalCost, shipping costs
# feed operatingCost, and totalCost rows receive both sets.
rows_dexpr = tableauTransformer.referenceTable("rows_dexpr")
tableauTransformer.addTable(
    "entries_dexpr_open",
    (rows_dexpr.where((rows_dexpr["cnstraint"] == functions.lit("capitalCost"))\
        | (rows_dexpr["cnstraint"] == functions.lit("totalCost"))))\
    .join(tableauTransformer.referenceTable("columns_open")\
        .join(warehouses, "location"), how="cross")\
    .select("cnstraint", "variable", "fixedCost")\
    .withColumnRenamed("fixedCost", "coefficient"))
tableauTransformer.addTable(
    "entries_dexpr_capacity",
    (rows_dexpr.where((rows_dexpr["cnstraint"] == functions.lit("capitalCost"))\
        | (rows_dexpr["cnstraint"] == functions.lit("totalCost"))))\
    .join(tableauTransformer.referenceTable("columns_capacity")\
        .join(warehouses, "location"), how="cross")\
    .select("cnstraint", "variable", "capacityCost")\
    .withColumnRenamed("capacityCost", "coefficient"))
tableauTransformer.addTable(
    "entries_dexpr_ship",
    (rows_dexpr.where((rows_dexpr["cnstraint"] == functions.lit("operatingCost"))\
        | (rows_dexpr["cnstraint"] == functions.lit("totalCost"))))\
    .join(
        (tableauTransformer.referenceTable("columns_ship")\
        .join((routes.join(demandOnRoute, ["location", "store"])\
            .withColumn("coefficient", demandOnRoute["demand"] * routes["shippingCost"])),
            ["location", "store"])), how="cross")\
    .select("cnstraint", "variable", "coefficient"))
tableauTransformer.build()
# Build the input data for the tableau optimization problem.
# Drop the instance-specific keys (location and store), which are not
# supported in the generic tableau model.
tableauData.buildData()\
    .addTable("booleanColumns",
        tableauTransformations.getTable("columns_open").drop("location"))\
    .addTable("floatColumns",
        tableauTransformations.getTable("columns_capacity").drop("location")\
        .union(tableauTransformations.getTable("columns_ship").drop("location").drop("store")))\
    .addEmptyTable("integerColumns")\
    .addTable("rows",
        tableauTransformations.getTable("rows_ctCapacity").drop("location")\
        .union(tableauTransformations.getTable("rows_ctDemand").drop("store"))\
        .union(tableauTransformations.getTable("rows_ctSupply").drop("location").drop("store"))\
        .union(tableauTransformations.getTable("rows_dexpr")))\
    .addTable("entries",
        tableauTransformations.getTable("entries_ctSupply_open")\
        .union(tableauTransformations.getTable("entries_ctSupply_ship"))\
        .union(tableauTransformations.getTable("entries_ctCapacity_capacity"))\
        .union(tableauTransformations.getTable("entries_ctCapacity_ship"))\
        .union(tableauTransformations.getTable("entries_ctDemand_ship"))\
        .union(tableauTransformations.getTable("entries_dexpr_open"))\
        .union(tableauTransformations.getTable("entries_dexpr_capacity"))\
        .union(tableauTransformations.getTable("entries_dexpr_ship")))\
    .addTable("objectives",
        SPARK_SESSION.createDataFrame(
            [Row(name= "totalCost", sense= "minimize", value= 0.0)],
            tableauData.getADM().get("objectives"))
        .select("name", "sense", "value"))\
    .build()
# Replace with actual items (service URL/key and the four OPL model-text
# sections) before solving.
TableauDotMod = None
url = None
key = None
# BUG FIX: this previously read `a, b, c, d = None`, which attempts to
# unpack a non-iterable NoneType and raises TypeError at runtime.
tableau_data_model = tableau_inputs = tableau_optimization_problem = tableau_outputs = None
# Build and solve the tableau problem on the optimization service.
tableauProblem = Optimizer("TableauProblem", credentials={"url": url, "key": key})\
    .setOPLModel("TableauProblem.mod",
        modelText=[tableau_data_model, tableau_inputs, tableau_optimization_problem, tableau_outputs])\
    .setResultDataModel(ADMBuilder()\
        .addSchema("booleanDecisions", tableauData.getSchema("booleanColumns"))\
        .addSchema("integerDecisions", tableauData.getSchema("integerColumns"))\
        .addSchema("floatDecisions", tableauData.getSchema("floatColumns"))\
        .addSchema("optimalObjectives", tableauData.getSchema("objectives"))\
        .build())
tableauResult = tableauProblem.solve(tableauData)
tableauProblem.getSolveStatus()
# Recover the solution: map the tableau variables back to warehousing entities
# via the columns_* tables built earlier.
warehousingResult = OPLCollector("warehousingResult", warehousingResultDataModel)
resultsBuilder = warehousingResult.buildData()
resultsBuilder.addTable("objectives",
    tableauResult.getTable("optimalObjectives").select("name", "value")\
    .withColumnRenamed("name", "dExpr")\
    .withColumn("problem", functions.lit("warehousing"))\
    .withColumn("scenarioId", functions.lit(scenarioId))\
    .withColumn("iteration", functions.lit(0)))
resultsBuilder.addTable("openWarehouses",
    (tableauResult.getTable("booleanDecisions").select("variable", "value").withColumnRenamed("value", "open")\
    .join(tableauTransformations.getTable("columns_open"), "variable")).drop("variable")\
    .join(
        tableauResult.getTable("floatDecisions").select("variable", "value").withColumnRenamed("value", "capacity")\
        .join(tableauTransformations.getTable("columns_capacity"), "variable").drop("variable"),
        "location")
    .select("location", "open", "capacity")\
    .withColumn("scenarioId", functions.lit(scenarioId))\
    .withColumn("iteration", functions.lit(0)))
# Shipment amount = route demand * the solved ship fraction.
floatDecisions = tableauResult.getTable("floatDecisions").select("variable", "value")
resultsBuilder.addTable("shipments",
    floatDecisions\
    .join(tableauTransformations.getTable("columns_ship"), "variable").drop("variable")\
    .join(demandOnRoute, ["location", "store"])\
    .withColumn("amount", demandOnRoute["demand"]*(floatDecisions["value"]))\
    .select("location", "store", "amount")\
    .withColumn("scenarioId", functions.lit(scenarioId))\
    .withColumn("iteration", functions.lit(0)))
resultsBuilder.build()
warehousingResult.displayTable("objectives")
warehousingResult.displayTable("openWarehouses")
# to see the lengthy shipments table, uncomment the next line
# warehousingResult.displayTable("shipments")
# end controller
# end class Tableau
| {
"repo_name": "JeremyBloom/Optimization-Examples",
"path": "Tableau.py",
"copies": "1",
"size": "20854",
"license": "mit",
"hash": 2056460358115131400,
"line_mean": 53.0237467018,
"line_max": 166,
"alpha_frac": 0.5891435696,
"autogenerated": false,
"ratio": 4.429481733220051,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00969365505916359,
"num_lines": 379
} |
__author__ = 'bloomj'
try:
    import folium
except ImportError:
    # folium is not installed: bootstrap it with pip, then retry the import.
    # BUG FIX: the original used IPython "!pip install ..." shell magic (a
    # syntax error in a plain .py module), spelled the flag "- -user", used a
    # bare except:, and referenced `sys` without importing it.
    import subprocess
    import sys
    pip_cmd = [sys.executable, '-m', 'pip', 'install']
    if not hasattr(sys, 'real_prefix'):
        # not inside a virtualenv: install into the user's site-packages
        pip_cmd.append('--user')
    subprocess.check_call(pip_cmd + ['folium'])
    import folium
from pyspark.sql.types import StringType, IntegerType, DoubleType
class Maps(object):
    """
    Displays a map showing warehouses and shipping routes for the Warehousing application.
    """
    def __init__(self, stores=None, warehouses=None, routes=None, routesToShow=None, mapCoordinates=None):
        """
        Creates a new Maps instance.
        Matches the warehouse locations and stores with their map coordinates.
        Creates a base map for adding location and shipment information.
        Note: If your data has more than one scenario, you should select the scenario when creating the dataframes used in this map, e.g.
        routes= warehousingResult.getTable("shipments").select('*').where(shipments.scenarioId == "nominal")
        @param stores: a Spark dataframe containing store data (note: must contain a column "store" containing the store locations and may contain other columns as well)
        @param warehouses: a Spark dataframe containing warehouse data (note: must contain a column "location" containing the warehouse locations and may contain other columns as well)
        @param routes: a Spark dataframe containing route data (note: must contain columns "location" and "store" containing the end-point locations and may contain other columns as well)
        @param routesToShow: a Spark dataframe containing (location, store) pairs specifying the subset of routes to show (default=None shows all routes)
        @param mapCoordinates: a Spark dataframe containing the geographic coordinates of the warehouse and store locations
        """
        # Match map coordinates with store and warehouse locations.
        self.warehousesWithCoord = warehouses.join(mapCoordinates, "location")
        coord = mapCoordinates.withColumnRenamed("location", "store")
        self.storesWithCoord = stores.join(coord, "store")
        if routesToShow is not None:
            selectedRoutes = routes.join(routesToShow,
                                         [routes.location == routesToShow.location, routes.store == routesToShow.store]) \
                .select("*") \
                .drop(routesToShow.location).drop(routesToShow.store)
        else:
            selectedRoutes = routes
        # Attach coordinates to both end-points of every selected route.
        routesWithCoord = selectedRoutes.join(mapCoordinates, "location") \
            .withColumnRenamed("lon", "locationLon") \
            .withColumnRenamed("lat", "locationLat")
        self.routesWithCoord = routesWithCoord.join(coord, "store") \
            .withColumnRenamed("lon", "storeLon") \
            .withColumnRenamed("lat", "storeLat")
        # Determine map center (avg) and range (min/max) over the store coordinates.
        self.mapCenter = self.storesWithCoord.agg({"lat": "avg", "lon": "avg"}).first().asDict()
        self.mapCenter.update(self.storesWithCoord.agg({"lat": "min", "lon": "min"}).first().asDict())
        self.mapCenter.update(self.storesWithCoord.agg({"lat": "max", "lon": "max"}).first().asDict())
    def getBasicMap(self):
        """
        Returns a basic map with no data displayed.
        Use it to add data markers.
        """
        return folium.Map(location=[self.mapCenter["avg(lat)"], self.mapCenter["avg(lon)"]],
                          min_lat=self.mapCenter["min(lat)"], max_lat=self.mapCenter["max(lat)"],
                          min_lon=self.mapCenter["min(lon)"], max_lon=self.mapCenter["max(lon)"],
                          zoom_start=4)
    @staticmethod
    def makeLabel(label):
        """
        Used in formatCaption method
        @param label: a (possibly empty) string
        @return: the label followed by ": ", or the empty string unchanged
        """
        if len(label) > 0:
            return label + ": "
        else:
            return label
    @staticmethod
    def formatCaption(row, labelColumns, dataColumns):
        """
        Creates a caption for a popup on a map item from a Row in a Spark dataframe
        @param row: the current Row in a Spark dataframe
        @param labelColumns: the names of the columns with the identifier information to be displayed in a popup caption
        (dictionary with string keys representing the label column names and string values representing the labels to use in the caption (the data themselves are strings))
        @param dataColumns: the names of the columns with the data to be displayed in a popup caption
        (dictionary with string keys representing the data column names and string values representing the labels to use in the caption (the data themselves are numbers))
        @return an html text string
        """
        text = ""
        first = True
        # BUG FIX: these loops used dict.iteritems(), which does not exist on
        # Python 3; dict.items() behaves identically on both 2 and 3.
        if labelColumns:  # is not empty
            for col, label in labelColumns.items():
                if first:
                    text = Maps.makeLabel(label) + row[col]
                    first = False
                else:
                    text = text + "<br />" + Maps.makeLabel(label) + row[col]
        if dataColumns:  # is not empty
            for col, label in dataColumns.items():
                if first:
                    text = Maps.makeLabel(label) + str(row[col])
                    first = False
                else:
                    text = text + "<br />" + Maps.makeLabel(label) + str(row[col])
        return text
    def showWarehouses(self, tableMap=None, labelColumns=None, dataColumns=None):
        """
        Displays data from a table of warehouses in markers on a map. The table must include columns for the coordinates of each store.
        @param tableMap: the map to which this data is to be added (a folium.Map, which defaults to basicMap)
        @param labelColumns: column-name -> caption-label dict for identifier columns (default None: none shown)
        @param dataColumns: the names of the columns with the data to be displayed in a popup caption
        (dictionary with string keys representing the data column names and string values representing the labels to use in the caption (the data themselves are numbers))
        @return tableMap: the map with markers added for the new data
        """
        # NOTE: mutable-default arguments ({}) were replaced with None; an
        # empty/None dict is treated the same by formatCaption.
        if tableMap is None:
            tableMap = self.getBasicMap()
        table = self.warehousesWithCoord
        for r in table.collect():
            row = r.asDict()
            text = Maps.formatCaption(row, labelColumns, dataColumns)
            caption = folium.Popup(folium.element.IFrame(html=text, width=200, height=75), max_width=2650)
            folium.Marker([row["lat"], row["lon"]], popup=caption).add_to(tableMap)
        return tableMap
    def showStores(self, tableMap=None, labelColumns=None, dataColumns=None):
        """
        Displays data from a table of stores in markers on a map. The table must include columns for the coordinates of each store.
        @param tableMap: the map to which this data is to be added (a folium.Map, which defaults to basicMap)
        @param labelColumns: column-name -> caption-label dict for identifier columns (default None: none shown)
        @param dataColumns: the names of the columns with the data to be displayed in a popup caption
        (dictionary with string keys representing the data column names and string values representing the labels to use in the caption (the data themselves are numbers))
        @return tableMap: the map with markers added for the new data
        """
        if tableMap is None:
            tableMap = self.getBasicMap()
        table = self.storesWithCoord
        for r in table.collect():
            row = r.asDict()
            text = Maps.formatCaption(row, labelColumns, dataColumns)
            caption = folium.Popup(folium.element.IFrame(html=text, width=200, height=75), max_width=2650)
            folium.CircleMarker([row["lat"], row["lon"]], popup=caption, radius=20, color='#FF0000',
                                fill_color='#FF0000') \
                .add_to(tableMap)
        return tableMap
    def showRoutes(self, tableMap=None, labelColumns=None, dataColumns=None, color='#FF0000'):
        """
        Displays data from a table of routes in markers on a map. The table must include columns for the coordinates of each end of the route.
        @param tableMap: the map to which this data is to be added (a folium.Map, which defaults to basicMap)
        @param labelColumns: column-name -> caption-label dict for identifier columns (default None: none shown)
        @param dataColumns: the names of the columns with the data to be displayed in a popup caption
        (dictionary with string keys representing the data column names and string values representing the labels to use in the caption (the data themselves are numbers))
        @return tableMap: the map with markers added for the new data
        """
        if tableMap is None:
            tableMap = self.getBasicMap()
        table = self.routesWithCoord
        for r in table.collect():
            row = r.asDict()
            text = Maps.formatCaption(row, labelColumns, dataColumns)
            caption = folium.Popup(folium.element.IFrame(html=text, width=200, height=75), max_width=2650)
            tableMap.add_children(
                folium.PolyLine([[row["locationLat"], row["locationLon"]], [row["storeLat"], row["storeLon"]]], color,
                                popup=caption))
        # BUG FIX: the method documented "@return tableMap" (and its siblings
        # return it) but fell off the end returning None.
        return tableMap
# end class Maps
| {
"repo_name": "JeremyBloom/Optimization-Examples",
"path": "Maps.py",
"copies": "1",
"size": "9331",
"license": "mit",
"hash": -6601105806906867000,
"line_mean": 49.2692307692,
"line_max": 187,
"alpha_frac": 0.624477548,
"autogenerated": false,
"ratio": 4.434885931558935,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002501011795769908,
"num_lines": 182
} |
import os
import argparse
import sys
import warnings
def quants_to_dict(quantFiles, tpm_index):
    '''Sum per-contig TPM values across a list of salmon quant files.

    Header lines (starting with "Name" or "#") are skipped.
    Returns a dict mapping contig name -> summed TPM (float).
    '''
    totals = {}
    for path in quantFiles:
        with open(path, 'r') as handle:
            for raw in handle:
                # skip the salmon header row and any comment lines
                if raw.startswith('Name') or raw.startswith('#'):
                    continue
                fields = raw.strip().split('\t')
                name = fields[0]
                totals[name] = totals.get(name, 0) + float(fields[tpm_index])
    return totals
def main(assembly, quant_files, tpm_threshold, out, tpm_col_index):
    '''Filter the FASTA `assembly` to the contigs whose summed TPM across
    `quant_files` is at least `tpm_threshold`, writing the result to `out`.

    If no quant files are given, a warning is issued and nothing is written.
    '''
    if len(quant_files) == 0:
        # BUG FIX: the original message lacked a space before "cannot".
        warnings.warn('No quant files passed in; ' + assembly + ' cannot be filtered.')
        return
    quantD = quants_to_dict(quant_files, tpm_col_index)
    filteredSet = set(contig for contig, tpm in quantD.items() if tpm >= tpm_threshold)
    # BUG FIX: write_bases was previously unbound, raising UnboundLocalError
    # if the assembly's first line was not a '>' header.
    write_bases = False
    with open(assembly, 'r') as f, open(out, 'w') as o:
        for line in f:
            if line.startswith('>'):
                contig = line.rstrip()[1:].split(' ')[0]  # salmon names = only before the first space
                write_bases = contig in filteredSet
                if write_bases:
                    o.write(line)
            elif write_bases:
                o.write(line)
# Command-line entry point: pass each quant file with a repeated --quant_files.
if(__name__ == '__main__'):
    parser = argparse.ArgumentParser()
    parser.add_argument('--assembly', action='store', type=str)
    parser.add_argument('--tpm', action='store', type=float,default=0.5)
    parser.add_argument('--quant_files', action='append', default=[])
    parser.add_argument('-o', '--out', action='store', type=str)
    parser.add_argument('--tpm_column_index', action='store', type=int,default=3)
    args = parser.parse_args()
    main(args.assembly, args.quant_files, args.tpm, args.out, args.tpm_column_index)
| {
"repo_name": "bluegenes/MakeMyTranscriptome",
"path": "scripts/util/filter_contigs_by_tpm.py",
"copies": "1",
"size": "1985",
"license": "bsd-3-clause",
"hash": 4465984347683032000,
"line_mean": 37.1730769231,
"line_max": 110,
"alpha_frac": 0.5370277078,
"autogenerated": false,
"ratio": 3.780952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9697843591153905,
"avg_score": 0.024027299519695337,
"num_lines": 52
} |
from time import strftime
import tarfile
import zipfile
from mmt_defaults import PATH_TOOLS, PATH_ROOT
from external_tools import TOOLS_DICT
import os
from os.path import exists, join, split
import json
import argparse
import sys
import functools
import subprocess
import platform
from tasks_v2 import Supervisor
# Python 2/3 compatibility: urlretrieve moved into urllib.request and
# shutil.which only exists on Python 3 (a local shim is used on 2).
if(sys.version[0] == '3'):
    from urllib.request import urlretrieve, ContentTooShortError
    from shutil import which as which
else:
    from urllib import urlretrieve, ContentTooShortError
    from py2_which import which_python2 as which
# Log file used by the Supervisor when running install tasks.
tool_supervisor_log = '{0!s}/.tool_supervisor_log'.format(PATH_TOOLS)
def tool_check(t, fullpaths_exe, exe, allow_path=False):
    '''Report whether tool `t` is available.

    True when every expected executable exists at its MMT install path, or
    (when allow_path is True) when every executable resolves on $PATH. In
    the $PATH-by-basename fallback, t.exe is rewritten to the basenames.
    '''
    have_local = all(exists(p) for p in fullpaths_exe)
    on_path = False
    if allow_path:
        if all(which(e) for e in exe):
            on_path = True
        elif all(which(os.path.basename(e)) for e in exe):  # is this necessary?
            on_path = True
            t.exe = [os.path.basename(e) for e in exe]
    # distinguishing our installs vs path installs is left to the caller
    return have_local or on_path
def check_tools(toolsD):
    '''Return the subset of toolsD (name -> tool) whose executables are
    not yet available, i.e. the tools that still need installing.'''
    return {name: t for name, t in toolsD.items()
            if not tool_check(t, t.full_exe, t.exe)}
def check_dir_and_log():
    '''Ensure PATH_TOOLS exists and contains a tool log, creating an empty
    log if missing.

    BUG FIX: the existence check previously looked for 'tools_log' while
    read_log/write_log use 'tool_log', so the check never matched and the
    log was reset to {} on every run.
    '''
    if(not os.path.isdir(PATH_TOOLS)):
        os.mkdir(PATH_TOOLS)
    if(not os.path.isfile(join(PATH_TOOLS, 'tool_log'))):
        write_log({})
def run_tasks(tasks, cpu=4):
    '''Run install tasks under a Supervisor with `cpu` workers, capturing
    each task's stdout/stderr in PATH_TOOLS and deleting the capture files
    afterwards. Note: Supervisor.run() is presumably what raises on task
    failure, leaving the logs behind -- TODO confirm.'''
    for t in tasks:
        print(t.name)
        t.stdout = join(PATH_TOOLS, t.name+'.stdout')
        t.stderr = join(PATH_TOOLS, t.name+'.stderr')
    s = Supervisor(tasks=tasks, force_run=False, log=tool_supervisor_log, cpu=cpu)
    s.run()
    for t in tasks: #if everything executes properly, rm the task logs
        if exists(t.stdout):
            os.remove(t.stdout)
        if exists(t.stderr):
            os.remove(t.stderr)
def safe_retrieve(source, target, urltype):
    '''Download `source` to `target` (via a .temp file so a partial download
    never lands on the final name) and expand the archive in place.

    `urltype` is the archive type ('', 'zip', 'gz', 'tgz', 'tar.gz'), with
    or without the leading dot. Failures are reported as warnings rather
    than raised, so a batch of downloads keeps going.
    '''
    # BUG FIX: a urltype already carrying a leading dot previously produced
    # an empty extension, breaking the rename/expand below.
    if urltype:
        extension = urltype if urltype.startswith('.') else '.' + urltype
    else:
        extension = ''
    temp = target + '.temp'
    print('getting ' + source)
    try:
        urlretrieve(source, temp)
        os.rename(temp, target + extension)
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        print("WARNING : could not retrieve " + source)
    try:
        expand_target(target, extension)
    except Exception:
        print("WARNING : could not extract " + target + extension)
def expand_target(target, extension):
    '''Unpack the downloaded archive at target+extension and delete it.

    .tar.gz/.tgz and .zip archives are extracted into PATH_TOOLS; .gz files
    are decompressed to `target`. Raises ValueError for any other extension.
    '''
    archive = target + extension
    if (extension == '.tar.gz' or extension == '.tgz'):
        with tarfile.open(archive, 'r:gz') as tfile:
            tfile.extractall(PATH_TOOLS)
        os.remove(archive)
    elif (extension == '.gz'):
        # BUG FIX: gzip was used here without ever being imported, so the
        # .gz branch raised NameError; imported locally to keep the file's
        # top-level import block untouched.
        import gzip
        with gzip.open(archive, 'rb') as f, open(target, 'wb') as g:
            for line in f:
                g.write(line)
        os.remove(archive)
    elif (extension == '.zip'):
        with zipfile.ZipFile(archive) as z:
            z.extractall(PATH_TOOLS)
        os.remove(archive)
    else:
        raise ValueError('Can\'t expand '+target+ ' -- please check the urltype/extension. Acceptable values are: "", "zip", "gz", "tgz", or "tar.gz".')
def get(log_table, urltype, source, target, file_check=True):
    '''Fetch `source` into `target` (skipped when file_check finds the
    target already present) and stamp the download date into log_table
    under the target's basename.'''
    if file_check and exists(target):
        return
    safe_retrieve(source, target, urltype)
    log_table[os.path.basename(target)] = strftime('%b-%d-%Y')
def read_log():
    '''Return the download-date table parsed from PATH_TOOLS/tool_log (JSON).'''
    # "with" guarantees the handle is closed even if json.load raises
    # (the original leaked the handle on a parse error).
    with open(join(PATH_TOOLS, 'tool_log')) as log:
        return json.load(log)
def write_log(log_table):
    '''Serialize the download-date table to PATH_TOOLS/tool_log as
    pretty-printed, key-sorted JSON.'''
    # "with" guarantees the handle is closed/flushed even if json.dump raises.
    with open(join(PATH_TOOLS, 'tool_log'), 'w') as log:
        json.dump(log_table, log, sort_keys=True, indent=4)
def main(install=False, toolList = [], tool_check=True, cpu=4):
    '''Resolve the requested tools and, on linux with install=True, download
    and install the missing ones; otherwise print manual instructions.

    NOTE(review): the parameter `tool_check` shadows the module-level
    function of the same name inside this body, and toolList=[] is a
    mutable default (harmless here: it is only read).
    '''
    check_dir_and_log()
    toolsD = {}
    for toolname in toolList:
        t = TOOLS_DICT[toolname]
        toolsD[t.name] = t
    if tool_check: # --hard option == install all tools that were passed in, no matter what we already have
        toolsD = check_tools(toolsD) #dict with only tools that need to be installed
    log_table = read_log()
    tasks = []
    # capture log_table and the file_check flag for the downloads below
    partial_get = lambda a, b, c : get(log_table, a, b ,c, tool_check)
    if(install and platform.system().lower() == 'linux'):
        for name, tool in toolsD.items():
            if tool.install: # don't download tools that are not openly licensed or do not provide linux binaries
                partial_get(tool.urltype, tool.url, tool.target)
                install_task = tool.install_task
                if install_task is not None:
                    tasks.append(install_task)
                # mark downloaded binaries executable where flagged
                for flag, exe in zip(tool.executeable_flags, tool.full_exe):
                    if(flag):
                        cmd = 'chmod u+x {0!s}'.format(exe)
                        subprocess.call(cmd, shell=True)
            else:
                print(tool.instructions)
    else:
        # non-linux platform or --install not given: print instructions only
        for name, tool in toolsD.items():
            print('\n Installation instructions for: ' + tool.name)
            print('\n\t Download tool at this link: ' + tool.url)
            print(tool.instructions)
    run_tasks(tasks, cpu)
    write_log(log_table)
# Command-line entry point: --install performs installs; --hard forces
# reinstallation of the listed tools even when already present.
if(__name__ == '__main__'):
    parser = argparse.ArgumentParser()
    parser.add_argument('--install', action='store_true', default=False)
    parser.add_argument('--hard', action='store_true', default=False)
    parser.add_argument('-t', '--tool', action='append', default=[])
    parser.add_argument('--cpu', type=int, default=4)
    args = parser.parse_args()
    #if args.hard:
    #    args.install = True
    main(args.install, args.tool, not args.hard, args.cpu)
| {
"repo_name": "bluegenes/MakeMyTranscriptome",
"path": "scripts/manage_tools.py",
"copies": "1",
"size": "5926",
"license": "bsd-3-clause",
"hash": -376996449616382140,
"line_mean": 33.6987951807,
"line_max": 153,
"alpha_frac": 0.6012487344,
"autogenerated": false,
"ratio": 3.6068167985392576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47080655329392573,
"avg_score": null,
"num_lines": null
} |
from tools_class import ext_tool as tc
from mmt_defaults import PATH_TOOLS
from os.path import join,dirname,abspath
import platform
# TODO: samtools --> need to either add here, or make sure user has downloaded somehow..
# Registry of every external tool MMT knows how to fetch/install; each
# section below appends one configured ext_tool.
TOOL_LIST = []
### Trinity ###
trinity_source_url = 'https://github.com/trinityrnaseq/trinityrnaseq/archive/v2.1.1.tar.gz'
trinity_source_target = join(PATH_TOOLS, 'trinityrnaseq-2.1.1')
trinity_exe = ['Trinity', 'util/support_scripts/get_Trinity_gene_to_trans_map.pl', 'trinity-plugins/fastool']
# BUG FIX: the instructions previously told users to "cd trinityrnaseq-1.1.1"
# although the v2.1.1 archive unpacks to trinityrnaseq-2.1.1.
trinity_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf trinityrnaseq-2.1.1.tar.gz \n\t\t ' \
                       'cd trinityrnaseq-2.1.1 \n\t\t make \n ' \
                       '\n\tThen: soft link /path/to/trinityrnaseq-2.1.1 into the MMT "external_tools" folder\n' \
                       '\t Or: 1. add /path/to/trinityrnaseq-2.1.1 to your $path variable\n' \
                       '\t 2. add /path/to/trinityrnaseq-2.1.1/util/support_scripts/ to your $path variable\n'
trinity_cmd = 'make'
trinity_website = 'https://github.com/trinityrnaseq/trinityrnaseq/wiki'
#trinity_dependencies= ['bowtie-1'] ### not doing anything with this yet... need to add to tools class?
trinity_tool = tc('trinity', trinity_source_url, trinity_source_target, trinity_exe, trinity_instructions, web=trinity_website)
trinity_tool.set_install(trinity_cmd)
TOOL_LIST.append(trinity_tool)
### Rcorrector ###
# GitHub's "master.zip" downloads as "master" but unpacks to
# "Rcorrector-master", hence the separate target and folder_name.
rcorrector_url = 'https://github.com/mourisl/Rcorrector/archive/master.zip'
rcorrector_urltype = 'zip'
rcorrector_target = join(PATH_TOOLS, 'master')
rcorrector_folder_name = 'Rcorrector-master'
rcorrector_exe = ['run_rcorrector.pl']
rcorrector_instructions = '\n\t After downloading: \n\n\t\t unzip master.zip \n\t\t ' \
                          'cd Rcorrector-master \n\t\t make \n ' \
                          '\n\tThen: soft link /path/to/Rcorrector-master into the MMT "external_tools" folder\n' \
                          '\t Or: add /path/to/Rcorrector-master to your $path variable\n'
rcorrector_cmd = 'make'
rcorrector_website = 'https://github.com/mourisl/Rcorrector'
rcorrector_tool = tc('rcorrector', rcorrector_url, rcorrector_target, rcorrector_exe, rcorrector_instructions, web=rcorrector_website, urltype=rcorrector_urltype, folder_name=rcorrector_folder_name)
rcorrector_tool.change_exe_fullpath(join(PATH_TOOLS,rcorrector_folder_name))
rcorrector_tool.set_install(rcorrector_cmd)
TOOL_LIST.append(rcorrector_tool)
### seqtk ###
#seqtk_url = 'https://github.com/lh3/seqtk/archive/master.zip'
#seqtk_urltype = 'zip'
#seqtk_target = join(PATH_TOOLS, 'master')
#seqtk_folder_name = 'seqtk-master'
#seqtk_exe = ['seqtk']
#seqtk_instructions = '\n\t After downloading: \n\n\t\t unzip master.zip \n\t\t ' \
# 'cd seqtk-master \n\t\t make \n ' \
# '\n\tThen: soft link /path/to/seqtk-master into the MMT "external_tools" folder\n' \
# '\t Or: add /path/to/seqtk-master to your $path variable\n'
#seqtk_cmd = 'make'
#seqtk_website = 'https://github.com/lh3/seqtk'
#seqtk_tool = tc('seqtk', seqtk_url, seqtk_target, seqtk_exe,seqtk_instructions, web=seqtk_website, urltype=seqtk_urltype, folder_name=seqtk_folder_name)
#seqtk_tool.change_exe_fullpath(join(PATH_TOOLS,seqtk_folder_name))
#seqtk_tool.set_install(seqtk_cmd)
#TOOL_LIST.append(seqtk_tool)
### Trimmomatic ###
trimmomatic_binary_url = 'http://www.usadellab.org/cms/uploads/supplementary/Trimmomatic/Trimmomatic-0.35.zip'
trimmomatic_binary_target = join(PATH_TOOLS, 'Trimmomatic-0.35')
trimmomatic_exe = ['trimmomatic-0.35.jar', 'adapters/TruSeq3-PE.fa','adapters/TruSeq3-SE.fa']
trimmomatic_instructions = '\n\t After downloading the binary: \n\n\t\t unzip Trimmomatic-0.35.zip \n ' \
'\n\tThen: soft link /path/to/Trimmomatic-0.35 into the MMT "external_tools" folder\n' \
'\t Or: add /path/to/Trimmomatic-0.35 to your $path variable\n'
trimmomatic_urltype = 'zip'
trimmomatic_website = 'http://www.usadellab.org/cms/index.php?page=trimmomatic'
trimmomatic_tool = tc('trimmomatic', trimmomatic_binary_url, trimmomatic_binary_target, trimmomatic_exe, trimmomatic_instructions, urltype=trimmomatic_urltype, web=trimmomatic_website )
TOOL_LIST.append(trimmomatic_tool)
### Prinseq ###
prinseq_url = 'http://sourceforge.net/projects/prinseq/files/standalone/prinseq-lite-0.20.4.tar.gz'
prinseq_target = join(PATH_TOOLS, 'prinseq-lite-0.20.4')
prinseq_exe = ['prinseq-lite.pl']
prinseq_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf prinseq-lite-0.20.4.tar.gz \n\t\t ' \
'\n\tThen: soft link /path/to/prinseq-lite-0.20.4 into the MMT "external_tools" folder\n' \
'\t Or: add /path/to/prinseq-lite-0.20.4 to your $path variable\n'
prinseq_website = 'http://prinseq.sourceforge.net'
prinseq_tool = tc('prinseq', prinseq_url, prinseq_target, prinseq_exe, prinseq_instructions, web = prinseq_website)
TOOL_LIST.append(prinseq_tool)
### Transdecoder ###
PATH_TRANSDECODER = 'TransDecoder' #which exe to use!???
transdecoder_url = 'https://github.com/TransDecoder/TransDecoder/archive/2.0.1.tar.gz'
transdecoder_target = join(PATH_TOOLS, 'TransDecoder-2.0.1')
transdecoder_exe = ['TransDecoder.LongOrfs', 'TransDecoder.Predict', 'util/bin/cd-hit']
transdecoder_instructions ='\n\t After downloading the binary: \n\n\t\t tar zxf 2.0.1.tar.gz \n\t\t ' \
                           'cd TransDecoder-2.0.1 \n\t\t make \n ' \
                           '\n\tThen: soft link /path/to/TransDecoder-2.0.1 into the MMT "external_tools" folder\n' \
                           '\t Or: add /path/to/TransDecoder-2.0.1 to your $path variable\n'
transdecoder_cmd= 'make'
transdecoder_website = 'http://transdecoder.github.io'
transdecoder_tool = tc('transdecoder', transdecoder_url, transdecoder_target, transdecoder_exe, transdecoder_instructions, web=transdecoder_website)
transdecoder_tool.set_install(transdecoder_cmd)
TOOL_LIST.append(transdecoder_tool)
### Transrate ###
transrate_linux_url = 'https://bintray.com/artifact/download/blahah/generic/transrate-1.0.1-linux-x86_64.tar.gz'
transrate_linux_target = join(PATH_TOOLS, 'transrate-1.0.1-linux-x86_64')
transrate_exe = ['transrate']
transrate_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf transrate-1.0.1-linux-x86_64.tar.gz \n\t\t ' \
                         'cd transrate-1.0.1-linux-x86_64 \n\t\t transrate --install-deps=ref \n ' \
                         '\n\tThen: soft link /path/to/transrate-1.0.1-linux-x86_64 into the MMT "external_tools" folder\n' \
                         '\t Or: add /path/to/transrate-1.0.1-linux-x86_64 to your $path variable\n'
# Transrate's "install" step fetches its own reference dependencies rather than compiling.
transrate_cmd = 'transrate --install-deps=ref'
transrate_website = 'http://hibberdlab.com/transrate/'
transrate_tool = tc('transrate', transrate_linux_url, transrate_linux_target, transrate_exe, transrate_instructions, web=transrate_website)
transrate_tool.set_install(transrate_cmd)
TOOL_LIST.append(transrate_tool)
### BUSCO ###
# Older release tarballs kept for reference; current source is the gitlab repo.
#busco_url = 'http://busco.ezlab.org/files/BUSCO_v1.1b1.tar.gz'
#busco_url = 'http://busco.ezlab.org/files/BUSCO_v1.2.tar.gz'
busco_url = 'https://gitlab.com/ezlab/busco.git'
#busco_target = join(PATH_TOOLS, 'BUSCO_v1.1b1')
busco_target = join(PATH_TOOLS, 'busco')
busco_exe = ['BUSCO.py']
# NOTE(review): instructions are a placeholder ('tbd'); old tarball steps kept below.
busco_instructions = '\n\t tbd' #'\n\t After downloading the binary: \n\n\t\t tar zxf BUSCO_v1.2.tar.gz \n\n ' \
                      # '\n\t\t cd B\n' \
                      # '\n\t\t chmod u+x BUSCO_v1.2.py\n' \
                      #'\tThen: soft link /path/to/BUSCO_v1.1b1 into the MMT "external_tools" folder\n' \
                      #'\t Or: add /path/to/BUSCO_v1.2 to your $path variable\n'
busco_website='http://busco.ezlab.org'
busco_tool = tc('busco', busco_url, busco_target, busco_exe, busco_instructions, web=busco_website)
TOOL_LIST.append(busco_tool)
### plant-specific BUSCO (currently disabled) ###
#busco_plant_url = 'http://buscos.ezlab.org/files/plant_early_release.tar.gz'
#busco_plant_target = join(PATH_TOOLS, 'plant_early_release')
#busco_plant_exe = ['BUSCO_plants.py']
#busco_plant_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf plant_early_release.tar.gz \n\n ' \
#                           '\n\t\t cd plant_early_release\n' \
#                           '\n\t\t chmod u+x BUSCO_plants.py\n' \
#                           '\tThen: soft link /path/to/plant_early_release into the MMT "external_tools" folder\n' \
#                           '\t Or: add /path/to/plant_early_release to your $path variable\n'
#
#busco_plant_tool = tc('busco_plant', busco_plant_url, busco_plant_target, busco_plant_exe, busco_plant_instructions, web=busco_website)
#TOOL_LIST.append(busco_plant_tool)
### HMMER ###
hmmer_linux_url = 'http://eddylab.org/software/hmmer3/3.1b2/hmmer-3.1b2-linux-intel-x86_64.tar.gz'
# Old mirror kept for reference:
#'http://selab.janelia.org/software/hmmer3/3.1b2/hmmer-3.1b2-linux-intel-x86_64.tar.gz'
hmmer_linux_target = join(PATH_TOOLS, 'hmmer-3.1b2-linux-intel-x86_64')
hmmer_exe = ['binaries/hmmscan','binaries/hmmpress', 'binaries/hmmsearch']
hmmer_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf hmmer-3.1b2-linux-intel-x86_64.tar.gz \n\n ' \
                     '\tThen: soft link /path/to/hmmer-3.1b2-linux-intel-x86_64 into the MMT "external_tools" folder\n' \
                     '\t Or: add /path/to/hmmer-3.1b2-linux-intel-x86_64/binaries to your $path variable\n'
hmmer_website = 'http://hmmer.org'
hmmer_tool = tc('hmmer',hmmer_linux_url, hmmer_linux_target, hmmer_exe, hmmer_instructions, web=hmmer_website)
TOOL_LIST.append(hmmer_tool)
### Diamond ###
diamond_linux_url = 'http://github.com/bbuchfink/diamond/releases/download/v0.7.10/diamond-linux64.tar.gz'
diamond_target = join(PATH_TOOLS, 'diamond')
diamond_exe = ['diamond']
diamond_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf diamond-linux64.tar.gz \n\n ' \
                       '\n\tThen: soft link path/to/diamond into the MMT "external_tools" folder\n' \
                       '\t Or: add /path/to/diamond to your $path variable\n'
diamond_website = 'https://github.com/bbuchfink/diamond'
diamond_tool = tc('diamond', diamond_linux_url, diamond_target, diamond_exe, diamond_instructions,web=diamond_website)
diamond_tool.change_exe_fullpath(PATH_TOOLS) # bc reg is PATH_TOOLS/target/exe
TOOL_LIST.append(diamond_tool)
### Salmon ###
salmon_linux_url = 'https://github.com/COMBINE-lab/salmon/releases/download/v0.7.2/Salmon-0.7.2_linux_x86_64.tar.gz'
salmon_linux_target = join(PATH_TOOLS, 'Salmon-0.7.2_linux_x86_64')
salmon_exe = ['bin/salmon']
# NOTE(review): these instructions still reference SalmonBeta-0.6.x while the
# download url is v0.7.2 — the text should probably be updated to match.
salmon_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf SalmonBeta-0.6.0_DebianSqueeze.tar.gz \n\n ' \
                      '\tThen: soft link SalmonBeta-0.6.1_DebianSqueeze into the MMT "external_tools" folder\n' \
                      '\t Or: 1. add /path/to/SalmonBeta-0.6.1_DebianSqueeze/bin to your $path variable\n' \
                      '\t 2. add /path/to/SalmonBeta-0.6.1_DebianSqueeze/lib to your shared libraries variable\n'
salmon_website = 'http://combine-lab.github.io/salmon'
salmon_tool = tc('salmon', salmon_linux_url, salmon_linux_target, salmon_exe, salmon_instructions, web=salmon_website)
TOOL_LIST.append(salmon_tool)
### FastQC ###
fastqc_linux_url = 'http://www.bioinformatics.babraham.ac.uk/projects/fastqc/fastqc_v0.11.4.zip'
fastqc_linux_target = join(PATH_TOOLS,'fastqc_v0.11.4')
fastqc_exe = ['fastqc']
fastqc_instructions = '\n\t After downloading the binary: \n\n\t\t unzip fastqc_v0.11.4.zip \n\n ' \
                      '\n\tThen: soft link /path/to/fastqc_v0.11.4 into the MMT "external_tools" folder\n' \
                      '\t Or: add /path/to/fastqc_v0.11.4 to your $path variable \n'
fastqc_folder_name = 'FastQC' #unzips into FastQC dir
fastqc_urltype = 'zip'
fastqc_website = 'http://www.bioinformatics.babraham.ac.uk/projects/fastqc'
# The single fastqc exe must be chmod'ed executable after unpacking.
fastqc_exe_flags = [True]
fastqc_tool = tc('fastqc', fastqc_linux_url, fastqc_linux_target, fastqc_exe, fastqc_instructions, urltype='zip', folder_name=fastqc_folder_name, web=fastqc_website, executeable_flags=fastqc_exe_flags)
fastqc_tool.change_exe_fullpath(join(PATH_TOOLS,'FastQC')) #unzips into FastQC dir
TOOL_LIST.append(fastqc_tool)
### bedtools ###
bedtools_url = 'https://github.com/arq5x/bedtools2/releases/download/v2.25.0/bedtools-2.25.0.tar.gz'
bedtools_target = join(PATH_TOOLS,'bedtools-2.25.0')
bedtools_cmd = 'make'
bedtools_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf bedtools-2.25.0 \n ' \
                        '\t\t cd bedtools2 \n' \
                        '\t\t make \n\n' \
                        '\tThen: soft link /path/to/bedtools2 into the MMT "external_tools" folder\n' \
                        '\t Or: add /path/to/bedtools2/bin to your $path variable \n\n' \
                        '\tFor more help, see: http://bedtools.readthedocs.org/en/latest/content/installation.html\n'
bedtools_exe = ['bin/intersectBed']
bedtools_folder_name = 'bedtools2' #unpacks to 'bedtools2'
bedtools_website = 'https://github.com/arq5x/bedtools2'
bedtools_tool = tc('bedtools',bedtools_url, bedtools_target, bedtools_exe, bedtools_instructions, folder_name=bedtools_folder_name, web=bedtools_website)
#bedtools_tool.change_exe_fullpath(join(PATH_TOOLS,folder_name))
TOOL_LIST.append(bedtools_tool)
### rnaSPAdes ###
rnaspades_url = 'http://spades.bioinf.spbau.ru/rnaspades0.1.1/rnaSPAdes-0.1.1-Linux.tar.gz'
rnaspades_target = join(PATH_TOOLS, 'rnaSPAdes-0.1.1-Linux')
rnaspades_exe = ['bin/rnaspades.py']
rnaspades_instructions = 'rnaspades is currently not fully supported in MMT. Installation instructions will go here once it is fully supported.'
rnaspades_tool = tc('rnaspades', rnaspades_url, rnaspades_target, rnaspades_exe, rnaspades_instructions)
TOOL_LIST.append(rnaspades_tool)
### NCBI BLAST+ ###
blastplus_url = 'ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/ncbi-blast-2.3.0+-x64-linux.tar.gz'
# BUGFIX: the target was the bare string 'ncbi-blast-2.3.0+-x64-linux'. Every
# other tool anchors its target under PATH_TOOLS, and the tool class derives
# folder_name/exe paths from dirname(target), so a relative target resolved
# against the current working directory instead of the external_tools folder.
blastplus_target = join(PATH_TOOLS, 'ncbi-blast-2.3.0+-x64-linux')
blastplus_folder_name = 'ncbi-blast-2.3.0+'
blastplus_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf ncbi-blast-2.3.0+-x64-linux.tar.gz \n ' \
                         '\n\t cd ncbi-blast-2.3.0+ \n' \
                         '\n\tThen: soft link /path/to/ncbi-blast-2.3.0+ into the MMT "external_tools" folder\n' \
                         '\t Or: add /path/to/ncbi-blast-2.3.0+/bin to your $path variable \n\n'
blastplus_exe = ['bin/makeblastdb','bin/blastx', 'bin/blastp']
blastplus_website = 'https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download'
blastplus_tool = tc('blast', blastplus_url, blastplus_target,blastplus_exe,blastplus_instructions,folder_name=blastplus_folder_name, web=blastplus_website)
#blastplus_tool.change_exe_fullpath(join(PATH_TOOLS,folder_name))
TOOL_LIST.append(blastplus_tool)
### Bowtie2 ###
bowtie2_url = 'http://sourceforge.net/projects/bowtie-bio/files/bowtie2/2.2.6/bowtie2-2.2.6-linux-x86_64.zip'
bowtie2_urltype = 'zip'
bowtie2_target = join(PATH_TOOLS,'bowtie2-2.2.6-linux-x86_64')
bowtie2_instructions = '\n\t After downloading the binary: \n\n\t\t unzip bowtie2-2.2.6-linux-x86_64.zip \n ' \
                       '\n\t cd bowtie2-2.2.6 \n' \
                       '\n\tThen: soft link /path/to/bowtie2-2.2.6 into the MMT "external_tools" folder\n' \
                       '\t Or: add /path/to/bowtie2-2.2.6 to your $path variable \n\n'
bowtie2_folder_name = 'bowtie2-2.2.6'
bowtie2_exe = ['bowtie2-build','bowtie2']
bowtie2_website = 'http://bowtie-bio.sourceforge.net/bowtie2/index.shtml'
# Both executables need the executable bit set after unpacking.
bowtie2_exe_flags = [True, True]
bowtie2_tool = tc('bowtie2', bowtie2_url, bowtie2_target, bowtie2_exe, bowtie2_instructions, urltype=bowtie2_urltype, folder_name=bowtie2_folder_name, web=bowtie2_website, executeable_flags=bowtie2_exe_flags)
TOOL_LIST.append(bowtie2_tool)
### eXpress ###
express_url = 'http://bio.math.berkeley.edu/eXpress/downloads/express-1.5.1/express-1.5.1-linux_x86_64.tgz'
# NOTE(review): urltype here has a leading dot ('.tgz') while other tools use
# 'zip'/'tar.gz' — confirm how the downloader consumes this value.
express_urltype = '.tgz'
# BUGFIX: the target previously kept the '.tgz' archive suffix
# (join(PATH_TOOLS, 'express-1.5.1-linux_x86_64.tgz')). The tool class builds
# executable paths as join(target, exe), so the derived path
# .../express-1.5.1-linux_x86_64.tgz/express could never match the directory
# the tarball actually unpacks into.
express_target = join(PATH_TOOLS, 'express-1.5.1-linux_x86_64')
express_instructions = '\n\t After downloading the binary: \n\n\t\t tar zxf express-1.5.1-linux_x86_64.tgz \n ' \
                       '\n\tThen: soft link /path/to/express-1.5.1-linux_x86_64 into the MMT "external_tools" folder\n' \
                       '\t Or: add /path/to/express-1.5.1-linux_x86_64 to your $path variable \n\n'
express_exe = ['express']
express_website = 'http://bio.math.berkeley.edu/eXpress/index.html'
express_tool = tc('express',express_url, express_target, express_exe, express_instructions, urltype=express_urltype, web=express_website)
TOOL_LIST.append(express_tool)
##### Tools MMT cannot install automatically (license or build constraints) ####
### CEGMA ###
cegma_url = 'http://korflab.ucdavis.edu/datasets/cegma/CEGMA_v2.5.tar.gz'
cegma_target = join(PATH_TOOLS,'CEGMA_v2.5')
cegma_exe = ['cegma']
cegma_instructions = '\n\t MMT cannot install cegma. Please see http://korflab.ucdavis.edu/datasets/cegma/#SCT3 for installation instructions.\n'
cegma_website = 'http://korflab.ucdavis.edu/datasets/cegma/'
cegma_tool = tc('cegma', cegma_url, cegma_target, cegma_exe, cegma_instructions, install=False, web=cegma_website)
cegma_tool.change_exe_fullpath('') # they need to put cegma into their $path
TOOL_LIST.append(cegma_tool)
### kallisto ###
kallisto_url = 'https://github.com/pachterlab/kallisto/releases/download/v0.42.4/kallisto_linux-v0.42.4.tar.gz'
kallisto_target = join(PATH_TOOLS, 'kallisto_linux-v0.42.4')
kallisto_instructions = "\n\tAs kallisto is distributed under a non-commercial license, MMT cannot download kallisto for you. Please see https://pachterlab.github.io/kallisto/about.html for information about kallisto. To use, install kallisto yourself and place tool in your $path variable\n"
kallisto_exe = ['kallisto']
kallisto_website = 'https://pachterlab.github.io/kallisto/'
kallisto_tool = tc("kallisto", kallisto_url, kallisto_target, kallisto_exe, kallisto_instructions, install=False, web=kallisto_website)
kallisto_tool.change_exe_fullpath('') # look for exe in $path
TOOL_LIST.append(kallisto_tool)
### RNAmmer ###
#PATH_RNAMMER = '/matta1/biotools/redhat/rnammer-1.2/rnammer'
rnammer_url = ''
rnammer_target = '' #join(PATH_TOOLS,'rnammer-1.2')
rnammer_exe = ['RnammerTranscriptome.pl']
rnammer_instructions = '\n\tRNAMMER is freely available for academic use only. See http://www.cbs.dtu.dk/services/RNAmmer/ for download and installation instructions. RNAMMER is currently supported as an optional tool, but this support may be removed at any time in favor of openly licensed tools.\n'
rnammer_website = 'http://www.cbs.dtu.dk/cgi-bin/sw_request?rnammer'
rnammer_tool = tc('rnammer', rnammer_url, rnammer_target, rnammer_exe, rnammer_instructions, install=False, web=rnammer_website)
# NOTE(review): unlike cegma/kallisto/tmhmm/signalp, the $path fallback is
# commented out here — confirm whether that is intentional.
#rnammer_tool.change_exe_fullpath('') # look for exe in $path
TOOL_LIST.append(rnammer_tool)
### TMHMM ###
tmhmm_url = ''
tmhmm_target = ''
tmhmm_exe = ['tmhmm']
tmhmm_instructions = '\n\tTMHMM is freely available for academic use only. See http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?tmhmm for download and installation instructions. TMHMM is currently supported as an optional tool, but this support may be removed at any time in favor of openly licensed tools.\n'
tmhmm_website = 'http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?tmhmm'
tmhmm_tool = tc('tmhmm', tmhmm_url, tmhmm_target, tmhmm_exe, tmhmm_instructions, install=False, web=tmhmm_website)
tmhmm_tool.change_exe_fullpath('') # look for exe in $path
TOOL_LIST.append(tmhmm_tool)
### SignalP ###
signalp_url = ''
signalp_target = ''
signalp_exe = ['signalp']
signalp_instructions = '\n\tSignalp is freely available for academic use only. See http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?signalp for download and installation instructions. SignalP is currently supported as an optional tool, but this support may be removed at any time in favor of openly licensed tools.\n'
signalp_website= 'http://www.cbs.dtu.dk/cgi-bin/nph-sw_request?signalp'
signalp_tool = tc('signalp', signalp_url, signalp_target, signalp_exe, signalp_instructions, install=False, web=signalp_website)
signalp_tool.change_exe_fullpath('') # look for exe in $path
TOOL_LIST.append(signalp_tool)
# Index every registered tool by name for lookup elsewhere in MMT.
TOOLS_DICT = {tool.name: tool for tool in TOOL_LIST}
| {
"repo_name": "bluegenes/MakeMyTranscriptome",
"path": "scripts/external_tools.py",
"copies": "1",
"size": "20538",
"license": "bsd-3-clause",
"hash": -397815896911887900,
"line_mean": 61.7826086957,
"line_max": 313,
"alpha_frac": 0.6840003895,
"autogenerated": false,
"ratio": 2.6555469356089993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38395473251089995,
"avg_score": null,
"num_lines": null
} |
import os
from os.path import join, exists, dirname
from tasks_v2 import Task
class ext_tool:
    """Descriptor for an external tool that MMT can download and install.

    Records the download url, the on-disk install target, the executables the
    tool provides, human-readable install instructions, and (optionally) Task
    objects that perform / verify the installation.
    """

    def __init__(self, name, url, target, executables, instructions, web='',
                 urltype='tar.gz', dependencies=None, folder_name='',
                 install=True, executeable_flags=None):
        """Create a tool descriptor.

        :param name: short identifier used in task names and TOOLS_DICT keys.
        :param url: download url ('' for tools the user must obtain themselves).
        :param target: directory the download is expected to land in.
        :param executables: paths of executables relative to the install folder.
        :param instructions: human-readable manual-install instructions.
        :param web: tool homepage url.
        :param urltype: archive type of the download (e.g. 'tar.gz', 'zip').
        :param dependencies: optional list of dependency names (None -> []).
        :param folder_name: name the archive actually unpacks into, when it
            differs from the basename of `target`.
        :param install: False if MMT cannot install this tool automatically.
        :param executeable_flags: per-executable bools; defaults to all False.
        """
        self.name = name
        self.url = url
        self.target = target
        self.exe = executables
        # One flag per executable; default to False for each entry.
        self.executeable_flags = executeable_flags if (executeable_flags is not None) else [False for _ in self.exe]
        self.instructions = instructions
        self.install = install
        self.urltype = urltype
        # BUGFIX: avoid a shared mutable default argument for `dependencies`.
        self.dependencies = dependencies if (dependencies is not None) else []
        self.full_exe = [join(self.target, x) for x in self.exe]
        self.install_task = None
        self.check_install_task = None
        self.folder_name = target
        if len(folder_name) > 0:
            # Archive unpacks into a sibling directory of `target`.
            self.folder_name = join(dirname(self.target), folder_name)
            self.full_exe = [join(self.folder_name, x) for x in self.exe]
        self.web = web

    def set_install(self, cmd, task_deps=None, log_flag=True):
        """Create the Task that installs this tool by running `cmd` inside folder_name."""
        task_deps = task_deps if (task_deps is not None) else []
        install_trgs = self.full_exe
        cd_cmd = 'cd {0!s}; '.format(self.folder_name)
        install_cmd = cd_cmd + cmd
        install_name = 'install_' + self.name
        out, err = (None, None)
        # out, err = GEN_LOGS(name) if(log_flag) else (None, None)
        self.install_task = Task(command=install_cmd, dependencies=task_deps, targets=install_trgs, name=install_name, stdout=out, stderr=err)

    def check_install(self, check_cmd, task_deps=None, log_flag=True):
        """Create the Task that verifies this tool's installation with `check_cmd`."""
        task_deps = task_deps if (task_deps is not None) else []
        install_trgs = self.full_exe
        check_name = 'check_install_' + self.name
        out, err = (None, None)
        # out, err = GEN_LOGS(name) if(log_flag) else (None, None)
        # BUGFIX: was name=check_install_name (undefined -> NameError on every call).
        self.check_install_task = Task(command=check_cmd, dependencies=task_deps, targets=install_trgs, name=check_name, stdout=out, stderr=err)

    def change_exe_fullpath(self, path):
        """Re-anchor all executable paths under `path` ('' means rely on $PATH)."""
        self.full_exe = [join(path, x) for x in self.exe]

    def __call__(self):
        """Return the tool's core attributes as a list (used for display/serialization)."""
        return [self.name, self.url, self.target, self.exe, self.urltype, self.install, self.instructions]
| {
"repo_name": "bluegenes/MakeMyTranscriptome",
"path": "scripts/tools_class.py",
"copies": "1",
"size": "2198",
"license": "bsd-3-clause",
"hash": 8358604300291748000,
"line_mean": 41.0980392157,
"line_max": 164,
"alpha_frac": 0.6251137398,
"autogenerated": false,
"ratio": 3.445141065830721,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9458563499301704,
"avg_score": 0.022338261265803194,
"num_lines": 51
} |
__author__ = 'bmiller'
import os, shutil
# Modules that already have real implementations; these must NOT be stubbed.
mymodules = ['document', 'math', 'operator', 'processing', 'random', 're', 'time', 'test', 'turtle', 'unittest', 'urllib', 'webgl']
# Root of the CPython 2.6 standard library to mirror (OS X framework path).
p26root = '/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/'
def make_stub(fname, fpath, skip=None):
    """Write a stub module at `fpath` that raises NotImplementedError.

    :param fname: source file name (e.g. 'foo.py'); the module name is derived
        from it and embedded in the error message.
    :param fpath: destination path for the stub file.
    :param skip: module names that must NOT be stubbed; defaults to the
        module-level `mymodules` list (backward compatible).
    """
    modname = fname.replace('.py', '')
    if skip is None:
        skip = mymodules
    if modname not in skip:
        # `with` guarantees the file is closed even if the write fails.
        with open(fpath, 'w') as f:
            f.write('raise NotImplementedError("%s is not yet implemented in Skulpt")\n' % modname)
# Mirror the CPython 2.6 stdlib tree under src/lib/, stubbing out every
# package __init__ and every top-level module that Skulpt does not implement.
for root, dirs, files in os.walk(p26root):
    # Recreate each directory under src/lib/ and stub its package __init__.
    for dname in dirs:
        newdir = os.path.join(root,dname)
        newdir = newdir.replace(p26root,'src/lib/')
        if not os.path.exists(newdir):
            print("making", newdir)
            os.makedirs(newdir)
        newfile = os.path.join(newdir,'__init__.py')
        make_stub(dname, newfile)
    for fname in [f for f in files if f.endswith(".py")]:
        newfile = root.replace(p26root,'src/lib/')
        # Only files directly under the stdlib root get individual stubs;
        # NOTE(review): for files in subpackages, `newfile` stays a directory
        # path and nothing is written — presumably the package's stubbed
        # __init__.py covers them, but confirm that this is intentional.
        if newfile.endswith('src/lib/'):
            newfile = os.path.join(newfile, fname)
            print("making file", newfile)
            make_stub(fname,newfile)
"repo_name": "ArcherSys/ArcherSys",
"path": "skulpt/stdlibstubs.py",
"copies": "1",
"size": "1151",
"license": "mit",
"hash": 4704379083999486000,
"line_mean": 35,
"line_max": 131,
"alpha_frac": 0.5977410947,
"autogenerated": false,
"ratio": 3.336231884057971,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44339729787579707,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bms'
import logging
from django.test import TestCase
import django.forms as djforms
import django.forms.widgets as djwidgets
import django_forms_bootstrapped as forms
import django_forms_bootstrapped.widgets as widgets
log = logging.getLogger(__name__)
class FormTestCase(TestCase):
    """Rendering tests for django_forms_bootstrapped.

    Each test builds a small form, renders it with as_p(), and compares the
    exact HTML against the Bootstrap-decorated markup the library is expected
    to emit.  Most tests exercise three configurations in sequence: the
    default, ``bootstrap_options['as_p_use_divs']`` (swap <p> wrappers for
    <div>), and ``bootstrap_options['form_layout'] = 'form-horizontal'``
    (add grid column classes).
    """
    def setUp(self):
        # No shared fixtures; each test defines its own form class inline.
        pass
    def test_simple_form(self):
        # CharField gets form-group / control-label / form-control classes.
        class SimpleForm(forms.Form):
            name = forms.CharField(max_length=50)
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<p class="form-group">'
            u'<label class="control-label" for="id_name">Name:</label> '
            u'<input class="form-control" id="id_name" maxlength="50" name="name" type="text" />'
            u'</p>'
        )
        self.assertEqual(rendered, html,
                         'testing simple form with no bootstrap')
    def test_simple_form_bootstrap_options(self):
        # as_p_use_divs, then form-horizontal, then custom column sizes.
        class SimpleForm(forms.Form):
            name = forms.CharField(max_length=50)
            bootstrap_options = {
                'as_p_use_divs': True,
            }
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label" for="id_name">Name:</label> '
            u'<input class="form-control" id="id_name" maxlength="50" name="name" type="text" />'
            u'</div>'
        )
        self.assertEqual(rendered, html,
                         'testing simple form with bootstrap options')
        SimpleForm.bootstrap_options['form_layout'] = 'form-horizontal'
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label col-sm-2" for="id_name">Name:</label> '
            u'<div class="col-sm-10">'
            u'<input class="form-control" id="id_name" maxlength="50" name="name" type="text" />'
            u'</div>'
            u'</div>'
        )
        self.assertEqual(rendered, html, 'testing simple form_layout')
        # form_layout_sizes overrides the default (2, 10) column split.
        SimpleForm.bootstrap_options['form_layout_sizes'] = (4, 6)
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label col-sm-4" for="id_name">Name:</label> '
            u'<div class="col-sm-6">'
            u'<input class="form-control" id="id_name" maxlength="50" name="name" type="text" />'
            u'</div>'
            u'</div>'
        )
        self.assertEqual(rendered, html, 'Testing simple form_layout_sizes')
    def test_charfield(self):
        # The Textarea widget also picks up the form-control class.
        class SimpleForm(forms.Form):
            body = forms.CharField(widget=widgets.Textarea)
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<p class="form-group">'
            u'<label class="control-label" for="id_body">Body:</label> '
            u'<textarea class="form-control" cols="40" id="id_body" name="body" rows="10">\r\n</textarea>'
            u'</p>'
        )
        self.assertEqual(rendered, html)
    def test_choicefield(self):
        # Select widgets get form-control; checked across all three layouts.
        class SimpleForm(forms.Form):
            choose = forms.ChoiceField(
                choices=((1, 'choice 1'), (2, 'choice 2')))
            bootstrap_options = {}
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<p class="form-group">'
            u'<label class="control-label" for="id_choose">Choose:</label> '
            u'<select class="form-control" id="id_choose" name="choose">\n'
            u'<option value="1">choice 1</option>\n'
            u'<option value="2">choice 2</option>\n'
            u'</select>'
            u'</p>'
        )
        self.assertEqual(rendered, html)
        SimpleForm.bootstrap_options['as_p_use_divs'] = True
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label" for="id_choose">Choose:</label> '
            u'<select class="form-control" id="id_choose" name="choose">\n'
            u'<option value="1">choice 1</option>\n'
            u'<option value="2">choice 2</option>\n'
            u'</select>'
            u'</div>'
        )
        self.assertEqual(rendered, html)
        SimpleForm.bootstrap_options['form_layout'] = 'form-horizontal'
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label col-sm-2" for="id_choose">Choose:</label> '
            u'<div class="col-sm-10">'
            u'<select class="form-control" id="id_choose" name="choose">\n'
            u'<option value="1">choice 1</option>\n'
            u'<option value="2">choice 2</option>\n'
            u'</select>'
            u'</div>'
            u'</div>'
        )
        self.assertEqual(rendered, html)
    def test_choicefield_with_radio(self):
        # Baseline: plain django renders radios as a <ul>; the bootstrapped
        # version wraps each option in a <div class="radio"> instead.
        class DJSimpleForm(djforms.Form):
            choose = forms.ChoiceField(
                choices=((1, 'choice 1'), (2, 'choice 2')),
                widget=djwidgets.RadioSelect,
            )
        sform = DJSimpleForm()
        rendered = sform.as_p()
        html = (
            u'<p>'
            u'<label for="id_choose_0">Choose:</label> '
            u'<ul id="id_choose">\n'
            u'<li><label for="id_choose_0"><input id="id_choose_0" name="choose" type="radio" value="1" /> choice 1</label></li>\n'
            u'<li><label for="id_choose_1"><input id="id_choose_1" name="choose" type="radio" value="2" /> choice 2</label></li>\n'
            u'</ul>'
            u'</p>'
        )
        self.assertEqual(rendered, html, 'testing django radio')
        class SimpleForm(forms.Form):
            choose = forms.ChoiceField(
                choices=((1, 'choice 1'), (2, 'choice 2')),
                widget=widgets.RadioSelect,
            )
            bootstrap_options = {}
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<p class="form-group">'
            u'<label class="control-label" for="id_choose_0">Choose:</label> '
            u'<div class="radio">'
            u'<label for="id_choose_0">'
            u'<input id="id_choose_0" name="choose" type="radio" value="1" /> choice 1'
            u'</label>'
            u'</div>\n'
            u'<div class="radio">'
            u'<label for="id_choose_1">'
            u'<input id="id_choose_1" name="choose" type="radio" value="2" /> choice 2'
            u'</label>'
            u'</div>'
            u'</p>'
        )
        self.assertEqual(rendered, html, 'simple radio')
        SimpleForm.bootstrap_options['as_p_use_divs'] = True
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label" for="id_choose_0">Choose:</label> '
            u'<div class="radio">'
            u'<label for="id_choose_0">'
            u'<input id="id_choose_0" name="choose" type="radio" value="1" /> choice 1'
            u'</label>'
            u'</div>\n'
            u'<div class="radio">'
            u'<label for="id_choose_1">'
            u'<input id="id_choose_1" name="choose" type="radio" value="2" /> choice 2'
            u'</label>'
            u'</div>'
            u'</div>'
        )
        self.assertEqual(rendered, html, 'radio with divs')
        SimpleForm.bootstrap_options['form_layout'] = 'form-horizontal'
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label col-sm-2" for="id_choose_0">Choose:</label> '
            u'<div class="col-sm-10">'
            u'<div class="radio">'
            u'<label for="id_choose_0">'
            u'<input id="id_choose_0" name="choose" type="radio" value="1" /> choice 1'
            u'</label>'
            u'</div>\n'
            u'<div class="radio">'
            u'<label for="id_choose_1">'
            u'<input id="id_choose_1" name="choose" type="radio" value="2" /> choice 2'
            u'</label>'
            u'</div>'
            u'</div>'
            u'</div>'
        )
        self.assertEqual(rendered, html, 'radio in form-horizontal')
    def test_datetimefield(self):
        # DateTimeField renders as a text input with form-control.
        class SimpleForm(forms.Form):
            date_in = forms.DateTimeField()
            bootstrap_options = {}
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<p class="form-group">'
            u'<label class="control-label" for="id_date_in">Date in:</label> '
            u'<input class="form-control" id="id_date_in" name="date_in" type="text" />'
            u'</p>'
        )
        self.assertEqual(rendered, html)
        SimpleForm.bootstrap_options['as_p_use_divs'] = True
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label" for="id_date_in">Date in:</label> '
            u'<input class="form-control" id="id_date_in" name="date_in" type="text" />'
            u'</div>'
        )
        self.assertEqual(rendered, html)
        SimpleForm.bootstrap_options['form_layout'] = 'form-horizontal'
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div class="form-group">'
            u'<label class="control-label col-sm-2" for="id_date_in">Date in:</label> '
            u'<div class="col-sm-10">'
            u'<input class="form-control" id="id_date_in" name="date_in" type="text" />'
            u'</div>'
            u'</div>'
        )
        self.assertEqual(rendered, html)
    def test_filefield(self):
        # Note: file inputs do NOT get the form-group/control-label classes
        # on the wrapper/label in any of the three layouts.
        class SimpleForm(forms.Form):
            file_in = forms.FileField()
            bootstrap_options = {}
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<p>'
            u'<label for="id_file_in">File in:</label> '
            u'<input class="form-control" id="id_file_in" name="file_in" type="file" />'
            u'</p>'
        )
        self.assertEqual(rendered, html)
        SimpleForm.bootstrap_options['as_p_use_divs'] = True
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div>'
            u'<label for="id_file_in">File in:</label> '
            u'<input class="form-control" id="id_file_in" name="file_in" type="file" />'
            u'</div>'
        )
        self.assertEqual(rendered, html)
        SimpleForm.bootstrap_options['form_layout'] = 'form-horizontal'
        sform = SimpleForm()
        rendered = sform.as_p()
        html = (
            u'<div>'
            u'<label class="col-sm-2" for="id_file_in">File in:</label> '
            u'<div class="col-sm-10">'
            u'<input class="form-control" id="id_file_in" name="file_in" type="file" />'
            u'</div>'
            u'</div>'
        )
        self.assertEqual(rendered, html)
| {
"repo_name": "bmsilva/django-forms-bootstrapped",
"path": "django_forms_bootstrapped/tests/test_forms.py",
"copies": "1",
"size": "11079",
"license": "mit",
"hash": 4167612409363470000,
"line_mean": 34.854368932,
"line_max": 131,
"alpha_frac": 0.5075367813,
"autogenerated": false,
"ratio": 3.7265388496468215,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4734075630946821,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bms'
import sys
from django.conf import settings
def main():
    """Configure a minimal standalone Django environment and run the
    django_forms_bootstrapped test suite, exiting with the failure count."""
    # Logging: a single DEBUG console handler for the package under test.
    logging_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            },
        },
        'formatters': {
            'simple': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
            },
        },
        'handlers': {
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'simple',
            },
        },
        'loggers': {
            'django_forms_bootstrapped': {
                'handlers': ['console'],
                'level': 'DEBUG',
            },
        },
    }
    # Minimal settings: in-memory-style sqlite DB, only the app under test.
    settings.configure(
        DEBUG=True,
        DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3'}},
        #ROOT_URLCONF='myapp.urls',
        INSTALLED_APPS=(
            # 'django.contrib.auth',
            # 'django.contrib.contenttypes',
            # 'django.contrib.sessions',
            # 'django.contrib.admin',
            'django_forms_bootstrapped',
        ),
        LOGGING=logging_config,
    )
    # Import after configure(): get_runner needs settings to be ready.
    from django.test.utils import get_runner
    runner_cls = get_runner(settings)
    test_runner = runner_cls(verbosity=2, interactive=True)
    failures = test_runner.run_tests(['django_forms_bootstrapped'])
    # Exit code == number of failures, so CI sees a non-zero status on failure.
    sys.exit(failures)
# Script entry point: run the test suite when invoked directly.
if __name__ == '__main__':
    main()
| {
"repo_name": "bmsilva/django-forms-bootstrapped",
"path": "run_tests.py",
"copies": "1",
"size": "1564",
"license": "mit",
"hash": -5155171105587350000,
"line_mean": 26.9285714286,
"line_max": 80,
"alpha_frac": 0.439258312,
"autogenerated": false,
"ratio": 4.520231213872832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5459489525872833,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bmustafa'
import numpy as np
import warnings
import matplotlib.pyplot as plt
from skimage.segmentation import random_walker
def rw_segment(image, seeds, threshold=0.95, beta=90, bg_label=-1, return_bg_label=False, debug=False):
    """Segment a volume slice-by-slice with scikit-image's random walker.

    Uses scikit-image's implementation of Leo Grady's Random Walker algorithm.

    :param image: array-like volume, expected layout (width, height, slice).
    :param seeds: array-like of known labels, same shape as image; 0 marks
        unlabelled pixels.
    :param threshold: probability cutoff — a pixel keeps a label only if that
        label's (winning) probability is >= threshold.
    :param beta: gradient penalization coefficient; higher beta increases the
        effect of intensity changes (i.e. lower probabilities across edges).
    :param bg_label: label assigned to the background.  The default -1 means
        the background IS labelled and is assumed to be the highest-valued
        label in the seeds; pass 0 if the background is unlabelled.
    :param return_bg_label: if True, background pixels keep their label in the
        returned mask instead of being dropped.
    :param debug: if True, sequentially display scan / seeds / segmentation
        for every slice.
    :returns: ndarray of the same shape as `image` containing the thresholded
        labels (0 where no label reached `threshold`).  For slices where the
        random walker fails, the original seeds are passed through.
    """
    slice_count = seeds.shape[2]
    # Labels are the unique values in the seed mask, minus the 0 "unlabelled" entry.
    labels = np.unique(seeds).astype(int)
    labels = np.delete(labels, 0)
    seeds = seeds.astype(int)
    new_labels = np.zeros(image.shape)
    # Resolve the background label convention.
    if bg_label == -1:
        # Assume the largest label is the background.
        bg_label = np.amax(labels)
    elif bg_label == 0:
        warnings.warn("WARNING: Input does have labelled background. "
                      "Results are typically much better with background seeds.")
    for sliceNo in range(0, slice_count):
        # Isolate image data + seeds for this slice.
        data = np.squeeze(image[:, :, sliceNo])
        markers = np.squeeze(seeds[:, :, sliceNo])
        try:
            probs = random_walker(data, markers, beta=beta, mode='cg_mg', return_full_prob=True)
            # Wherever another label's probability is higher, zero this label's probability.
            for lab_id_src in range(0, probs.shape[0]):
                for lab_id_trg in range(0, probs.shape[0]):
                    if lab_id_trg != lab_id_src:
                        probs[lab_id_src, probs[lab_id_trg, :, :] > probs[lab_id_src, :, :]] = 0
            # Threshold the surviving probabilities to a hard 0/1 mask.
            probs[probs >= threshold] = 1
            probs[probs < threshold] = 0
            # Merge the per-label masks into the output label array.
            for lab_id_src in range(0, probs.shape[0]):
                # BUGFIX: the original condition was
                #     labels[lab_id_src] != bg_label | return_bg_label
                # which Python parses as `labels[...] != (bg_label | return_bg_label)`
                # because bitwise `|` binds tighter than `!=`.  With
                # return_bg_label=True that wrongly *excluded* some labels.
                # Intended meaning: skip the background label unless the caller
                # asked for it to be returned.
                if labels[lab_id_src] != bg_label or return_bg_label:
                    new_labels[:, :, sliceNo] += probs[lab_id_src, :, :] * labels[lab_id_src]
            if debug:
                # Plot scan, seeds and resulting segmentation side by side.
                fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 3.2),
                                                    sharex=True, sharey=True)
                ax1.imshow(data, cmap='gray', interpolation='nearest')
                ax1.axis('off')
                ax1.set_adjustable('box-forced')
                ax1.set_title('MRI slice %s' % (sliceNo + 1))
                ax2.imshow(markers, cmap='magma', interpolation='nearest')
                ax2.axis('off')
                ax2.set_adjustable('box-forced')
                ax2.set_title('Markers')
                ax3.imshow(new_labels[:, :, sliceNo]/np.amax(labels) + 0.3*data/np.amax(data), cmap='gray', interpolation='nearest')
                ax3.axis('off')
                ax3.set_adjustable('box-forced')
                ax3.set_title('Segmentation')
                fig.tight_layout()
                plt.show()
        except Exception:
            # Deliberate best-effort fallback: keep the user's seeds for
            # slices where the random walker fails.
            warnings.warn("WARNING: Error computing segmentation for slice {}. Outputting original seeds.".format(sliceNo))
            new_labels[:, :, sliceNo] = markers
    return new_labels
| {
"repo_name": "Basil-M/AMGEN-Summer-Project-2017",
"path": "python/code/Random walker/rw_segmenter.py",
"copies": "1",
"size": "4387",
"license": "mit",
"hash": 7968893898825929000,
"line_mean": 42.4356435644,
"line_max": 132,
"alpha_frac": 0.580123091,
"autogenerated": false,
"ratio": 4.0210815765352885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5101204667535288,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bnelson'
from SIDServer import Objects
import io
import yaml
import datetime as dt
import math
import numpy as np
from matplotlib.mlab import psd
import h5py
import os
import zipfile
import bz2
class PrintHelper:
    """Console logging helper that prefixes messages with a timestamp."""

    def __init__(self):
        """No instance state; all functionality is provided via static methods."""

    @staticmethod
    def print_level():
        """Minimum level a message must carry in order to be printed."""
        return 1

    @staticmethod
    def print(message, level=0):
        """Print *message* with a timestamp when *level* meets the threshold."""
        if level < PrintHelper.print_level():
            return
        stamp = dt.datetime.now()
        print("{0} : {1}".format(stamp, message))
class ConfigUtility:
    """Loads the application configuration from a YAML file."""

    def __init__(self):
        """
        Constructor
        """

    @staticmethod
    def load(filename):
        """Read *filename* as YAML and wrap the result in a Config object.

        :param filename: path to the YAML configuration file.
        :return: SIDServer.Objects.Config built from the parsed dictionary.

        BUGFIX: the file handle was previously never closed; use a context
        manager so it is released even if parsing raises.
        """
        with io.open(filename, mode="r") as stream:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects; prefer yaml.safe_load if the config
            # file is not fully trusted.
            config_dictionary = yaml.load(stream)
        return Objects.Config(config_dictionary)
class HDF5Utility:
    """Static helpers for reading and writing SidWatch HDF5 data files."""

    def __init__(self):
        """
        Constructor
        """

    @staticmethod
    def add_raw_data_set(group, name, time, data_type, sample_data):
        """Create a raw-sample dataset under *group*, stamped with *time*.

        :return: the created h5py dataset.
        """
        ds = group.create_dataset(name, (len(sample_data), ), dtype=data_type, data=sample_data)
        ds.attrs["Time"] = time.isoformat()
        return ds

    @staticmethod
    def close_file(file):
        """Flush pending writes and close an open HDF5 file."""
        file.flush()
        file.close()

    @staticmethod
    def open_file(filename, config):
        """Open (or create) an HDF5 data file and the groups enabled in *config*.

        Groups are created lazily; the site/audio attributes are written only
        when the corresponding group is first created.

        :return: dict with keys "File", "RawDataGroup", "StationsGroup" and
            "FrequencySpectrumDataGroup" (group values are None when the
            corresponding feature is disabled in config).
        """
        site_config = config.Site
        audio_config = config.Audio
        data_file = h5py.File(filename, "a")
        raw_data_group = None
        frequency_spectrum_data_group = None
        stations_group = None
        if config.SidWatch.SaveRawData:
            raw_data_group = data_file.get("raw_sid_data")
            if raw_data_group is None:
                raw_data_group = data_file.create_group("raw_sid_data")
                raw_data_group.attrs["StationName"] = site_config.Name
                raw_data_group.attrs["MonitorId"] = site_config.MonitorId
                raw_data_group.attrs["Latitude"] = site_config.Latitude
                raw_data_group.attrs["Longitude"] = site_config.Longitude
                raw_data_group.attrs["UtcOffset"] = site_config.UtcOffset
                raw_data_group.attrs["Timezone"] = site_config.Timezone
                raw_data_group.attrs["SamplingRate"] = audio_config.SamplingRate
                raw_data_group.attrs["SamplingFormat"] = audio_config.SamplingFormat
        if config.SidWatch.SaveFrequencies:
            frequency_spectrum_data_group = data_file.get("frequency_spectrum_data")
            if frequency_spectrum_data_group is None:
                frequency_spectrum_data_group = data_file.create_group("frequency_spectrum_data")
        if config.SidWatch.SaveStationData:
            stations_group = data_file.get("monitored_stations")
            if stations_group is None:
                stations_group = data_file.create_group("monitored_stations")
            for station in config.Stations:
                station_group = stations_group.get(station.CallSign)
                if station_group is None:
                    station_group = stations_group.create_group(station.CallSign)
                    station_group.attrs["CallSign"] = station.CallSign
                    station_group.attrs["Color"] = station.Color
                    station_group.attrs["Frequency"] = station.Frequency
                    station_group.attrs["MonitoredBin"] = station.MonitoredBin
        return { "File": data_file,
                 "RawDataGroup": raw_data_group,
                 "StationsGroup": stations_group,
                 "FrequencySpectrumDataGroup": frequency_spectrum_data_group }

    @staticmethod
    def add_signal_strength(station, stations_group, dataset_name, time, signal_strength):
        """Store a single signal-strength reading under the station's group."""
        station_group = stations_group.get(station.CallSign)
        if station_group is not None:
            # BUGFIX: removed dead line `name = time.isoformat` (the call
            # parentheses were missing and the value was never used).
            ds = station_group.create_dataset(dataset_name, (1, ), data=signal_strength)
            ds.attrs["Time"] = time.isoformat()

    @staticmethod
    def add_frequency_spectrum(frequency_spectrum_data_group, dataset_name, time, frequencies, Pxx):
        """Store a (frequencies, power) spectrum pair as a 2xN float64 dataset."""
        joined_array = np.vstack([frequencies.real, Pxx])
        ds = frequency_spectrum_data_group.create_dataset(dataset_name,
                                                          shape=(2, len(frequencies)),
                                                          dtype=np.float64,
                                                          data=joined_array)
        ds.attrs["Time"] = time.isoformat()

    @staticmethod
    def read_file(filename):
        """Open an existing data file and return its standard groups.

        NOTE(review): opens with mode "a", which creates the file if missing;
        "r" may be intended for a pure read -- confirm before changing.
        """
        data_file = h5py.File(filename, "a")
        raw_data_group = data_file.get("raw_sid_data")
        frequency_spectrum_data_group = data_file.get("frequency_spectrum_data")
        stations_group = data_file.get("monitored_stations")
        return {"File": data_file,
                "RawDataGroup": raw_data_group,
                "StationsGroup": stations_group,
                "FrequencySpectrumDataGroup": frequency_spectrum_data_group}
class FrequencyUtility:
    """Thin wrapper around matplotlib's power spectral density computation."""
    def __init__(self):
        """
        Constructor
        """
    @staticmethod
    def process_psd(data, nfft=1024, audio_sampling_rate=96000):
        """
        Compute the power spectral density of an audio sample via matplotlib.mlab.psd.

        :param data: 1-D sequence of audio samples.
        :param nfft: number of data points used in each FFT block (was mislabelled
            "Nonequispaced FFT"; it is the NFFT window length).
        :param audio_sampling_rate: sampling rate of the audio data in Hz.
        :return: result of matplotlib.mlab.psd -- presumably a (Pxx, freqs)
            tuple; confirm against the matplotlib version in use.
        """
        return psd(data, nfft, audio_sampling_rate)
class DateUtility:
    """Date/time helpers for scheduling periodic runs."""

    def __init__(self):
        """No instance state; all functionality is static."""

    @staticmethod
    def get_next_run_time(current_date_time):
        """Return the next 5-second boundary after flooring *current_date_time*.

        The input is floored to the previous multiple of 5 seconds (dropping
        microseconds), then 5 seconds are added.
        """
        snapped_seconds = (current_date_time.second // 5) * 5
        floored = dt.datetime(current_date_time.year,
                              current_date_time.month,
                              current_date_time.day,
                              current_date_time.hour,
                              current_date_time.minute,
                              snapped_seconds)
        return floored + dt.timedelta(seconds=5)
class ZipUtility:
    """Compression helpers for archiving data files."""

    @staticmethod
    def zip_file_and_delete_original(file_to_zip):
        """Compress *file_to_zip* into a bzip2-compressed zip and delete the source.

        The archive is created next to the original; a trailing ``.h5``
        extension is replaced with ``.bz2``, any other name gets ``.bz2``
        appended. Only the bare file name (not the directory path) is stored
        inside the archive.

        :param file_to_zip: path of the file to compress.
        :return: path of the created archive.
        """
        if file_to_zip.endswith(".h5"):
            # BUGFIX: was str.replace(".h5", ".bz2"), which replaces the FIRST
            # ".h5" anywhere in the path; strip only the trailing extension.
            zip_archive_name = file_to_zip[:-len(".h5")] + ".bz2"
        else:
            zip_archive_name = file_to_zip + ".bz2"
        head, tail = os.path.split(file_to_zip)
        # BUGFIX: use a context manager so the archive handle is closed even
        # if writing raises; previously an exception leaked the open handle.
        with zipfile.ZipFile(zip_archive_name, 'w', compression=zipfile.ZIP_BZIP2) as zip_archive:
            zip_archive.write(file_to_zip, arcname=tail)
        os.remove(file_to_zip)
        return zip_archive_name
| {
"repo_name": "SidWatch/pySIDServerDataProcessor",
"path": "source/SIDServer/Utilities.py",
"copies": "1",
"size": "6570",
"license": "mit",
"hash": 7018871561048283000,
"line_mean": 31.2058823529,
"line_max": 100,
"alpha_frac": 0.5736681887,
"autogenerated": false,
"ratio": 4.05305367057372,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006078272491807828,
"num_lines": 204
} |
__author__ = 'bnelson'
import datetime as dt
import dateutil.parser
import os
import time as threadtime
import numpy as np
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from SIDServer.Objects import File
from SIDServer.Objects import StationReading
from SIDServer.Objects import SiteSpectrumReading
from SIDServer.Objects import SiteSpectrum
from SIDServer.Utilities import HDF5Utility
from SIDServer.Utilities import DateUtility
from SIDServer.Utilities import FrequencyUtility
from SIDServer.Utilities import ZipUtility
from SIDServer.Utilities import PrintHelper
from SIDServer.DatabaseAccess import DataAccessObject
class SendToSidWatchServerController:
    """Polls an S3 source bucket for SidWatch HDF5 files, imports their
    readings into the database, and archives processed files to a
    destination bucket."""

    def __init__(self, config):
        """
        Constructor

        :param config: loaded configuration object with SidWatchServer settings.
        """
        self.Config = config
        self.Done = True

    def start(self):
        """Run the polling/processing loop until stop() sets self.Done."""
        self.Done = False
        source_bucket_name = self.Config.SidWatchServer.SourceBucketName
        destination_bucket_name = self.Config.SidWatchServer.DestinationBucketName
        access_key = self.Config.SidWatchServer.AccessKey
        secret_key = self.Config.SidWatchServer.SecretKey
        temp_folder = self.Config.SidWatchServer.TempFolder
        while not self.Done:
            PrintHelper.print('Checking for files', 1)
            connection = S3Connection(access_key, secret_key, calling_format=OrdinaryCallingFormat())
            destination_bucket = connection.get_bucket(destination_bucket_name)
            source_bucket = connection.get_bucket(source_bucket_name)
            objects = source_bucket.list()
            for key in objects:
                file_name = key.key
                working_file = temp_folder + file_name
                PrintHelper.print('Downloading {0}'.format(file_name), 1)
                key.get_contents_to_filename(working_file)
                PrintHelper.print('Processing {0}'.format(file_name))
                result = HDF5Utility.read_file(working_file)
                # Unpack the HDF5 groups produced by the recorder.
                data_file = result["File"]
                raw_data_group = result["RawDataGroup"]
                stations_group = result["StationsGroup"]
                frequency_spectrum_group = result["FrequencySpectrumDataGroup"]
                monitor_id = data_file.attrs['MonitorId']
                created_time = dateutil.parser.parse(data_file.attrs['CreatedDateTime'])
                # Read for validation; currently unused downstream.
                utc_offset = data_file.attrs['UtcOffset']
                timezone = data_file.attrs['Timezone']
                dao = DataAccessObject(self.Config)
                site = dao.get_site(monitor_id)
                if site is not None:
                    file = dao.get_file(file_name)
                    if file is None:
                        file = self.build_new_file(dao, file_name, site.Id, created_time)
                    # process the stations
                    sg_keys = stations_group.keys()
                    for sg_key in sg_keys:
                        self.process_station(dao, file, stations_group[sg_key])
                    # process the frequency spectrum
                    fsg_keys = frequency_spectrum_group.keys()
                    for fsg_key in fsg_keys:
                        self.process_site_spectrum(dao,
                                                   file,
                                                   frequency_spectrum_group,
                                                   frequency_spectrum_group[fsg_key],
                                                   destination_bucket)
                    # currently raw data isn't processed due to the size required. It is made
                    # available in the zipped hd5 files via S3 web sharing.
                    data_file.close()
                    # send the zipped archive to the destination
                    self.send_processed_file(monitor_id, working_file, destination_bucket)
                    # delete the archive from the source
                    source_bucket.delete_key(key.key)
                    PrintHelper.print('Processing complete on {0}'.format(file_name), 1)
                else:
                    PrintHelper.print('Site {0} was not found'.format(monitor_id))
                    data_file.close()
                    # need to move to process later since site doesn't exist
                dao.DB.commit()
                dao.close()
            PrintHelper.print('Sleeping for 60 seconds', 1)
            threadtime.sleep(60)
        else:
            # NOTE(review): this is a while/else, so it runs whenever the loop
            # exits without break (i.e. after stop() is called); the message
            # suggests it was meant to guard a missing authentication check --
            # confirm the intended control flow.
            PrintHelper.print('Bad user or password information provided.', 4)

    def build_new_file(self, dao, file_name, site_id, created_time):
        """Create and persist a new File record for *file_name*."""
        file = File()
        file.Archived = False
        file.Available = False
        file.CreatedAt = dt.datetime.utcnow()
        file.UpdatedAt = file.CreatedAt
        file.FileName = file_name
        file.Processed = False
        file.SiteId = site_id
        file.DateTime = created_time
        dao.save_file(file)
        return file

    def send_processed_file(self, monitor_id, working_file, destination_bucket):
        """Zip *working_file*, upload it under the monitor's S3 prefix, and
        remove the local copy."""
        if working_file is not None:
            zip_file_name = ZipUtility.zip_file_and_delete_original(working_file)
            head, tail = os.path.split(zip_file_name)
            PrintHelper.print('Starting to move file to S3 ({0})'.format(tail))
            item_key = Key(destination_bucket)
            item_key.key = '//{0}//{1}'.format(monitor_id, tail)
            item_key.set_contents_from_filename(zip_file_name)
            PrintHelper.print('Completed moving file to S3 ({0})'.format(tail))
            os.remove(zip_file_name)
            PrintHelper.print('Removed local copy of file ({0})'.format(tail))
        else:
            PrintHelper.print('Working file was not specified to zip and move ')

    def process_station(self, dao, file, group):
        """Bulk-insert every signal-strength reading in *group* for its station."""
        callsign = group.attrs["CallSign"]
        station = dao.get_station(callsign)
        if file is not None:
            if station is not None:
                ds_keys = group.keys()
                bulk_data = []
                for sg_key in ds_keys:
                    dataset = group[sg_key]
                    time = dataset.attrs['Time']
                    rdt = dateutil.parser.parse(time)
                    signal_strength = dataset[0]
                    reading = StationReading()
                    reading.SiteId = file.SiteId
                    reading.StationId = station.Id
                    reading.ReadingDateTime = rdt
                    reading.FileId = file.Id
                    reading.CreatedAt = dt.datetime.utcnow()
                    reading.UpdatedAt = reading.CreatedAt
                    reading.ReadingMagnitude = signal_strength
                    bulk_data.append(reading.to_insert_array())
                PrintHelper.print('Processing Station {0}: Count - {1}'.format(callsign, len(bulk_data)), 1)
                dao.save_many_station_reading(bulk_data)
                dao.DB.commit()
            else:
                PrintHelper.print('Station is not found in database')
        else:
            PrintHelper.print('File was not supplied')

    def process_site_spectrum(self, dao, file, group, dataset, destination_bucket):
        """Save one frequency-spectrum dataset as a SiteSpectrum row and
        publish its readings as JSON to S3."""
        PrintHelper.print('Processing Frequency Spectrum Dataset - {0}'.format(dataset.name), 1)
        if file is not None:
            if dataset is not None:
                time = dataset.attrs['Time']
                reading_datetime = dateutil.parser.parse(time)
                # BUGFIX: datetime.replace returns a new object; the result was
                # previously discarded, leaving microseconds in the lookup key.
                reading_datetime = reading_datetime.replace(microsecond=0)
                site_spectrum = dao.get_site_spectrum(file.SiteId, reading_datetime)
                if site_spectrum is None:
                    site_spectrum = SiteSpectrum()
                    site_spectrum.SiteId = file.SiteId
                    # BUGFIX: store the parsed (microsecond-truncated) datetime
                    # rather than the raw attribute string, matching
                    # process_station and the get_site_spectrum lookup above.
                    site_spectrum.ReadingDateTime = reading_datetime
                    site_spectrum.FileId = file.Id
                    site_spectrum.CreatedAt = dt.datetime.utcnow()
                    site_spectrum.UpdatedAt = site_spectrum.CreatedAt
                else:
                    site_spectrum.UpdatedAt = dt.datetime.utcnow()
                site_spectrum.NFFT = int(group.attrs.get('NFFT', 1024))
                site_spectrum.SamplesPerSeconds = int(group.attrs.get('SamplingRate', 96000))
                site_spectrum.SamplingFormat = int(group.attrs.get('SamplingFormat', 24))
                PrintHelper.print("Spectrum save started")
                site_spectrum.Id = dao.save_site_spectrum(site_spectrum)
                dao.DB.commit()
                PrintHelper.print("SiteSpectrumId - {0}".format(site_spectrum.Id), 1)
                PrintHelper.print("Spectrum save complete")
                self.process_site_spectrum_data_to_json(destination_bucket, site_spectrum, dataset)
            else:
                PrintHelper.print('Dataset not supplied')
        else:
            PrintHelper.print('File was not supplied')

    def process_site_spectrum_data_to_json(self, destination_bucket, site_spectrum, dataset):
        """Serialize a 2xN (frequency, magnitude) dataset as JSON and upload it
        to S3 under the spectrum's id."""
        if site_spectrum is not None:
            if dataset is not None:
                shape = dataset.shape
                array = np.zeros(shape)
                dataset.read_direct(array)
                rows = shape[0]
                if rows == 2:
                    width = shape[1]
                    json_open = '{' + '"SpectrumId":{0}, "SpectrumData":'.format(site_spectrum.Id) + '{'
                    bulk_data = [json_open]
                    for x in range(0, width):
                        frequency = array[0, x]
                        reading_magnitude = array[1, x]
                        # Comma after every entry except the last.
                        if x < width-1:
                            bulk_data.append('"{0}" : {1},'.format(frequency, reading_magnitude))
                        else:
                            bulk_data.append('"{0}" : {1}'.format(frequency, reading_magnitude))
                    bulk_data.append('}}')
                    json_data = ''.join(bulk_data)
                    PrintHelper.print("Writing Spectrum Data to S3")
                    item_key = Key(destination_bucket)
                    item_key.key = '//frequency_spectrums//{0}.json'.format(site_spectrum.Id)
                    item_key.set_contents_from_string(json_data)
                    item_key.close()
                    PrintHelper.print("Completed writing data to S3")
                else:
                    PrintHelper.print('Frequency Spectrum data set not the correct shape')
            else:
                PrintHelper.print('Dataset not supplied')
        else:
            PrintHelper.print('site spectrum not supplied')

    def process_site_spectrum_data(self, dao, site_spectrum, dataset):
        """Bulk-insert every (frequency, magnitude) pair of a 2xN dataset as
        SiteSpectrumReading rows."""
        if site_spectrum is not None:
            if dataset is not None:
                shape = dataset.shape
                array = np.zeros(shape)
                dataset.read_direct(array)
                rows = shape[0]
                if rows == 2:
                    width = shape[1]
                    bulk_data = []
                    for x in range(0, width):
                        frequency = array[0, x]
                        reading_magnitude = array[1, x]
                        reading = SiteSpectrumReading()
                        reading.Id = 0
                        reading.SiteSpectrumId = site_spectrum.Id
                        reading.Frequency = frequency
                        reading.ReadingMagnitude = reading_magnitude
                        reading_data = reading.to_insert_array()
                        bulk_data.append(reading_data)
                    PrintHelper.print("Spectrum bulk insert beginning")
                    dao.save_many_site_spectrum_reading(bulk_data)
                    PrintHelper.print("Spectrum bulk insert complete")
                else:
                    PrintHelper.print('Frequency Spectrum data set not the correct shape')
            else:
                PrintHelper.print('Dataset not supplied')
        else:
            PrintHelper.print('site spectrum not supplied')

    def stop(self):
        """Signal the processing loop to exit after the current iteration."""
        self.Done = True
| {
"repo_name": "SidWatch/pySIDServerDataProcessor",
"path": "source/SIDServer/Controllers.py",
"copies": "1",
"size": "12201",
"license": "mit",
"hash": -3010909326260304000,
"line_mean": 39.4006622517,
"line_max": 107,
"alpha_frac": 0.5500368822,
"autogenerated": false,
"ratio": 4.602414183327046,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016266779833226392,
"num_lines": 302
} |
"""
nimsdata.medimg.nimsmontage
===========================
NIMSMontage provides montage writing capabilities for MR datasets read by any subclass of NIMSMRReader.
Provides a NIMSMRWriter subclass for creating image pyramids.
"""
import os
import math
import logging
import sqlite3
import cStringIO
import numpy as np
from PIL import Image
import medimg
log = logging.getLogger(__name__)
def get_tile(dbfile, z, x, y):
    """Get a specific image tile from an sqlite db.

    :param dbfile: path to the sqlite pyramid database.
    :param z: zoom level of the tile.
    :param x: tile column index.
    :param y: tile row index.
    :return: the tile image blob, coerced with str().
    """
    con = sqlite3.connect(dbfile)
    try:
        with con:
            cur = con.cursor()
            cur.execute('SELECT image FROM tiles where z=? AND x=? AND y=?', (z, x, y))
            image = cur.fetchone()[0]
    finally:
        # BUGFIX: `with con` only manages the transaction; close the
        # connection explicitly so handles are not leaked.
        con.close()
    return str(image)
def get_info(dbfile):
    """Return the tile_size, x_size, and y_size from the sqlite pyramid db."""
    con = sqlite3.connect(dbfile)
    try:
        with con:
            cur = con.cursor()
            cur.execute('SELECT * FROM info')
            tile_size, x_size, y_size = cur.fetchone()
    except sqlite3.Error as e:
        # BUGFIX: the previous handler caught NIMSMontageError (which sqlite
        # never raises) and then hit a NameError returning unbound locals.
        # Log and re-raise so callers see the real database error.
        logging.getLogger(__name__).warning(str(e))
        raise
    finally:
        # BUGFIX: close the connection; `with con` only manages the transaction.
        con.close()
    return tile_size, x_size, y_size
def generate_montage(imagedata, timepoints=None, bits16=False):
    """Generate a 2-D montage by tiling the slices of a volume.

    NIfTI images can have up to 7 dimensions; the 4th is by convention time.
    All dimensions above the 3rd are collapsed into the slice axis.
    TODO: we should handle data_type = RGB as a special case.
    TODO: we could look at the qto_xyz to infer the optimal transpose for any dataset.

    :param imagedata: ndarray with at least 2 dimensions.
    :param timepoints: optional sequence of time indices to keep (4-D+ data).
        BUGFIX: default changed from a shared mutable [] to None (equivalent
        behavior, safer default).
    :param bits16: if True produce a uint16 montage, otherwise uint8.
    :return: 2-D ndarray containing the tiled slices.
    :raises NIMSMontageError: if imagedata has fewer than 2 dimensions.
    """
    if timepoints is None:
        timepoints = []
    # This transpose (usually) makes the resulting images come out in a more
    # standard orientation. BUGFIX: build the axes as a plain int list;
    # np.concatenate(([1, 0], range(2, 2))) yields a float array for 2-D input
    # and transpose() rejects float axes on Python 3 / modern NumPy.
    data = imagedata.transpose([1, 0] + list(range(2, imagedata.ndim)))
    num_images = np.prod(data.shape[2:])
    if data.ndim < 2:
        raise NIMSMontageError('NIfTI file must have at least 2 dimensions')
    elif data.ndim == 2:
        # a single slice: no need to do anything
        num_cols = 1
        data = np.atleast_3d(data)
    elif data.ndim == 3:
        # a simple (x, y, z) volume- set num_cols to produce a square(ish) montage.
        rows_to_cols_ratio = float(data.shape[0])/float(data.shape[1])
        num_cols = int(math.ceil(math.sqrt(float(num_images)) * math.sqrt(rows_to_cols_ratio)))
    elif data.ndim >= 4:
        # timeseries (x, y, z, t) or more
        num_cols = data.shape[2]
        data = data.transpose([0, 1, 3, 2] + list(range(4, data.ndim))).reshape(data.shape[0], data.shape[1], num_images)
    if len(timepoints) > 0:
        data = data[..., timepoints]
    num_rows = int(np.ceil(float(data.shape[2])/float(num_cols)))
    montage = np.zeros((data.shape[0] * num_rows, data.shape[1] * num_cols), dtype=data.dtype)
    for im_num in range(data.shape[2]):
        # BUGFIX: use integer division; `/` is float division on Python 3 and
        # would produce float slice indices.
        slice_r, slice_c = im_num // num_cols * data.shape[0], im_num % num_cols * data.shape[1]
        montage[slice_r:slice_r + data.shape[0], slice_c:slice_c + data.shape[1]] = data[:, :, im_num]
    if montage.dtype == np.uint8 and bits16:
        # BUGFIX: previously cast `data` (the full slice stack) instead of the
        # assembled montage, returning an array of the wrong shape; also use
        # astype() since np.cast was removed in NumPy 2.0.
        montage = montage.astype(np.uint16)
    elif montage.dtype != np.uint8 or (montage.dtype != np.uint16 and bits16):
        montage = montage.astype(np.float32)  # do scaling/clipping with floats
        clip_vals = np.percentile(montage, (20.0, 99.0))  # auto-window the data by clipping
        montage = montage.clip(clip_vals[0], clip_vals[1]) - clip_vals[0]
        window = clip_vals[1] - clip_vals[0]
        if window == 0:
            window = 1  # constant image; avoid a divide-by-zero (BUGFIX)
        if bits16:
            montage = np.round(montage/window*65535).astype(np.uint16)
        else:
            montage = np.round(montage/window*255.0).astype(np.uint8)
    return montage
def generate_pyramid(montage, tile_size):
    """
    Slice up a NIfTI file into a multi-res pyramid of tiles.
    We use the file name convention suitable for PanoJS (http://www.dimin.net/software/panojs/):
    The zoom level (z) is an integer between 1 and n, where 0 is fully zoomed in and n is zoomed out.
    E.g., z=n is for 1 tile covering the whole world, z=n-1 is for 2x2=4 tiles, ... z=0 is the original resolution.

    Returns a tuple (pyramid, size): *pyramid* maps (z, x, y) tuples to
    cStringIO buffers holding JPEG tile data; *size* is the cropped montage
    size. Raises NIMSMontageError for a degenerate (empty) montage.
    """
    montage_image = Image.fromarray(montage, 'L')
    montage_image = montage_image.crop(montage_image.getbbox()) # crop away edges that contain only zeros
    sx, sy = montage_image.size
    if sx * sy < 1:
        raise NIMSMontageError('degenerate image size (%d, %d): no tiles will be created' % (sx, sy))
    if sx < tile_size and sy < tile_size: # Panojs chokes if the lowest res image is smaller than the tile size.
        tile_size = max(sx, sy)
    pyramid = {}
    # Number of zoom levels required for the coarsest level to fit in one tile.
    divs = max(1, int(np.ceil(np.log2(float(max(sx, sy))/tile_size))) + 1)
    for z in range(divs):
        # Halve the resolution at each level, preserving aspect ratio.
        ysize = int(round(float(sy)/pow(2, z)))
        xsize = int(round(float(ysize)/sy*sx))
        xpieces = int(math.ceil(float(xsize)/tile_size))
        ypieces = int(math.ceil(float(ysize)/tile_size))
        log.debug('level %s, size %dx%d, splits %d,%d' % (z, xsize, ysize, xpieces, ypieces))
        # TODO: we don't need to use 'thumbnail' here. This function always returns a square
        # image of the requested size, padding and scaling as needed. Instead, we should resize
        # and chop the image up, with no padding, ever. panojs can handle non-square images
        # at the edges, so the padding is unnecessary and, in fact, a little wrong.
        im = montage_image.copy()
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        # Image.LANCZOS); fine for the Pillow/PIL versions this Python 2
        # code targets -- confirm before upgrading Pillow.
        im.thumbnail([xsize, ysize], Image.ANTIALIAS)
        im = im.convert('L') # convert to grayscale
        for x in range(xpieces):
            for y in range(ypieces):
                tile = im.copy().crop((x*tile_size, y*tile_size, min((x+1)*tile_size, xsize), min((y+1)*tile_size, ysize)))
                buf = cStringIO.StringIO()
                tile.save(buf, 'JPEG', quality=85)
                pyramid[(z, x, y)] = buf
    return pyramid, montage_image.size
def generate_sqlite_pyr(imagedata, outbase, tile_size=512):
    """Generate a multi-resolution image pyramid and store the resulting jpeg files in an sqlite db.

    :param imagedata: volume passed through generate_montage/generate_pyramid.
    :param outbase: output sqlite file path; an existing file is overwritten.
    :param tile_size: tile edge length in pixels.
    :return: outbase, the path of the generated database.
    :raises NIMSMontageError: if the database was not created.
    """
    montage = generate_montage(imagedata)
    pyramid, pyramid_size = generate_pyramid(montage, tile_size)
    if os.path.exists(outbase):
        os.remove(outbase)
    con = sqlite3.connect(outbase)
    try:
        with con:
            cur = con.cursor()
            cur.execute('CREATE TABLE info(tile_size INT, x_size INT, y_size INT)')
            cur.execute('CREATE TABLE tiles(z INT, x INT, y INT, image BLOB)')
            cur.execute('INSERT INTO info(tile_size,x_size,y_size) VALUES (?,?,?)', (tile_size,) + pyramid_size)
            for idx, tile_buf in pyramid.iteritems():
                cur.execute('INSERT INTO tiles(z,x,y,image) VALUES (?,?,?,?)', idx + (sqlite3.Binary(tile_buf.getvalue()),))
    finally:
        # BUGFIX: `with con` only manages the transaction; close the handle so
        # it is not leaked.
        con.close()
    if not os.path.exists(outbase):
        raise NIMSMontageError('montage (sqlite pyramid) not generated')
    else:
        log.debug('generated %s' % os.path.basename(outbase))
    return outbase
# FIXME panojs_url should be a configurable
def generate_dir_pyr(imagedata, outbase, tile_size=256, panojs_url='https://cni.stanford.edu/nims/javascript/panojs/'):
    """Generate a panojs image pyramid directory.

    Writes the jpeg tiles under <outbase>/images/ (named zzz_xxx_yyy.jpg) and a
    pyramid.html viewer page that loads the PanoJS scripts from *panojs_url*.
    Returns *outbase*; raises NIMSMontageError if the expected output files
    were not produced.
    """
    montage = generate_montage(imagedata)
    pyramid, pyramid_size = generate_pyramid(montage, tile_size)
    # write directory pyramid
    image_path = os.path.join(outbase, 'images')
    if not os.path.exists(image_path):
        os.makedirs(image_path)
    for idx, tile_buf in pyramid.iteritems():
        # Tile filenames encode the (z, x, y) pyramid coordinates.
        with open(os.path.join(image_path, ('%03d_%03d_%03d.jpg' % idx)), 'wb') as fp:
            fp.write(tile_buf.getvalue())
    # Emit a minimal PanoJS viewer page wired to the tile directory above.
    with open(os.path.join(outbase, 'pyramid.html'), 'w') as f:
        f.write('<html>\n<head>\n<meta http-equiv="imagetoolbar" content="no"/>\n')
        f.write('<style type="text/css">@import url(' + panojs_url + 'styles/panojs.css);</style>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'extjs/ext-core.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'panojs/utils.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'panojs/PanoJS.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'panojs/controls.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'panojs/pyramid_imgcnv.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'panojs/control_thumbnail.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'panojs/control_info.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'panojs/control_svg.js"></script>\n')
        f.write('<script type="text/javascript" src="' + panojs_url + 'viewer.js"></script>\n')
        f.write('<style type="text/css">body { font-family: sans-serif; margin: 0; padding: 10px; color: #000000; background-color: #FFFFFF; font-size: 0.7em; } </style>\n')
        f.write('<script type="text/javascript">\nvar viewer = null;Ext.onReady(function () { createViewer( viewer, "viewer", "./images", "", %d, %d, %d ) } );\n</script>\n' % ((tile_size,) + pyramid_size))
        f.write('</head>\n<body>\n')
        f.write('<div style="width: 100%; height: 100%;"><div id="viewer" class="viewer" style="width: 100%; height: 100%;" ></div></div>\n')
        f.write('</body>\n</html>\n')
    # check for one image, pyramid file
    if not (os.path.exists(os.path.join(outbase, 'pyramid.html')) and os.path.exists(os.path.join(outbase, 'images', '000_000_000.jpg'))):
        raise NIMSMontageError('montage (flat png) not generated')
    else:
        log.debug('generated %s' % outbase)
    return outbase
def generate_flat(imagedata, filepath):
    """Write a flat grayscale png montage of *imagedata* to *filepath*."""
    flat = Image.fromarray(generate_montage(imagedata)).convert('L')
    flat.save(filepath, optimize=True)
    if not os.path.exists(filepath):
        raise NIMSMontageError('montage (flat png) not generated')
    log.debug('generated %s' % os.path.basename(filepath))
    return filepath
class NIMSMontageError(medimg.MedImgError):
    """Exception raised for montage generation and retrieval failures."""
    pass
class NIMSMontage(medimg.MedImgReader, medimg.MedImgWriter):
    """Reader/writer for montage image pyramids built from MR datasets."""

    domain = u'mr'
    filetype = u'montage'
    state = ['orig']

    def __init__(self, filepath, load_data=False):
        # NOTE(review): filepath is not stored here, yet get_tile/get_info use
        # self.filepath -- presumably the MedImgReader base sets it; confirm.
        super(NIMSMontage, self).__init__(load_data=load_data)
        self.data = None  # contains montage
        if load_data:
            self.load_data(preloaded=True)

    def load_data(self, preloaded=False):
        """Load montage data (reading from disk is not yet implemented)."""
        super(NIMSMontage, self).load_data(preloaded=preloaded)
        # BUGFIX: the format string previously had no argument and logged a
        # literal '%s'.
        log.debug('loading %s' % self.filepath)
        if not preloaded:
            # read the data
            pass

    def get_tile(self, x, y, z):
        """Return the JPEG tile at (z, x, y) from the pyramid database.

        BUGFIX: arguments were previously passed to the module-level get_tile
        (whose signature is (dbfile, z, x, y)) in the wrong order, and the
        result was discarded.
        """
        return get_tile(self.filepath, z, x, y)

    def get_info(self):
        """Return (tile_size, x_size, y_size) of the pyramid database.

        BUGFIX: the result was previously computed and discarded.
        """
        return get_info(self.filepath)

    @classmethod
    def write(cls, metadata, imagedata, outbase, voxel_order=None, mtype='sqlite', tilesize=512, multi=False):
        """
        Write the metadata and imagedata to image montage pyramid.

        Parameters
        ----------
        metadata : object
            fully loaded instance of a NIMSReader.
        imagedata : dict
            dictionary of np.darrays. label suffix as keys, with np.darrays as values.
        outbase : str
            output name prefix.
        voxel_order : str [default None]
            three character string indicating the voxel order, ex. 'LPS'.
        mtype : str [default 'sqlite']
            type of montage to create. can be 'sqlite', 'dir', or 'png'.
        tilesize : int [default 512]
            tilesize for generated sqlite or directory pyramid. Has no effect on mtype 'png'.
        multi : bool [default False]
            True indicates to write multiple files. False only writes primary data in imagedata['']

        Returns
        -------
        results : list
            list of files written.

        Raises
        ------
        NIMSDataError
            metadata or data is None.
        """
        if isinstance(imagedata, basestring):
            import nibabel
            from shutil import rmtree
            from glob import glob
            # imagedata is a directory containing nifti(s)
            log.info('Loading files from %s' % imagedata)
            niftis = glob(imagedata + '/' + str(metadata.exam_no) + '_' + str(metadata.series_no) + '_' + str(metadata.acq_no) + '.nii.gz')
            results = []
            for f in niftis:
                # HACK for the outname when there're more than one nifti files for the dataset
                outname = outbase + os.path.basename(f)[(len(str(metadata.exam_no))+len(str(metadata.series_no))+len(str(metadata.acq_no))+2):-7]
                data = nibabel.load(f).get_data()
                if voxel_order:
                    data, _ = cls.reorder_voxels(data, metadata.qto_xyz, voxel_order)
                if mtype == 'sqlite':
                    log.debug('type: sqlite')
                    result = generate_sqlite_pyr(data, outname + '.pyrdb', tilesize)
                elif mtype == 'dir':
                    log.debug('type: directory')
                    result = generate_dir_pyr(data, outname, tilesize)
                elif mtype == 'png':
                    log.debug('type: flat png')
                    result = generate_flat(data, outname + '.png')
                else:
                    raise NIMSMontageError('montage mtype must be sqlite, dir or png. not %s' % mtype)
                results.append(result)
            rmtree(imagedata)
        else:
            super(NIMSMontage, cls).write(metadata, imagedata, outbase, voxel_order)
            results = []
            for data_label, data in imagedata.iteritems():
                # BUGFIX: was `data_label is not ''` -- identity comparison
                # against a string literal; use equality.
                if not multi and data_label != '':
                    continue
                if data is None:
                    continue
                # (removed redundant `data = imagedata.get(data_label)`; the
                # loop already yields the value.)
                outname = outbase + data_label
                if voxel_order:
                    data, _ = cls.reorder_voxels(data, metadata.qto_xyz, voxel_order)
                if mtype == 'sqlite':
                    log.debug('type: sqlite')
                    result = generate_sqlite_pyr(data, outname + '.pyrdb', tilesize)
                elif mtype == 'dir':
                    log.debug('type: directory')
                    result = generate_dir_pyr(data, outname, tilesize)
                elif mtype == 'png':
                    log.debug('type: flat png')
                    result = generate_flat(data, outname + '.png')
                else:
                    raise NIMSMontageError('montage mtype must be sqlite, dir or png. not %s' % mtype)
                results.append(result)
        return results
write = NIMSMontage.write  # module-level alias so callers can use nimsmontage.write(...)
| {
"repo_name": "cni/nimsdata",
"path": "medimg/nimsmontage.py",
"copies": "1",
"size": "15269",
"license": "mit",
"hash": -3379602223019705000,
"line_mean": 43.7771260997,
"line_max": 210,
"alpha_frac": 0.6011526623,
"autogenerated": false,
"ratio": 3.452181777074384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.954387583953027,
"avg_score": 0.0018917199688227395,
"num_lines": 341
} |
"""
scitran.data.medimg.montage
===========================
Montage provides montage writing capabilities for MR datasets read by any subclass of MedImgReader.
Provides a MedImgWriter subclass for creating image pyramids.
"""
import os
import json
import math
import logging
import zipfile
import cStringIO
import numpy as np
from PIL import Image
import medimg
log = logging.getLogger(__name__)
def generate_montage(imagedata, timepoints=None, bits16=False):
    """Generate a 2-D montage by tiling the slices of a volume.

    NIfTI images can have up to 7 dimensions; the 4th is by convention time.
    All dimensions above the 3rd are collapsed into the slice axis.
    TODO: we should handle data_type = RGB as a special case.
    TODO: we could look at the qto_xyz to infer the optimal transpose for any dataset.

    :param imagedata: ndarray with at least 2 dimensions.
    :param timepoints: optional sequence of time indices to keep (4-D+ data).
        BUGFIX: default changed from a shared mutable [] to None (equivalent
        behavior, safer default).
    :param bits16: if True produce a uint16 montage, otherwise uint8.
    :return: 2-D ndarray containing the tiled slices.
    :raises MontageError: if imagedata has fewer than 2 dimensions.
    """
    if timepoints is None:
        timepoints = []
    # This transpose (usually) makes the resulting images come out in a more
    # standard orientation. BUGFIX: build the axes as a plain int list;
    # np.concatenate(([1, 0], range(2, 2))) yields a float array for 2-D input
    # and transpose() rejects float axes on Python 3 / modern NumPy.
    data = imagedata.transpose([1, 0] + list(range(2, imagedata.ndim)))
    num_images = np.prod(data.shape[2:])
    if data.ndim < 2:
        raise MontageError('NIfTI file must have at least 2 dimensions')
    elif data.ndim == 2:
        # a single slice: no need to do anything
        num_cols = 1
        data = np.atleast_3d(data)
    elif data.ndim == 3:
        # a simple (x, y, z) volume- set num_cols to produce a square(ish) montage.
        rows_to_cols_ratio = float(data.shape[0])/float(data.shape[1])
        num_cols = int(math.ceil(math.sqrt(float(num_images)) * math.sqrt(rows_to_cols_ratio)))
    elif data.ndim >= 4:
        # timeseries (x, y, z, t) or more
        num_cols = data.shape[2]
        data = data.transpose([0, 1, 3, 2] + list(range(4, data.ndim))).reshape(data.shape[0], data.shape[1], num_images)
    if len(timepoints) > 0:
        data = data[..., timepoints]
    num_rows = int(np.ceil(float(data.shape[2])/float(num_cols)))
    montage = np.zeros((data.shape[0] * num_rows, data.shape[1] * num_cols), dtype=data.dtype)
    for im_num in range(data.shape[2]):
        # BUGFIX: use integer division; `/` is float division on Python 3 and
        # would produce float slice indices.
        slice_r, slice_c = im_num // num_cols * data.shape[0], im_num % num_cols * data.shape[1]
        montage[slice_r:slice_r + data.shape[0], slice_c:slice_c + data.shape[1]] = data[:, :, im_num]
    if montage.dtype == np.uint8 and bits16:
        # BUGFIX: previously cast `data` (the full slice stack) instead of the
        # assembled montage, returning an array of the wrong shape; also use
        # astype() since np.cast was removed in NumPy 2.0.
        montage = montage.astype(np.uint16)
    elif montage.dtype != np.uint8 or (montage.dtype != np.uint16 and bits16):
        montage = montage.astype(np.float32)  # do scaling/clipping with floats
        clip_vals = np.percentile(montage, (20.0, 99.0))  # auto-window the data by clipping
        montage = montage.clip(clip_vals[0], clip_vals[1]) - clip_vals[0]
        window = clip_vals[1] - clip_vals[0]
        if window == 0:
            window = 1  # constant image; avoid a divide-by-zero (BUGFIX)
        if bits16:
            montage = np.round(montage/window*65535).astype(np.uint16)
        else:
            montage = np.round(montage/window*255.0).astype(np.uint8)
    return montage
def generate_pyramid(montage, tile_size):
    """
    Slice up a NIfTI file into a multi-res pyramid of tiles.

    We use the file name convention suitable for d3tiles
    The zoom level (z) is an integer between 0 and n, where 0 is fully zoomed out and n is zoomed in.
    E.g., z=0 is for 1 tile covering the whole world, z=1 is for 2x2=4 tiles, ... z=n is the original resolution.

    Returns a tuple (pyramid, size, pyramid_meta): pyramid maps (level, x, y) to a
    cStringIO buffer of JPEG bytes, size is the cropped montage (width, height),
    and pyramid_meta records tile size, mimetype and per-level tile counts.
    Raises MontageError when the cropped montage has zero area.
    """
    montage_image = Image.fromarray(montage, 'L')
    montage_image = montage_image.crop(montage_image.getbbox())  # crop away edges that contain only zeros
    sx, sy = montage_image.size
    if sx * sy < 1:
        raise MontageError('degenerate image size (%d, %d): no tiles will be created' % (sx, sy))
    if sx < tile_size and sy < tile_size:  # Panojs chokes if the lowest res image is smaller than the tile size.
        tile_size = max(sx, sy)
    pyramid = {}
    pyramid_meta = {
        'tile_size': tile_size,
        'mimetype': 'image/jpeg',
        'real_size': montage_image.size,
        'zoom_levels': {},
    }
    # number of zoom levels needed to shrink the longest side down to a single tile
    divs = max(1, int(np.ceil(np.log2(float(max(sx, sy))/tile_size))) + 1)
    for z in range(divs):
        # flip the z label to be d3 friendly
        level = (divs - 1) - z
        # halve the image per zoom step, preserving the aspect ratio
        ysize = int(round(float(sy)/pow(2, z)))
        xsize = int(round(float(ysize)/sy*sx))
        xpieces = int(math.ceil(float(xsize)/tile_size))
        ypieces = int(math.ceil(float(ysize)/tile_size))
        log.debug('level %s, size %dx%d, splits %d,%d' % (level, xsize, ysize, xpieces, ypieces))
        # TODO: we don't need to use 'thumbnail' here. This function always returns a square
        # image of the requested size, padding and scaling as needed. Instead, we should resize
        # and chop the image up, with no padding, ever. panojs can handle non-square images
        # at the edges, so the padding is unnecessary and, in fact, a little wrong.
        im = montage_image.copy()
        im.thumbnail([xsize, ysize], Image.ANTIALIAS)
        im = im.convert('L')  # convert to grayscale
        for x in range(xpieces):
            for y in range(ypieces):
                # crop clamps the right/bottom edges so edge tiles may be smaller than tile_size
                tile = im.copy().crop((x*tile_size, y*tile_size, min((x+1)*tile_size, xsize), min((y+1)*tile_size, ysize)))
                log.debug(tile.size)
                if tile.size != (tile_size, tile_size):
                    log.debug('tile is not square...padding')
                    background = Image.new('L', (tile_size, tile_size), 'white')  # what to pad with? default black
                    background.paste(tile, (0, 0))
                    tile = background
                buf = cStringIO.StringIO()
                tile.save(buf, 'JPEG', quality=85)
                pyramid[(level, x, y)] = buf
        pyramid_meta['zoom_levels'][level] = (xpieces, ypieces)
    return pyramid, montage_image.size, pyramid_meta
def generate_dir_pyr(imagedata, outbase, tile_size=256):
    """Generate a panojs image pyramid directory.

    Parameters
    ----------
    imagedata : np.ndarray
        image data to montage and tile.
    outbase : str
        output directory; tiles are written under '<outbase>/images'.
    tile_size : int [default 256]
        tile edge length in pixels.

    Returns
    -------
    outbase : str

    Raises
    ------
    MontageError
        if the first pyramid tile was not written.
    """
    montage = generate_montage(imagedata)
    pyramid, pyramid_size, pyramid_meta = generate_pyramid(montage, tile_size)
    # write directory pyramid
    image_path = os.path.join(outbase, 'images')
    if not os.path.exists(image_path):
        os.makedirs(image_path)
    for idx, tile_buf in pyramid.iteritems():
        with open(os.path.join(image_path, ('%03d_%03d_%03d.jpg' % idx)), 'wb') as fp:
            fp.write(tile_buf.getvalue())
    # sanity check: at least the first tile of the pyramid must exist
    if not os.path.exists(os.path.join(image_path, '000_000_000.jpg')):
        # BUG FIX: the message previously said 'montage (flat png) not generated',
        # copy-pasted from generate_flat(); report the actual failure
        raise MontageError('montage tile pyramid (dir) not generated')
    else:
        log.debug('generated %s' % outbase)
    return outbase
def generate_zip_pyr(imagedata, outbase, tile_size=256):
    """Generate a zip archive holding a full-res montage JPEG plus a tile pyramid.

    Returns the path of the written '<outbase>.zip'. Pyramid metadata is stored
    as JSON in the zip file comment.
    """
    montage = generate_montage(imagedata)
    pyramid, pyramid_size, pyramid_meta = generate_pyramid(montage, tile_size)
    zip_name = outbase + '.zip'
    # tiles are already JPEG-compressed, so ZIP_STORED avoids pointless recompression
    with zipfile.ZipFile(zip_name, 'w', compression=zipfile.ZIP_STORED) as zf:
        pyramid_meta['dirname'] = os.path.basename(outbase)
        zf.comment = json.dumps(pyramid_meta)
        montage_jpeg = os.path.join(os.path.basename(outbase), 'montage.jpeg')
        buf = cStringIO.StringIO()
        Image.fromarray(montage).convert('L').save(buf, format='JPEG', optimize=True)
        zf.writestr(montage_jpeg, buf.getvalue())
        for idx, tile_buf in pyramid.iteritems():
            # idx is (zoom level, x, y); archive layout: <dirname>/z###/x###_y###.jpg
            tilename = 'z%03d/x%03d_y%03d.jpg' % idx
            arcname = os.path.join(os.path.basename(outbase), tilename)
            zf.writestr(arcname, tile_buf.getvalue())
    return zip_name
def generate_flat(imagedata, filepath):
    """Generate a flat png montage at *filepath* and return that path.

    Raises MontageError when the output file does not exist after saving.
    """
    flat_image = Image.fromarray(generate_montage(imagedata)).convert('L')
    flat_image.save(filepath, optimize=True)
    if os.path.exists(filepath):
        log.debug('generated %s' % os.path.basename(filepath))
        return filepath
    raise MontageError('montage (flat png) not generated')
class MontageError(medimg.MedImgError):
    """Exception raised for montage generation and packaging failures in this module."""
    pass
class Montage(medimg.MedImgReader, medimg.MedImgWriter):
    """Reader/writer for image montage pyramids generated from medical image data."""

    domain = u'mr'
    filetype = u'montage'
    state = ['orig']

    def __init__(self, filepath, load_data=False):
        super(Montage, self).__init__(filepath, load_data=load_data)
        self.data = None  # contains montage
        if load_data:
            self.load_data(preloaded=True)

    def load_data(self, preloaded=False):
        super(Montage, self).load_data(preloaded=preloaded)
        # BUG FIX: the '%s' placeholder previously had no argument and logged literally
        log.debug('loading %s' % self.filepath)
        if not preloaded:
            # read the data
            pass

    def get_tile(self, x, y, z):
        # BUG FIX: the result was computed and silently discarded; return it
        return get_tile(self.filepath, x, y, z)

    def get_info(self):
        # BUG FIX: return the info to the caller instead of discarding it
        return get_info(self.filepath)

    @classmethod
    def write(cls, metadata, imagedata, outbase, voxel_order='LPS', mtype='zip', tilesize=256, multi=False):
        """
        Write the metadata and imagedata to image montage pyramid.

        Parameters
        ----------
        metadata : object
            fully loaded instance of a Reader.
        imagedata : dict
            dictionary of np.darrays. label suffix as keys, with np.darrays as values.
        outbase : str
            output name prefix.
        voxel_order : str [default 'LPS']
            three character string indicating the voxel order, ex. 'LPS'.
        mtype : str [default 'zip']
            type of montage to create. can be 'png', 'dir', or 'zip'.
        tilesize : int [default 256]
            tilesize for generated zip or directory pyramid. Has no effect on mtype 'png'.
        multi : bool [default False]
            True indicates to write multiple files. False only writes primary data in imagedata['']

        Returns
        -------
        results : list
            list of files written.

        Raises
        ------
        DataError
            metadata or data is None.
        """
        super(Montage, cls).write(metadata, imagedata, outbase, voxel_order)
        results = []
        for data_label, data in imagedata.iteritems():
            # BUG FIX: 'is not' compared string identity (implementation-dependent);
            # equality is the intended test for the primary '' label
            if not multi and data_label != '':
                continue
            if data is None:
                continue
            outname = outbase + data_label
            if voxel_order:
                data, _ = cls.reorder_voxels(data, metadata.qto_xyz, voxel_order)
            if mtype == 'png':
                log.debug('type: flat png')
                result = generate_flat(data, outname + '.png')
            elif mtype == 'dir':
                log.debug('type: directory')
                result = generate_dir_pyr(data, outname, tilesize)
            elif mtype == 'zip':
                log.debug('type: zip of tiles')
                result = generate_zip_pyr(data, outname, tilesize)
            else:
                # BUG FIX: message previously listed 'sqlite, dir or png'; valid values
                # are the ones handled above
                raise MontageError('montage mtype must be png, dir or zip. not %s' % mtype)
            results.append(result)
        return results
write = Montage.write
| {
"repo_name": "scitran/data",
"path": "scitran/data/medimg/montage.py",
"copies": "1",
"size": "11335",
"license": "mit",
"hash": 8649182239321435000,
"line_mean": 39.9205776173,
"line_max": 132,
"alpha_frac": 0.6120864579,
"autogenerated": false,
"ratio": 3.482334869431644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45944213273316437,
"avg_score": null,
"num_lines": null
} |
import sys
from code import InteractiveConsole
from gevent import socket
from gevent.greenlet import Greenlet
from gevent.server import StreamServer
__all__ = ['BackdoorServer']
# InteractiveConsole expects sys.ps1/sys.ps2 to exist; they are only defined
# when running inside a REPL, so provide the standard defaults otherwise.
try:
    sys.ps1
except AttributeError:
    sys.ps1 = '>>> '
try:
    sys.ps2
except AttributeError:
    sys.ps2 = '... '
class SocketConsole(Greenlet):
    """Greenlet running an InteractiveConsole over a socket connection.

    While this greenlet is scheduled, sys.stdin/stdout/stderr point at the socket;
    they are restored every time the greenlet switches out.
    """

    def __init__(self, locals, conn):
        Greenlet.__init__(self)
        self.locals = locals  # namespace dict exposed to the interactive console
        self.desc = _fileobject(conn)  # file-like wrapper around the connection socket

    def finalize(self):
        # drop the socket wrapper so it can be closed/collected
        self.desc = None

    def switch(self, *args, **kw):
        # redirect the standard streams to the socket for this greenlet's time slice
        self.saved = sys.stdin, sys.stderr, sys.stdout
        sys.stdin = sys.stdout = sys.stderr = self.desc
        Greenlet.switch(self, *args, **kw)

    def switch_out(self):
        # restore the standard streams captured in switch()
        sys.stdin, sys.stderr, sys.stdout = self.saved

    def _run(self):
        try:
            try:
                console = InteractiveConsole(self.locals)
                console.interact()
            except SystemExit:  # raised by quit()
                # Python 2 only API: clear the pending exception state
                sys.exc_clear()
        finally:
            self.switch_out()
            self.finalize()
class BackdoorServer(StreamServer):
    """TCP server that hands each incoming connection an interactive Python console."""

    def __init__(self, listener, locals=None, **server_args):
        # spawn=None: handle() returns immediately, SocketConsole.spawn does the work
        StreamServer.__init__(self, listener, spawn=None, **server_args)
        self.locals = locals  # namespace dict passed to every console; None -> fresh namespace
        # QQQ passing pool instance as 'spawn' is not possible; should it be fixed?

    def handle(self, conn, address):
        # one SocketConsole greenlet per connection
        SocketConsole.spawn(self.locals, conn)
class _fileobject(socket._fileobject):
    """File-like socket wrapper tuned for use as the console's stdin/stdout."""

    def write(self, data):
        # send synchronously instead of buffering, so prompts appear immediately
        self._sock.sendall(data)

    def isatty(self):
        # pretend to be a terminal so the console prints its banner and prompts
        return True

    def flush(self):
        # nothing to do: write() already sends everything
        pass

    def readline(self, *a):
        # normalize telnet-style CRLF line endings to plain LF
        return socket._fileobject.readline(self, *a).replace("\r\n", "\n")
if __name__ == '__main__':
    if not sys.argv[1:]:
        # no port argument supplied: show usage (Python 2 print statement)
        print 'USAGE: %s PORT' % sys.argv[0]
    else:
        # serve a backdoor console on localhost at the given port, forever
        BackdoorServer(('127.0.0.1', int(sys.argv[1]))).serve_forever()
| {
"repo_name": "wangyou/XX-Net",
"path": "code/default/python27/1.0/lib/linux/gevent/backdoor.py",
"copies": "10",
"size": "3181",
"license": "bsd-2-clause",
"hash": 287806789964393730,
"line_mean": 29.2952380952,
"line_max": 83,
"alpha_frac": 0.6633134235,
"autogenerated": false,
"ratio": 3.851089588377724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9514403011877725,
"avg_score": null,
"num_lines": null
} |
__author__ = 'bobwhite'
"""
This example shows how to create a mGui window with menus and submenus using the new mGui 2.1 submenu syntax
"""
from mGui import gui
def food_selected(*_, **kwargs):
    # menu callback: mGui passes the triggering MenuItem as kwargs['sender']
    print "order " + kwargs['sender'].label
def pizza_selected(*_, **kwargs):
    # collect the labels of all currently-checked topping items in the parent submenu
    pizza = kwargs['sender'].parent
    toppings = [i.label for i in pizza.controls if isinstance(i, gui.CheckBoxMenuItem) and i.checkBox]
    print "order pizza " + (" and ".join(toppings))
def radio_selected(*_, **kwargs):
    # report the choice and reflect it in the submenu's own label
    print 'delivery:', kwargs['sender'].label
    kwargs['sender'].parent.label = "Delivery: " + kwargs['sender'].label
# the use of tag here acts as a keepalive, so the functions don't get garbage collected
# this is a useful alternative to classes for simple cases
with gui.Window(menuBar=True, tag=(food_selected, pizza_selected, radio_selected)) as test_window:
    with gui.Menu(label='TestMenu') as food_menu:
        # conventional menu items, all wired to the same handler
        hotdog = gui.MenuItem(label = 'Hot Dog')
        burger = gui.MenuItem(label = 'Burger')
        taco = gui.MenuItem(label = 'Taco')
        for each in (hotdog, burger, taco):
            each.command += food_selected
        # a submenu of checkable topping items
        with gui.SubMenu(label='Pizza') as sm:
            pepperoni = gui.CheckBoxMenuItem(label='Pepperoni')
            sausage = gui.CheckBoxMenuItem(label='Sausage')
            pineapples = gui.CheckBoxMenuItem(label='Pineapples')
            for each in (pepperoni, sausage, pineapples):
                each.command += pizza_selected
        gui.MenuDivider()
        # radio collection submenu
        # note that unlike regular radioCollections, radioMenuItemCollections
        # don't keep track of their own selection so we track it in the
        # individual handlers instead.
        with gui.SubMenu(label='Delivery') as sm:
            with gui.RadioMenuItemCollection() as radio:
                eatin = gui.RadioMenuItem('Eat In')
                takeout = gui.RadioMenuItem('Take Out')
                delivery = gui.RadioMenuItem('Delivery')
                for each in (eatin, takeout, delivery):
                    each.command += radio_selected
test_window.show()
| {
"repo_name": "theodox/mGui",
"path": "mGui/examples/menus.py",
"copies": "1",
"size": "2203",
"license": "mit",
"hash": -5057149350408243000,
"line_mean": 35.7166666667,
"line_max": 108,
"alpha_frac": 0.6382206083,
"autogenerated": false,
"ratio": 3.8246527777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4962873386077778,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from bson import ObjectId
from threading import RLock
from db.model.raw_data import *
from db.model.site_statistics import SiteStatistics
from synergy.db.manager import ds_manager
from synergy.system.decorator import thread_safe
class SiteDao(object):
    """ Thread-safe Data Access Object for site_XXX table/collection """

    def __init__(self, logger):
        super(SiteDao, self).__init__()
        self.logger = logger
        self.lock = RLock()
        self.ds = ds_manager.ds_factory(logger)

    @thread_safe
    def get_one(self, collection_name, domain_name, timeperiod):
        """Fetch a single site-statistics record; raise LookupError if absent."""
        coll = self.ds.connection(collection_name)
        doc = coll.find_one({DOMAIN_NAME: domain_name, TIMEPERIOD: timeperiod})
        if doc is None:
            raise LookupError('MongoDB has no site record in %s for (%s, %s)'
                              % (collection_name, domain_name, timeperiod))
        return SiteStatistics.from_json(doc)

    @thread_safe
    def update(self, collection_name, instance, is_safe):
        """Upsert the DB representation of a SiteStatistics instance; return its id."""
        assert isinstance(instance, SiteStatistics)
        coll = self.ds.connection(collection_name)
        doc = instance.document
        if instance.db_id:
            doc['_id'] = ObjectId(instance.db_id)
        instance.db_id = coll.save(doc, safe=is_safe)
        return instance.db_id
| {
"repo_name": "eggsandbeer/scheduler",
"path": "db/dao/site_dao.py",
"copies": "1",
"size": "1526",
"license": "bsd-3-clause",
"hash": -9145484630527294000,
"line_mean": 36.2195121951,
"line_max": 83,
"alpha_frac": 0.6553079948,
"autogenerated": false,
"ratio": 4.037037037037037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5192345031837037,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from bson import ObjectId
from threading import RLock
from synergy.db.manager import ds_manager
from synergy.system.decorator import thread_safe
class BaseDao(object):
    """ Thread-safe base Data Access Object """

    def __init__(self, logger, model_class, primary_key, collection_name):
        super(BaseDao, self).__init__()
        self.logger = logger
        self.model_klass = model_class
        self.primary_key = primary_key
        self.collection_name = collection_name
        self.lock = RLock()
        self.ds = ds_manager.ds_factory(logger)

    def _tuple_to_query(self, key_tuple):
        """Map the primary-key field names onto the given key values."""
        if isinstance(key_tuple, str):
            key_tuple = [key_tuple]
        assert len(key_tuple) == len(self.primary_key)
        return dict(zip(self.primary_key, key_tuple))

    @thread_safe
    def get_one(self, key):
        """ method finds single record base on the given primary key and returns it to the caller"""
        query = self._tuple_to_query(key)
        coll = self.ds.connection(self.collection_name)
        doc = coll.find_one(query)
        if doc is None:
            raise LookupError('%s with key %r was not found' % (self.model_klass.__name__, query))
        return self.model_klass.from_json(doc)

    @thread_safe
    def run_query(self, query):
        """ method runs query on a specified collection and return a list of filtered Model records """
        coll = self.ds.connection(self.collection_name)
        cursor = coll.find(query)
        if cursor.count() == 0:
            raise LookupError('Collection %s has no %s records' %
                              (self.collection_name, self.model_klass.__name__))
        return [self.model_klass.from_json(entry) for entry in cursor]

    @thread_safe
    def get_all(self):
        """Return every record in the collection."""
        return self.run_query({})

    @thread_safe
    def update(self, instance):
        """ this is upsert method: inserts or updates the DB representation of the model instance """
        assert isinstance(instance, self.model_klass)
        coll = self.ds.connection(self.collection_name)
        doc = instance.document
        if instance.db_id:
            doc['_id'] = ObjectId(instance.db_id)
        instance.db_id = coll.save(doc, safe=True)
        return instance.db_id

    @thread_safe
    def remove(self, key):
        """Delete the record matching the given primary key."""
        query = self._tuple_to_query(key)
        coll = self.ds.connection(self.collection_name)
        return coll.remove(query, safe=True)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/dao/base_dao.py",
"copies": "1",
"size": "2636",
"license": "bsd-3-clause",
"hash": -6725759968638984000,
"line_mean": 34.6216216216,
"line_max": 103,
"alpha_frac": 0.6297420334,
"autogenerated": false,
"ratio": 3.952023988005997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5081766021405997,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from collections import OrderedDict, defaultdict
from threading import RLock
from werkzeug.utils import cached_property
from synergy.db.dao import job_dao
from synergy.db.dao.job_dao import JobDao
from synergy.db.dao import unit_of_work_dao
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.model.freerun_process_entry import split_schedulable_name
from synergy.scheduler.scheduler_constants import COLLECTION_JOB_YEARLY, \
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_DAILY, COLLECTION_JOB_HOURLY
from synergy.system.decorator import thread_safe
from synergy.system import time_helper
from synergy.system.time_qualifier import QUALIFIER_DAILY, QUALIFIER_MONTHLY, QUALIFIER_YEARLY
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request
class DashboardHandler(BaseRequestHandler):
    """MX request handler backing the dashboard page.

    Parses filter flags from the request arguments and exposes cached views of
    managed Jobs and freerun units-of-work within the requested time window.
    """

    def __init__(self, request, **values):
        super(DashboardHandler, self).__init__(request, **values)
        self.time_window = self.request_arguments.get('time_window')
        # checkbox-style flags arrive as the string 'on' when checked
        self.is_include_running = self.request_arguments.get('include_running') == 'on'
        self.is_include_processed = self.request_arguments.get('include_processed') == 'on'
        self.is_include_noop = self.request_arguments.get('include_noop') == 'on'
        self.is_include_failed = self.request_arguments.get('include_failed') == 'on'
        self.is_include_disabled = self.request_arguments.get('include_disabled') == 'on'
        # the request is only actionable when a time window was supplied
        self.is_request_valid = bool(self.time_window)
        if self.is_request_valid:
            actual_timeperiod = time_helper.actual_timeperiod(QUALIFIER_DAILY)
            # time_window is the number of days to look back from today
            delta = int(self.time_window)
            self.query_start_timeperiod = time_helper.increment_timeperiod(QUALIFIER_DAILY, actual_timeperiod, -delta)

    @cached_property
    @valid_action_request
    def managed(self):
        # sorted dict of managed Job records matching the filters, keyed by job key
        processor = ManagedStatements(self.logger, self.scheduler.managed_handlers)
        selection = processor.retrieve_records(self.query_start_timeperiod, self.is_include_running,
                                               self.is_include_processed, self.is_include_noop, self.is_include_failed,
                                               self.is_include_disabled)
        return OrderedDict(sorted(selection.items()))

    @cached_property
    @valid_action_request
    def jobs(self):
        """
        :return: dict in format <process_name: [job, ..., job]>
        """
        resp = defaultdict(list)
        # self.managed keys are tuples whose first element is the process name
        for job_primary_key, job_document in self.managed.items():
            resp[job_primary_key[0]].append(job_document)
        return dict(resp)

    @cached_property
    @valid_action_request
    def freeruns(self):
        # sorted dict of freerun unit-of-work records matching the filters
        processor = FreerunStatements(self.logger, self.scheduler.freerun_handlers)
        selection = processor.retrieve_records(self.query_start_timeperiod, self.is_include_running,
                                               self.is_include_processed, self.is_include_noop, self.is_include_failed,
                                               self.is_include_disabled)
        return OrderedDict(sorted(selection.items()))
class ManagedStatements(object):
    """Collects managed Job records across hourly/daily/monthly/yearly collections."""

    def __init__(self, logger, managed_handlers):
        self.lock = RLock()
        self.logger = logger
        self.managed_handlers = managed_handlers  # process_name -> scheduler thread handler
        self.job_dao = JobDao(self.logger)

    @thread_safe
    def retrieve_records(self, timeperiod, include_running,
                         include_processed, include_noop, include_failed, include_disabled):
        """ method looks for suitable job records in all Job collections and returns them as a dict"""
        resp = dict()
        resp.update(self._search_by_level(COLLECTION_JOB_HOURLY, timeperiod, include_running,
                                          include_processed, include_noop, include_failed, include_disabled))
        resp.update(self._search_by_level(COLLECTION_JOB_DAILY, timeperiod, include_running,
                                          include_processed, include_noop, include_failed, include_disabled))
        # widen the starting timeperiod to match each coarser collection's granularity
        timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
        resp.update(self._search_by_level(COLLECTION_JOB_MONTHLY, timeperiod, include_running,
                                          include_processed, include_noop, include_failed, include_disabled))
        timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
        resp.update(self._search_by_level(COLLECTION_JOB_YEARLY, timeperiod, include_running,
                                          include_processed, include_noop, include_failed, include_disabled))
        return resp

    @thread_safe
    def _search_by_level(self, collection_name, timeperiod, include_running,
                         include_processed, include_noop, include_failed, include_disabled):
        # returns {job.key: job.document} for matching records in one collection
        resp = dict()
        try:
            query = job_dao.QUERY_GET_LIKE_TIMEPERIOD(timeperiod, include_running,
                                                      include_processed, include_noop, include_failed)
            records_list = self.job_dao.run_query(collection_name, query)
            if len(records_list) == 0:
                self.logger.warning(f'MX: no Job Records found in {collection_name} since {timeperiod}.')
            for job_record in records_list:
                if job_record.process_name not in self.managed_handlers:
                    # job belongs to a process the scheduler is not currently managing
                    continue
                thread_handler = self.managed_handlers[job_record.process_name]
                if not include_disabled and not thread_handler.process_entry.is_on:
                    continue
                resp[job_record.key] = job_record.document
        except Exception as e:
            self.logger.error(f'MX Dashboard ManagedStatements error: {e}')
        return resp
class FreerunStatements(object):
    """Collects freerun unit-of-work records newer than a given timeperiod."""

    def __init__(self, logger, freerun_handlers):
        self.lock = RLock()
        self.logger = logger
        self.freerun_handlers = freerun_handlers  # (process_name, entry_name) -> thread handler
        self.uow_dao = UnitOfWorkDao(self.logger)

    @thread_safe
    def retrieve_records(self, timeperiod, include_running,
                         include_processed, include_noop, include_failed, include_disabled):
        """ method looks for suitable UOW records and returns them as a dict"""
        resp = dict()
        try:
            query = unit_of_work_dao.QUERY_GET_FREERUN_SINCE(timeperiod, include_running,
                                                             include_processed, include_noop, include_failed)
            records_list = self.uow_dao.run_query(query)
            if len(records_list) == 0:
                self.logger.warning(f'MX: no Freerun UOW records found since {timeperiod}.')
            for uow_record in records_list:
                # freerun uow.process_name is a composite in format <process_name::entry_name>
                handler_key = split_schedulable_name(uow_record.process_name)
                if handler_key not in self.freerun_handlers:
                    continue
                thread_handler = self.freerun_handlers[handler_key]
                if not include_disabled and not thread_handler.process_entry.is_on:
                    continue
                resp[uow_record.key] = uow_record.document
        except Exception as e:
            self.logger.error(f'MX Dashboard FreerunStatements error: {e}')
        return resp
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/dashboard_handler.py",
"copies": "1",
"size": "7454",
"license": "bsd-3-clause",
"hash": -7140851055054475000,
"line_mean": 48.3642384106,
"line_max": 119,
"alpha_frac": 0.635229407,
"autogenerated": false,
"ratio": 4.033549783549783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168779190549783,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from collections import OrderedDict
from threading import RLock
from werkzeug.utils import cached_property
from synergy.db.dao import job_dao
from synergy.db.dao.job_dao import JobDao
from synergy.db.dao import unit_of_work_dao
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.scheduler.scheduler_constants import COLLECTION_JOB_YEARLY, \
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_DAILY, COLLECTION_JOB_HOURLY
from synergy.system.decorator import thread_safe
from synergy.system import time_helper
from synergy.system.time_qualifier import QUALIFIER_DAILY, QUALIFIER_MONTHLY, QUALIFIER_YEARLY
from synergy.mx.base_request_handler import BaseRequestHandler, valid_action_request
TIME_WINDOW_DAY_PREFIX = 'day_'
class DashboardHandler(BaseRequestHandler):
    """MX request handler exposing managed-Job and freerun-UOW dashboard views.

    The 'time_window' request argument is expected in 'day_NN' form; NN is the
    number of days to look back from the current daily timeperiod.
    """

    def __init__(self, request, **values):
        super(DashboardHandler, self).__init__(request, **values)
        self.time_window = self.request.args.get('time_window')
        self.is_unprocessed_only = self.request.args.get('unprocessed_only') == 'on'
        # the request is only actionable when a time window was supplied
        # (simplified from a 4-line if/else to the equivalent bool())
        self.is_request_valid = bool(self.time_window)

    def _start_timeperiod(self):
        """Compute the query start timeperiod from the 'day_NN' time-window argument.

        Extracted helper: this logic was duplicated verbatim in managed() and freeruns().
        """
        actual_timeperiod = time_helper.actual_timeperiod(QUALIFIER_DAILY)
        # NOTE(review): the '+ 1' skips one character *past* the 'day_' prefix;
        # this looks like an off-by-one unless the argument format carries an
        # extra separator character - confirm against the UI. Behavior preserved.
        delta = int(self.time_window[len(TIME_WINDOW_DAY_PREFIX) + 1:])
        return time_helper.increment_timeperiod(QUALIFIER_DAILY, actual_timeperiod, -delta)

    @cached_property
    @valid_action_request
    def managed(self):
        # sorted dict of managed Job records within the requested window
        processor = ManagedStatements(self.logger)
        selection = processor.retrieve_records(self._start_timeperiod(), self.is_unprocessed_only)
        return OrderedDict(sorted(selection.items()))

    @cached_property
    @valid_action_request
    def freeruns(self):
        # sorted dict of freerun unit-of-work records within the requested window
        processor = FreerunStatements(self.logger)
        selection = processor.retrieve_records(self._start_timeperiod(), self.is_unprocessed_only)
        return OrderedDict(sorted(selection.items()))
class ManagedStatements(object):
    """Collects managed Job records across hourly/daily/monthly/yearly collections."""

    def __init__(self, logger):
        self.lock = RLock()
        self.logger = logger
        self.job_dao = JobDao(self.logger)

    @thread_safe
    def retrieve_records(self, timeperiod, is_unprocessed_only):
        """ method looks for suitable job records in all Job collections and returns them as a dict"""
        resp = dict()
        resp.update(self._search_by_level(COLLECTION_JOB_HOURLY, timeperiod, is_unprocessed_only))
        resp.update(self._search_by_level(COLLECTION_JOB_DAILY, timeperiod, is_unprocessed_only))
        # widen the starting timeperiod to match each coarser collection's granularity
        timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
        resp.update(self._search_by_level(COLLECTION_JOB_MONTHLY, timeperiod, is_unprocessed_only))
        timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
        resp.update(self._search_by_level(COLLECTION_JOB_YEARLY, timeperiod, is_unprocessed_only))
        return resp

    @thread_safe
    def _search_by_level(self, collection_name, timeperiod, unprocessed_only):
        # returns {job.key: job.document} for matching records in one collection;
        # errors are logged and produce an empty result rather than propagating
        resp = dict()
        try:
            query = job_dao.QUERY_GET_LIKE_TIMEPERIOD(timeperiod, unprocessed_only)
            records_list = self.job_dao.run_query(collection_name, query)
            if len(records_list) == 0:
                self.logger.warn('No Job Records found in {0} since {1}.'.format(collection_name, timeperiod))
            for job_record in records_list:
                resp[job_record.key] = job_record.document
        except Exception as e:
            self.logger.error('DashboardHandler error: {0}'.format(e))
        return resp
class FreerunStatements(object):
    """Collects freerun unit-of-work records newer than a given timeperiod."""

    def __init__(self, logger):
        self.lock = RLock()
        self.logger = logger
        self.uow_dao = UnitOfWorkDao(self.logger)

    @thread_safe
    def retrieve_records(self, timeperiod, unprocessed_only):
        """ method looks for suitable UOW records and returns them as a dict"""
        # errors are logged and produce an empty result rather than propagating
        resp = dict()
        try:
            query = unit_of_work_dao.QUERY_GET_FREERUN_SINCE(timeperiod, unprocessed_only)
            records_list = self.uow_dao.run_query(query)
            if len(records_list) == 0:
                self.logger.warn('No Freerun UOW records found since {0}.'.format(timeperiod))
            for uow_record in records_list:
                resp[uow_record.key] = uow_record.document
        except Exception as e:
            self.logger.error('DashboardHandler error: {0}'.format(e))
        return resp
if __name__ == '__main__':
    # ad-hoc smoke test: dump matching records for both statement processors
    # (the logging module itself stands in for a logger instance here)
    import logging

    for pd in [ManagedStatements(logging), FreerunStatements(logging)]:
        resp = pd.retrieve_records('2015030100', False)
        print('{0}'.format(resp))
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/mx/dashboard_handler.py",
"copies": "1",
"size": "4960",
"license": "bsd-3-clause",
"hash": -6348352276310111000,
"line_mean": 40.3333333333,
"line_max": 110,
"alpha_frac": 0.6796370968,
"autogenerated": false,
"ratio": 3.6336996336996337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48133367304996333,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from collections import OrderedDict
from synergy.conf import context
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.system.time_qualifier import *
from synergy.system.time_helper import cast_to_time_qualifier
from synergy.system.timeperiod_dict import TimeperiodDict
class HierarchyEntry(object):
    """Single node of a ProcessHierarchy, linking a managed process to its parent node."""

    def __init__(self, hierarchy, parent, process_entry: ManagedProcessEntry):
        self.hierarchy = hierarchy
        self.parent = parent
        self.process_entry = process_entry
        self.timeperiod_dict = TimeperiodDict(process_entry.time_qualifier, process_entry.time_grouping)

    def cast_timeperiod(self, timeperiod):
        """Convert the given timeperiod into this entry's time qualifier."""
        qualifier = self.process_entry.time_qualifier
        return cast_to_time_qualifier(qualifier, timeperiod)
class ProcessHierarchy(object):
    """Ordered hierarchy of managed processes, sorted from the coarsest time
    qualifier (yearly) at the top down to the finest (hourly) at the bottom."""

    def __init__(self, *process_names):
        self.entries = OrderedDict()     # process_name -> HierarchyEntry
        self.qualifiers = OrderedDict()  # time_qualifier -> HierarchyEntry
        top_node = None
        process_entries = [context.process_context[process_name] for process_name in process_names]
        # sort descending by qualifier rank so the coarsest process becomes the top node
        sorted_process_entries = sorted(process_entries, key=lambda x: QUALIFIER_DICT[x.time_qualifier], reverse=True)
        for process_entry in sorted_process_entries:
            assert isinstance(process_entry, ManagedProcessEntry)
            entry = HierarchyEntry(self, top_node, process_entry)
            top_node = entry
            self.entries[process_entry.process_name] = entry
            self.qualifiers[process_entry.time_qualifier] = entry

    def __contains__(self, value):
        """
        :param value: process name
        :return: True if a hierarchy entry for the process_entry with the given name is registered in this hierarchy;
                 False otherwise
        """
        return value in self.entries

    def __getitem__(self, key):
        """
        :param key: process name
        :return: associated hierarchy entry of HierarchyEntry type
        """
        return self.entries[key]

    def __iter__(self):
        """ for x in self
        :return process_names in descending order of their time_qualifier: yearly->monthly->daily->hourly """
        return iter(self.entries)

    def __str__(self):
        msg = 'Process Hierarchy: '
        # BUG FIX: iterating the dict directly yields keys only and raised
        # ValueError on tuple unpacking; iterate key/value pairs instead
        for process_name, hierarchy_entry in self.entries.items():
            msg += '{1}->{0} '.format(process_name, hierarchy_entry.process_entry.time_qualifier)
        return msg

    def has_qualifier(self, qualifier):
        """
        :param qualifier: time_qualifier
        :return: True if a HierarchyEntry with given time_qualifier is registered in this hierarchy; False otherwise
        """
        return qualifier in self.qualifiers

    def get_by_qualifier(self, qualifier):
        """
        :param qualifier: time_qualifier of the searched process
        :return: associated entry of HierarchyEntry type
                 or None if no process with given time_qualifier is registered in this hierarchy
        """
        return self.qualifiers.get(qualifier, None)

    def get_child_by_qualifier(self, parent_qualifier):
        """
        :param parent_qualifier: time_qualifier of the parent process
        :return: <HierarchyEntry> child entry to the HierarchyEntry associated with the parent_qualifier
                 or None if the given parent_qualifier is not registered in this hierarchy
                 or None if the given parent_qualifier is the bottom process
        """
        if parent_qualifier not in self.qualifiers:
            return None

        process_qualifiers = list(self.qualifiers)
        if parent_qualifier == process_qualifiers[-1]:
            # the parent is already the finest-grained process: no child exists
            return None

        parent_index = process_qualifiers.index(parent_qualifier)
        return self.qualifiers[process_qualifiers[parent_index + 1]]

    @property
    def top_process(self):
        """ :return: <ManagedProcessEntry> of the hierarchy's top entry """
        key = next(iter(self.entries))
        return self.entries[key].process_entry

    @property
    def bottom_process(self):
        """ :return: <ManagedProcessEntry> of the hierarchy's bottom entry """
        key = next(reversed(self.entries))
        return self.entries[key].process_entry
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/process_hierarchy.py",
"copies": "1",
"size": "4220",
"license": "bsd-3-clause",
"hash": -207520052219369060,
"line_mean": 38.4392523364,
"line_max": 118,
"alpha_frac": 0.6630331754,
"autogenerated": false,
"ratio": 4.323770491803279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5486803667203278,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from constants import COLLECTION_SITE_DAILY
from db.model.site_statistics import SiteStatistics
from db.dao.site_dao import SiteDao
from synergy.conf import settings
from workers.abstract_vertical_worker import AbstractVerticalWorker
from synergy.system import time_helper
from synergy.system.time_qualifier import *
class AlertDailyWorker(AbstractVerticalWorker):
    """ class compares site daily statistics with 1-week old one and
        reports an alert should any of the given threshold be crossed """

    def __init__(self, process_name):
        super(AlertDailyWorker, self).__init__(process_name)
        self.site_dao = SiteDao(self.logger)

    def _get_tunnel_port(self):
        # tunnel port for the site data source, taken from global settings
        return settings.settings['tunnel_site_port']

    def _init_sink_key(self, *args):
        # sink key is (domain_name, daily timeperiod) derived from the source key
        return args[0], time_helper.hour_to_day(args[1])

    def _init_source_object(self, document):
        return SiteStatistics.from_json(document)

    def _init_sink_object(self, composite_key):
        obj = SiteStatistics()
        obj.key = (composite_key[0], composite_key[1])
        return obj

    @staticmethod
    def _ratio_out_of_band(current, week_old):
        """Return True if current/week_old falls outside the [0.8, 1.2] band.

        BUG FIX: the original computed `current / week_old < 0.8` with Python 2
        integer division, so e.g. 100/120 evaluated to 0 and the thresholds
        misfired; compute the ratio with floats. A zero week-old count is
        treated as a crossed threshold instead of raising ZeroDivisionError.
        """
        if week_old == 0:
            return True
        ratio = float(current) / float(week_old)
        return ratio < 0.8 or ratio > 1.2

    def _process_single_document(self, document):
        source_obj = self._init_source_object(document)
        week_old_timeperiod = time_helper.increment_timeperiod(QUALIFIER_DAILY, source_obj.key[1], delta=-7)
        try:
            week_old_obj = self.site_dao.get_one(COLLECTION_SITE_DAILY, source_obj.key[0], week_old_timeperiod)
            visits_threshold_crossed = self._ratio_out_of_band(source_obj.number_of_visits,
                                                               week_old_obj.number_of_visits)
            pageviews_threshold_crossed = self._ratio_out_of_band(source_obj.number_of_pageviews,
                                                                  week_old_obj.number_of_pageviews)
            if visits_threshold_crossed or pageviews_threshold_crossed:
                # record the week-over-week deltas in the sink object
                composite_key = self._init_sink_key(source_obj.key[0], source_obj.key[1])
                target_obj = self._get_aggregated_object(composite_key)
                target_obj.number_of_visits = source_obj.number_of_visits - week_old_obj.number_of_visits
                target_obj.number_of_pageviews = source_obj.number_of_pageviews - week_old_obj.number_of_pageviews
        except LookupError:
            # no week-old baseline exists: nothing to compare against
            self.logger.debug('site statistics (%s:%s) was not found. skipping comparison'
                              % (source_obj.key[0], week_old_timeperiod))
if __name__ == '__main__':
    from constants import PROCESS_ALERT_DAILY
    # stand-alone entry point: instantiate the worker and start it
    source = AlertDailyWorker(PROCESS_ALERT_DAILY)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/alert_daily_worker.py",
"copies": "1",
"size": "2702",
"license": "bsd-3-clause",
"hash": -4291066537242268700,
"line_mean": 44.0333333333,
"line_max": 116,
"alpha_frac": 0.6617320503,
"autogenerated": false,
"ratio": 3.541284403669725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4703016453969725,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from constants import COLLECTION_SITE_DAILY
from db.model.site_statistics import SiteStatistics
from db.model.alert import Alert, DOMAIN_NAME, TIMEPERIOD
from db.dao.site_dao import SiteDao
from workers.abstract_mongo_worker import AbstractMongoWorker
from synergy.system import time_helper
from synergy.system.time_qualifier import *
class AlertDailyWorker(AbstractMongoWorker):
    """ illustration suite worker:
        - compares site daily statistics with 1-week old one
        reports an alert should any of the given threshold be crossed """

    # alert when the week-over-week ratio leaves the [LOWER, UPPER] band
    RATIO_LOWER_BOUND = 0.8
    RATIO_UPPER_BOUND = 1.2

    def __init__(self, process_name):
        super(AlertDailyWorker, self).__init__(process_name)
        self.site_dao = SiteDao(self.logger)

    def _init_sink_key(self, *args):
        """ :return: composite sink key (domain_name, daily timeperiod) """
        return args[0], time_helper.hour_to_day(args[1])

    def _mongo_sink_key(self, *args):
        """ :return: mongo-style composite key document """
        return {DOMAIN_NAME: args[0], TIMEPERIOD: args[1]}

    def _init_source_object(self, document):
        """ :return: SiteStatistics instance built from its json form """
        return SiteStatistics.from_json(document)

    def _init_sink_object(self, composite_key):
        """ :return: empty Alert record keyed by the composite key """
        obj = Alert()
        obj.key = (composite_key[0], composite_key[1])
        return obj

    def _threshold_crossed(self, current, previous):
        """ :param current: metric value for the processed timeperiod
            :param previous: metric value of the 1-week-old record
            :return: True if current/previous ratio falls outside
                     [RATIO_LOWER_BOUND, RATIO_UPPER_BOUND]
            NOTE: a zero *previous* counts as a crossed threshold
                  (original code raised ZeroDivisionError in that case) """
        if previous == 0:
            return True
        # float() guards against Py2 integer division truncating the ratio to 0 or 1
        ratio = current / float(previous)
        return ratio < self.RATIO_LOWER_BOUND or ratio > self.RATIO_UPPER_BOUND

    def _process_single_document(self, document):
        """ compares visit/pageview counters with the 1-week-old record and
            writes the deltas into the sink should any threshold be crossed """
        source_obj = self._init_source_object(document)
        week_old_timeperiod = time_helper.increment_timeperiod(QUALIFIER_DAILY, source_obj.key[1], delta=-7)
        try:
            week_old_obj = self.site_dao.get_one(COLLECTION_SITE_DAILY, source_obj.key[0], week_old_timeperiod)

            if self._threshold_crossed(source_obj.number_of_visits, week_old_obj.number_of_visits) \
                    or self._threshold_crossed(source_obj.number_of_pageviews, week_old_obj.number_of_pageviews):
                composite_key = self._init_sink_key(source_obj.key[0], source_obj.key[1])
                target_obj = self._get_aggregated_object(composite_key)
                target_obj.number_of_visits = source_obj.number_of_visits - week_old_obj.number_of_visits
                target_obj.number_of_pageviews = source_obj.number_of_pageviews - week_old_obj.number_of_pageviews
        except LookupError:
            self.logger.debug('site statistics ({0}:{1}) was not found. skipping comparison'
                              .format(source_obj.key[0], week_old_timeperiod))
if __name__ == '__main__':
    from constants import PROCESS_ALERT_DAILY
    # stand-alone entry point: instantiate the worker and start it
    source = AlertDailyWorker(PROCESS_ALERT_DAILY)
    source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "workers/alert_daily_worker.py",
"copies": "1",
"size": "2760",
"license": "bsd-3-clause",
"hash": -625608595574918500,
"line_mean": 44.2459016393,
"line_max": 116,
"alpha_frac": 0.6565217391,
"autogenerated": false,
"ratio": 3.5114503816793894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46679721207793895,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from constants import COLLECTION_SITE_YEARLY
from tests import base_fixtures
from synergy.system.time_qualifier import QUALIFIER_YEARLY
# pylint: disable=C0301
# Expected yearly site-statistics documents, one per domain, all for
# timeperiod '2001000000'. Fixture reference data; values below must stay
# byte-identical to what the yearly aggregation is expected to produce.
EXPECTED_SITE_YEARLY_00 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 259,
             'number_of_visits': 1700, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 159, u'os_2': 159, u'os_1': 159, u'os_0': 159, u'os_4': 159},
             'browser': {u'browser_4': 159, u'browser_0': 159, u'browser_1': 159, u'browser_2': 159, u'browser_3': 159}},
    'domain': u'domain_name_20', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_01 = {
    'stat': {'screen_resolution': {u'(320, 240)': 12, u'(640, 480)': 20, u'(1024, 960)': 28, u'(1280, 768)': 36},
             'language': {u'ua_uk': 28, u'ca_en': 12, u'ca_fr': 20, u'us_en': 36}, 'total_duration': 234,
             'number_of_visits': 1634, 'number_of_pageviews': 0, 'country': {u'ca': 12, u'fr': 20, u'uk': 28, u'us': 36},
             'os': {u'os_3': 198, u'os_2': 198, u'os_1': 198, u'os_0': 198, u'os_4': 198},
             'browser': {u'browser_4': 198, u'browser_0': 198, u'browser_1': 198, u'browser_2': 198, u'browser_3': 198}},
    'domain': u'domain_name_0', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_02 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 119,
             'number_of_visits': 1807, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 177, u'os_2': 177, u'os_1': 177, u'os_0': 177, u'os_4': 177},
             'browser': {u'browser_4': 177, u'browser_0': 177, u'browser_1': 177, u'browser_2': 177, u'browser_3': 177}},
    'domain': u'domain_name_26', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_03 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 152,
             'number_of_visits': 2234, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 186, u'os_2': 186, u'os_1': 186, u'os_0': 186, u'os_4': 186},
             'browser': {u'browser_4': 186, u'browser_0': 186, u'browser_1': 186, u'browser_2': 186, u'browser_3': 186}},
    'domain': u'domain_name_29', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_04 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 168,
             'number_of_visits': 1856, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 120, u'os_2': 120, u'os_1': 120, u'os_0': 120, u'os_4': 120},
             'browser': {u'browser_4': 120, u'browser_0': 120, u'browser_1': 120, u'browser_2': 120, u'browser_3': 120}},
    'domain': u'domain_name_7', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_05 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 17,
             'number_of_visits': 1404, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 180, u'os_2': 180, u'os_1': 180, u'os_0': 180, u'os_4': 180},
             'browser': {u'browser_4': 180, u'browser_0': 180, u'browser_1': 180, u'browser_2': 180, u'browser_3': 180}},
    'domain': u'domain_name_27', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_06 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 222,
             'number_of_visits': 1247, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 165, u'os_2': 165, u'os_1': 165, u'os_0': 165, u'os_4': 165},
             'browser': {u'browser_4': 165, u'browser_0': 165, u'browser_1': 165, u'browser_2': 165, u'browser_3': 165}},
    'domain': u'domain_name_22', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_07 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 241,
             'number_of_visits': 1617, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 108, u'os_2': 108, u'os_1': 108, u'os_0': 108, u'os_4': 108},
             'browser': {u'browser_4': 108, u'browser_0': 108, u'browser_1': 108, u'browser_2': 108, u'browser_3': 108}},
    'domain': u'domain_name_3', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_08 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 126,
             'number_of_visits': 1424, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 123, u'os_2': 123, u'os_1': 123, u'os_0': 123, u'os_4': 123},
             'browser': {u'browser_4': 123, u'browser_0': 123, u'browser_1': 123, u'browser_2': 123, u'browser_3': 123}},
    'domain': u'domain_name_8', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_09 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 133,
             'number_of_visits': 1896, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 114, u'os_2': 114, u'os_1': 114, u'os_0': 114, u'os_4': 114},
             'browser': {u'browser_4': 114, u'browser_0': 114, u'browser_1': 114, u'browser_2': 114, u'browser_3': 114}},
    'domain': u'domain_name_5', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_10 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 154,
             'number_of_visits': 2550, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 147, u'os_2': 147, u'os_1': 147, u'os_0': 147, u'os_4': 147},
             'browser': {u'browser_4': 147, u'browser_0': 147, u'browser_1': 147, u'browser_2': 147, u'browser_3': 147}},
    'domain': u'domain_name_16', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_11 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 214,
             'number_of_visits': 1927, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 162, u'os_2': 162, u'os_1': 162, u'os_0': 162, u'os_4': 162},
             'browser': {u'browser_4': 162, u'browser_0': 162, u'browser_1': 162, u'browser_2': 162, u'browser_3': 162}},
    'domain': u'domain_name_21', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_12 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 190,
             'number_of_visits': 1312, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 156, u'os_2': 156, u'os_1': 156, u'os_0': 156, u'os_4': 156},
             'browser': {u'browser_4': 156, u'browser_0': 156, u'browser_1': 156, u'browser_2': 156, u'browser_3': 156}},
    'domain': u'domain_name_19', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_13 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 168,
             'number_of_visits': 1307, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 111, u'os_2': 111, u'os_1': 111, u'os_0': 111, u'os_4': 111},
             'browser': {u'browser_4': 111, u'browser_0': 111, u'browser_1': 111, u'browser_2': 111, u'browser_3': 111}},
    'domain': u'domain_name_4', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_14 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 111,
             'number_of_visits': 2467, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 189, u'os_2': 189, u'os_1': 189, u'os_0': 189, u'os_4': 189},
             'browser': {u'browser_4': 189, u'browser_0': 189, u'browser_1': 189, u'browser_2': 189, u'browser_3': 189}},
    'domain': u'domain_name_30', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_15 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 158,
             'number_of_visits': 1061, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 132, u'os_2': 132, u'os_1': 132, u'os_0': 132, u'os_4': 132},
             'browser': {u'browser_4': 132, u'browser_0': 132, u'browser_1': 132, u'browser_2': 132, u'browser_3': 132}},
    'domain': u'domain_name_11', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_16 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 121,
             'number_of_visits': 1258, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 117, u'os_2': 117, u'os_1': 117, u'os_0': 117, u'os_4': 117},
             'browser': {u'browser_4': 117, u'browser_0': 117, u'browser_1': 117, u'browser_2': 117, u'browser_3': 117}},
    'domain': u'domain_name_6', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_17 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 211,
             'number_of_visits': 1796, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 144, u'os_2': 144, u'os_1': 144, u'os_0': 144, u'os_4': 144},
             'browser': {u'browser_4': 144, u'browser_0': 144, u'browser_1': 144, u'browser_2': 144, u'browser_3': 144}},
    'domain': u'domain_name_15', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_18 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 22,
             'number_of_visits': 834, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 138, u'os_2': 138, u'os_1': 138, u'os_0': 138, u'os_4': 138},
             'browser': {u'browser_4': 138, u'browser_0': 138, u'browser_1': 138, u'browser_2': 138, u'browser_3': 138}},
    'domain': u'domain_name_13', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_19 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 222,
             'number_of_visits': 1924, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 105, u'os_2': 105, u'os_1': 105, u'os_0': 105, u'os_4': 105},
             'browser': {u'browser_4': 105, u'browser_0': 105, u'browser_1': 105, u'browser_2': 105, u'browser_3': 105}},
    'domain': u'domain_name_2', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_20 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 81,
             'number_of_visits': 1595, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 126, u'os_2': 126, u'os_1': 126, u'os_0': 126, u'os_4': 126},
             'browser': {u'browser_4': 126, u'browser_0': 126, u'browser_1': 126, u'browser_2': 126, u'browser_3': 126}},
    'domain': u'domain_name_9', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_21 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 140,
             'number_of_visits': 486, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 135, u'os_2': 135, u'os_1': 135, u'os_0': 135, u'os_4': 135},
             'browser': {u'browser_4': 135, u'browser_0': 135, u'browser_1': 135, u'browser_2': 135, u'browser_3': 135}},
    'domain': u'domain_name_12', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_22 = {
    'stat': {'screen_resolution': {u'(320, 240)': 12, u'(640, 480)': 20, u'(1024, 960)': 28, u'(1280, 768)': 36},
             'language': {u'ua_uk': 28, u'ca_en': 12, u'ca_fr': 20, u'us_en': 36}, 'total_duration': 184,
             'number_of_visits': 2102, 'number_of_pageviews': 0, 'country': {u'ca': 12, u'fr': 20, u'uk': 28, u'us': 36},
             'os': {u'os_3': 202, u'os_2': 202, u'os_1': 202, u'os_0': 202, u'os_4': 202},
             'browser': {u'browser_4': 202, u'browser_0': 202, u'browser_1': 202, u'browser_2': 202, u'browser_3': 202}},
    'domain': u'domain_name_1', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_23 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 101,
             'number_of_visits': 792, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 153, u'os_2': 153, u'os_1': 153, u'os_0': 153, u'os_4': 153},
             'browser': {u'browser_4': 153, u'browser_0': 153, u'browser_1': 153, u'browser_2': 153, u'browser_3': 153}},
    'domain': u'domain_name_18', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_24 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 132,
             'number_of_visits': 1719, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 192, u'os_2': 192, u'os_1': 192, u'os_0': 192, u'os_4': 192},
             'browser': {u'browser_4': 192, u'browser_0': 192, u'browser_1': 192, u'browser_2': 192, u'browser_3': 192}},
    'domain': u'domain_name_31', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_25 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 78,
             'number_of_visits': 666, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 129, u'os_2': 129, u'os_1': 129, u'os_0': 129, u'os_4': 129},
             'browser': {u'browser_4': 129, u'browser_0': 129, u'browser_1': 129, u'browser_2': 129, u'browser_3': 129}},
    'domain': u'domain_name_10', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_26 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 174,
             'number_of_visits': 1993, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 171, u'os_2': 171, u'os_1': 171, u'os_0': 171, u'os_4': 171},
             'browser': {u'browser_4': 171, u'browser_0': 171, u'browser_1': 171, u'browser_2': 171, u'browser_3': 171}},
    'domain': u'domain_name_24', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_27 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 159,
             'number_of_visits': 1053, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 141, u'os_2': 141, u'os_1': 141, u'os_0': 141, u'os_4': 141},
             'browser': {u'browser_4': 141, u'browser_0': 141, u'browser_1': 141, u'browser_2': 141, u'browser_3': 141}},
    'domain': u'domain_name_14', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_28 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 165,
             'number_of_visits': 1812, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 195, u'os_2': 195, u'os_1': 195, u'os_0': 195, u'os_4': 195},
             'browser': {u'browser_4': 195, u'browser_0': 195, u'browser_1': 195, u'browser_2': 195, u'browser_3': 195}},
    'domain': u'domain_name_32', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_29 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 165,
             'number_of_visits': 887, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 150, u'os_2': 150, u'os_1': 150, u'os_0': 150, u'os_4': 150},
             'browser': {u'browser_4': 150, u'browser_0': 150, u'browser_1': 150, u'browser_2': 150, u'browser_3': 150}},
    'domain': u'domain_name_17', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_30 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 236,
             'number_of_visits': 1722, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 183, u'os_2': 183, u'os_1': 183, u'os_0': 183, u'os_4': 183},
             'browser': {u'browser_4': 183, u'browser_0': 183, u'browser_1': 183, u'browser_2': 183, u'browser_3': 183}},
    'domain': u'domain_name_28', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_31 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 202,
             'number_of_visits': 1446, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 168, u'os_2': 168, u'os_1': 168, u'os_0': 168, u'os_4': 168},
             'browser': {u'browser_4': 168, u'browser_0': 168, u'browser_1': 168, u'browser_2': 168, u'browser_3': 168}},
    'domain': u'domain_name_23', 'timeperiod': '2001000000'}
EXPECTED_SITE_YEARLY_32 = {
    'stat': {'screen_resolution': {u'(320, 240)': 9, u'(640, 480)': 15, u'(1024, 960)': 21, u'(1280, 768)': 27},
             'language': {u'ua_uk': 21, u'ca_en': 9, u'ca_fr': 15, u'us_en': 27}, 'total_duration': 132,
             'number_of_visits': 1660, 'number_of_pageviews': 0, 'country': {u'ca': 9, u'fr': 15, u'uk': 21, u'us': 27},
             'os': {u'os_3': 174, u'os_2': 174, u'os_1': 174, u'os_0': 174, u'os_4': 174},
             'browser': {u'browser_4': 174, u'browser_0': 174, u'browser_1': 174, u'browser_2': 174, u'browser_3': 174}},
    'domain': u'domain_name_25', 'timeperiod': '2001000000'}
# pylint: enable=C0301
def generated_site_entries():
    """ delegates to base_fixtures to create yearly site statistics entries """
    created = base_fixtures.create_site_stats(COLLECTION_SITE_YEARLY, QUALIFIER_YEARLY)
    return created
def clean_site_entries():
    """ delegates to base_fixtures to clean yearly site statistics entries """
    outcome = base_fixtures.clean_site_entries(COLLECTION_SITE_YEARLY, QUALIFIER_YEARLY)
    return outcome
if __name__ == '__main__':
pass | {
"repo_name": "eggsandbeer/scheduler",
"path": "tests/yearly_fixtures.py",
"copies": "1",
"size": "20835",
"license": "bsd-3-clause",
"hash": -1394630649565285000,
"line_mean": 81.6825396825,
"line_max": 117,
"alpha_frac": 0.5444204464,
"autogenerated": false,
"ratio": 2.3420638489208634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33864842953208635,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from constants import *
from settings import ENVIRONMENT
from synergy.system.time_qualifier import *
from synergy.scheduler.scheduler_constants import *
from synergy.workers.worker_constants import *
from synergy.db.model.queue_context_entry import queue_context_entry
from synergy.db.model.daemon_process_entry import daemon_context_entry
from synergy.db.model.managed_process_entry import managed_context_entry
from synergy.db.model.timetable_tree_entry import timetable_tree_entry
# MQ queue registry: maps queue name -> queue context entry
mq_queue_context = {
    QUEUE_REQUESTED_PACKAGES: queue_context_entry(exchange=EXCHANGE_FREERUN_WORKER,
                                                  queue_name=QUEUE_REQUESTED_PACKAGES),
}
# Process registry: maps process name -> context entry.
# daemon_context_entry - free-standing daemon processes;
# managed_context_entry - processes driven by the Synergy timetable.
process_context = {
    PROCESS_LAUNCH_PY: daemon_context_entry(
        process_name=PROCESS_LAUNCH_PY,
        classname='',
        token=TOKEN_LAUNCH_PY,
        routing=ROUTING_IRRELEVANT,
        exchange=EXCHANGE_UTILS),
    # site vertical: hourly -> daily -> monthly -> yearly aggregators
    PROCESS_SITE_DAILY: managed_context_entry(
        process_name=PROCESS_SITE_DAILY,
        classname='workers.site_daily_aggregator.SiteDailyAggregator.start',
        token=TOKEN_SITE,
        time_qualifier=QUALIFIER_DAILY,
        source=COLLECTION_SITE_HOURLY,
        sink=COLLECTION_SITE_DAILY,
        state_machine_name=STATE_MACHINE_DISCRETE,
        blocking_type=BLOCKING_CHILDREN,
        run_on_active_timeperiod=True,
        trigger_frequency='every 900',
        present_on_boxes=['dev.*']),
    PROCESS_SITE_HOURLY: managed_context_entry(
        process_name=PROCESS_SITE_HOURLY,
        classname='workers.site_hourly_aggregator.SiteHourlyAggregator.start',
        token=TOKEN_SITE,
        time_qualifier=QUALIFIER_HOURLY,
        source=COLLECTION_SINGLE_SESSION,
        sink=COLLECTION_SITE_HOURLY,
        state_machine_name=STATE_MACHINE_CONTINUOUS,
        blocking_type=BLOCKING_NORMAL,
        trigger_frequency='every 60',
        present_on_boxes=['dev.*']),
    PROCESS_SITE_MONTHLY: managed_context_entry(
        process_name=PROCESS_SITE_MONTHLY,
        classname='workers.site_monthly_aggregator.SiteMonthlyAggregator.start',
        token=TOKEN_SITE,
        time_qualifier=QUALIFIER_MONTHLY,
        source=COLLECTION_SITE_DAILY,
        sink=COLLECTION_SITE_MONTHLY,
        state_machine_name=STATE_MACHINE_DISCRETE,
        blocking_type=BLOCKING_CHILDREN,
        trigger_frequency='every 10800',
        present_on_boxes=['dev.*']),
    PROCESS_SITE_YEARLY: managed_context_entry(
        process_name=PROCESS_SITE_YEARLY,
        classname='workers.site_yearly_aggregator.SiteYearlyAggregator.start',
        token=TOKEN_SITE,
        time_qualifier=QUALIFIER_YEARLY,
        source=COLLECTION_SITE_MONTHLY,
        sink=COLLECTION_SITE_YEARLY,
        state_machine_name=STATE_MACHINE_DISCRETE,
        blocking_type=BLOCKING_CHILDREN,
        trigger_frequency='every 21600',
        present_on_boxes=['dev.*']),
    # raw-data daemons: session consumer and the event stream generator
    PROCESS_SESSION_WORKER_00: daemon_context_entry(
        process_name=PROCESS_SESSION_WORKER_00,
        classname='workers.single_session_worker.SingleSessionWorker.start',
        token=TOKEN_SESSION,
        queue=QUEUE_RAW_DATA,
        routing=ROUTING_IRRELEVANT,
        exchange=EXCHANGE_RAW_DATA,
        pid_file='session_worker_00.pid',
        log_file='session_worker_00.log',
        present_on_boxes=['dev.*']),
    PROCESS_STREAM_GEN: daemon_context_entry(
        process_name=PROCESS_STREAM_GEN,
        classname='workers.event_stream_generator.EventStreamGenerator.start',
        token=TOKEN_STREAM,
        queue=QUEUE_RAW_DATA,
        routing=ROUTING_IRRELEVANT,
        exchange=EXCHANGE_RAW_DATA,
        present_on_boxes=['dev.*']),
    # client horizontal: daily -> monthly -> yearly aggregators
    PROCESS_CLIENT_DAILY: managed_context_entry(
        process_name=PROCESS_CLIENT_DAILY,
        classname='workers.client_daily_aggregator.ClientDailyAggregator.start',
        token=TOKEN_CLIENT,
        time_qualifier=QUALIFIER_DAILY,
        state_machine_name=STATE_MACHINE_SIMPLE_DISCRETE,
        blocking_type=BLOCKING_DEPENDENCIES,
        trigger_frequency='every 10800',
        present_on_boxes=['dev.*']),
    PROCESS_CLIENT_MONTHLY: managed_context_entry(
        process_name=PROCESS_CLIENT_MONTHLY,
        classname='workers.client_monthly_aggregator.ClientMonthlyAggregator.start',
        token=TOKEN_CLIENT,
        time_qualifier=QUALIFIER_MONTHLY,
        state_machine_name=STATE_MACHINE_SIMPLE_DISCRETE,
        blocking_type=BLOCKING_CHILDREN,
        trigger_frequency='every 21600',
        present_on_boxes=['dev.*']),
    PROCESS_CLIENT_YEARLY: managed_context_entry(
        process_name=PROCESS_CLIENT_YEARLY,
        classname='workers.client_yearly_aggregator.ClientYearlyAggregator.start',
        token=TOKEN_CLIENT,
        time_qualifier=QUALIFIER_YEARLY,
        state_machine_name=STATE_MACHINE_SIMPLE_DISCRETE,
        blocking_type=BLOCKING_CHILDREN,
        trigger_frequency='every 43200',
        present_on_boxes=['dev.*']),
    PROCESS_ALERT_DAILY: managed_context_entry(
        process_name=PROCESS_ALERT_DAILY,
        classname='workers.alert_daily_worker.AlertDailyWorker.start',
        token=TOKEN_ALERT,
        time_qualifier=QUALIFIER_DAILY,
        state_machine_name=STATE_MACHINE_SIMPLE_DISCRETE,
        blocking_type=BLOCKING_DEPENDENCIES,
        trigger_frequency='every 21600',
        present_on_boxes=['dev.*']),
    PROCESS_BASH_DRIVER: daemon_context_entry(
        process_name=PROCESS_BASH_DRIVER,
        classname='workers.bash_driver.BashDriver.start',
        token=TOKEN_BASH_DRIVER,
        exchange=EXCHANGE_FREERUN_WORKER,
        process_type=TYPE_FREERUN,
        present_on_boxes=['dev.*']),
}
# Timetable registry: maps tree name -> tree entry; each tree lists its
# enclosed processes top-to-bottom and the trees it depends on
timetable_context = {
    TREE_SITE_VERTICAL: timetable_tree_entry(
        tree_name=TREE_SITE_VERTICAL,
        enclosed_processes=[PROCESS_SITE_YEARLY, PROCESS_SITE_MONTHLY, PROCESS_SITE_DAILY, PROCESS_SITE_HOURLY],
        dependent_on=[],
        mx_name=TOKEN_SITE,
        mx_page=MX_PAGE_TRAFFIC),
    TREE_CLIENT_HORIZONTAL: timetable_tree_entry(
        tree_name=TREE_CLIENT_HORIZONTAL,
        enclosed_processes=[PROCESS_CLIENT_YEARLY, PROCESS_CLIENT_MONTHLY, PROCESS_CLIENT_DAILY],
        dependent_on=[TREE_SITE_VERTICAL],
        mx_name=TOKEN_CLIENT,
        mx_page=MX_PAGE_TRAFFIC),
    TREE_LINEAR_DAILY: timetable_tree_entry(
        tree_name=TREE_LINEAR_DAILY,
        enclosed_processes=[PROCESS_ALERT_DAILY],
        dependent_on=[],
        mx_name=TOKEN_ALERT,
        mx_page=MX_PAGE_ALERT)
}
# Update current dict with the environment-specific settings
try:
    overrides = __import__('context_' + ENVIRONMENT)
    process_context.update(overrides.process_context)
except (ImportError, AttributeError):
    # ImportError: no override module exists for this environment;
    # AttributeError: the module does not define process_context.
    # Either way - keep the defaults. (was a bare `except` that silently
    # swallowed every error, hiding real defects in the override module)
    pass
| {
"repo_name": "eggsandbeer/scheduler",
"path": "context.py",
"copies": "1",
"size": "6679",
"license": "bsd-3-clause",
"hash": -465940350792028000,
"line_mean": 37.1657142857,
"line_max": 112,
"alpha_frac": 0.6779458003,
"autogenerated": false,
"ratio": 3.543236074270557,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.4721181874570557,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from logging import ERROR, WARNING, INFO
from synergy.conf import context
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.error import DuplicateKeyError
from synergy.db.model import unit_of_work
from synergy.db.model.freerun_process_entry import FreerunProcessEntry, MAX_NUMBER_OF_EVENTS
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.scheduler.scheduler_constants import STATE_MACHINE_FREERUN
from synergy.system import time_helper
from synergy.system.decorator import with_reconnect
from synergy.system.mq_transmitter import MqTransmitter
from synergy.system.time_qualifier import QUALIFIER_REAL_TIME
class StateMachineFreerun(object):
    """ State Machine governing freerun jobs/triggers: creates, publishes and
        recycles unit_of_work (UOW) records for FreerunProcessEntry schedulables """

    def __init__(self, logger, name=STATE_MACHINE_FREERUN):
        self.name = name
        self.logger = logger
        self.mq_transmitter = MqTransmitter(self.logger)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.freerun_process_dao = FreerunProcessDao(self.logger)

    @with_reconnect
    def _log_message(self, level, freerun_entry, msg):
        """ performs logging into the log file and into the freerun_entry's event log
            :param level: logging level constant (ERROR/WARNING/INFO)
            :param freerun_entry: FreerunProcessEntry whose event log receives the message
            :param msg: message to record """
        self.logger.log(level, msg)

        assert isinstance(freerun_entry, FreerunProcessEntry)
        event_log = freerun_entry.event_log
        # keep the event log bounded: drop the oldest entry (tail) before
        # inserting the newest at the head
        if len(event_log) > MAX_NUMBER_OF_EVENTS:
            del event_log[-1]
        event_log.insert(0, msg)
        self.freerun_process_dao.update(freerun_entry)

    @with_reconnect
    def _insert_uow(self, freerun_entry, flow_request=None):
        """ creates unit_of_work and inserts it into the DB
            :param freerun_entry: FreerunProcessEntry the UOW is created for
            :param flow_request: optional workflow request carrying its own
                   schedulable_name/timeperiod scope and extra arguments
            :return: the inserted UnitOfWork with db_id assigned
            :raise DuplicateKeyError: if unit_of_work with given parameters already exists """
        process_entry = context.process_context[freerun_entry.process_name]

        # FIX: merge into a copy - calling update() on process_entry.arguments
        # directly would mutate the shared process_context entry in place,
        # leaking per-request arguments into every later invocation
        arguments = dict(process_entry.arguments)
        arguments.update(freerun_entry.arguments)

        if flow_request:
            # workflow-driven request defines its own naming and timeperiod scope
            schedulable_name = flow_request.schedulable_name
            timeperiod = flow_request.timeperiod
            start_timeperiod = flow_request.start_timeperiod
            end_timeperiod = flow_request.end_timeperiod
            arguments.update(flow_request.arguments)
        else:
            # plain freerun trigger: scope the UOW to the current real-time timeperiod
            schedulable_name = freerun_entry.schedulable_name
            timeperiod = time_helper.actual_timeperiod(QUALIFIER_REAL_TIME)
            start_timeperiod = timeperiod
            end_timeperiod = timeperiod

        uow = UnitOfWork()
        uow.process_name = schedulable_name
        uow.timeperiod = timeperiod
        uow.start_id = 0
        uow.end_id = 0
        uow.start_timeperiod = start_timeperiod
        uow.end_timeperiod = end_timeperiod
        uow.created_at = datetime.utcnow()
        uow.submitted_at = datetime.utcnow()
        # source/sink are optional attributes on the process entry
        uow.source = process_entry.source if hasattr(process_entry, 'source') else None
        uow.sink = process_entry.sink if hasattr(process_entry, 'sink') else None
        uow.state = unit_of_work.STATE_REQUESTED
        uow.unit_of_work_type = unit_of_work.TYPE_FREERUN
        uow.number_of_retries = 0
        uow.arguments = arguments
        uow.db_id = self.uow_dao.insert(uow)

        msg = 'Created: UOW {0} for {1}@{2}.' \
              .format(uow.db_id, freerun_entry.schedulable_name, timeperiod)
        self._log_message(INFO, freerun_entry, msg)
        return uow

    def _publish_uow(self, freerun_entry, uow):
        """ publishes the given UOW to the MQ and records the event """
        self.mq_transmitter.publish_freerun_uow(freerun_entry, uow)
        msg = f'Published: UOW {uow.db_id} for {freerun_entry.schedulable_name}.'
        self._log_message(INFO, freerun_entry, msg)

    def _reset_flow_uow(self, freerun_entry, uow, flow_request):
        """ there can be multiple freeruns for a single combination of workflow+step+timeperiod
            hence, we have to *recycle* finished UOW: reset its state, timestamps,
            counters and arguments so it can be re-run """
        process_entry = context.process_context[freerun_entry.process_name]

        # FIX: merge into a copy - see _insert_uow; in-place update() would
        # pollute the shared process_context entry
        arguments = dict(process_entry.arguments)
        arguments.update(freerun_entry.arguments)
        arguments.update(flow_request.arguments)

        uow.created_at = datetime.utcnow()
        uow.submitted_at = datetime.utcnow()
        uow.state = unit_of_work.STATE_REQUESTED
        uow.unit_of_work_type = unit_of_work.TYPE_FREERUN
        # remove run-specific fields left over from the previous execution
        del uow.started_at
        del uow.finished_at
        del uow.number_of_aggregated_documents
        del uow.number_of_processed_documents
        uow.number_of_retries = 0
        uow.arguments = arguments
        self.uow_dao.update(uow)

    def insert_and_publish_uow(self, freerun_entry, flow_request=None, reset_uow=False):
        """ creates a new UOW (or recovers an existing duplicate) and publishes it;
            when reset_uow is set and a flow_request is given, a recovered duplicate
            is recycled via _reset_flow_uow before publishing """
        try:
            uow = self._insert_uow(freerun_entry, flow_request)
        except DuplicateKeyError as e:
            msg = f'Duplication of UOW found for {freerun_entry.schedulable_name}. Error msg: {e}'
            self._log_message(WARNING, freerun_entry, msg)
            uow = self.uow_dao.recover_from_duplicatekeyerror(e)
            if flow_request and reset_uow:
                self._reset_flow_uow(freerun_entry, uow, flow_request)

        if uow is not None:
            # publish the created/recovered/recycled unit_of_work
            self._publish_uow(freerun_entry, uow)
            freerun_entry.related_unit_of_work = uow.db_id
            if not flow_request:
                # FreerunProcessEntry for workflows are runtime-only objects
                # skip persistence update if this is a workflow request
                self.freerun_process_dao.update(freerun_entry)
        else:
            msg = f'PERSISTENT TIER ERROR! Unable to locate UOW for {freerun_entry.schedulable_name}'
            self._log_message(WARNING, freerun_entry, msg)

    def _process_state_embryo(self, freerun_entry, flow_request=None):
        """ method creates unit_of_work and associates it with the FreerunProcessEntry """
        self.insert_and_publish_uow(freerun_entry, flow_request)

    def _process_state_in_progress(self, freerun_entry, uow):
        """ method that takes care of processing unit_of_work records in STATE_REQUESTED or STATE_IN_PROGRESS states"""
        self._publish_uow(freerun_entry, uow)

    def _process_terminal_state(self, freerun_entry, uow, flow_request=None):
        """ method that takes care of processing unit_of_work records in
            STATE_PROCESSED, STATE_NOOP, STATE_INVALID, STATE_CANCELED states"""
        msg = f'UOW for {freerun_entry.schedulable_name} found in state {uow.state}.'
        self._log_message(INFO, freerun_entry, msg)
        self.insert_and_publish_uow(freerun_entry, flow_request, reset_uow=True)

    def manage_schedulable(self, freerun_entry: FreerunProcessEntry, flow_request=None):
        """ method main duty - is to _avoid_ publishing another unit_of_work, if previous was not yet processed
            In case the Scheduler sees that the unit_of_work is pending,
            it will issue new WorkerMqRequest for the same UOW """
        uow = None
        if freerun_entry.related_unit_of_work:
            uow = self.uow_dao.get_one(freerun_entry.related_unit_of_work)

        try:
            # dispatch on the UOW's current state
            if uow is None:
                self._process_state_embryo(freerun_entry, flow_request)
            elif uow.is_requested or uow.is_in_progress:
                self._process_state_in_progress(freerun_entry, uow)
            elif uow.is_finished or uow.is_invalid:
                self._process_terminal_state(freerun_entry, uow, flow_request)
            else:
                msg = f'Unknown state {uow.state} of the UOW {uow.db_id}'
                self._log_message(ERROR, freerun_entry, msg)
        except LookupError as e:
            msg = f'Lookup issue for schedulable: {freerun_entry.db_id} in timeperiod {uow.timeperiod}, because of: {e}'
            self._log_message(WARNING, freerun_entry, msg)

    def cancel_uow(self, freerun_entry):
        """ transfers the entry's related UOW (if any) into STATE_CANCELED """
        uow_id = freerun_entry.related_unit_of_work
        if uow_id is None:
            msg = f'cancel_uow: no related UOW for {freerun_entry.schedulable_name}'
        else:
            uow = self.uow_dao.get_one(uow_id)
            uow.state = unit_of_work.STATE_CANCELED
            self.uow_dao.update(uow)
            msg = f'cancel_uow: canceled UOW {uow_id} for {freerun_entry.schedulable_name}'

        self._log_message(INFO, freerun_entry, msg)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/state_machine_freerun.py",
"copies": "1",
"size": "8423",
"license": "bsd-3-clause",
"hash": -5388459291552159000,
"line_mean": 44.777173913,
"line_max": 120,
"alpha_frac": 0.660216075,
"autogenerated": false,
"ratio": 3.461981093300452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4622197168300452,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from logging import ERROR, WARNING, INFO
from synergy.conf import context
from synergy.db.error import DuplicateKeyError
from synergy.db.model import unit_of_work
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.db.model.freerun_process_entry import FreerunProcessEntry, MAX_NUMBER_OF_LOG_ENTRIES
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.system import time_helper
from synergy.system.time_qualifier import QUALIFIER_REAL_TIME
from synergy.system.decorator import with_reconnect
from synergy.scheduler.scheduler_constants import STATE_MACHINE_FREERUN, TYPE_FREERUN
from synergy.mq.flopsy import PublishersPool
class StateMachineFreerun(object):
    """ State Machine to handle freerun jobs/triggers:
        creates, publishes and re-publishes unit_of_work (UOW) records
        for FreerunProcessEntry schedulables """

    def __init__(self, logger, name=STATE_MACHINE_FREERUN):
        self.name = name
        self.logger = logger
        # pool of MQ publishers; one publisher per process name
        self.publishers = PublishersPool(self.logger)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.sfe_dao = FreerunProcessDao(self.logger)

    def __del__(self):
        # best-effort cleanup of the MQ publisher pool at GC time;
        # any failure is logged and swallowed
        try:
            self.logger.info('Closing Flopsy Publishers Pool...')
            self.publishers.close()
        except Exception as e:
            self.logger.error('Exception caught while closing Flopsy Publishers Pool: %s' % str(e))

    @with_reconnect
    def _log_message(self, level, freerun_entry, msg):
        """ method performs logging into log file and the freerun_entry """
        self.logger.log(level, msg)

        assert isinstance(freerun_entry, FreerunProcessEntry)
        log = freerun_entry.log
        # keep the entry's log bounded: drop the oldest record (tail)
        # before inserting the newest at the head
        if len(log) > MAX_NUMBER_OF_LOG_ENTRIES:
            del log[-1]
        log.insert(0, msg)
        self.sfe_dao.update(freerun_entry)

    @with_reconnect
    def _insert_uow(self, freerun_entry):
        """ creates unit_of_work and inserts it into the DB
            :raise DuplicateKeyError: if unit_of_work with given parameters already exists """
        # freerun UOWs are scoped to the current real-time timeperiod
        current_timeperiod = time_helper.actual_timeperiod(QUALIFIER_REAL_TIME)

        uow = UnitOfWork()
        uow.process_name = freerun_entry.schedulable_name
        uow.timeperiod = current_timeperiod
        uow.start_id = 0
        uow.end_id = 0
        uow.start_timeperiod = current_timeperiod
        uow.end_timeperiod = current_timeperiod
        uow.created_at = datetime.utcnow()
        uow.source = context.process_context[freerun_entry.process_name].source
        uow.sink = context.process_context[freerun_entry.process_name].sink
        uow.state = unit_of_work.STATE_REQUESTED
        uow.unit_of_work_type = TYPE_FREERUN
        uow.number_of_retries = 0
        uow.arguments = freerun_entry.arguments
        uow.db_id = self.uow_dao.insert(uow)

        msg = 'Created: UOW %s for %s in timeperiod %s.' \
              % (uow.db_id, freerun_entry.schedulable_name, current_timeperiod)
        self._log_message(INFO, freerun_entry, msg)
        return uow

    def _publish_uow(self, freerun_entry, uow):
        # wrap the UOW reference into an MQ transmission record and publish it
        # to the publisher associated with the entry's process name
        mq_request = SynergyMqTransmission(process_name=freerun_entry.process_name,
                                           entry_name=freerun_entry.entry_name,
                                           unit_of_work_id=uow.db_id)

        publisher = self.publishers.get(freerun_entry.process_name)
        publisher.publish(mq_request.document)
        publisher.release()

        msg = 'Published: UOW %s for %s.' % (uow.db_id, freerun_entry.schedulable_name)
        self._log_message(INFO, freerun_entry, msg)

    def insert_and_publish_uow(self, freerun_entry):
        """ creates a new UOW (or recovers the existing duplicate from the DB),
            publishes it and links it to the freerun_entry """
        try:
            uow = self._insert_uow(freerun_entry)
        except DuplicateKeyError as e:
            # a UOW with identical parameters already exists - reuse it
            msg = 'Duplication of unit_of_work found for %s. Error msg: %r' % (freerun_entry.schedulable_name, e)
            self._log_message(WARNING, freerun_entry, msg)
            uow = self.uow_dao.recover_from_duplicatekeyerror(e)

        if uow is not None:
            # publish the created/caught up unit_of_work
            self._publish_uow(freerun_entry, uow)
            freerun_entry.related_unit_of_work = uow.db_id
            self.sfe_dao.update(freerun_entry)
        else:
            msg = 'SYSTEM IS LIKELY IN UNSTABLE STATE! Unable to locate unit_of_work for %s' \
                  % freerun_entry.schedulable_name
            self._log_message(WARNING, freerun_entry, msg)

    def manage_schedulable(self, freerun_entry):
        """ method main duty - is to _avoid_ publishing another unit_of_work, if previous was not yet processed
            In case the Scheduler sees that the unit_of_work is pending it will fire another WorkerMqRequest """
        assert isinstance(freerun_entry, FreerunProcessEntry)
        if freerun_entry.related_unit_of_work is None:
            uow = None
        else:
            uow = self.uow_dao.get_one(freerun_entry.related_unit_of_work)

        try:
            # dispatch on the UOW's current state
            if uow is None:
                self._process_state_embryo(freerun_entry)
            elif uow.is_requested or uow.is_in_progress:
                self._process_state_in_progress(freerun_entry, uow)
            elif uow.is_finished or uow.is_invalid:
                self._process_terminal_state(freerun_entry, uow)
            else:
                msg = 'Unknown state %s of the unit_of_work %s' % (uow.state, uow.db_id)
                self._log_message(ERROR, freerun_entry, msg)
        except LookupError as e:
            msg = 'Lookup issue for schedulable: %r in timeperiod %s, because of: %r' \
                  % (freerun_entry.db_id, uow.timeperiod, e)
            self._log_message(WARNING, freerun_entry, msg)

    def _process_state_embryo(self, freerun_entry):
        """ method creates unit_of_work and associates it with the FreerunProcessEntry """
        self.insert_and_publish_uow(freerun_entry)

    def _process_state_in_progress(self, freerun_entry, uow):
        """ method that takes care of processing unit_of_work records in STATE_REQUESTED or STATE_IN_PROGRESS states"""
        self._publish_uow(freerun_entry, uow)

    def _process_terminal_state(self, freerun_entry, uow):
        """ method that takes care of processing unit_of_work records in
            STATE_PROCESSED, STATE_INVALID, STATE_CANCELED states"""
        msg = 'unit_of_work for %s found in %s state.' % (freerun_entry.schedulable_name, uow.state)
        self._log_message(INFO, freerun_entry, msg)
        self.insert_and_publish_uow(freerun_entry)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/state_machine_freerun.py",
"copies": "1",
"size": "6635",
"license": "bsd-3-clause",
"hash": -1499173110432158000,
"line_mean": 43.5302013423,
"line_max": 119,
"alpha_frac": 0.6532027129,
"autogenerated": false,
"ratio": 3.4611371935315596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46143399064315593,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from logging import ERROR, WARNING, INFO
from synergy.db.model import job
from synergy.db.model.job import Job
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.db.manager import ds_manager
from synergy.conf import context
from synergy.system.decorator import with_reconnect
from synergy.system import time_helper
from synergy.scheduler.scheduler_constants import STATE_MACHINE_CONTINUOUS
from synergy.scheduler.abstract_state_machine import AbstractStateMachine
class StateMachineContinuous(AbstractStateMachine):
    """ Continuous State Machine re-run process for timeperiod A until A+1,
        then transfers the timeperiod A to STATE_FINAL_RUN """

    def __init__(self, logger, timetable):
        super(StateMachineContinuous, self).__init__(logger, timetable, name=STATE_MACHINE_CONTINUOUS)
        # data-source manager: provides primary-key boundary lookups per collection
        self.ds = ds_manager.ds_factory(self.logger)

    def __del__(self):
        super(StateMachineContinuous, self).__del__()

    def shallow_state_update(self, uow):
        """ updates the job record from a finished UOW without triggering new work;
            only applicable when the job is already in STATE_FINAL_RUN """
        tree = self.timetable.get_tree(uow.process_name)
        node = tree.get_node(uow.process_name, uow.timeperiod)
        job_record = node.job_record

        assert isinstance(job_record, Job)
        if not job_record.is_final_run:
            self.logger.info('Can not perform shallow status update for %s in timeperiod %s '
                             'since the job state is not STATE_FINAL_RUN' % (uow.process_name, uow.timeperiod))
            return

        self._process_state_final_run(job_record)

    @with_reconnect
    def update_scope_of_processing(self, process_name, uow, start_timeperiod, end_timeperiod):
        """method reads collection and refine slice upper bound for processing"""
        source_collection_name = uow.source
        # NOTE(review): the upper bound is taken from lowest_primary_key -
        # verify ds_manager's key-ordering convention against the caller
        last_object_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        uow.end_id = str(last_object_id)
        self.uow_dao.update(uow)

        msg = 'Updated range to process for %s in timeperiod %s for collection %s: [%s : %s]' \
              % (process_name, start_timeperiod, source_collection_name,
                 uow.start_id, str(last_object_id))
        self._log_message(INFO, process_name, start_timeperiod, msg)

    def _compute_and_transfer_to_progress(self, process_name, start_timeperiod, end_timeperiod, job_record):
        """ method computes new unit_of_work for job record in STATE_IN_PROGRESS
            it also contains _fuzzy_ logic regard the DuplicateKeyError:
            - we try to compute new scope of processing
            - in case we face DuplicateKeyError, we try to recover from it by reading existing unit_of_work from DB:
            -- in case unit_of_work can be located - we update job record and proceed normally
            -- in case unit_of_work can not be located (what is equal to fatal data corruption) - we log exception and
               ask/expect manual intervention to resolve the corruption"""
        source_collection_name = context.process_context[process_name].source
        # NOTE(review): start_id from highest_primary_key, end_id from
        # lowest_primary_key - confirm this matches the collection's sort order
        start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)

        uow, is_duplicate = self.insert_and_publish_uow(process_name,
                                                        start_timeperiod,
                                                        end_timeperiod,
                                                        start_id,
                                                        end_id)
        self.timetable.update_job_record(job_record, uow, job.STATE_IN_PROGRESS)

    def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
        """ method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
            it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method"""
        source_collection_name = context.process_context[process_name].source
        start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)

        # is_duplicate flag doubles as the finalization trigger:
        # a pre-existing UOW means the final run can be processed immediately
        uow, transfer_to_final = self.insert_and_publish_uow(process_name,
                                                             start_timeperiod,
                                                             end_timeperiod,
                                                             start_id,
                                                             end_id)
        self.timetable.update_job_record(job_record, uow, job.STATE_FINAL_RUN)

        if transfer_to_final:
            self._process_state_final_run(job_record)

    def _process_state_embryo(self, job_record):
        """ method that takes care of processing job records in STATE_EMBRYO state"""
        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        end_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)
        self._compute_and_transfer_to_progress(job_record.process_name, job_record.timeperiod,
                                               end_timeperiod, job_record)

    def _process_state_in_progress(self, job_record):
        """ method that takes care of processing job records in STATE_IN_PROGRESS state"""
        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        end_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        uow = self.uow_dao.get_one(job_record.related_unit_of_work)

        if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
            # the timeperiod is still current (or children are unfinished):
            # keep re-running within the same timeperiod
            if uow.is_invalid or uow.is_requested:
                # current uow has not been processed yet. update it
                self.update_scope_of_processing(job_record.process_name, uow, job_record.timeperiod, end_timeperiod)
            else:
                # STATE_IN_PROGRESS, STATE_PROCESSED, STATE_CANCELED
                # create new uow to cover new inserts
                self._compute_and_transfer_to_progress(job_record.process_name, job_record.timeperiod,
                                                       end_timeperiod, job_record)
        elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
            # create new uow for FINAL RUN
            self._compute_and_transfer_to_final_run(job_record.process_name, job_record.timeperiod,
                                                    end_timeperiod, job_record)
        else:
            # timeperiod lies in the future - indicates a clock/data inconsistency
            msg = 'job record %s has timeperiod from future %s vs current time %s' \
                  % (job_record.db_id, job_record.timeperiod, actual_timeperiod)
            self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)

    def _process_state_final_run(self, job_record):
        """method takes care of processing job records in STATE_FINAL_RUN state"""
        uow = self.uow_dao.get_one(job_record.related_unit_of_work)
        assert isinstance(uow, UnitOfWork)

        if uow.is_processed:
            self.timetable.update_job_record(job_record, uow, job.STATE_PROCESSED)
        elif uow.is_canceled:
            self.timetable.update_job_record(job_record, uow, job.STATE_SKIPPED)
        else:
            # UOW still active: hold off and wait for the next scheduler tick
            msg = 'Suppressed creating uow for %s in timeperiod %s; job record is in %s; uow is in %s' \
                  % (job_record.process_name, job_record.timeperiod, job_record.state, uow.state)
            self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)

        # refresh the timetable tree so dependent nodes see the new state
        timetable_tree = self.timetable.get_tree(job_record.process_name)
        timetable_tree.build_tree()

    def _process_state_skipped(self, job_record):
        """method takes care of processing job records in STATE_SKIPPED state"""
        msg = 'Skipping job record %s in timeperiod %s. Apparently its most current timeperiod as of %s UTC' \
              % (job_record.db_id, job_record.timeperiod, str(datetime.utcnow()))
        self._log_message(WARNING, job_record.process_name, job_record.timeperiod, msg)

    def _process_state_processed(self, job_record):
        """method takes care of processing job records in STATE_PROCESSED state"""
        # processed jobs should not reach the state machine again
        msg = 'Unexpected state %s of job record %s' % (job_record.state, job_record.db_id)
        self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/state_machine_continuous.py",
"copies": "1",
"size": "8717",
"license": "bsd-3-clause",
"hash": 6394817306943148000,
"line_mean": 57.8986486486,
"line_max": 116,
"alpha_frac": 0.6425375703,
"autogenerated": false,
"ratio": 3.933664259927798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036272676607718157,
"num_lines": 148
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from logging import ERROR, WARNING, INFO
from synergy.db.model import job
from synergy.scheduler.scheduler_constants import STATE_MACHINE_DISCRETE
from synergy.scheduler.abstract_state_machine import AbstractStateMachine
from synergy.system import time_helper
from synergy.conf import context
class StateMachineDiscrete(AbstractStateMachine):
    """ State Machine to handle discrete timeperiod boundaries for batch jobs
        in comparison to StateMachineContinuous this one does not re-compute processing boundaries"""

    def __init__(self, logger, timetable, name=STATE_MACHINE_DISCRETE):
        super(StateMachineDiscrete, self).__init__(logger, timetable, name)

    def __del__(self):
        super(StateMachineDiscrete, self).__del__()

    def shallow_state_update(self, uow):
        """ updates the job record from a finished UOW without triggering new work;
            only applicable when the job is already in STATE_FINAL_RUN """
        tree = self.timetable.get_tree(uow.process_name)
        node = tree.get_node(uow.process_name, uow.timeperiod)
        job_record = node.job_record

        if not job_record.is_final_run:
            self.logger.info('Can not perform shallow status update for %s in timeperiod %s '
                             'since the job state is not STATE_FINAL_RUN' % (uow.process_name, uow.timeperiod))
            return

        self._process_state_final_run(job_record)

    def _process_state_embryo(self, job_record):
        """ method that takes care of processing job records in STATE_EMBRYO state"""
        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        end_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)

        uow, is_duplicate = self.insert_and_publish_uow(job_record.process_name,
                                                        job_record.timeperiod,
                                                        end_timeperiod,
                                                        0,
                                                        0)
        self.timetable.update_job_record(job_record, uow, job.STATE_IN_PROGRESS)

    def _process_state_in_progress(self, job_record):
        """ method that takes care of processing job records in STATE_IN_PROGRESS state"""

        # inner helper: advance the job to target_state once the current UOW
        # finishes; NOTE - it late-binds end_timeperiod and iteration from the
        # enclosing scope, which are assigned below before any call
        def _process_state(target_state, uow):
            if uow.is_active:
                # Large Job processing takes more than 1 tick of the Scheduler
                # Let the Job processing complete - do no updates to Scheduler records
                pass
            elif uow.is_finished:
                # create new uow to cover new inserts
                new_uow, is_duplicate = self.insert_and_publish_uow(job_record.process_name,
                                                                    job_record.timeperiod,
                                                                    end_timeperiod,
                                                                    0,
                                                                    iteration + 1)
                self.timetable.update_job_record(job_record, new_uow, target_state)

        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        end_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        uow = self.uow_dao.get_one(job_record.related_unit_of_work)
        # end_id doubles as the iteration counter for discrete jobs
        iteration = int(uow.end_id)

        if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
            _process_state(job.STATE_IN_PROGRESS, uow)
        elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
            _process_state(job.STATE_FINAL_RUN, uow)
        else:
            # timeperiod lies in the future - indicates a clock/data inconsistency
            msg = 'Job record %s has timeperiod from future %s vs current time %s' \
                  % (job_record.db_id, job_record.timeperiod, actual_timeperiod)
            self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)

    def _process_state_final_run(self, job_record):
        """method takes care of processing job records in STATE_FINAL_RUN state"""
        uow = self.uow_dao.get_one(job_record.related_unit_of_work)

        if uow.is_processed:
            self.timetable.update_job_record(job_record, uow, job.STATE_PROCESSED)
        elif uow.is_canceled:
            self.timetable.update_job_record(job_record, uow, job.STATE_SKIPPED)
        else:
            # UOW still active: hold off and wait for the next scheduler tick
            msg = 'Suppressed creating uow for %s in timeperiod %s; job record is in %s; uow is in %s' \
                  % (job_record.process_name, job_record.timeperiod, job_record.state, uow.state)
            self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)

        # refresh the timetable tree so dependent nodes see the new state
        timetable_tree = self.timetable.get_tree(job_record.process_name)
        timetable_tree.build_tree()

    def _process_state_skipped(self, job_record):
        """method takes care of processing job records in STATE_SKIPPED state"""
        msg = 'Skipping job record %s in timeperiod %s. Apparently its most current timeperiod as of %s UTC' \
              % (job_record.db_id, job_record.timeperiod, str(datetime.utcnow()))
        self._log_message(WARNING, job_record.process_name, job_record.timeperiod, msg)

    def _process_state_processed(self, job_record):
        """method takes care of processing job records in STATE_PROCESSED state"""
        # processed jobs should not reach the state machine again
        msg = 'Unexpected state %s of job record %s' % (job_record.state, job_record.db_id)
        self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/state_machine_dicrete.py",
"copies": "1",
"size": "5655",
"license": "bsd-3-clause",
"hash": 8304448557221240000,
"line_mean": 53.9029126214,
"line_max": 111,
"alpha_frac": 0.6183908046,
"autogenerated": false,
"ratio": 4.0421729807005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00394822487600535,
"num_lines": 103
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from logging import INFO, WARNING, ERROR
from synergy.db.model import job
from synergy.db.model.job import Job
from synergy.db.error import DuplicateKeyError
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.dao.job_dao import JobDao
from synergy.db.model import unit_of_work
from synergy.db.model.unit_of_work import UnitOfWork
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.mq.flopsy import PublishersPool
from synergy.conf import context
from synergy.system.decorator import with_reconnect
from synergy.scheduler.tree_node import NodesCompositeState
from synergy.scheduler.scheduler_constants import TYPE_MANAGED
class AbstractStateMachine(object):
""" Abstract state machine used to govern all processes and their states """
def __init__(self, logger, timetable, name):
self.name = name
self.logger = logger
self.publishers = PublishersPool(self.logger)
self.timetable = timetable
self.uow_dao = UnitOfWorkDao(self.logger)
self.job_dao = JobDao(self.logger)
def __del__(self):
try:
self.logger.info('Closing Flopsy Publishers Pool...')
self.publishers.close()
except Exception as e:
self.logger.error('Exception caught while closing Flopsy Publishers Pool: %s' % str(e))
def _log_message(self, level, process_name, timeperiod, msg):
""" method performs logging into log file and Timetable's tree node"""
self.timetable.add_log_entry(process_name, timeperiod, msg)
self.logger.log(level, msg)
@with_reconnect
def _insert_uow(self, process_name, start_timeperiod, end_timeperiod, start_id, end_id):
"""creates unit_of_work and inserts it into the DB
:raise DuplicateKeyError: if unit_of_work with given parameters already exists """
uow = UnitOfWork()
uow.process_name = process_name
uow.timeperiod = start_timeperiod
uow.start_id = str(start_id)
uow.end_id = str(end_id)
uow.start_timeperiod = start_timeperiod
uow.end_timeperiod = end_timeperiod
uow.created_at = datetime.utcnow()
uow.source = context.process_context[process_name].source
uow.sink = context.process_context[process_name].sink
uow.state = unit_of_work.STATE_REQUESTED
uow.unit_of_work_type = TYPE_MANAGED
uow.number_of_retries = 0
uow.arguments = context.process_context[process_name].arguments
uow.db_id = self.uow_dao.insert(uow)
msg = 'Created: UOW %s for %s in timeperiod [%s:%s).' \
% (uow.db_id, process_name, start_timeperiod, end_timeperiod)
self._log_message(INFO, process_name, start_timeperiod, msg)
return uow
def _publish_uow(self, uow):
mq_request = SynergyMqTransmission(process_name=uow.process_name, unit_of_work_id=uow.db_id)
publisher = self.publishers.get(uow.process_name)
publisher.publish(mq_request.document)
publisher.release()
msg = 'Published: UOW %r for %r in timeperiod %r.' % (uow.db_id, uow.process_name, uow.start_timeperiod)
self._log_message(INFO, uow.process_name, uow.start_timeperiod, msg)
def insert_and_publish_uow(self, process_name, start_timeperiod, end_timeperiod, start_id, end_id):
""" method creates and publishes a unit_of_work. it also handles DuplicateKeyError and attempts recovery
:return: tuple (uow, is_duplicate)
:raise UserWarning: if the recovery from DuplicateKeyError was unsuccessful
"""
is_duplicate = False
try:
uow = self._insert_uow(process_name, start_timeperiod, end_timeperiod, start_id, end_id)
except DuplicateKeyError as e:
is_duplicate = True
msg = 'Catching up with latest unit_of_work %s in timeperiod %s, because of: %r' \
% (process_name, start_timeperiod, e)
self._log_message(WARNING, process_name, start_timeperiod, msg)
uow = self.uow_dao.recover_from_duplicatekeyerror(e)
if uow is None:
msg = 'MANUAL INTERVENTION REQUIRED! Unable to locate unit_of_work for %s in %s' \
% (process_name, start_timeperiod)
self._log_message(WARNING, process_name, start_timeperiod, msg)
raise UserWarning(msg)
# publish the created/caught up unit_of_work
self._publish_uow(uow)
return uow, is_duplicate
def shallow_state_update(self, uow):
""" method does not trigger any new actions
if applicable, it will update job_record state and Timetable tree node state
:assumptions: uow is either in STATE_CANCELED or STATE_PROCESSED """
pass
def _process_state_embryo(self, job_record):
""" method that takes care of processing job records in STATE_EMBRYO state"""
pass
def _process_state_in_progress(self, job_record):
""" method that takes care of processing job records in STATE_IN_PROGRESS state"""
pass
def _process_state_final_run(self, job_record):
"""method takes care of processing job records in STATE_FINAL_RUN state"""
pass
def _process_state_skipped(self, job_record):
"""method takes care of processing job records in STATE_FINAL_SKIPPED state"""
pass
def _process_state_processed(self, job_record):
"""method takes care of processing job records in STATE_FINAL_SKIPPED state"""
pass
def manage_job_with_blocking_children(self, job_record, run_on_active_timeperiod):
""" method will trigger job processing only if all children are in STATE_PROCESSED or STATE_SKIPPED
and if all external dependencies are finalized (i.e. in STATE_PROCESSED or STATE_SKIPPED) """
is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
composite_state = self.timetable.dependent_on_composite_state(job_record)
if is_job_finalizable:
self.manage_job(job_record)
elif composite_state.all_healthy and run_on_active_timeperiod:
self.manage_job(job_record)
else:
msg = '%s for timeperiod %r is blocked by unprocessed children/dependencies. Waiting another tick' \
% (job_record.process_name, job_record.timeperiod)
self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
def manage_job_with_blocking_dependencies(self, job_record, run_on_active_timeperiod):
""" method will trigger job processing only if _all_ dependencies are in STATE_PROCESSED
method will transfer current job into STATE_SKIPPED if any dependency is in STATE_SKIPPED """
composite_state = self.timetable.dependent_on_composite_state(job_record)
assert isinstance(composite_state, NodesCompositeState)
if composite_state.all_processed:
self.manage_job(job_record)
elif composite_state.all_healthy and run_on_active_timeperiod:
self.manage_job(job_record)
elif composite_state.skipped_present:
# As soon as among <dependent on> periods are in STATE_SKIPPED
# there is very little sense in waiting for them to become STATE_PROCESSED
# Skip this timeperiod itself
job_record.state = job.STATE_SKIPPED
self.job_dao.update(job_record)
tree = self.timetable.get_tree(job_record.process_name)
tree.update_node(job_record)
msg = '%s for timeperiod %r is blocked by STATE_SKIPPED dependencies. ' \
'Transferred the job to STATE_SKIPPED' % (job_record.process_name, job_record.timeperiod)
self._log_message(WARNING, job_record.process_name, job_record.timeperiod, msg)
else:
msg = '%s for timeperiod %r is blocked by unprocessed dependencies. Waiting another tick' \
% (job_record.process_name, job_record.timeperiod)
self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
def manage_job(self, job_record):
    """ Main duty - to _avoid_ publishing another unit_of_work if the previous one
    is not yet processed. In case the Scheduler sees that the unit_of_work is pending
    it could either update boundaries of the processing or wait another tick """
    assert isinstance(job_record, Job)

    # (state flag, handler) pairs; the first flag that holds wins
    dispatch = [
        ('is_embryo', self._process_state_embryo),
        ('is_in_progress', self._process_state_in_progress),
        ('is_final_run', self._process_state_final_run),
        ('is_skipped', self._process_state_skipped),
        ('is_processed', self._process_state_processed),
    ]
    try:
        for state_flag, handler in dispatch:
            if getattr(job_record, state_flag):
                handler(job_record)
                break
        else:
            # no known state flag matched - log and move on
            msg = 'Unknown state %s of the job %s' % (job_record.state, job_record.db_id)
            self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)
    except LookupError as e:
        # record the failure on the timetable node and wait for another tick
        self.timetable.failed_on_processing_job_record(job_record.process_name, job_record.timeperiod)
        msg = 'Increasing fail counter for %s in timeperiod %s, because of: %r' \
              % (job_record.process_name, job_record.timeperiod, e)
        self._log_message(WARNING, job_record.process_name, job_record.timeperiod, msg)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/abstract_state_machine.py",
"copies": "1",
"size": "9642",
"license": "bsd-3-clause",
"hash": 3454144652146593300,
"line_mean": 46.9701492537,
"line_max": 119,
"alpha_frac": 0.6610661688,
"autogenerated": false,
"ratio": 3.812574139976275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49736403087762754,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from threading import RLock
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.db.dao.job_dao import JobDao
from synergy.db.model.job import Job
from synergy.db.model import job, unit_of_work
from synergy.conf import context
from synergy.conf import settings
from synergy.system import time_helper
from synergy.system.time_qualifier import *
from synergy.system.decorator import thread_safe
from synergy.scheduler.scheduler_constants import COLLECTION_JOB_HOURLY, COLLECTION_JOB_DAILY, \
COLLECTION_JOB_MONTHLY, COLLECTION_JOB_YEARLY
from synergy.scheduler.tree import MultiLevelTree
class Timetable(object):
    """ Timetable holds all known process trees, where every node presents a timeperiod-driven job """

    def __init__(self, logger):
        self.lock = RLock()
        self.logger = logger
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.job_dao = JobDao(self.logger)
        # process_name -> {timeperiod -> tree_node} of nodes marked for reprocessing
        self.reprocess = dict()

        # self.trees contain all of the trees and manages much of their life cycle
        # remember to enlist there all trees the system is working with
        self.trees = self._construct_trees_from_context()

        self._register_callbacks()
        self._register_dependencies()
        self.load_tree()
        self.build_trees()
        self.validate()

    def _construct_trees_from_context(self):
        """ instantiates one MultiLevelTree per entry of the timetable context
        :return: dict of tree_name -> MultiLevelTree """
        trees = dict()
        for tree_name, context_entry in context.timetable_context.items():
            tree = MultiLevelTree(process_names=context_entry.enclosed_processes,
                                  tree_name=tree_name,
                                  mx_name=context_entry.mx_name,
                                  mx_page=context_entry.mx_page)
            trees[tree_name] = tree
        return trees

    def _register_dependencies(self):
        """ register dependencies between trees"""
        for tree_name, context_entry in context.timetable_context.items():
            tree = self.trees[tree_name]
            assert isinstance(tree, MultiLevelTree)
            for dependent_on in context_entry.dependent_on:
                dependent_on_tree = self.trees[dependent_on]
                assert isinstance(dependent_on_tree, MultiLevelTree)
                tree.register_dependent_on(dependent_on_tree)

    def _register_callbacks(self):
        """ register logic that reacts on reprocessing request
        and create embryo timetable record request"""
        # reprocessing request
        for tree_name, tree in self.trees.items():
            tree.register_reprocess_callback(self._callback_reprocess)

        # skip request
        for tree_name, tree in self.trees.items():
            tree.register_skip_callback(self._callback_skip)

        # job record creation request
        for tree_name, tree in self.trees.items():
            tree.register_create_callbacks(self._callback_create_job_record)

    # *** Call-back methods ***
    def _find_dependant_trees(self, tree_obj):
        """ returns list of trees that are dependent_on given tree_obj """
        dependant_trees = []
        for tree_name, tree in self.trees.items():
            if tree_obj in tree.dependent_on:
                dependant_trees.append(tree)
        return dependant_trees

    def _find_dependant_tree_nodes(self, node_a):
        """ :return: set of nodes from other trees that correspond to node_a
        and belong to trees dependent on node_a's tree """
        dependant_nodes = set()
        for tree_b in self._find_dependant_trees(node_a.tree):
            node_b = node_a.find_counterpart_in(tree_b)
            if node_b is None:
                continue
            dependant_nodes.add(node_b)
        return dependant_nodes

    def _reprocess_single_tree_node(self, tree_node):
        """ is called from tree to answer reprocessing request.
        It is possible that timetable record will be transferred to STATE_IN_PROGRESS with no related unit_of_work"""
        uow_id = tree_node.job_record.related_unit_of_work
        if uow_id is not None:
            # a unit_of_work exists: invalidate it and enlist the node for reprocessing
            tree_node.job_record.state = job.STATE_IN_PROGRESS
            uow = self.uow_dao.get_one(uow_id)
            uow.state = unit_of_work.STATE_INVALID
            uow.number_of_retries = 0
            uow.created_at = datetime.utcnow()
            self.uow_dao.update(uow)
            msg = 'Transferred job record %s for %s in timeperiod %s to %s; Transferred unit_of_work to %s' \
                  % (tree_node.job_record.db_id,
                     tree_node.process_name,
                     tree_node.job_record.timeperiod,
                     tree_node.job_record.state,
                     uow.state)

            if tree_node.process_name not in self.reprocess:
                self.reprocess[tree_node.process_name] = dict()
            self.reprocess[tree_node.process_name][tree_node.timeperiod] = tree_node
        else:
            # no unit_of_work attached: reset the job back to its initial state
            tree_node.job_record.state = job.STATE_EMBRYO
            msg = 'Transferred job record %s for %s in timeperiod %s to %s;' \
                  % (tree_node.job_record.db_id,
                     tree_node.process_name,
                     tree_node.job_record.timeperiod,
                     tree_node.job_record.state)

        tree_node.job_record.number_of_failures = 0
        self.job_dao.update(tree_node.job_record)
        self.logger.warn(msg)
        tree_node.add_log_entry([datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), msg])

    @thread_safe
    def _callback_reprocess(self, tree_node):
        """ is called from tree to answer reprocessing request.
        It is possible that job record will be transferred to STATE_IN_PROGRESS with no related unit_of_work"""
        if (tree_node.job_record.is_embryo and tree_node.job_record.number_of_failures == 0) \
                or (tree_node.process_name in self.reprocess
                    and tree_node.timeperiod in self.reprocess[tree_node.process_name]):
            # the node has already been marked for re-processing or does not require one
            pass
        else:
            self._reprocess_single_tree_node(tree_node)

        # propagate the reprocessing request to all nodes that depend on this one
        reprocessing_nodes = self._find_dependant_tree_nodes(tree_node)
        for node in reprocessing_nodes:
            node.request_reprocess()

    @thread_safe
    def _callback_skip(self, tree_node):
        """ is called from tree to answer skip request"""
        tree_node.job_record.state = job.STATE_SKIPPED

        uow_id = tree_node.job_record.related_unit_of_work
        if uow_id is not None:
            # cancel the associated unit_of_work along with the job
            uow = self.uow_dao.get_one(uow_id)
            uow.state = unit_of_work.STATE_CANCELED
            self.uow_dao.update(uow)
            msg = 'Transferred job record %s in timeperiod %s to %s; Transferred unit_of_work to %s' \
                  % (tree_node.job_record.db_id,
                     tree_node.job_record.timeperiod,
                     tree_node.job_record.state,
                     uow.state)
        else:
            msg = 'Transferred job record %s in timeperiod %s to %s;' \
                  % (tree_node.job_record.db_id,
                     tree_node.job_record.timeperiod,
                     tree_node.job_record.state)

        self.job_dao.update(tree_node.job_record)
        self.logger.warn(msg)
        tree_node.add_log_entry([datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), msg])

        # a skipped node no longer needs reprocessing
        if tree_node.process_name in self.reprocess \
                and tree_node.timeperiod in self.reprocess[tree_node.process_name]:
            del self.reprocess[tree_node.process_name][tree_node.timeperiod]

    @thread_safe
    def _callback_create_job_record(self, tree_node):
        """ is called from a tree to create job record in STATE_EMBRYO and bind it to the given tree node"""
        try:
            job_record = self.job_dao.get_one(tree_node.process_name, tree_node.timeperiod)
        except LookupError:
            # no record in the DB yet - create a fresh one in STATE_EMBRYO
            job_record = Job()
            job_record.state = job.STATE_EMBRYO
            job_record.timeperiod = tree_node.timeperiod
            job_record.process_name = tree_node.process_name
            tr_id = self.job_dao.update(job_record)
            self.logger.info('Created job record %s, with timeperiod %s for process %s'
                             % (str(tr_id), tree_node.timeperiod, tree_node.process_name))
        tree_node.job_record = job_record

    # *** Tree-manipulation methods ***
    @thread_safe
    def get_tree(self, process_name):
        """ return tree that is managing time-periods for given process;
        implicitly returns None if no tree handles the process """
        for tree_name, tree in self.trees.items():
            if process_name in tree:
                return tree

    @thread_safe
    def _build_tree_by_level(self, collection_name, since):
        """ method iterated thru all documents in all job collections and builds a tree of known system state"""
        try:
            unsupported_records = dict()
            job_records = self.job_dao.get_all(collection_name, since)
            for job_record in job_records:
                tree = self.get_tree(job_record.process_name)
                if tree is not None:
                    tree.update_node(job_record)
                else:
                    # count records that no registered tree is able to host
                    unsupported_records[job_record.process_name] = \
                        unsupported_records.get(job_record.process_name, 0) + 1

            for name, counter in unsupported_records.items():
                self.logger.warn('Skipping %r Job records for %s as no tree is handling it.' % (counter, name))
        except LookupError:
            self.logger.warn('No Job Records in %s.' % str(collection_name))

    @thread_safe
    def load_tree(self):
        """ method iterates thru all objects older than synergy_start_timeperiod parameter in job collections
        and loads them into this timetable"""
        timeperiod = settings.settings['synergy_start_timeperiod']
        yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
        monthly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
        daily_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_DAILY, timeperiod)
        hourly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_HOURLY, timeperiod)

        self._build_tree_by_level(COLLECTION_JOB_HOURLY, since=hourly_timeperiod)
        self._build_tree_by_level(COLLECTION_JOB_DAILY, since=daily_timeperiod)
        self._build_tree_by_level(COLLECTION_JOB_MONTHLY, since=monthly_timeperiod)
        self._build_tree_by_level(COLLECTION_JOB_YEARLY, since=yearly_timeperiod)

    @thread_safe
    def build_trees(self):
        """ method iterates thru all trees and ensures that all time-period nodes are created up till <utc_now>"""
        for tree_name, tree in self.trees.items():
            tree.build_tree()

    @thread_safe
    def validate(self):
        """validates that none of nodes in tree is improperly finalized and that every node has job_record"""
        for tree_name, tree in self.trees.items():
            tree.validate()

    @thread_safe
    def dependent_on_composite_state(self, job_record):
        """ :return instance of <NodesCompositeState> """
        assert isinstance(job_record, Job)
        tree = self.get_tree(job_record.process_name)
        node = tree.get_node(job_record.process_name, job_record.timeperiod)
        return node.dependent_on_composite_state()

    # *** Job manipulation methods ***
    @thread_safe
    def update_job_record(self, job_record, uow, new_state):
        """ method updates job record with a new unit_of_work and new state"""
        job_record.state = new_state
        job_record.related_unit_of_work = uow.db_id
        job_record.start_id = uow.start_id
        job_record.end_id = uow.end_id
        self.job_dao.update(job_record)

        tree = self.get_tree(job_record.process_name)
        tree.update_node(job_record)

        # FIX: process_name and timeperiod were previously passed in swapped order,
        # producing 'job <id> for <timeperiod> in timeperiod <process_name>'
        msg = 'Transferred job %s for %s in timeperiod %s to new state %s' \
              % (job_record.db_id, job_record.process_name, job_record.timeperiod, new_state)
        self.logger.info(msg)
        self.add_log_entry(job_record.process_name, job_record.timeperiod, msg)

    @thread_safe
    def failed_on_processing_job_record(self, process_name, timeperiod):
        """method increases node's inner counter of failed processing
        if _skip_node logic returns True - node is set to STATE_SKIP"""
        tree = self.get_tree(process_name)
        node = tree.get_node(process_name, timeperiod)

        node.job_record.number_of_failures += 1
        if tree._skip_the_node(node):
            node.request_skip()
        else:
            # job record is automatically updated in request_skip()
            # so if the node was not skipped - job record has to be updated explicitly
            self.job_dao.update(node.job_record)

    @thread_safe
    def get_next_job_record(self, process_name):
        """returns next job record to work on for given process"""
        if process_name in self.reprocess and len(self.reprocess[process_name]) > 0:
            # nodes marked for reprocessing take precedence; serve the earliest timeperiod first
            timeperiod = sorted(self.reprocess[process_name].keys())[0]
            node = self.reprocess[process_name][timeperiod]
            del self.reprocess[process_name][timeperiod]
        else:
            tree = self.get_tree(process_name)
            node = tree.get_next_node(process_name)

        if node.job_record is None:
            node.request_embryo_job_record()
        return node.job_record

    @thread_safe
    def is_job_record_finalizable(self, job_record):
        """ :return True, if the node and all its children are either in STATE_PROCESSED or STATE_SKIPPED"""
        assert isinstance(job_record, Job)
        tree = self.get_tree(job_record.process_name)
        node = tree.get_node(job_record.process_name, job_record.timeperiod)
        return node.is_finalizable()

    @thread_safe
    def add_log_entry(self, process_name, timeperiod, msg):
        """ adds a log entry to the tree node. log entries has no persistence """
        tree = self.get_tree(process_name)
        node = tree.get_node(process_name, timeperiod)
        node.add_log_entry([datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), msg])
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/timetable.py",
"copies": "1",
"size": "14130",
"license": "bsd-3-clause",
"hash": -5386719221785159000,
"line_mean": 44.1437699681,
"line_max": 117,
"alpha_frac": 0.6246992215,
"autogenerated": false,
"ratio": 3.782119914346895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4906819135846895,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from flow.core.execution_context import ContextDriven, get_step_logger, valid_context
from synergy.system.log_recording_handler import LogRecordingHandler
from flow.db.model.step import Step, STATE_EMBRYO, STATE_INVALID, STATE_PROCESSED, STATE_IN_PROGRESS
from flow.db.dao.step_dao import StepDao
class FlowGraphNode(ContextDriven):
    """ A single node of the FlowGraph: pairs a step executor with its DB-backed Step record """

    def __init__(self, step_name, step_executor):
        super(FlowGraphNode, self).__init__()
        self.step_name = step_name
        self.step_executor = step_executor
        self.step_dao = None
        self.step_entry = None
        self.log_recording_handler = None

        # attributes _prev and _next contains FlowGraphNodes that precedes and follows this node
        # these are managed by the FlowGraph.append
        self._prev = list()
        self._next = list()

    def set_context(self, context, **kwargs):
        super(FlowGraphNode, self).set_context(context, **kwargs)
        self.step_executor.set_context(context)
        self.step_dao = StepDao(self.logger)

        if self.step_entry:
            # ExecutionEngine is performing recovery
            # step_entry has been loaded from the DB
            return

        # Normal flow: start from a fresh Step record in STATE_EMBRYO
        entry = Step()
        entry.created_at = datetime.utcnow()
        entry.flow_name = self.context.flow_name
        entry.timeperiod = self.context.timeperiod
        entry.related_flow = self.context.flow_id
        entry.state = STATE_EMBRYO
        self.step_entry = entry

    def get_logger(self):
        """ :return: logger dedicated to this particular step """
        return get_step_logger(self.flow_name, self.step_name, self.settings)

    def mark_start(self):
        """ performs step start-up, such as db and context updates """
        self.step_entry.started_at = datetime.utcnow()
        self.step_entry.state = STATE_IN_PROGRESS
        self.step_dao.update(self.step_entry)

        # enable log recording into DB
        self.log_recording_handler = LogRecordingHandler(self.get_logger(), self.step_entry.db_id)
        self.log_recording_handler.attach()

    def _mark_finish(self, target_state):
        """ stamps the finish time, persists the given state and stops DB log recording """
        self.step_entry.finished_at = datetime.utcnow()
        self.step_entry.state = target_state
        self.step_dao.update(self.step_entry)

        if self.log_recording_handler:
            self.log_recording_handler.detach()

    def mark_failure(self):
        """ perform step post-failure activities, such as db update """
        self._mark_finish(STATE_INVALID)

    def mark_success(self):
        """ perform activities in case of the step successful completion """
        self._mark_finish(STATE_PROCESSED)

    @valid_context
    def run(self, execution_cluster):
        """ executes the step on the given cluster and records the outcome
        :return: True if the executor reports completion; False otherwise """
        self.mark_start()
        self.step_executor.do(execution_cluster)

        finisher = self.mark_success if self.step_executor.is_complete else self.mark_failure
        finisher()
        return self.step_executor.is_complete
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/flow_graph_node.py",
"copies": "1",
"size": "3064",
"license": "bsd-3-clause",
"hash": -1284216443896132900,
"line_mean": 35.4761904762,
"line_max": 100,
"alpha_frac": 0.6475195822,
"autogenerated": false,
"ratio": 3.858942065491184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004992760181819006,
"num_lines": 84
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from synergy.db.model import unit_of_work
from synergy.db.model.mq_transmission import MqTransmission
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.system.mq_transmitter import MqTransmitter
from synergy.system.performance_tracker import UowAwareTracker
from synergy.system.log_recording_handler import LogRecordingHandler
from synergy.workers.abstract_mq_worker import AbstractMqWorker
class AbstractUowAwareWorker(AbstractMqWorker):
    """ Abstract class is inherited by all workers/aggregators
    that are aware of unit_of_work and capable of processing it """

    def __init__(self, process_name, perform_db_logging=False):
        # perform_db_logging: when True, log records produced during UOW processing
        # are also persisted to the DB via LogRecordingHandler
        super(AbstractUowAwareWorker, self).__init__(process_name)
        self.perform_db_logging = perform_db_logging
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.mq_transmitter = MqTransmitter(self.logger)

    def __del__(self):
        # release the MQ transmitter before the base-class teardown
        del self.mq_transmitter
        super(AbstractUowAwareWorker, self).__del__()

    # **************** Abstract Methods ************************
    def _init_performance_tracker(self, logger):
        # replaces the base-class tracker with a UOW-aware one
        self.performance_tracker = UowAwareTracker(logger)
        self.performance_tracker.start()

    def _process_uow(self, uow):
        """
        :param uow: unit_of_work to process
        :return: a tuple (number of processed items/documents/etc, desired unit_of_work state) or None
        if None is returned then it is assumed that the return tuple is (0, unit_of_work.STATE_PROCESSED)
        :raise an Exception if the UOW shall be marked as STATE_INVALID
        """
        raise NotImplementedError(f'method _process_uow must be implemented by {self.__class__.__name__}')

    def _clean_up(self):
        """ method is called from the *finally* clause and is suppose to clean up after the uow processing """
        pass

    def _mq_callback(self, message):
        # Stage 1: resolve the UOW referenced by the MQ message; on any problem - ack and bail out
        try:
            mq_request = MqTransmission.from_json(message.body)
            uow = self.uow_dao.get_one(mq_request.record_db_id)
            if not uow.is_requested:
                # accept only UOW in STATE_REQUESTED
                self.logger.warning(f'Skipping UOW: id {message.body}; state {uow.state};', exc_info=False)
                self.consumer.acknowledge(message.delivery_tag)
                return
        except Exception:
            self.logger.error(f'Safety fuse. Can not identify UOW {message.body}', exc_info=True)
            self.consumer.acknowledge(message.delivery_tag)
            return

        log_recording_handler = LogRecordingHandler(self.logger, uow.db_id)
        try:
            # Stage 2: mark the UOW in-progress and delegate to the concrete _process_uow
            uow.state = unit_of_work.STATE_IN_PROGRESS
            uow.started_at = datetime.utcnow()
            self.uow_dao.update(uow)
            self.performance_tracker.start_uow(uow)

            if self.perform_db_logging:
                log_recording_handler.attach()

            result = self._process_uow(uow)
            if result is None:
                # None is interpreted as the happy flow with zero aggregated objects
                self.logger.warning(f'Method {self.__class__.__name__}._process_uow returned None. '
                                    f'Assuming happy flow.')
                number_of_aggregated_objects, target_state = 0, unit_of_work.STATE_PROCESSED
            else:
                number_of_aggregated_objects, target_state = result

            uow.number_of_aggregated_documents = number_of_aggregated_objects
            uow.number_of_processed_documents = self.performance_tracker.success_per_job

            uow.finished_at = datetime.utcnow()
            uow.state = target_state
            self.uow_dao.update(uow)

            if uow.is_finished:
                self.performance_tracker.finish_uow()
            else:
                self.performance_tracker.cancel_uow()
        except Exception as e:
            # Stage 3 (failure): re-read the UOW - MX may have canceled it while we were processing
            fresh_uow = self.uow_dao.get_one(mq_request.record_db_id)
            self.performance_tracker.cancel_uow()
            if fresh_uow.is_canceled:
                self.logger.warning('UOW {0} for {1}@{2} was likely marked by MX as SKIPPED. No UOW update performed.'
                                    .format(uow.db_id, uow.process_name, uow.timeperiod), exc_info=False)
            else:
                self.logger.error('Safety fuse while processing UOW {0} for {1}@{2}: {3}'
                                  .format(uow.db_id, uow.process_name, uow.timeperiod, e), exc_info=True)
                uow.state = unit_of_work.STATE_INVALID
                self.uow_dao.update(uow)
        finally:
            # Stage 4: always ack the message, release resources and publish the UOW status report
            self.consumer.acknowledge(message.delivery_tag)
            self.consumer.close()
            self._clean_up()
            log_recording_handler.detach()

            try:
                self.mq_transmitter.publish_uow_status(uow)
                self.logger.info(f'UOW *{uow.state}* status report published into MQ')
            except Exception:
                self.logger.error('Error on UOW status report publishing', exc_info=True)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/workers/abstract_uow_aware_worker.py",
"copies": "1",
"size": "4991",
"license": "bsd-3-clause",
"hash": -4788686773611804000,
"line_mean": 43.963963964,
"line_max": 118,
"alpha_frac": 0.6203165698,
"autogenerated": false,
"ratio": 3.836279784780938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4956596354580938,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from synergy.db.model import unit_of_work
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.db.dao.unit_of_work_dao import UnitOfWorkDao
from synergy.mq.flopsy import PublishersPool
from synergy.workers.abstract_mq_worker import AbstractMqWorker
from synergy.system.performance_tracker import UowAwareTracker
from synergy.scheduler.scheduler_constants import QUEUE_UOW_REPORT
class AbstractUowAwareWorker(AbstractMqWorker):
    """ Abstract class is inherited by all workers/aggregators
    that are aware of unit_of_work and capable of processing it"""

    def __init__(self, process_name):
        super(AbstractUowAwareWorker, self).__init__(process_name)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.publishers = PublishersPool(self.logger)

    def __del__(self):
        # close all pooled publishers before the base-class teardown
        self.publishers.close()
        super(AbstractUowAwareWorker, self).__del__()

    # **************** Abstract Methods ************************
    def _init_performance_ticker(self, logger):
        # replaces the base-class ticker with a UOW-aware one
        self.performance_ticker = UowAwareTracker(logger)
        self.performance_ticker.start()

    def _process_uow(self, uow):
        """
        :param uow: unit_of_work to process
        :return: a tuple (number of processed items/documents/etc, desired unit_of_work state) or None
        if None is returned then it is assumed that the return tuple is (0, unit_of_work.STATE_PROCESSED)
        :raise an Exception if the UOW shall be marked as STATE_INVALID
        """
        raise NotImplementedError('_process_uow must be overridden in the child class %s' % self.__class__.__name__)

    def _clean_up(self):
        """ method is called from the *finally* clause and is suppose to clean up after the uow processing """
        pass

    def _mq_callback(self, message):
        # resolve the UOW referenced by the MQ message; on any failure - ack and bail out
        try:
            mq_request = SynergyMqTransmission.from_json(message.body)
            uow = self.uow_dao.get_one(mq_request.unit_of_work_id)
            if not uow.is_requested:
                # accept only UOW in STATE_REQUESTED
                self.logger.warn('Skipping unit_of_work: id %s; state %s;' % (str(message.body), uow.state),
                                 exc_info=False)
                self.consumer.acknowledge(message.delivery_tag)
                return
        except Exception:
            self.logger.error('Safety fuse. Can not identify unit_of_work %s' % str(message.body), exc_info=True)
            self.consumer.acknowledge(message.delivery_tag)
            return

        try:
            # mark the UOW as started and delegate to the concrete _process_uow
            uow.state = unit_of_work.STATE_IN_PROGRESS
            uow.started_at = datetime.utcnow()
            self.uow_dao.update(uow)
            self.performance_ticker.start_uow(uow)

            result = self._process_uow(uow)
            if result is None or isinstance(result, str):
                # NOTE(review): a plain string result is silently treated as the happy
                # flow with zero aggregated objects - confirm this is intended
                number_of_aggregated_objects, target_state = 0, unit_of_work.STATE_PROCESSED
            else:
                number_of_aggregated_objects, target_state = result

            uow.number_of_aggregated_documents = number_of_aggregated_objects
            uow.number_of_processed_documents = self.performance_ticker.per_job

            uow.finished_at = datetime.utcnow()
            uow.state = target_state
            self.uow_dao.update(uow)
            self.performance_ticker.finish_uow()
        except Exception as e:
            # re-read the UOW - MX may have canceled it while we were processing
            fresh_uow = self.uow_dao.get_one(mq_request.unit_of_work_id)
            self.performance_ticker.cancel_uow()
            if fresh_uow.is_canceled:
                self.logger.warn('unit_of_work: id %s was likely marked by MX as SKIPPED. '
                                 'No unit_of_work update is performed.' % str(message.body),
                                 exc_info=False)
            else:
                self.logger.error('Safety fuse while processing unit_of_work %s in timeperiod %s : %r'
                                  % (message.body, uow.timeperiod, e), exc_info=True)
                uow.state = unit_of_work.STATE_INVALID
                self.uow_dao.update(uow)
        finally:
            # always ack the message, close the consumer, clean up, and publish the status report
            self.consumer.acknowledge(message.delivery_tag)
            self.consumer.close()
            self._clean_up()

            try:
                publisher = self.publishers.get(QUEUE_UOW_REPORT)
                publisher.publish(mq_request.document)
                publisher.release()
                self.logger.info('Published unit_of_work status report into %s queue' % QUEUE_UOW_REPORT)
            except Exception:
                self.logger.error('Error on unit_of_work status report publishing', exc_info=True)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/workers/abstract_uow_aware_worker.py",
"copies": "1",
"size": "4651",
"license": "bsd-3-clause",
"hash": 4145579758013639000,
"line_mean": 44.5980392157,
"line_max": 116,
"alpha_frac": 0.6200817029,
"autogenerated": false,
"ratio": 3.882303839732888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5002385542632888,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime
from synergy.scheduler.process_hierarchy import ProcessHierarchy
from synergy.scheduler.tree_node import TreeNode, RootNode, AbstractTreeNode
from synergy.conf import settings
from synergy.system import time_helper
from synergy.system.time_helper import cast_to_time_qualifier
# number of times a Job can fail before it is considered STATE_SKIPPED.
# NOTE(review): should_skip_tree_node compares with '>' - a node is skipped only once
# its failure count *exceeds* this value (i.e. on the 4th failure); confirm the off-by-one intent
MAX_NUMBER_OF_FAILURES = 3
class AbstractTree(object):
    """ MixIn that tracks which other trees this tree is dependent on """

    def __init__(self):
        # trees that must be finalized before this one; registration order is preserved
        self.dependent_on = []

    def register_dependent_on(self, tree):
        """ Register a tree that we are dependent on.
        example: horizontal client should not be finalized until we have finalized vertical site for the same period """
        self.dependent_on.append(tree)

    def unregister_dependent_on(self, tree):
        """ Remove a previously registered dependency; a no-op if it was never registered """
        try:
            self.dependent_on.remove(tree)
        except ValueError:
            pass
class MultiLevelTree(AbstractTree):
""" Multi-level Tree, suited to host both single Process Entry
or multiple hierarchy-organized Process Entries """
def __init__(self, process_names, timetable, tree_name=None, mx_name=None, mx_page=None):
    """
    :param process_names: processes enclosed by this tree; passed verbatim to ProcessHierarchy
    :param timetable: the timetable instance this tree reports to (used to assign job records to nodes)
    :param tree_name <optional>: full name of the tree. used as an identifier
    :param mx_name <optional>: is used by MX only as visual vertical name
    :param mx_page <optional>: is used by MX only as anchor to specific page
    """
    super(MultiLevelTree, self).__init__()
    self.process_hierarchy = ProcessHierarchy(*process_names)
    # timeperiod the tree was last built up to; None until the first build_tree call
    self.build_timeperiod = None
    self.validation_timestamp = None
    self.timetable = timetable
    self.tree_name = tree_name
    self.mx_name = mx_name
    self.mx_page = mx_page
    self.root = RootNode(self)
def __contains__(self, value):
    """
    Enables the `process_name in tree` membership test.
    :param value: process name
    :return: True if a process_entry with the given name is registered in this hierarchy; False otherwise
    """
    return value in self.process_hierarchy
def _get_next_parent_node(self, parent):
    """ Used by _get_next_child_node; locates the chronologically next sibling of *parent*.
    For example if timeperiod 2011010200 has all children processed, but is not yet processed itself
    then it makes sense to look in 2011010300 for hourly nodes.
    :return: the sibling holding the next timeperiod, or None if *parent* is the last sibling
             or sits at the yearly/linear level (no grandparent) """
    grandparent = parent.parent
    if grandparent is None:
        # here, we work at yearly/linear level
        return None

    ordered_timeperiods = sorted(grandparent.children)
    next_position = ordered_timeperiods.index(parent.timeperiod) + 1
    if next_position < len(ordered_timeperiods):
        return grandparent.children[ordered_timeperiods[next_position]]
    return None
def _get_next_child_node(self, parent):
    """
    Iterates among children of the given parent and looks for a suitable node to process
    In case given parent has no suitable nodes, a younger parent will be found
    and the logic will be repeated for him
    :return: a tree node suitable for processing
    """
    children_keys = list(parent.children)
    sorted_keys = sorted(children_keys)
    for key in sorted_keys:
        node = parent.children[key]
        if node.job_record is None:
            # node has no Job attached yet - request one and hand the node out
            self.timetable.assign_job_record(node)
            return node
        elif self.should_skip_tree_node(node):
            continue
        elif node.job_record.is_active:
            return node

    # special case, when all children of the parent node are not suitable for processing
    new_parent = self._get_next_parent_node(parent)
    if new_parent is not None:
        # in case all nodes are processed or blocked - look for next valid parent node
        return self._get_next_child_node(new_parent)
    else:
        # if all valid parents are exploited - return current node
        # NOTE(review): assumes parent has at least one child; sorted_keys[0] raises IndexError otherwise
        process_name = parent.children[sorted_keys[0]].process_name
        time_qualifier = parent.children[sorted_keys[0]].time_qualifier
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        return self.get_node(process_name, actual_timeperiod)
def _get_node(self, time_qualifier, timeperiod):
    """
    Retrieve the tree node identified by time_qualifier and timeperiod,
    lazily creating it - and any missing ancestors - on demand.
    :param time_qualifier: identifies the tree level
    :param timeperiod: identifies the target node timeperiod
    :return: requested node; type <AbstractNode>
    """
    hierarchy_entry = self.process_hierarchy.get_by_qualifier(time_qualifier)

    # resolve the parent first: either the node one level up (recursively) or the tree root
    if hierarchy_entry.parent:
        parent_qualifier = hierarchy_entry.parent.process_entry.time_qualifier
        parent_timeperiod = hierarchy_entry.parent.cast_timeperiod(timeperiod)
        parent = self._get_node(parent_qualifier, parent_timeperiod)
    else:
        parent = self.root

    if timeperiod not in parent.children:
        # the requested node does not exist yet - create and attach it
        parent.children[timeperiod] = TreeNode(
            self, parent, hierarchy_entry.process_entry.process_name, timeperiod, None)
    return parent.children[timeperiod]
def _get_next_node(self, time_qualifier):
    """ Walks from the top of the tree down to the level defined by time_qualifier
    in search of the next suitable node for processing.
    :param time_qualifier: defines target level of the tree
    :return: located node; type <TreeNode> """
    hierarchy_entry = self.process_hierarchy.get_by_qualifier(time_qualifier)
    if not hierarchy_entry.parent:
        parent = self.root
    else:
        # recurse one level up the hierarchy first
        parent = self._get_next_node(hierarchy_entry.parent.process_entry.time_qualifier)
    return self._get_next_child_node(parent)
def should_skip_tree_node(self, node: AbstractTreeNode):
    """ :return True: in case the node should be _skipped_ and not included into processing """
    # case 1: node processing is complete
    if node.job_record.is_finished:
        return True

    # case 2: a bottom-level leaf node is retried until it exceeds MAX_NUMBER_OF_FAILURES
    is_bottom_level = node.process_name == self.process_hierarchy.bottom_process.process_name
    if is_bottom_level and not node.children:
        return node.job_record.number_of_failures > MAX_NUMBER_OF_FAILURES

    # case 3: here we process process_daily, process_monthly and process_yearly that have children
    # the parent is skipped only when every child is "spoiled": either in STATE_SKIPPED
    # or failed beyond the retry limit (i.e. no data for the parent to process)
    # case 3': a child with no job_record yet counts as worth processing (not spoiled)
    def is_spoiled(child):
        record = child.job_record
        if record is None:
            return False
        return record.is_skipped or record.number_of_failures > MAX_NUMBER_OF_FAILURES

    return all(is_spoiled(child) for child in node.children.values())
def build_tree(self, rebuild=False):
""" method builds tree by iterating from the synergy_start_timeperiod to the current time
and inserting corresponding nodes """
time_qualifier = self.process_hierarchy.bottom_process.time_qualifier
process_name = self.process_hierarchy.bottom_process.process_name
if rebuild or self.build_timeperiod is None:
timeperiod = settings.settings['synergy_start_timeperiod']
else:
timeperiod = self.build_timeperiod
timeperiod = cast_to_time_qualifier(time_qualifier, timeperiod)
actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
while actual_timeperiod >= timeperiod:
self.get_node(process_name, timeperiod)
timeperiod = time_helper.increment_timeperiod(time_qualifier, timeperiod)
self.build_timeperiod = actual_timeperiod
def get_next_node(self, process_name):
""" :return: <AbstractTreeNode> next node to process by a process with process_name """
if process_name not in self.process_hierarchy:
raise ValueError(f'unable to compute the next_node due to unknown process: {process_name}')
time_qualifier = self.process_hierarchy[process_name].process_entry.time_qualifier
return self._get_next_node(time_qualifier)
def update_node(self, job_record):
""" Updates job record property for a tree node associated with the given Job """
if job_record.process_name not in self.process_hierarchy:
raise ValueError(f'unable to update the node due to unknown process: {job_record.process_name}')
time_qualifier = self.process_hierarchy[job_record.process_name].process_entry.time_qualifier
node = self._get_node(time_qualifier, job_record.timeperiod)
node.job_record = job_record
def get_node(self, process_name, timeperiod):
""" Method retrieves a tree node identified by the time_qualifier and the timeperiod """
if process_name not in self.process_hierarchy:
raise ValueError(f'unable to retrieve the node due to unknown process: {process_name}')
time_qualifier = self.process_hierarchy[process_name].process_entry.time_qualifier
return self._get_node(time_qualifier, timeperiod)
def validate(self):
""" method starts validation of the tree.
@see TreeNode.validate """
for timeperiod, child in self.root.children.items():
child.validate()
self.validation_timestamp = datetime.utcnow()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/tree.py",
"copies": "1",
"size": "10217",
"license": "bsd-3-clause",
"hash": 8327217932241422000,
"line_mean": 45.2307692308,
"line_max": 119,
"alpha_frac": 0.6581188216,
"autogenerated": false,
"ratio": 4.27668480535789,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5434803626957889,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime, timedelta
from synergy.system.repeat_timer import RepeatTimer
TRIGGER_INTERVAL = 30  # half a minute; poll period (seconds) of the EventClock's internal RepeatTimer
EVERY_DAY = '*'  # marks every day as suitable to trigger the event
TIME_OF_DAY_FORMAT = "%H:%M"  # strptime/strftime format of the time-of-day token
TRIGGER_PREAMBLE_AT = 'at '  # prefix selecting the EventClock ('at ...') trigger format
TRIGGER_PREAMBLE_EVERY = 'every '  # prefix selecting the RepeatTimer ('every NNN') trigger format
def parse_time_trigger_string(trigger_frequency):
    """
    Parse a human-readable trigger definition into timer configuration.
    :param trigger_frequency: string in one of two formats:
        - 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
        - 'every NNN'
    :return: tuple (parsed_trigger_frequency, timer_klass)
    :raise ValueError: if the string matches neither format
    """
    # collapse any run of whitespace into a single space
    trigger_frequency = ' '.join(trigger_frequency.split())

    if trigger_frequency.startswith(TRIGGER_PREAMBLE_AT):
        # 'at ...' -> EventClock driven by a list of Day_of_Week-HH:MM tokens
        remainder = trigger_frequency[len(TRIGGER_PREAMBLE_AT):]
        tokens = remainder.replace(' ', '').replace(',', ' ').split(' ')
        return tokens, EventClock
    if trigger_frequency.startswith(TRIGGER_PREAMBLE_EVERY):
        # 'every NNN' -> RepeatTimer with an integer interval
        remainder = trigger_frequency[len(TRIGGER_PREAMBLE_EVERY):]
        return int(remainder), RepeatTimer
    raise ValueError('Unknown time trigger format %s' % trigger_frequency)
def format_time_trigger_string(timer_instance):
    """
    Render a timer instance back into its human-readable trigger string.
    :param timer_instance: either instance of RepeatTimer or EventClock
    :return: string in one of two formats:
        - 'at Day_of_Week-HH:MM, ..., Day_of_Week-HH:MM'
        - 'every NNN'
    :raise ValueError: for any other instance type
    """
    if isinstance(timer_instance, RepeatTimer):
        return TRIGGER_PREAMBLE_EVERY + str(timer_instance.interval_new)
    if isinstance(timer_instance, EventClock):
        # EventTime.__repr__ yields the 'Day_of_Week-HH:MM' token
        return TRIGGER_PREAMBLE_AT + ','.join(repr(t) for t in timer_instance.timestamps)
    raise ValueError('Unknown timer instance type %s' % timer_instance.__class__.__name__)
class EventTime(object):
    """ Represents a single trigger time: a time of day, optionally bound to a
        specific day of the week.
        Accepted formats: 'Day_of_Week-HH:MM' or 'HH:MM', where Day_of_Week is
        the weekday number as produced by datetime.weekday() and the EVERY_DAY
        wildcard ('*') matches any day """

    def __init__(self, trigger_frequency):
        """ :param trigger_frequency: string in 'Day_of_Week-HH:MM' or 'HH:MM' format """
        self.trigger_frequency = trigger_frequency

        tokens = self.trigger_frequency.split('-')
        if len(tokens) > 1:
            # Day of Week is provided
            self.day_of_week = tokens[0]
            self.time_of_day = datetime.strptime(tokens[1], TIME_OF_DAY_FORMAT)
        else:
            # Day of Week is not provided. Assume every day of the week
            self.day_of_week = EVERY_DAY
            self.time_of_day = datetime.strptime(tokens[0], TIME_OF_DAY_FORMAT)

    def __str__(self):
        return 'EventTime: day_of_week=%s time_of_day=%s' % \
               (self.day_of_week, datetime.strftime(self.time_of_day, TIME_OF_DAY_FORMAT))

    def __repr__(self):
        # round-trips back into the 'Day_of_Week-HH:MM' input format
        return '%s-%s' % (self.day_of_week, datetime.strftime(self.time_of_day, TIME_OF_DAY_FORMAT))

    def __eq__(self, other):
        """ instances are equal when the time-of-day matches and either the
            weekdays match or one of them is the EVERY_DAY wildcard """
        if not isinstance(other, EventTime):
            return False
        return self.time_of_day == other.time_of_day \
            and (self.day_of_week == other.day_of_week
                 or self.day_of_week == EVERY_DAY
                 or other.day_of_week == EVERY_DAY)

    def __hash__(self):
        # BUGFIX: hash must agree with __eq__. Equality requires matching
        # time_of_day, while day_of_week may differ via the EVERY_DAY wildcard;
        # hashing the (day_of_week, time_of_day) pair therefore gave equal
        # objects unequal hashes, violating the Python hash/eq contract.
        # Hash only the component that equality always requires.
        return hash(self.time_of_day)

    def next_trigger_frequency(self, utc_now=None):
        """ :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
            :return: datetime instance presenting next trigger time of the event """
        if utc_now is None:
            utc_now = datetime.utcnow()

        def wind_days(start_date):
            # walk forward one day at a time until the weekday matches,
            # then pin the configured time-of-day onto that date
            while True:
                if self.day_of_week == EVERY_DAY or start_date.weekday() == int(self.day_of_week):
                    return start_date.replace(hour=self.time_of_day.hour, minute=self.time_of_day.minute)
                else:
                    start_date += timedelta(days=1)

        if utc_now.time() > self.time_of_day.time():
            # today's occurrence has already passed; start searching tomorrow
            return wind_days(utc_now + timedelta(days=1))
        else:
            return wind_days(utc_now)

    @classmethod
    def utc_now(cls):
        """ :return: EventTime matching the current UTC weekday and HH:MM """
        utc_now = datetime.utcnow()
        return EventTime('%s-%s' % (utc_now.weekday(), utc_now.strftime(TIME_OF_DAY_FORMAT)))
class EventClock(object):
    """ Triggers a call_back at predefined times of day, set in the format
        'day_of_week-HH:MM' or 'HH:MM'.
        Maintaining API compatibility with the RepeatTimer class """

    def __init__(self, interval, call_back, args=None, kwargs=None):
        """ :param interval: list of strings in format 'Day_of_Week-HH:MM' or 'HH:MM'
            :param call_back: callable to invoke when a scheduled time is reached """
        self.timestamps = []
        self.change_interval(interval)
        self.args = args if args else []
        self.kwargs = kwargs if kwargs else {}
        self.call_back = call_back
        # internal timer polls manage_schedule every TRIGGER_INTERVAL seconds
        self.handler = RepeatTimer(TRIGGER_INTERVAL, self.manage_schedule)
        self.activation_dt = None   # UTC moment of the most recent call_back invocation

    def _trigger_now(self):
        """ invokes the call_back, unless it already fired within the last minute """
        already_fired = self.activation_dt is not None \
                        and datetime.utcnow() - self.activation_dt < timedelta(minutes=1)
        if already_fired:
            return
        self.call_back(*self.args, **self.kwargs)
        self.activation_dt = datetime.utcnow()

    def manage_schedule(self, *_):
        """ periodic poll: fires the call_back whenever the current UTC time matches a timestamp """
        if EventTime.utc_now() in self.timestamps:
            self._trigger_now()

    def start(self):
        self.handler.start()

    def cancel(self):
        self.handler.cancel()

    def trigger(self):
        """ fires the call_back immediately - unless the current time matches a
            scheduled timestamp, in which case manage_schedule is left to perform
            the firing via the regular flow """
        if EventTime.utc_now() not in self.timestamps:
            self._trigger_now()

    def change_interval(self, value):
        """ :param value: list of strings in format 'Day_of_Week-HH:MM' """
        assert not isinstance(value, str)
        self.timestamps = [EventTime(token) for token in value]

    def next_run_in(self, utc_now=None):
        """ :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
            :return: timedelta until the nearest scheduled trigger,
            or None if the EventClock instance is not running """
        if utc_now is None:
            utc_now = datetime.utcnow()
        if not self.is_alive():
            return None

        smallest_timedelta = timedelta(days=99)
        for event_time in self.timestamps:
            remaining = event_time.next_trigger_frequency(utc_now) - utc_now
            if remaining < smallest_timedelta:
                smallest_timedelta = remaining
        return smallest_timedelta

    def is_alive(self):
        return self.handler.is_alive()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/system/event_clock.py",
"copies": "1",
"size": "7018",
"license": "bsd-3-clause",
"hash": 89917818426944600,
"line_mean": 37.349726776,
"line_max": 114,
"alpha_frac": 0.6208321459,
"autogenerated": false,
"ratio": 3.809989142236699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4930821288136699,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime, timedelta
from threading import Lock
from amqp import AMQPError
from synergy.conf import context
from synergy.mq.flopsy import PublishersPool
from synergy.mx.synergy_mx import MX
from synergy.db.manager import db_manager
from synergy.db.model.synergy_mq_transmission import SynergyMqTransmission
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.system import time_helper
from synergy.system.decorator import with_reconnect, thread_safe
from synergy.system.synergy_process import SynergyProcess
from synergy.scheduler.status_bus_listener import StatusBusListener
from synergy.scheduler.scheduler_constants import *
from synergy.scheduler.state_machine_continuous import StateMachineContinuous
from synergy.scheduler.state_machine_dicrete import StateMachineDiscrete
from synergy.scheduler.state_machine_simple_dicrete import StateMachineSimpleDiscrete
from synergy.scheduler.state_machine_freerun import StateMachineFreerun
from synergy.scheduler.timetable import Timetable
from synergy.scheduler.thread_handler import construct_thread_handler, ThreadHandlerArguments
class Scheduler(SynergyProcess):
    """ Scheduler hosts multiple state machines, and logic for triggering jobs """
    def __init__(self, process_name):
        super(Scheduler, self).__init__(process_name)
        self.lock = Lock()
        self.logger.info('Starting %s' % self.process_name)
        self.publishers = PublishersPool(self.logger)
        # timer handlers for timetable-managed processes, keyed by handler.key
        self.managed_handlers = dict()
        # timer handlers for free-run processes, keyed by handler.key
        self.freerun_handlers = dict()
        self.timetable = Timetable(self.logger)
        # maps state machine name -> state machine instance
        self.state_machines = self._construct_state_machines()
        self.freerun_process_dao = FreerunProcessDao(self.logger)
        # MX and bus_listener are created late - in start() - once handlers are loaded
        self.mx = None
        self.bus_listener = None
        self.logger.info('Started %s' % self.process_name)
    def __del__(self):
        # deactivate every timer handler without persisting the state change,
        # then release the MQ publisher pool
        for key, handler in self.managed_handlers.items():
            handler.deactivate(update_persistent=False)
        self.managed_handlers.clear()
        for key, handler in self.freerun_handlers.items():
            handler.deactivate(update_persistent=False)
        self.freerun_handlers.clear()
        self.publishers.close()
        super(Scheduler, self).__del__()
    def _construct_state_machines(self):
        """ :return: dict in format <state_machine_common_name: instance_of_the_state_machine> """
        state_machines = dict()
        for state_machine in [StateMachineContinuous(self.logger, self.timetable),
                              StateMachineDiscrete(self.logger, self.timetable),
                              StateMachineSimpleDiscrete(self.logger, self.timetable),
                              StateMachineFreerun(self.logger)]:
            state_machines[state_machine.name] = state_machine
        return state_machines
    def _register_process_entry(self, process_entry, call_back):
        """ method parses process_entry and creates a timer_handler out of it
            timer_handler is enlisted to either :self.freerun_handlers or :self.managed_handlers
            timer_handler is started, unless it is marked as STATE_OFF
            :param process_entry: the process descriptor to register
            :param call_back: callable fired by the timer handler on every trigger """
        handler = construct_thread_handler(self.logger, process_entry, call_back)
        if handler.is_managed:
            self.managed_handlers[handler.key] = handler
        elif handler.is_freerun:
            self.freerun_handlers[handler.key] = handler
        else:
            self.logger.error('Process/Handler type %s is not known to the system. Skipping it.'
                              % handler.handler_type)
            return
        if process_entry.is_on:
            handler.activate()
            self.logger.info('Started scheduler thread for %s:%r.'
                             % (handler.handler_type, handler.key))
        else:
            self.logger.info('Handler for %s:%r registered in Scheduler. Idle until activated.'
                             % (handler.handler_type, handler.key))
    # **************** Scheduler Methods ************************
    def _load_managed_entries(self):
        """ loads scheduler managed entries. no start-up procedures are performed """
        for process_name, process_entry in context.process_context.items():
            if process_entry.process_type == TYPE_MANAGED:
                function = self.fire_managed_worker
            elif process_entry.process_type == TYPE_GARBAGE_COLLECTOR:
                function = self.fire_garbage_collector
            elif process_entry.process_type in [TYPE_FREERUN, TYPE_DAEMON]:
                # freerun and daemon entries are not driven by this loader
                self.logger.info('%s of type %s is found in context, but not managed by Synergy Scheduler. '
                                 'Skipping the process.'
                                 % (process_name, process_entry.process_type.upper()))
                continue
            else:
                self.logger.error('Type %s of process %s is not known to the system. Skipping it.' %
                                  (process_entry.process_type, process_name))
                continue
            try:
                self._register_process_entry(process_entry, function)
            except Exception:
                # one broken entry must not abort loading of the remaining ones
                self.logger.error('Scheduler Handler %r failed to start. Skipping it.' % (process_entry.key,),
                                  exc_info=True)
    def _load_freerun_entries(self):
        """ reads scheduler managed entries and starts their timers to trigger events """
        freerun_entries = self.freerun_process_dao.get_all()
        for freerun_entry in freerun_entries:
            try:
                self._register_process_entry(freerun_entry, self.fire_freerun_worker)
            except Exception:
                self.logger.error('Scheduler Handler %r failed to start. Skipping it.' % (freerun_entry.key,),
                                  exc_info=True)
    @with_reconnect
    def start(self, *_):
        """ reads managed process entries and starts timer instances; starts MX thread """
        db_manager.synch_db()
        self._load_managed_entries()
        try:
            self._load_freerun_entries()
        except LookupError as e:
            self.logger.warn('DB Lookup: %s' % str(e))
        # Scheduler is initialized and running. Status Bus Listener can be safely started
        self.bus_listener = StatusBusListener(self)
        self.bus_listener.start()
        # All Scheduler components are initialized and running. Management Extension (MX) can be safely started
        self.mx = MX(self)
        self.mx.start_mx_thread()
    @thread_safe
    def fire_managed_worker(self, thread_handler_arguments):
        """requests next valid job for given process and manages its state"""
        def _fire_worker(process_entry):
            # fetches the next job record and hands it to the process's state machine;
            # returns the job record, or None when triggering must be postponed
            assert isinstance(process_entry, ManagedProcessEntry)
            job_record = self.timetable.get_next_job_record(process_entry.process_name)
            state_machine = self.state_machines[process_entry.state_machine_name]
            run_on_active_timeperiod = process_entry.run_on_active_timeperiod
            if not run_on_active_timeperiod:
                # wait until the timeperiod is over (plus a safety lag) before triggering
                time_qualifier = process_entry.time_qualifier
                incremented_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)
                dt_record_timestamp = time_helper.synergy_to_datetime(time_qualifier, incremented_timeperiod)
                dt_record_timestamp += timedelta(minutes=LAG_5_MINUTES)
                if datetime.utcnow() <= dt_record_timestamp:
                    self.logger.info('Job record %s for timeperiod %s will not be triggered until %s.'
                                     % (job_record.db_id,
                                        job_record.timeperiod,
                                        dt_record_timestamp.strftime('%Y-%m-%d %H:%M:%S')))
                    return None
            # dispatch to the state machine method matching the blocking policy
            blocking_type = process_entry.blocking_type
            if blocking_type == BLOCKING_DEPENDENCIES:
                state_machine.manage_job_with_blocking_dependencies(job_record, run_on_active_timeperiod)
            elif blocking_type == BLOCKING_CHILDREN:
                state_machine.manage_job_with_blocking_children(job_record, run_on_active_timeperiod)
            elif blocking_type == BLOCKING_NORMAL:
                state_machine.manage_job(job_record)
            else:
                raise ValueError('Unknown managed process type %s' % blocking_type)
            return job_record
        try:
            assert isinstance(thread_handler_arguments, ThreadHandlerArguments)
            self.logger.info('%r {' % (thread_handler_arguments.key, ))
            job_record = _fire_worker(thread_handler_arguments.process_entry)
            while job_record and job_record.is_finished:
                # keep advancing while fetched jobs come back already finished
                job_record = _fire_worker(thread_handler_arguments.process_entry)
        except (AMQPError, IOError) as e:
            self.logger.error('AMQPError: %s' % str(e), exc_info=True)
            # publishers may hold broken connections after an AMQP failure - reset them
            self.publishers.reset_all(suppress_logging=True)
        except Exception as e:
            self.logger.error('Exception: %s' % str(e), exc_info=True)
        finally:
            self.logger.info('}')
    @thread_safe
    def fire_freerun_worker(self, thread_handler_arguments):
        """fires free-run worker with no dependencies to track"""
        try:
            assert isinstance(thread_handler_arguments, ThreadHandlerArguments)
            self.logger.info('%r {' % (thread_handler_arguments.key, ))
            state_machine = self.state_machines[STATE_MACHINE_FREERUN]
            state_machine.manage_schedulable(thread_handler_arguments.process_entry)
        except Exception as e:
            self.logger.error('fire_freerun_worker: %s' % str(e))
        finally:
            self.logger.info('}')
    @thread_safe
    def fire_garbage_collector(self, thread_handler_arguments):
        """fires garbage collector to re-trigger invalid unit_of_work"""
        try:
            assert isinstance(thread_handler_arguments, ThreadHandlerArguments)
            self.logger.info('%r {' % (thread_handler_arguments.key, ))
            # publish an MQ trigger addressed to the garbage collector process
            mq_request = SynergyMqTransmission(process_name=thread_handler_arguments.key)
            publisher = self.publishers.get(thread_handler_arguments.key)
            publisher.publish(mq_request.document)
            publisher.release()
            self.logger.info('Published trigger for %s' % thread_handler_arguments.key)
            # piggyback timetable housekeeping on the GC trigger cadence
            self.logger.info('Starting timetable housekeeping...')
            self.timetable.build_trees()
            self.timetable.validate()
            self.logger.info('Timetable housekeeping complete.')
        except (AMQPError, IOError) as e:
            self.logger.error('AMQPError: %s' % str(e), exc_info=True)
            self.publishers.reset_all(suppress_logging=True)
        except Exception as e:
            self.logger.error('fire_garbage_collector: %s' % str(e))
        finally:
            self.logger.info('}')
if __name__ == '__main__':
    from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER
    # script entry point: instantiate the Scheduler process and hand over control
    source = Scheduler(PROCESS_SCHEDULER)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/synergy_scheduler.py",
"copies": "1",
"size": "11280",
"license": "bsd-3-clause",
"hash": -6493068878860936000,
"line_mean": 45.8049792531,
"line_max": 112,
"alpha_frac": 0.6340425532,
"autogenerated": false,
"ratio": 4.168514412416852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5302556965616851,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime, timedelta
from threading import Lock
from synergy.conf import context
from synergy.mx.synergy_mx import MX
from synergy.db.manager import db_manager
from synergy.db.model.managed_process_entry import ManagedProcessEntry
from synergy.db.model.freerun_process_entry import FreerunProcessEntry
from synergy.db.dao.freerun_process_dao import FreerunProcessDao
from synergy.system import time_helper
from synergy.system.decorator import with_reconnect, thread_safe
from synergy.system.synergy_process import SynergyProcess
from synergy.scheduler.garbage_collector import GarbageCollector
from synergy.scheduler.uow_status_listener import UowStatusListener
from synergy.scheduler.job_status_listener import JobStatusListener
from synergy.scheduler.scheduler_constants import *
from synergy.scheduler.timetable import Timetable
from synergy.scheduler.thread_handler import ThreadHandlerHeader, ManagedThreadHandler, FreerunThreadHandler
class Scheduler(SynergyProcess):
    """ Scheduler hosts:
        - timetable: container for job tress and state machines
        - GarbageCollector: recycles failed/stalled unit of works
        - freerun and managed thread handlers: logic to trigger job execution
        - UowStatusListener: MQ Listener receiving UOW statuses from the workers
        - JobStatusListener: asynchronous intra-scheduler notification bus
        - MX: HTTP server with management UI """
    def __init__(self, process_name):
        super(Scheduler, self).__init__(process_name)
        self.lock = Lock()
        self.logger.info('Initializing {0}...'.format(self.process_name))
        # thread handlers for timetable-managed processes, keyed by process name
        self.managed_handlers = dict()
        # thread handlers for free-run processes, keyed by (process_name, entry_name)
        self.freerun_handlers = dict()
        self.timetable = Timetable(self.logger)
        self.freerun_process_dao = FreerunProcessDao(self.logger)
        self.gc = GarbageCollector(self)
        self.uow_listener = UowStatusListener(self)
        self.job_listener = JobStatusListener(self)
        self.mx = MX(self)
        self.logger.info('Initialization complete.')
    def __del__(self):
        # stop all dependant threads first, then deactivate every timer handler
        # without persisting the state change
        self.mx.stop()
        self.uow_listener.stop()
        self.job_listener.stop()
        self.gc.stop()
        for key, handler in self.managed_handlers.items():
            handler.deactivate(update_persistent=False)
        self.managed_handlers.clear()
        for key, handler in self.freerun_handlers.items():
            handler.deactivate(update_persistent=False)
        self.freerun_handlers.clear()
        super(Scheduler, self).__del__()
    def _register_process_entry(self, process_entry, call_back):
        """ method parses process_entry and creates a timer_handler out of it
            timer_handler is enlisted to either :self.freerun_handlers or :self.managed_handlers
            timer_handler is started, unless it is marked as STATE_OFF
            :param process_entry: either a Managed or a Freerun process descriptor
            :param call_back: callable fired by the timer handler on every trigger
            :raise ValueError: for any other process_entry type """
        trigger_frequency = process_entry.trigger_frequency
        if isinstance(process_entry, ManagedProcessEntry):
            handler_key = process_entry.process_name
            handler = ManagedThreadHandler(self.logger, handler_key, trigger_frequency, call_back, process_entry)
            self.managed_handlers[handler.key] = handler
        elif isinstance(process_entry, FreerunProcessEntry):
            # freerun handlers are keyed by the (process_name, entry_name) pair
            handler_key = (process_entry.process_name, process_entry.entry_name)
            handler = FreerunThreadHandler(self.logger, handler_key, trigger_frequency, call_back, process_entry)
            self.freerun_handlers[handler.key] = handler
        else:
            raise ValueError('ProcessEntry type {0} is not known to the system. Skipping it.'
                             .format(process_entry.__class__.__name__))
        if process_entry.is_on:
            handler.activate()
            self.logger.info('Started {0} for {1}.'
                             .format(handler.__class__.__name__, handler.key))
        else:
            self.logger.info('Registered {0} for {1}. Idle until activated.'
                             .format(handler.__class__.__name__, handler.key))
    def _load_managed_entries(self):
        """ loads scheduler managed entries. no start-up procedures are performed """
        for process_name, process_entry in context.process_context.items():
            if isinstance(process_entry, ManagedProcessEntry):
                _function = self.fire_managed_worker
            else:
                # non-managed context entries are not driven by this loader
                self.logger.warning('Skipping non-managed context entry {0} of type {1}.'
                                    .format(process_name, process_entry.__class__.__name__))
                continue
            try:
                self._register_process_entry(process_entry, _function)
            except Exception:
                # one broken entry must not abort loading of the remaining ones
                self.logger.error('Managed Thread Handler {0} failed to start. Skipping it.'
                                  .format(process_entry.key), exc_info=True)
    def _load_freerun_entries(self):
        """ reads scheduler managed entries and starts their timers to trigger events """
        freerun_entries = self.freerun_process_dao.get_all()
        for freerun_entry in freerun_entries:
            try:
                self._register_process_entry(freerun_entry, self.fire_freerun_worker)
            except Exception:
                self.logger.error('Freerun Thread Handler {0} failed to start. Skipping it.'
                                  .format(freerun_entry.key), exc_info=True)
    @with_reconnect
    def start(self, *_):
        """ reads managed process entries and starts timer instances; starts dependant threads """
        self.logger.info('Starting Scheduler...')
        db_manager.synch_db()
        self._load_managed_entries()
        try:
            self._load_freerun_entries()
        except LookupError as e:
            self.logger.warning(f'DB Lookup: {e}')
        # Scheduler is initialized and running. GarbageCollector can be safely started
        self.gc.start()
        # Job/UOW Status Listeners can be safely started
        self.uow_listener.start()
        self.job_listener.start()
        self.logger.info('Startup Sequence Completed. Starting MX.')
        # Management Extension (MX) should be the last to start
        self.mx.start()
    def state_machine_for(self, process_name):
        """ :return: state machine for the given process name """
        process_entry = self.managed_handlers[process_name].process_entry
        return self.timetable.state_machines[process_entry.state_machine_name]
    @thread_safe
    def fire_managed_worker(self, thread_handler_header):
        """ requests next valid job for given process and manages its state """
        def _fire_worker(process_entry, prev_job_record):
            # fetches the next job record and hands it to the process's state machine;
            # returns the job record, or None when triggering must be postponed
            assert isinstance(process_entry, ManagedProcessEntry)
            job_record = self.timetable.get_next_job_record(process_entry.process_name)
            state_machine = self.timetable.state_machines[process_entry.state_machine_name]
            if job_record == prev_job_record:
                # avoid the loop
                return None
            if not state_machine.run_on_active_timeperiod:
                # wait until the timeperiod is over (plus a safety lag) before triggering
                time_qualifier = process_entry.time_qualifier
                incremented_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)
                dt_record_timestamp = time_helper.synergy_to_datetime(time_qualifier, incremented_timeperiod)
                dt_record_timestamp += timedelta(minutes=LAG_5_MINUTES)
                if datetime.utcnow() <= dt_record_timestamp:
                    self.logger.info('Job {0} for {1}@{2} will not be triggered until {3}.'
                                     .format(job_record.db_id,
                                             job_record.process_name,
                                             job_record.timeperiod,
                                             dt_record_timestamp.strftime('%Y-%m-%d %H:%M:%S')))
                    return None
            # dispatch to the state machine method matching the blocking policy
            blocking_type = process_entry.blocking_type
            if blocking_type == BLOCKING_DEPENDENCIES:
                state_machine.manage_job_with_blocking_dependencies(job_record)
            elif blocking_type == BLOCKING_CHILDREN:
                state_machine.manage_job_with_blocking_children(job_record)
            elif blocking_type == BLOCKING_NORMAL:
                state_machine.manage_job(job_record)
            else:
                raise ValueError(f'Unknown managed process type {blocking_type}')
            return job_record
        try:
            assert isinstance(thread_handler_header, ThreadHandlerHeader)
            self.logger.info(f'{thread_handler_header.key} {{')
            job_record = _fire_worker(thread_handler_header.process_entry, None)
            while job_record and job_record.is_finished:
                # if applicable, process next timeperiod
                job_record = _fire_worker(thread_handler_header.process_entry, job_record)
        except Exception as e:
            self.logger.error(f'Exception: {e}', exc_info=True)
        finally:
            self.logger.info('}')
    @thread_safe
    def fire_freerun_worker(self, thread_handler_header):
        """ fires free-run worker with no dependencies to track """
        try:
            assert isinstance(thread_handler_header, ThreadHandlerHeader)
            self.logger.info(f'{thread_handler_header.key} {{')
            state_machine = self.timetable.state_machines[STATE_MACHINE_FREERUN]
            state_machine.manage_schedulable(thread_handler_header.process_entry)
        except Exception as e:
            self.logger.error(f'fire_freerun_worker: {e}')
        finally:
            self.logger.info('}')
if __name__ == '__main__':
    from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER
    # script entry point: instantiate the Scheduler process and hand over control
    source = Scheduler(PROCESS_SCHEDULER)
    source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/synergy_scheduler.py",
"copies": "1",
"size": "9886",
"license": "bsd-3-clause",
"hash": -5053138936638035000,
"line_mean": 45.1962616822,
"line_max": 113,
"alpha_frac": 0.6369613595,
"autogenerated": false,
"ratio": 4.185436071126165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5322397430626165,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime, timedelta
from synergy.scheduler.process_hierarchy import ProcessHierarchy
from synergy.scheduler.tree_node import TreeNode
from synergy.conf import settings
from synergy.system import time_helper
from synergy.system.time_helper import cast_to_time_qualifier
# retry/skip policy knobs applied by the trees to their nodes
MAX_NUMBER_OF_RETRIES = 3 # number of times a node is re-run before it is considered STATE_SKIPPED
LIFE_SUPPORT_HOURS = 48 # number of hours that node is retried infinite number of times
class AbstractTree(object):
    """ MixIn that manages subscriptions to various tree events: dependencies on
        other trees, reprocess requests, skip requests and requests to create
        embryo job records """

    def __init__(self):
        self.dependent_on = []                  # trees this tree depends on
        self.reprocess_callbacks = []           # listeners for _reprocess_requests_
        self.skip_callbacks = []                # listeners for _skip_requests_
        self.create_job_record_callbacks = []   # listeners for _create_embryo_job_record_ requests

    def register_dependent_on(self, tree):
        """registering tree that we are dependent on.
        example: horizontal client should not be finalized until we have finalized vertical site for the same period"""
        self.dependent_on.append(tree)

    def unregister_dependent_on(self, tree):
        """unregistering tree that we are dependent on"""
        try:
            self.dependent_on.remove(tree)
        except ValueError:
            pass

    def register_reprocess_callback(self, function):
        """method that allows outside functionality to listen for _reprocess_requests_"""
        self.reprocess_callbacks.append(function)

    def unregister_reprocess_callback(self, function):
        """method that allows outside functionality to abandon _reprocess_requests_ listening"""
        try:
            self.reprocess_callbacks.remove(function)
        except ValueError:
            pass

    def register_skip_callback(self, function):
        """method that allows outside functionality to listen for _skip_requests_"""
        self.skip_callbacks.append(function)

    def unregister_skip_callback(self, function):
        """method that allows outside functionality to abandon _skip_requests_ listening"""
        try:
            self.skip_callbacks.remove(function)
        except ValueError:
            pass

    def register_create_callbacks(self, function):
        """method subscribes to _create_embryo_job_record_ requests"""
        self.create_job_record_callbacks.append(function)

    def unregister_create_callback(self, function):
        """method un-subscribes from _create_embryo_job_record_ requests"""
        try:
            self.create_job_record_callbacks.remove(function)
        except ValueError:
            pass
class MultiLevelTree(AbstractTree):
""" Multi-level Tree, suited to host both single Process Entry
or multiple hierarchy-organized Process Entries """
    def __init__(self, process_names, tree_name=None, mx_name=None, mx_page=None):
        """
        :param process_names: process names forming this tree's hierarchy
        :param tree_name <optional>: full name of the tree. used as an identifier
        :param mx_name <optional>: is used by MX only as visual vertical name
        :param mx_page <optional>: is used by MX only as anchor to specific page
        """
        super(MultiLevelTree, self).__init__()
        self.process_hierarchy = ProcessHierarchy(*process_names)
        # timeperiod up to which the tree has been built; None until the first build
        self.build_timeperiod = None
        # timestamp of the last validation pass; None until the first validation
        self.validation_timestamp = None
        self.tree_name = tree_name
        self.mx_name = mx_name
        self.mx_page = mx_page
        # root is a sentinel node with no process/timeperiod; real nodes hang below it
        self.root = TreeNode(self, None, None, None, None)
    def __contains__(self, value):
        """
        Supports the `process_name in tree` idiom by delegating to the hierarchy.
        :param value: process name
        :return: True if a process_entry with the given name is registered in this hierarchy; False otherwise
        """
        return value in self.process_hierarchy
def _get_next_parent_node(self, parent):
""" Used by _get_next_child_node, this method is called to find next possible parent.
For example if timeperiod 2011010200 has all children processed, but is not yet processed itself
then it makes sense to look in 2011010300 for hourly nodes"""
parent_of_parent = parent.parent
if parent_of_parent is None:
# here, we work at yearly/linear level
return None
sorted_keys = sorted(parent_of_parent.children.keys())
index = sorted_keys.index(parent.timeperiod)
if index + 1 >= len(sorted_keys):
return None
else:
return parent_of_parent.children[sorted_keys[index + 1]]
    def _get_next_child_node(self, parent):
        """
        Iterates among children of the given parent and looks for a suitable node to process
        In case given parent has no suitable nodes, a younger parent will be found
        and the logic will be repeated for him
        :return: a node to process next; falls back to the node for the actual
        (current) timeperiod when every candidate is processed or blocked
        """
        sorted_keys = sorted(parent.children.keys())
        for key in sorted_keys:
            node = parent.children[key]
            if node.job_record is None:
                # never-processed node: request an embryo job record and hand the node out
                node.request_embryo_job_record()
                return node
            elif self._skip_the_node(node):
                # finished or spoiled node - move on to the next sibling
                continue
            elif node.job_record.is_active:
                return node
        # special case, when all children of the parent node are not suitable for processing
        new_parent = self._get_next_parent_node(parent)
        if new_parent is not None:
            # in case all nodes are processed or blocked - look for next valid parent node
            return self._get_next_child_node(new_parent)
        else:
            # if all valid parents are exploited - return current node
            # NOTE(review): assumes parent has at least one child; empty children
            # would raise IndexError on sorted_keys[0] - confirm callers guarantee this
            process_name = parent.children[sorted_keys[0]].process_name
            time_qualifier = parent.children[sorted_keys[0]].time_qualifier
            actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
            return self.get_node(process_name, actual_timeperiod)
    def _get_node(self, time_qualifier, timeperiod):
        """
        Method retrieves a tree node identified by time_qualifier and timeperiod
        In case intermittent or target nodes does not exist - method will request their creation
        :param time_qualifier: identifies the tree level
        :param timeperiod: identifies the target node timeperiod
        :return: requested node; type <AbstractNode>
        """
        hierarchy_entry = self.process_hierarchy.get_by_qualifier(time_qualifier)
        if hierarchy_entry.parent:
            # not at the top level: translate the timeperiod to the parent's granularity
            # and recursively ensure the parent node exists
            parent_time_qualifier = hierarchy_entry.parent.process_entry.time_qualifier
            parent_timeperiod = hierarchy_entry.parent.cast_timeperiod(timeperiod)
            parent = self._get_node(parent_time_qualifier, parent_timeperiod)
        else:
            # top of the hierarchy - attach directly to the tree root
            parent = self.root
        node = parent.children.get(timeperiod)
        if node is None:
            # target node does not exist yet - create it and register it with the parent
            node = TreeNode(self, parent, hierarchy_entry.process_entry.process_name, timeperiod, None)
            parent.children[timeperiod] = node
        return node
def _get_next_node(self, time_qualifier):
"""
Method goes to the top of the tree and traverses from
there in search of the next suitable node for processing
to the level defined by the given time_qualifier
:param time_qualifier: defines target level of the tree
:return: located node; type <TreeNode>
"""
hierarchy_entry = self.process_hierarchy.get_by_qualifier(time_qualifier)
if hierarchy_entry.parent:
parent_time_qualifier = hierarchy_entry.parent.process_entry.time_qualifier
parent = self._get_next_node(parent_time_qualifier)
else:
parent = self.root
return self._get_next_child_node(parent)
    def _skip_the_node(self, node):
        """Method is used during _get_next_node calculations.
        :param node: candidate node; callers check that node.job_record is not None before calling
        :return True: in case the node shall be _skipped_"""
        # case 1: node processing is complete
        if node.job_record.is_finished:
            return True
        # case 2: this is a bottom-level leaf node. retry this time_period for INFINITE_RETRY_HOURS
        # NOTE(review): the code below compares against LIFE_SUPPORT_HOURS, not INFINITE_RETRY_HOURS - confirm intent
        if node.process_name == self.process_hierarchy.bottom_process.process_name:
            if len(node.children) == 0:
                # no children - this is a leaf
                creation_time = time_helper.synergy_to_datetime(node.time_qualifier, node.timeperiod)
                if datetime.utcnow() - creation_time < timedelta(hours=LIFE_SUPPORT_HOURS):
                    # the leaf is still within its life-support window - keep retrying it
                    return False
                else:
                    # life-support expired - skip only once the retry budget is exhausted
                    return node.job_record.number_of_failures > MAX_NUMBER_OF_RETRIES
        # case 3: here we process process_daily, process_monthly and process_yearly that have children
        # iterate thru children and check if all of them are in STATE_SKIPPED (i.e. no data for parent to process)
        # if any is still in processing (i.e. has produced some data) - then we can not skip parent of the child node
        # case 3': consider parent as worth processing (i.e. do not skip) if child's job_record is None
        all_children_spoiled = True
        for key in node.children.keys():
            child = node.children[key]
            if child.job_record is None or \
                    (child.job_record.number_of_failures <= MAX_NUMBER_OF_RETRIES
                     and not child.job_record.is_skipped):
                all_children_spoiled = False
                break
        return all_children_spoiled
    def build_tree(self, rebuild=False):
        """method builds tree by iterating from the synergy_start_timeperiod to current time
        and inserting corresponding nodes
        :param rebuild: when True (or when no prior build is recorded) the iteration restarts
                        from settings['synergy_start_timeperiod']; otherwise it resumes from
                        the timeperiod recorded by the previous build"""
        time_qualifier = self.process_hierarchy.bottom_process.time_qualifier
        process_name = self.process_hierarchy.bottom_process.process_name
        if rebuild or self.build_timeperiod is None:
            timeperiod = settings.settings['synergy_start_timeperiod']
        else:
            timeperiod = self.build_timeperiod
        # normalize the starting timeperiod to the bottom process' granularity
        timeperiod = cast_to_time_qualifier(time_qualifier, timeperiod)
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        while actual_timeperiod >= timeperiod:
            # get_node creates the node (and any missing ancestors) if it does not exist yet
            self.get_node(process_name, timeperiod)
            timeperiod = time_helper.increment_timeperiod(time_qualifier, timeperiod)
        # remember where this build stopped so the next build can resume from here
        self.build_timeperiod = actual_timeperiod
def get_next_node(self, process_name):
"""
Find a next node to process by a process with process_name
"""
if process_name not in self.process_hierarchy:
raise ValueError('unable to compute the next_node due to unknown process: %s' % process_name)
time_qualifier = self.process_hierarchy[process_name].process_entry.time_qualifier
return self._get_next_node(time_qualifier)
def update_node(self, job_record):
"""
Updates job record property for a tree node associated with the given Job
"""
if job_record.process_name not in self.process_hierarchy:
raise ValueError('unable to update the node due to unknown process: %s' % job_record.process_name)
time_qualifier = self.process_hierarchy[job_record.process_name].process_entry.time_qualifier
node = self._get_node(time_qualifier, job_record.timeperiod)
node.job_record = job_record
def get_node(self, process_name, timeperiod):
"""
Method retrieves a tree node identified by the time_qualifier and the timeperiod
"""
if process_name not in self.process_hierarchy:
raise ValueError('unable to retrieve the node due to unknown process: %s' % process_name)
time_qualifier = self.process_hierarchy[process_name].process_entry.time_qualifier
return self._get_node(time_qualifier, timeperiod)
def validate(self):
"""method starts validation of the tree.
@see TreeNode.validate"""
for timeperiod in self.root.children:
child = self.root.children[timeperiod]
child.validate()
self.validation_timestamp = datetime.utcnow()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/tree.py",
"copies": "1",
"size": "11943",
"license": "bsd-3-clause",
"hash": -3035589669388636700,
"line_mean": 44.4106463878,
"line_max": 119,
"alpha_frac": 0.6562002847,
"autogenerated": false,
"ratio": 4.266881028938907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5423081313638907,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from datetime import datetime, timedelta
from synergy.system.repeat_timer import RepeatTimer
TIME_OF_DAY_FORMAT = "%H:%M"  # strptime/strftime format for the time-of-day portion of a trigger
EVERY_DAY = '*'  # marks every day as suitable to trigger the event
TRIGGER_INTERVAL = 30  # half a minute; polling period (seconds) passed to the RepeatTimer
class EventTime(object):
    """ A single trigger point: an optional day-of-week plus a time-of-day.

    Accepted *trigger_frequency* formats:
      - 'DoW-HH:MM' (e.g. '4-15:00'): trigger on the given weekday (0 = Monday, per datetime.weekday)
      - 'HH:MM': trigger every day at the given time (day_of_week set to EVERY_DAY)
    """

    def __init__(self, trigger_frequency):
        self.trigger_frequency = trigger_frequency

        tokens = self.trigger_frequency.split('-')
        if len(tokens) > 1:
            # Day of Week is provided
            self.day_of_week = tokens[0]
            self.time_of_day = datetime.strptime(tokens[1], TIME_OF_DAY_FORMAT)
        else:
            # Day of Week is not provided. Assume every day of the week
            self.day_of_week = EVERY_DAY
            self.time_of_day = datetime.strptime(tokens[0], TIME_OF_DAY_FORMAT)

    def __str__(self):
        return 'EventTime: day_of_week={0} time_of_day={1}'\
            .format(self.day_of_week, self.time_of_day.strftime(TIME_OF_DAY_FORMAT))

    def __repr__(self):
        return '{0}-{1}'.format(self.day_of_week, self.time_of_day.strftime(TIME_OF_DAY_FORMAT))

    def __eq__(self, other):
        if not isinstance(other, EventTime):
            return False
        return self.time_of_day == other.time_of_day \
            and (self.day_of_week == other.day_of_week
                 or self.day_of_week == EVERY_DAY
                 or other.day_of_week == EVERY_DAY)

    def __hash__(self):
        # FIX: hash must be consistent with __eq__. Since EVERY_DAY acts as a wildcard in __eq__,
        # two instances can compare equal while holding different day_of_week values; hashing
        # (day_of_week, time_of_day) violated the "equal objects have equal hashes" contract.
        # Hashing only time_of_day is equal for every pair of __eq__-equal instances.
        return hash(self.time_of_day)

    def next_trigger_frequency(self, utc_now=None):
        """ :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
        :return: datetime instance presenting next trigger time of the event """
        if utc_now is None:
            utc_now = datetime.utcnow()

        def wind_days(start_date):
            # walk forward one day at a time until the weekday matches (immediately for EVERY_DAY),
            # and stamp the trigger's HH:MM onto the resulting date
            while True:
                if self.day_of_week == EVERY_DAY or start_date.weekday() == int(self.day_of_week):
                    return start_date.replace(hour=self.time_of_day.hour, minute=self.time_of_day.minute)
                else:
                    start_date += timedelta(days=1)

        if utc_now.time() > self.time_of_day.time():
            # today's trigger time has already passed - start the search from tomorrow
            return wind_days(utc_now + timedelta(days=1))
        else:
            return wind_days(utc_now)

    @classmethod
    def utc_now(cls):
        """ :return: EventTime instance describing the current UTC weekday and HH:MM """
        utc_now = datetime.utcnow()
        return EventTime('{0}-{1}'.format(utc_now.weekday(), utc_now.strftime(TIME_OF_DAY_FORMAT)))
class EventClock(object):
    """ This class triggers on predefined time set in format 'day_of_week-HH:MM' or 'HH:MM'
    Maintaining API compatibility with the RepeatTimer class """

    def __init__(self, interval, call_back, args=None, kwargs=None):
        self.args = args if args else []
        self.kwargs = kwargs if kwargs else {}
        self.call_back = call_back
        self.timestamps = []
        self.change_interval(interval)
        self.handler = RepeatTimer(TRIGGER_INTERVAL, self.manage_schedule)
        self.activation_dt = None   # UTC time of the most recent trigger; None until first fire

    def _trigger_now(self):
        recently_fired = (self.activation_dt is not None
                          and datetime.utcnow() - self.activation_dt < timedelta(minutes=1))
        if recently_fired:
            # the event was already triggered within the last minute - no need to trigger it again
            return
        self.call_back(*self.args, **self.kwargs)
        self.activation_dt = datetime.utcnow()

    def manage_schedule(self, *_):
        # periodic poll (driven by the RepeatTimer): fire if "now" matches any registered timestamp
        if EventTime.utc_now() in self.timestamps:
            self._trigger_now()

    def start(self):
        self.handler.start()

    def cancel(self):
        self.handler.cancel()

    def trigger(self):
        # fire immediately unless "now" matches a registered timestamp,
        # in which case the regular manage_schedule flow will fire the call_back
        if EventTime.utc_now() not in self.timestamps:
            self._trigger_now()

    def change_interval(self, value):
        """ :param value: list of strings in format 'Day_of_Week-HH:MM' """
        assert not isinstance(value, str)
        self.timestamps = [EventTime(timestamp) for timestamp in value]

    def next_run_in(self, utc_now=None):
        """ :param utc_now: optional parameter to be used by Unit Tests as a definition of "now"
        :return: timedelta instance presenting amount of time before the trigger is triggered next time
        or None if the EventClock instance is not running """
        if utc_now is None:
            utc_now = datetime.utcnow()

        if not self.is_alive():
            return None

        smallest_timedelta = timedelta(days=99)
        for event_time in self.timestamps:
            smallest_timedelta = min(smallest_timedelta,
                                     event_time.next_trigger_frequency(utc_now) - utc_now)
        return smallest_timedelta

    def is_alive(self):
        return self.handler.is_alive()
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/system/event_clock.py",
"copies": "1",
"size": "5191",
"license": "bsd-3-clause",
"hash": -5935195343356679000,
"line_mean": 36.0785714286,
"line_max": 114,
"alpha_frac": 0.6018108264,
"autogenerated": false,
"ratio": 3.839497041420118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.993402448360205,
"avg_score": 0.0014566768436136293,
"num_lines": 140
} |
__author__ = 'Bohdan Mushkevych'
from db.model.client_statistics import ClientStatistics
from synergy.system.utils import copy_and_sum_families
from synergy.system import time_helper
from workers.client_daily_aggregator import ClientDailyAggregator
class ClientMonthlyAggregator(ClientDailyAggregator):
    """ class works as an aggregator from the client_daily collection into the client_monthly collection """

    def __init__(self, process_name):
        super(ClientMonthlyAggregator, self).__init__(process_name)

    def _init_sink_key(self, *args):
        client_id, daily_timeperiod = args[0], args[1]
        return client_id, time_helper.day_to_month(daily_timeperiod)

    def _init_source_object(self, document):
        return ClientStatistics.from_json(document)

    def _init_sink_object(self, composite_key):
        statistics = ClientStatistics()
        statistics.key = (composite_key[0], composite_key[1])
        return statistics

    def _process_single_document(self, document):
        source = self._init_source_object(document)
        sink_key = self._init_sink_key(source.key[0], source.key[1])
        target = self._get_aggregated_object(sink_key)

        # accumulate scalar counters and merge every family-property map
        target.number_of_visits += source.number_of_visits
        target.number_of_pageviews += source.number_of_pageviews
        target.total_duration += source.total_duration
        copy_and_sum_families(source.os, target.os)
        copy_and_sum_families(source.browsers, target.browsers)
        copy_and_sum_families(source.screen_res, target.screen_res)
        copy_and_sum_families(source.languages, target.languages)
        copy_and_sum_families(source.countries, target.countries)
if __name__ == '__main__':
    from constants import PROCESS_CLIENT_MONTHLY
    # script entry point: run the monthly aggregation worker for its registered process
    source = ClientMonthlyAggregator(PROCESS_CLIENT_MONTHLY)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/client_monthly_aggregator.py",
"copies": "1",
"size": "1844",
"license": "bsd-3-clause",
"hash": 6276059106931069000,
"line_mean": 39.9777777778,
"line_max": 108,
"alpha_frac": 0.7071583514,
"autogenerated": false,
"ratio": 3.5667311411992264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47738894925992265,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from db.model.raw_data import DOMAIN_NAME
from db.model.site_statistics import SiteStatistics
from db.model.client_statistics import ClientStatistics
from synergy.system.utils import copy_and_sum_families
from synergy.conf import settings
from synergy.system import time_helper
from synergy.system.restful_client import RestClient
from workers.abstract_horizontal_worker import AbstractHorizontalWorker
class ClientDailyAggregator(AbstractHorizontalWorker):
    """ class works as an aggregator from the site_hourly into the site_daily """

    def __init__(self, process_name):
        super(ClientDailyAggregator, self).__init__(process_name)
        self.rest_client = RestClient(self.logger)

    def _get_tunnel_port(self):
        return settings.settings['tunnel_site_port']

    def _init_sink_key(self, *args):
        client_id, hourly_timeperiod = args[0], args[1]
        return client_id, time_helper.hour_to_day(hourly_timeperiod)

    def _init_source_object(self, document):
        return SiteStatistics.from_json(document)

    def _init_sink_object(self, composite_key):
        statistics = ClientStatistics()
        statistics.key = (composite_key[0], composite_key[1])
        return statistics

    def _process_bulk_array(self, array_of_documents, timeperiod):
        # resolve the domain -> client_id mapping once for the whole bulk via the REST service
        domain_list = [document[DOMAIN_NAME] for document in array_of_documents]
        client_mapping = self.rest_client.get_client_mapping(timeperiod, domain_list)

        for document in array_of_documents:
            source = self._init_source_object(document)
            try:
                client_id = client_mapping[source.key[0]]
                sink_key = self._init_sink_key(client_id, timeperiod)
                target = self._get_aggregated_object(sink_key)

                target.number_of_visits += source.number_of_visits
                target.number_of_pageviews += source.number_of_pageviews
                target.total_duration += source.total_duration
                copy_and_sum_families(source.os, target.os)
                copy_and_sum_families(source.browsers, target.browsers)
                copy_and_sum_families(source.screen_res, target.screen_res)
                copy_and_sum_families(source.languages, target.languages)
                copy_and_sum_families(source.countries, target.countries)
            except KeyError:
                # domain is not mapped to any client - record and continue with the rest of the bulk
                self.logger.error('domain name %s has no valid owner client_id' % source.key[0])
if __name__ == '__main__':
    from constants import PROCESS_CLIENT_DAILY
    # script entry point: run the daily aggregation worker for its registered process
    source = ClientDailyAggregator(PROCESS_CLIENT_DAILY)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/client_daily_aggregator.py",
"copies": "1",
"size": "2625",
"license": "bsd-3-clause",
"hash": -706293630090908700,
"line_mean": 41.3387096774,
"line_max": 100,
"alpha_frac": 0.675047619,
"autogenerated": false,
"ratio": 3.723404255319149,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48984518743191485,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from db.model.single_session import SingleSession, DOMAIN_NAME, TIMEPERIOD
from db.model.site_statistics import SiteStatistics
from synergy.system.utils import increment_family_property
from workers.abstract_mongo_worker import AbstractMongoWorker
class SiteHourlyAggregator(AbstractMongoWorker):
    """ illustration suite worker:
        - aggregates single_session collection records and produces/updates records in the site_hourly
        principle of work is following: we extract all of the sessions for the hour
        (for example: from 13:00:00 till 13:59:59) and aggregate them into one record of site_hourly collection
    """

    def __init__(self, process_name):
        super(SiteHourlyAggregator, self).__init__(process_name)

    def _init_sink_key(self, *args):
        # sessions already carry the hourly timeperiod - the key is (domain_name, timeperiod) as-is
        return args[0], args[1]

    def _mongo_sink_key(self, *args):
        return {DOMAIN_NAME: args[0], TIMEPERIOD: args[1]}

    def _init_source_object(self, document):
        return SingleSession.from_json(document)

    def _init_sink_object(self, composite_key):
        statistics = SiteStatistics()
        statistics.key = composite_key
        return statistics

    def _process_single_document(self, document):
        session = self._init_source_object(document)
        sink_key = self._init_sink_key(session.domain_name, session.timeperiod)
        target = self._get_aggregated_object(sink_key)

        # each session counts as one visit; roll its history and profile into the hourly record
        target.stat.number_of_visits += 1
        target.stat.number_of_pageviews += session.browsing_history.number_of_pageviews
        target.stat.total_duration += session.browsing_history.total_duration
        increment_family_property(session.user_profile.os, target.stat.os)
        increment_family_property(session.user_profile.browser, target.stat.browser)
        increment_family_property(session.user_profile.screen_resolution, target.stat.screen_resolution)
        increment_family_property(session.user_profile.language, target.stat.language)
        increment_family_property(session.user_profile.country, target.stat.country)
if __name__ == '__main__':
    from constants import PROCESS_SITE_HOURLY
    # script entry point: run the hourly aggregation worker for its registered process
    source = SiteHourlyAggregator(PROCESS_SITE_HOURLY)
    source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "workers/site_hourly_aggregator.py",
"copies": "1",
"size": "2284",
"license": "bsd-3-clause",
"hash": -2254821314411407000,
"line_mean": 42.9230769231,
"line_max": 111,
"alpha_frac": 0.715849387,
"autogenerated": false,
"ratio": 3.7198697068403908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49357190938403905,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from db.model.single_session import SingleSession
from db.model.site_statistics import SiteStatistics
from synergy.system.utils import increment_family_property
from synergy.system import time_helper
from synergy.conf import settings
from workers.abstract_vertical_worker import AbstractVerticalWorker
class SiteHourlyAggregator(AbstractVerticalWorker):
    """
    class works as an aggregator from the single_session collection and produces/updates records in the site_hourly
    principle of work is following: we extract all of the sessions for the hour
    (for example: from 13:00:00 till 13:59:59) and aggregate them into one record of site_hourly collection
    """

    def __init__(self, process_name):
        super(SiteHourlyAggregator, self).__init__(process_name)

    def _get_tunnel_port(self):
        return settings.settings['tunnel_site_port']

    def _init_sink_key(self, *args):
        domain, session_timeperiod = args[0], args[1]
        return domain, time_helper.session_to_hour(session_timeperiod)

    def _init_source_object(self, document):
        return SingleSession.from_json(document)

    def _init_sink_object(self, composite_key):
        statistics = SiteStatistics()
        statistics.key = composite_key
        return statistics

    def _process_single_document(self, document):
        session = self._init_source_object(document)
        sink_key = self._init_sink_key(session.domain_name, session.timeperiod)
        target = self._get_aggregated_object(sink_key)

        # each session counts as one visit; roll its history and profile into the hourly record
        target.stat.number_of_visits += 1
        target.stat.number_of_pageviews += session.browsing_history.number_of_pageviews
        target.stat.total_duration += session.browsing_history.total_duration
        increment_family_property(session.user_profile.os, target.stat.os)
        increment_family_property(session.user_profile.browser, target.stat.browsers)
        increment_family_property(session.user_profile.screen_res, target.stat.screen_res)
        increment_family_property(session.user_profile.language, target.stat.languages)
        increment_family_property(session.user_profile.country, target.stat.countries)
if __name__ == '__main__':
    from constants import PROCESS_SITE_HOURLY
    # script entry point: run the hourly aggregation worker for its registered process
    source = SiteHourlyAggregator(PROCESS_SITE_HOURLY)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/site_hourly_aggregator.py",
"copies": "1",
"size": "2326",
"license": "bsd-3-clause",
"hash": -2648299537357980000,
"line_mean": 42.0740740741,
"line_max": 115,
"alpha_frac": 0.7252794497,
"autogenerated": false,
"ratio": 3.7156549520766773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9931168060388438,
"avg_score": 0.001953268277648129,
"num_lines": 54
} |
__author__ = 'Bohdan Mushkevych'
from db.model.site_statistics import SiteStatistics, DOMAIN_NAME, TIMEPERIOD
from synergy.system.utils import copy_and_sum_families
from synergy.system import time_helper
from workers.abstract_mongo_worker import AbstractMongoWorker
class SiteDailyAggregator(AbstractMongoWorker):
    """ illustration suite worker:
        - an aggregator from the site_hourly into the site_daily """

    def __init__(self, process_name):
        super(SiteDailyAggregator, self).__init__(process_name)

    def _init_sink_key(self, *args):
        domain, hourly_timeperiod = args[0], args[1]
        return domain, time_helper.hour_to_day(hourly_timeperiod)

    def _mongo_sink_key(self, *args):
        return {DOMAIN_NAME: args[0], TIMEPERIOD: args[1]}

    def _init_source_object(self, document):
        return SiteStatistics.from_json(document)

    def _init_sink_object(self, composite_key):
        statistics = SiteStatistics()
        statistics.key = composite_key
        return statistics

    def _process_single_document(self, document):
        source = self._init_source_object(document)
        sink_key = self._init_sink_key(source.domain_name, source.timeperiod)
        target = self._get_aggregated_object(sink_key)

        # accumulate scalar counters and merge every family-property map
        target.stat.number_of_visits += source.stat.number_of_visits
        target.stat.number_of_pageviews += source.stat.number_of_pageviews
        target.stat.total_duration += source.stat.total_duration
        copy_and_sum_families(source.stat.os, target.stat.os)
        copy_and_sum_families(source.stat.browser, target.stat.browser)
        copy_and_sum_families(source.stat.screen_resolution, target.stat.screen_resolution)
        copy_and_sum_families(source.stat.language, target.stat.language)
        copy_and_sum_families(source.stat.country, target.stat.country)
if __name__ == '__main__':
    from constants import PROCESS_SITE_DAILY
    # script entry point: run the daily aggregation worker for its registered process
    source = SiteDailyAggregator(PROCESS_SITE_DAILY)
    source.start()
| {
"repo_name": "mushkevych/scheduler",
"path": "workers/site_daily_aggregator.py",
"copies": "1",
"size": "1999",
"license": "bsd-3-clause",
"hash": 7854702936228471000,
"line_mean": 39.7959183673,
"line_max": 99,
"alpha_frac": 0.7013506753,
"autogenerated": false,
"ratio": 3.452504317789292,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4653854993089292,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from db.model.site_statistics import SiteStatistics
from synergy.system.utils import copy_and_sum_families
from synergy.conf import settings
from synergy.system import time_helper
from workers.abstract_vertical_worker import AbstractVerticalWorker
class SiteDailyAggregator(AbstractVerticalWorker):
    """ class works as an aggregator from the site_hourly into the site_daily """

    def __init__(self, process_name):
        super(SiteDailyAggregator, self).__init__(process_name)

    def _get_tunnel_port(self):
        return settings.settings['tunnel_site_port']

    def _init_sink_key(self, *args):
        domain, hourly_timeperiod = args[0], args[1]
        return domain, time_helper.hour_to_day(hourly_timeperiod)

    def _init_source_object(self, document):
        return SiteStatistics.from_json(document)

    def _init_sink_object(self, composite_key):
        statistics = SiteStatistics()
        statistics.key = composite_key
        return statistics

    def _process_single_document(self, document):
        source = self._init_source_object(document)
        sink_key = self._init_sink_key(source.domain_name, source.timeperiod)
        target = self._get_aggregated_object(sink_key)

        # accumulate scalar counters and merge every family-property map
        target.stat.number_of_visits += source.stat.number_of_visits
        target.stat.number_of_pageviews += source.stat.number_of_pageviews
        target.stat.total_duration += source.stat.total_duration
        copy_and_sum_families(source.stat.os, target.stat.os)
        copy_and_sum_families(source.stat.browsers, target.stat.browsers)
        copy_and_sum_families(source.stat.screen_res, target.stat.screen_res)
        copy_and_sum_families(source.stat.languages, target.stat.languages)
        copy_and_sum_families(source.stat.countries, target.stat.countries)
if __name__ == '__main__':
    from constants import PROCESS_SITE_DAILY
    # script entry point: run the daily aggregation worker for its registered process
    source = SiteDailyAggregator(PROCESS_SITE_DAILY)
    source.start()
| {
"repo_name": "eggsandbeer/scheduler",
"path": "workers/site_daily_aggregator.py",
"copies": "1",
"size": "1977",
"license": "bsd-3-clause",
"hash": 6794603272468982000,
"line_mean": 39.3469387755,
"line_max": 90,
"alpha_frac": 0.708143652,
"autogenerated": false,
"ratio": 3.4806338028169015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46887774548169014,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from flow.core.abstract_action import AbstractAction
class MkdirAction(AbstractAction):
    """ Filesystem action: make the target directory and all folders along the path if needed """

    def __init__(self, uri_path, bucket_name=None):
        super(MkdirAction, self).__init__('mkdir')
        self.uri_path, self.bucket_name = uri_path, bucket_name

    def run(self, execution_cluster):
        execution_cluster.filesystem.mkdir(uri_path=self.uri_path, bucket_name=self.bucket_name)
class RmdirAction(AbstractAction):
    """ Filesystem action: removes the target directory and all nested files and folders """

    def __init__(self, uri_path, bucket_name=None):
        super(RmdirAction, self).__init__('rmdir')
        self.uri_path, self.bucket_name = uri_path, bucket_name

    def run(self, execution_cluster):
        execution_cluster.filesystem.rmdir(uri_path=self.uri_path, bucket_name=self.bucket_name)
class RmAction(AbstractAction):
    """ Filesystem action: removes a single file """

    def __init__(self, uri_path, bucket_name=None):
        super(RmAction, self).__init__('rm')
        self.uri_path, self.bucket_name = uri_path, bucket_name

    def run(self, execution_cluster):
        execution_cluster.filesystem.rm(uri_path=self.uri_path, bucket_name=self.bucket_name)
class CpAction(AbstractAction):
    """ Filesystem action: copies a single file/folder from location SOURCE to TARGET """

    def __init__(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None):
        super(CpAction, self).__init__('cp')
        self.uri_source, self.uri_target = uri_source, uri_target
        self.bucket_name_source, self.bucket_name_target = bucket_name_source, bucket_name_target

    def run(self, execution_cluster):
        execution_cluster.filesystem.cp(
            uri_source=self.uri_source, uri_target=self.uri_target,
            bucket_name_source=self.bucket_name_source, bucket_name_target=self.bucket_name_target)
class MvAction(AbstractAction):
    """ Filesystem action: moves a single file/folder from location SOURCE to TARGET """

    def __init__(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None):
        super(MvAction, self).__init__('mv')
        self.uri_source, self.uri_target = uri_source, uri_target
        self.bucket_name_source, self.bucket_name_target = bucket_name_source, bucket_name_target

    def run(self, execution_cluster):
        execution_cluster.filesystem.mv(
            uri_source=self.uri_source, uri_target=self.uri_target,
            bucket_name_source=self.bucket_name_source, bucket_name_target=self.bucket_name_target)
class CopyToLocalAction(AbstractAction):
    """ Filesystem action: copies a single file from remote SOURCE to local TARGET """

    def __init__(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None):
        super(CopyToLocalAction, self).__init__('copyToLocal')
        self.uri_source, self.uri_target = uri_source, uri_target
        self.bucket_name_source, self.bucket_name_target = bucket_name_source, bucket_name_target

    def run(self, execution_cluster):
        execution_cluster.filesystem.copyToLocal(
            uri_source=self.uri_source, uri_target=self.uri_target,
            bucket_name_source=self.bucket_name_source, bucket_name_target=self.bucket_name_target)
class CopyFromLocalAction(AbstractAction):
    """ Filesystem action: copies a single file from local SOURCE to remote TARGET """

    def __init__(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None):
        super(CopyFromLocalAction, self).__init__('copyFromLocal')
        self.uri_source, self.uri_target = uri_source, uri_target
        self.bucket_name_source, self.bucket_name_target = bucket_name_source, bucket_name_target

    def run(self, execution_cluster):
        execution_cluster.filesystem.copyFromLocal(
            uri_source=self.uri_source, uri_target=self.uri_target,
            bucket_name_source=self.bucket_name_source, bucket_name_target=self.bucket_name_target)
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/filesystem_actions.py",
"copies": "1",
"size": "4448",
"license": "bsd-3-clause",
"hash": -5862026090865802000,
"line_mean": 43.9292929293,
"line_max": 120,
"alpha_frac": 0.6785071942,
"autogenerated": false,
"ratio": 3.827882960413081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5006390154613081,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from flow.core.abstract_cluster import AbstractCluster
from flow.core.execution_context import ContextDriven, get_action_logger, valid_context
class AbstractAction(ContextDriven):
    """ abstraction for action
    API sequence:
    1. *set_context*
    2. *do*
    3. *cleanup* """
    def __init__(self, action_name, log_tag=None, skip=False, **kwargs):
        super(AbstractAction, self).__init__()
        self.action_name = action_name
        self.log_tag = log_tag  # optional suffix appended to the action name when building the logger tag
        self.skip = skip  # when True, *do* bypasses *run* entirely
        self.step_name = None  # assigned later by set_context
        self.kwargs = {} if not kwargs else kwargs
    def __del__(self):
        # best-effort cleanup at garbage collection
        # NOTE(review): __del__ timing is interpreter-dependent - do not rely on it for critical teardown
        self.cleanup()
        if self.logger:
            self.logger.info('Action {0} finished.'.format(self.action_name))
    def set_context(self, context, step_name=None, **kwargs):
        """ binds the action to its execution context; *step_name* is mandatory despite the keyword default """
        assert step_name is not None, 'step name must be passed to the action.set_context'
        self.step_name = step_name
        super(AbstractAction, self).set_context(context, **kwargs)
    def get_logger(self):
        # fully-qualified log tag: '<action_name>' or '<action_name>.<log_tag>'
        fqlt = self.action_name if self.log_tag is None else '{0}.{1}'.format(self.action_name, self.log_tag)
        return get_action_logger(self.flow_name, self.step_name, fqlt, self.settings)
    def run(self, execution_cluster):
        """ abstract method implementing Action functionality
        :param execution_cluster: of type `AbstractCluster`
        """
        raise NotImplementedError('method *run* must be implemented by {0}'.format(self.__class__.__name__))
    @valid_context
    def do(self, execution_cluster):
        """ this method calls *run* in a skip-aware manner
        :return None or raise and exception
        """
        assert isinstance(execution_cluster, AbstractCluster)
        if self.skip:
            return
        self.run(execution_cluster)
    def cleanup(self):
        """ override point: release resources acquired by the action; no-op by default """
        pass
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/abstract_action.py",
"copies": "1",
"size": "1897",
"license": "bsd-3-clause",
"hash": -4694522191198998000,
"line_mean": 35.4807692308,
"line_max": 109,
"alpha_frac": 0.6299420137,
"autogenerated": false,
"ratio": 3.911340206185567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001004726448978964,
"num_lines": 52
} |
__author__ = 'Bohdan Mushkevych'
from flow.core.execution_context import ExecutionContext, get_cluster_logger
class ClusterError(Exception):
    """ Raised to signal a failure within the cluster execution environment """
    pass
class AbstractCluster(object):
    """ abstraction for action execution environment
    API sequence is to launch the cluster, perform one or more steps/commands and terminate """

    def __init__(self, name, context, **kwargs):
        assert isinstance(context, ExecutionContext)
        self.name = name
        self.context = context
        self.kwargs = kwargs if kwargs else {}
        self.logger = get_cluster_logger(context.flow_name, self.name, context.settings)

    @property
    def filesystem(self):
        raise NotImplementedError('property *filesystem* must be implemented by the {0}'
                                  .format(self.__class__.__name__))

    def run_pig_step(self, uri_script, **kwargs):
        """ no-op stub: run a Pig script; concrete clusters override as needed """
        pass

    def run_spark_step(self, uri_script, language, **kwargs):
        """ no-op stub: run a Spark job; concrete clusters override as needed """
        pass

    def run_hadoop_step(self, uri_script, **kwargs):
        """ no-op stub: run a Hadoop job; concrete clusters override as needed """
        pass

    def run_shell_command(self, uri_script, **kwargs):
        """ no-op stub: run a shell command; concrete clusters override as needed """
        pass

    def launch(self):
        """ no-op stub: bring the cluster up; concrete clusters override as needed """
        pass

    def terminate(self):
        """ no-op stub: tear the cluster down; concrete clusters override as needed """
        pass
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/core/abstract_cluster.py",
"copies": "1",
"size": "1187",
"license": "bsd-3-clause",
"hash": 5495472388559978000,
"line_mean": 27.2619047619,
"line_max": 99,
"alpha_frac": 0.6368997473,
"autogenerated": false,
"ratio": 4.254480286738351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007731407169609417,
"num_lines": 42
} |
__author__ = 'Bohdan Mushkevych'
from http import HTTPStatus
import json
from werkzeug.wrappers import Response
from werkzeug.utils import redirect
from synergy.mx.gc_action_handler import GcActionHandler
from synergy.mx.freerun_action_handler import FreerunActionHandler
from synergy.mx.managed_action_handler import ManagedActionHandler
from synergy.mx.scheduler_entries import SchedulerEntries
from synergy.mx.dashboard_handler import DashboardHandler
from synergy.mx.utils import render_template, expose
from synergy.mx.tree_node_details import TreeNodeDetails
from synergy.mx.tree_details import TreeDetails
from synergy.mx.supervisor_action_handler import SupervisorActionHandler
@expose('/scheduler/managed/entries/')
def scheduler_managed_entries(request, **values):
    """Render the list of managed scheduler entries."""
    return render_template('scheduler_managed_entries.html',
                           details=SchedulerEntries(request, **values))


@expose('/scheduler/freerun/entries/')
def scheduler_freerun_entries(request, **values):
    """Render the list of freerun scheduler entries."""
    return render_template('scheduler_freerun_entries.html',
                           details=SchedulerEntries(request, **values))
@expose('/')
def landing_page(request, **values):
    """Landing page: forward to the dashboard overview."""
    return redirect('/scheduler/dashboard/overview/')


@expose('/scheduler/dashboard/overview/')
def dashboard_overview(request, **values):
    """Render the dashboard overview page."""
    return render_template('dashboard_overview.html',
                           details=DashboardHandler(request, **values))


@expose('/scheduler/dashboard/managed/')
def dashboard_managed(request, **values):
    """Render the dashboard page for managed processes."""
    return render_template('dashboard_managed.html',
                           details=DashboardHandler(request, **values))


@expose('/scheduler/dashboard/freerun/')
def dashboard_freeruns(request, **values):
    """Render the dashboard page for freerun processes."""
    return render_template('dashboard_freeruns.html',
                           details=DashboardHandler(request, **values))
@expose('/scheduler/jobs/')
def jobs(request, **values):
    """Return the dashboard's job listing as JSON."""
    payload = DashboardHandler(request, **values).jobs
    return Response(response=json.dumps(payload), mimetype='application/json')


@expose('/scheduler/trees/')
def details_trees(request, **values):
    """Return details for every timetable tree as JSON."""
    payload = TreeDetails(request, **values).trees
    return Response(response=json.dumps(payload), mimetype='application/json')


@expose('/scheduler/tree/')
def details_tree(request, **values):
    """Return details for a single timetable tree as JSON."""
    payload = TreeDetails(request, **values).tree_details
    return Response(response=json.dumps(payload), mimetype='application/json')


@expose('/scheduler/tree/nodes/')
def details_tree_nodes(request, **values):
    """Return details for the requested tree nodes as JSON."""
    payload = TreeNodeDetails(request, **values).details
    return Response(response=json.dumps(payload), mimetype='application/json')
@expose('/scheduler/tree/node/reprocess/')
def reprocess_job(request, **values):
    """Mark the selected tree node for reprocessing; responds 204 No Content."""
    ManagedActionHandler(request, **values).reprocess_tree_node()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/tree/node/skip/')
def skip_job(request, **values):
    """Mark the selected tree node as skipped; responds 204 No Content."""
    ManagedActionHandler(request, **values).skip_tree_node()
    return Response(status=HTTPStatus.NO_CONTENT)
@expose('/scheduler/freerun/entry/', methods=['HEAD', 'DELETE', 'PUT', 'POST'])
def freerun_entry_action(request, **values):
    """ CRUD dispatcher for a freerun scheduler entry.
    Accepts both REST verbs (PUT=create, DELETE=delete, POST=update) and the
    corresponding form-button submissions; HEAD (or the form's cancel button)
    is a deliberate no-op. Always responds 204 No Content.
    """
    handler = FreerunActionHandler(request, **values)
    if 'cancel_button' in handler.request_arguments or request.method == 'HEAD':
        pass
    elif 'insert_button' in handler.request_arguments or request.method == 'PUT':
        handler.create_entry()
    elif 'delete_button' in handler.request_arguments or request.method == 'DELETE':
        handler.delete_entry()
    elif 'update_button' in handler.request_arguments or request.method == 'POST':
        handler.update_entry()
    else:
        # fixed garbled wording of the original message ('unsupported method for by ...')
        handler.logger.error(f'MX Error: unsupported method {request.method} for /freerun/entry/')
    return Response(status=HTTPStatus.NO_CONTENT)
@expose('/scheduler/managed/uow/')
def managed_uow(request, **values):
    """Return the unit-of-work of a managed job as JSON."""
    uow = ManagedActionHandler(request, **values).get_uow()
    return Response(response=json.dumps(uow), mimetype='application/json')


@expose('/scheduler/managed/uow/log/')
def managed_uow_log(request, **values):
    """Return the unit-of-work log of a managed job as JSON."""
    log = ManagedActionHandler(request, **values).get_uow_log()
    return Response(response=json.dumps(log), mimetype='application/json')


@expose('/scheduler/freerun/uow/')
def freerun_uow(request, **values):
    """Return the unit-of-work of a freerun entry as JSON."""
    uow = FreerunActionHandler(request, **values).get_uow()
    return Response(response=json.dumps(uow), mimetype='application/json')


@expose('/scheduler/freerun/uow/cancel/')
def freerun_cancel_uow(request, **values):
    """Cancel the unit-of-work of a freerun entry; responds 204 No Content."""
    FreerunActionHandler(request, **values).cancel_uow()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/freerun/uow/log/')
def freerun_uow_log(request, **values):
    """Return the unit-of-work log of a freerun entry as JSON."""
    log = FreerunActionHandler(request, **values).get_uow_log()
    return Response(response=json.dumps(log), mimetype='application/json')


@expose('/scheduler/managed/timeline/')
def managed_event_log(request, **values):
    """Return the event timeline of a managed job as JSON."""
    events = ManagedActionHandler(request, **values).get_event_log()
    return Response(response=json.dumps(events), mimetype='application/json')


@expose('/scheduler/freerun/timeline/')
def freerun_event_log(request, **values):
    """Return the event timeline of a freerun entry as JSON."""
    events = FreerunActionHandler(request, **values).get_event_log()
    return Response(response=json.dumps(events), mimetype='application/json')
@expose('/scheduler/managed/entry/interval/')
def managed_change_interval(request, **values):
    """Change the trigger interval of a managed entry; responds 204 No Content."""
    ManagedActionHandler(request, **values).change_interval()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/freerun/entry/interval/')
def freerun_change_interval(request, **values):
    """Change the trigger interval of a freerun entry; responds 204 No Content."""
    FreerunActionHandler(request, **values).change_interval()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/managed/entry/trigger/')
def managed_trigger_now(request, **values):
    """Fire the managed entry's trigger immediately; responds 204 No Content."""
    ManagedActionHandler(request, **values).trigger_now()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/freerun/entry/trigger/')
def freerun_trigger_now(request, **values):
    """Fire the freerun entry's trigger immediately; responds 204 No Content."""
    FreerunActionHandler(request, **values).trigger_now()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/managed/entry/deactivate/')
def managed_deactivate_trigger(request, **values):
    """Deactivate the managed entry's trigger; responds 204 No Content."""
    ManagedActionHandler(request, **values).deactivate_trigger()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/freerun/entry/deactivate/')
def freerun_deactivate_trigger(request, **values):
    """Deactivate the freerun entry's trigger; responds 204 No Content."""
    FreerunActionHandler(request, **values).deactivate_trigger()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/managed/entry/activate/')
def managed_activate_trigger(request, **values):
    """Activate the managed entry's trigger; responds 204 No Content."""
    ManagedActionHandler(request, **values).activate_trigger()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/freerun/entry/activate/')
def freerun_activate_trigger(request, **values):
    """Activate the freerun entry's trigger; responds 204 No Content."""
    FreerunActionHandler(request, **values).activate_trigger()
    return Response(status=HTTPStatus.NO_CONTENT)
@expose('/scheduler/gc/flush/all/')
def gc_flush_all(request, **values):
    """Flush every entry held by the Garbage Collector; responds 204 No Content."""
    GcActionHandler(request, **values).flush_all()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/gc/flush/one/')
def gc_flush_one(request, **values):
    """Flush a single Garbage Collector entry; responds 204 No Content."""
    GcActionHandler(request, **values).flush_one()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/gc/refresh/')
def gc_refresh(request, **values):
    """Refresh the Garbage Collector state; responds 204 No Content."""
    GcActionHandler(request, **values).refresh()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/scheduler/gc/log/')
def gc_log(request, **values):
    """Return the tail of the Garbage Collector log as JSON."""
    tail = GcActionHandler(request, **values).tail_gc_log()
    return Response(response=json.dumps(tail), mimetype='application/json')
@expose('/scheduler/mx/log/')
def mx_log(request, **values):
    """Return the tail of the MX log as JSON."""
    tail = SchedulerEntries(request, **values).tail_mx_log()
    return Response(response=json.dumps(tail), mimetype='application/json')


@expose('/scheduler/scheduler/log/')
def scheduler_log(request, **values):
    """Return the tail of the Scheduler log as JSON."""
    tail = SchedulerEntries(request, **values).tail_scheduler_log()
    return Response(response=json.dumps(tail), mimetype='application/json')


@expose('/supervisor/entries/')
def supervisor_entries(request, **values):
    """Render the list of Supervisor-managed processes."""
    return render_template('supervisor_entries.html',
                           details=SupervisorActionHandler(request, **values).entries)


@expose('/supervisor/entry/start/')
def supervisor_start_process(request, **values):
    """Ask the Supervisor to start a process; responds 204 No Content."""
    SupervisorActionHandler(request, **values).mark_for_start()
    return Response(status=HTTPStatus.NO_CONTENT)


@expose('/supervisor/entry/stop/')
def supervisor_stop_process(request, **values):
    """Ask the Supervisor to stop a process; responds 204 No Content."""
    SupervisorActionHandler(request, **values).mark_for_stop()
    return Response(status=HTTPStatus.NO_CONTENT)
@expose('/scheduler/viewer/object/')
def object_viewer(request, **values):
    """Render the generic object viewer page."""
    return render_template('object_viewer.html')


@expose('/scheduler/viewer/schedulable/')
def schedulable_viewer(request, **values):
    """Render the schedulable form: blank for a new entry, pre-filled otherwise."""
    # a missing 'is_new_entry' arg yields None, which is not in the truthy-string tuple
    is_new_entry = request.args.get('is_new_entry') in ('True', 'true', '1')
    handler = None if is_new_entry else FreerunActionHandler(request, **values)
    return render_template('schedulable_form.html', handler=handler)
@expose('/scheduler/mx_page_tiles/')
def mx_page_tiles(request, **values):
    """Render the MX page tiles."""
    return render_template('mx_page_tiles.html',
                           details=TreeDetails(request, **values))


# referenced from mx.synergy_mx.py module
def not_found(request, **values):
    """Fallback view for unknown URLs."""
    return render_template('not_found.html')
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/mx/views.py",
"copies": "1",
"size": "9947",
"license": "bsd-3-clause",
"hash": -705303144000122600,
"line_mean": 33.4186851211,
"line_max": 102,
"alpha_frac": 0.7381119936,
"autogenerated": false,
"ratio": 3.652956298200514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48910682918005144,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from logging import ERROR, INFO
from synergy.db.model import job
from synergy.db.manager import ds_manager
from synergy.conf import context
from synergy.system.decorator import with_reconnect
from synergy.system import time_helper
from synergy.scheduler.scheduler_constants import STATE_MACHINE_RECOMPUTING
from synergy.scheduler.abstract_state_machine import AbstractStateMachine
class StateMachineRecomputing(AbstractStateMachine):
    """ State Machine of 6 states is expected to spawn multiple UOWs per job/timeperiod.
        Job timeperiods boundaries are dynamic - i.e. underlying data volume grows with time """

    def __init__(self, logger, timetable):
        super(StateMachineRecomputing, self).__init__(logger, timetable, name=STATE_MACHINE_RECOMPUTING)
        # data-source facade, used below to resolve primary-key boundaries of processing slices
        self.ds = ds_manager.ds_factory(self.logger)

    @property
    def run_on_active_timeperiod(self):
        """ :return: True, as this StateMachine allows multiple runs for some given timeperiod """
        return True

    def notify(self, uow):
        """ callback for a completed unit_of_work: finalizes the Job iff it is in STATE_FINAL_RUN
        :param uow: completed unit_of_work whose associated Job may need a state change """
        tree = self.timetable.get_tree(uow.process_name)
        node = tree.get_node(uow.process_name, uow.timeperiod)
        job_record = node.job_record
        if not job_record.is_final_run:
            self.logger.info('Suppressing state change for Job {0}@{1}, since it is not in STATE_FINAL_RUN'
                             .format(uow.process_name, uow.timeperiod))
            return
        self._process_state_final_run(job_record)
        self.mq_transmitter.publish_job_status(job_record)

    @with_reconnect
    def update_scope_of_processing(self, process_name, uow, start_timeperiod, end_timeperiod):
        """method reads collection and refine slice upper bound for processing"""
        source_collection_name = uow.source
        # NOTE(review): end_id is refreshed from *lowest_primary_key* - presumably the
        # datasource key ordering makes this the slice's upper bound; confirm vs ds_manager
        last_object_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        uow.end_id = str(last_object_id)
        self.uow_dao.update(uow)
        msg = 'Updated processing range for {0}@{1} for collection {2}: [{3} : {4}]' \
            .format(process_name, start_timeperiod, source_collection_name, uow.start_id, uow.end_id)
        self._log_message(INFO, process_name, start_timeperiod, msg)

    def _compute_and_transfer_to_progress(self, process_name, start_timeperiod, end_timeperiod, job_record):
        """ method computes new unit_of_work for job record in STATE_IN_PROGRESS
        it also contains _fuzzy_ logic regard the DuplicateKeyError:
        - we try to compute new scope of processing
        - in case we face DuplicateKeyError, we try to recover from it by reading existing unit_of_work from DB:
        -- in case unit_of_work can be located - we update job record and proceed normally
        -- in case unit_of_work can not be located (what is equal to fatal data corruption) - we log exception and
        ask/expect manual intervention to resolve the corruption"""
        source_collection_name = context.process_context[process_name].source
        # NOTE(review): start_id comes from *highest* primary key and end_id from *lowest* -
        # assumed to reflect the datasource's key ordering; confirm vs ds_manager
        start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        uow, is_duplicate = self.insert_and_publish_uow(job_record, start_id, end_id)
        self.update_job(job_record, uow, job.STATE_IN_PROGRESS)

    def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
        """ method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
        it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method"""
        source_collection_name = context.process_context[process_name].source
        start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
        uow, transfer_to_final = self.insert_and_publish_uow(job_record, start_id, end_id)
        self.update_job(job_record, uow, job.STATE_FINAL_RUN)
        if transfer_to_final:
            # second return value is named is_duplicate at sibling call sites - presumably a
            # pre-existing (duplicate) UOW allows immediate finalization; confirm vs AbstractStateMachine
            self._process_state_final_run(job_record)

    def _process_state_embryo(self, job_record):
        """ method that takes care of processing job records in STATE_EMBRYO state"""
        start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
        end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
        self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
                                               end_timeperiod, job_record)

    def _process_state_in_progress(self, job_record):
        """ method that takes care of processing job records in STATE_IN_PROGRESS state"""
        start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
        end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        uow = self.uow_dao.get_one(job_record.related_unit_of_work)
        if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
            if uow.is_invalid or uow.is_requested:
                # current uow has not been processed yet. update it
                self.update_scope_of_processing(job_record.process_name, uow, start_timeperiod, end_timeperiod)
            else:
                # STATE_IN_PROGRESS, STATE_PROCESSED, STATE_CANCELED, STATE_NOOP
                # create new uow to cover new inserts
                self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
                                                       end_timeperiod, job_record)
        elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
            # create new uow for FINAL RUN
            self._compute_and_transfer_to_final_run(job_record.process_name, start_timeperiod,
                                                    end_timeperiod, job_record)
        else:
            # timeperiod is ahead of the wall clock - log as an error and do nothing
            msg = 'Job {0} has timeperiod {1} from the future vs current timeperiod {2}' \
                .format(job_record.db_id, job_record.timeperiod, actual_timeperiod)
            self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/state_machine_recomputing.py",
"copies": "1",
"size": "6626",
"license": "bsd-3-clause",
"hash": -6055053112663943000,
"line_mean": 57.6371681416,
"line_max": 114,
"alpha_frac": 0.6817084214,
"autogenerated": false,
"ratio": 3.76049943246311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49422078538631103,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from logging import ERROR, INFO
from synergy.db.model import job
from synergy.scheduler.scheduler_constants import STATE_MACHINE_DISCRETE
from synergy.scheduler.abstract_state_machine import AbstractStateMachine
from synergy.system import time_helper
from synergy.conf import context
class StateMachineDiscrete(AbstractStateMachine):
    """ State Machine of 5 states is expected to spawn one UOW per job/timeperiod
        Job timeperiods boundaries are meant to be discrete/fixed
        in comparison to StateMachineContinuous this one does not transfer to STATE_FINAL_RUN"""

    def __init__(self, logger, timetable):
        super(StateMachineDiscrete, self).__init__(logger, timetable, name=STATE_MACHINE_DISCRETE)

    @property
    def run_on_active_timeperiod(self):
        """ :return: False, since there should be only 1 run for given timeperiod """
        return False

    def notify(self, uow):
        """ callback for a completed unit_of_work:
        finalizes the Job if its timeperiod is over and the Job is finalizable
        :param uow: completed unit_of_work whose associated Job may need a state change """
        tree = self.timetable.get_tree(uow.process_name)
        node = tree.get_node(uow.process_name, uow.timeperiod)
        job_record = node.job_record
        if not job_record.is_in_progress:
            self.logger.info('Suppressing state change for Job {0}@{1}, since it is not in STATE_IN_PROGRESS'
                             .format(uow.process_name, uow.timeperiod))
            return
        time_qualifier = context.process_context[uow.process_name].time_qualifier
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        if uow.timeperiod < actual_timeperiod and is_job_finalizable is True:
            self.__process_finalizable_job(job_record, uow)
            self.mq_transmitter.publish_job_status(job_record)
        elif uow.timeperiod >= actual_timeperiod:
            self.logger.info('Suppressing state change for Job {0}@{1}, '
                             'since the working timeperiod has not finished yet'
                             .format(uow.process_name, uow.timeperiod))
        elif not is_job_finalizable:
            self.logger.info('Suppressing state change for Job {0}@{1}, '
                             'since the job is not finalizable'.format(uow.process_name, uow.timeperiod))

    def __process_non_finalizable_job(self, job_record, uow):
        """ method handles given job_record based on the unit_of_work status
        Assumption: job_record is in STATE_IN_PROGRESS and is not yet finalizable """
        if uow.is_active:
            # Large Job processing takes more than 1 tick of Scheduler
            # Let the Large Job processing complete - do no updates to Scheduler records
            pass
        elif uow.is_finished:
            # create new uow to cover new inserts
            uow, is_duplicate = self.insert_and_publish_uow(job_record, 0, int(uow.end_id) + 1)
            self.update_job(job_record, uow, job.STATE_IN_PROGRESS)

    def __process_finalizable_job(self, job_record, uow):
        """ method handles given job_record based on the unit_of_work status
        Assumption: job_record is in STATE_IN_PROGRESS and is finalizable """
        if uow.is_active:
            # Job processing has not started yet
            # Let the processing complete - do no updates to Scheduler records
            msg = 'Suppressed new UOW creation for Job {0}@{1}; Job is in {2}; UOW is in {3}' \
                .format(job_record.process_name, job_record.timeperiod, job_record.state, uow.state)
            self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
        elif uow.is_processed:
            self.update_job(job_record, uow, job.STATE_PROCESSED)
        elif uow.is_noop:
            self.update_job(job_record, uow, job.STATE_NOOP)
        elif uow.is_canceled:
            self.update_job(job_record, uow, job.STATE_SKIPPED)
        elif uow.is_invalid:
            msg = 'Job {0}: UOW for {1}@{2} is in {3}; ' \
                  'relying on the Garbage Collector to transfer UOW into the STATE_CANCELED' \
                .format(job_record.db_id, job_record.process_name, job_record.timeperiod, uow.state)
            self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
        else:
            msg = 'Unknown state {0} of Job {1} for {2}@{3}' \
                .format(uow.state, job_record.db_id, job_record.process_name, job_record.timeperiod)
            self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
        # rebuild the timetable tree so dependant nodes observe the updated Job state
        timetable_tree = self.timetable.get_tree(job_record.process_name)
        timetable_tree.build_tree()

    def _process_state_embryo(self, job_record):
        """ method that takes care of processing job records in STATE_EMBRYO state"""
        uow, is_duplicate = self.insert_and_publish_uow(job_record, 0, 0)
        self.update_job(job_record, uow, job.STATE_IN_PROGRESS)

    def _process_state_in_progress(self, job_record):
        """ method that takes care of processing job records in STATE_IN_PROGRESS state """
        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        uow = self.uow_dao.get_one(job_record.related_unit_of_work)
        if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
            self.__process_non_finalizable_job(job_record, uow)
        elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
            self.__process_finalizable_job(job_record, uow)
        else:
            # timeperiod is ahead of the wall clock - log as an error and do nothing
            msg = 'Job {0} has timeperiod {1} from the future vs current timeperiod {2}' \
                .format(job_record.db_id, job_record.timeperiod, actual_timeperiod)
            self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)

    def _process_state_final_run(self, job_record):
        """method takes care of processing job records in STATE_FINAL_RUN state"""
        raise NotImplementedError(f'Method _process_state_final_run is not supported by {self.__class__.__name__}')
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/state_machine_discrete.py",
"copies": "1",
"size": "6216",
"license": "bsd-3-clause",
"hash": 7253418096692521000,
"line_mean": 52.1282051282,
"line_max": 115,
"alpha_frac": 0.6549227799,
"autogenerated": false,
"ratio": 3.73109243697479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.488601521687479,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from logging import ERROR, INFO
from synergy.db.model import job
from synergy.scheduler.scheduler_constants import STATE_MACHINE_SIMPLE_DISCRETE
from synergy.scheduler.state_machine_dicrete import StateMachineDiscrete
from synergy.system import time_helper
from synergy.conf import context
class StateMachineSimpleDiscrete(StateMachineDiscrete):
    """ State Machine to handle discrete timeperiod boundaries for jobs
        in comparison to StateMachineDiscrete this one does not transfer to STATE_FINAL_RUN"""

    def __init__(self, logger, timetable):
        # NOTE(review): passes *name* as a keyword to the parent __init__ -
        # confirm the parent signature in this codebase revision accepts it
        super(StateMachineSimpleDiscrete, self).__init__(logger, timetable, name=STATE_MACHINE_SIMPLE_DISCRETE)

    def __del__(self):
        # NOTE(review): delegates to super().__del__() - relies on an ancestor
        # class defining __del__ (plain *object* does not)
        super(StateMachineSimpleDiscrete, self).__del__()

    def shallow_state_update(self, uow):
        """ updates the Job state based on a completed unit_of_work,
        without spawning any new units_of_work
        :param uow: completed unit_of_work whose associated Job may need a state change """
        tree = self.timetable.get_tree(uow.process_name)
        node = tree.get_node(uow.process_name, uow.timeperiod)
        job_record = node.job_record
        if not job_record.is_in_progress:
            self.logger.info('Can not perform shallow status update for %s in timeperiod %s '
                             'since the job state is not STATE_IN_PROGRESS' % (uow.process_name, uow.timeperiod))
            return
        time_qualifier = context.process_context[uow.process_name].time_qualifier
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        if uow.timeperiod < actual_timeperiod and is_job_finalizable is True:
            self.__process_finalizable_job(job_record, uow)
        elif uow.timeperiod >= actual_timeperiod:
            self.logger.info('Can not complete shallow status update for %s in timeperiod %s '
                             'since the working timeperiod has not finished yet' % (uow.process_name, uow.timeperiod))
        elif not is_job_finalizable:
            self.logger.info('Can not complete shallow status update for %s in timeperiod %s '
                             'since the job could not be finalized' % (uow.process_name, uow.timeperiod))

    def __process_non_finalizable_job(self, job_record, uow, start_timeperiod, end_timeperiod):
        """ method handles given job_record based on the unit_of_work status
        Assumption: job_record is in STATE_IN_PROGRESS and is not yet finalizable """
        if uow.is_active:
            # Large Job processing takes more than 1 tick of Scheduler
            # Let the Large Job processing complete - do no updates to Scheduler records
            pass
        elif uow.is_finished:
            # create new uow to cover new inserts
            # NOTE(review): this call passes process_name/timeperiods while sibling classes pass
            # job_record - confirm insert_and_publish_uow signature in this codebase revision
            uow, is_duplicate = self.insert_and_publish_uow(job_record.process_name,
                                                            start_timeperiod,
                                                            end_timeperiod,
                                                            0,
                                                            int(uow.end_id) + 1)
            self.timetable.update_job_record(job_record, uow, job.STATE_IN_PROGRESS)

    def __process_finalizable_job(self, job_record, uow):
        """ method handles given job_record based on the unit_of_work status
        Assumption: job_record is in STATE_IN_PROGRESS and is finalizable """
        if uow.is_active:
            # Job processing has not started yet
            # Let the processing complete - do no updates to Scheduler records
            msg = 'Suppressed creating uow for %s in timeperiod %s; job record is in %s; uow is in %s' \
                  % (job_record.process_name, job_record.timeperiod, job_record.state, uow.state)
            self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
        elif uow.is_processed:
            self.timetable.update_job_record(job_record, uow, job.STATE_PROCESSED)
        elif uow.is_canceled:
            self.timetable.update_job_record(job_record, uow, job.STATE_SKIPPED)
        else:
            msg = 'Unknown state %s for job record %s in timeperiod %s for %s' \
                  % (uow.state, job_record.db_id, job_record.timeperiod, job_record.process_name)
            self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg)
        # rebuild the timetable tree so dependant nodes observe the updated Job state
        timetable_tree = self.timetable.get_tree(job_record.process_name)
        timetable_tree.build_tree()

    def _process_state_in_progress(self, job_record):
        """ method that takes care of processing job records in STATE_IN_PROGRESS state """
        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        end_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        uow = self.uow_dao.get_one(job_record.related_unit_of_work)
        if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
            self.__process_non_finalizable_job(job_record, uow, job_record.timeperiod, end_timeperiod)
        elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
            self.__process_finalizable_job(job_record, uow)
        else:
            # timeperiod is ahead of the wall clock - log as an error and do nothing
            msg = 'Job record %s has timeperiod from future %s vs current time %s' \
                  % (job_record.db_id, job_record.timeperiod, actual_timeperiod)
            self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)

    def _process_state_final_run(self, job_record):
        """method takes care of processing job records in STATE_FINAL_RUN state"""
        raise NotImplementedError('Method _process_state_final_run is not supported by %s' % self.__class__.__name__)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/scheduler/state_machine_simple_dicrete.py",
"copies": "1",
"size": "5844",
"license": "bsd-3-clause",
"hash": 6921274129728505000,
"line_mean": 54.6571428571,
"line_max": 118,
"alpha_frac": 0.6461327858,
"autogenerated": false,
"ratio": 3.956668923493568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0037532585306307054,
"num_lines": 105
} |
__author__ = 'Bohdan Mushkevych'
from logging import ERROR
from synergy.db.model import job
from synergy.scheduler.scheduler_constants import STATE_MACHINE_CONTINUOUS
from synergy.scheduler.abstract_state_machine import AbstractStateMachine
from synergy.system import time_helper
from synergy.conf import context
class StateMachineContinuous(AbstractStateMachine):
    """ State Machine of 6 states is expected to spawn multiple UOWs per job/timeperiod.
        Job timeperiods boundaries are meant to be discrete/fixed
        in comparison to StateMachineRecomputing this one does not re-compute processing boundaries """

    def __init__(self, logger, timetable, name=STATE_MACHINE_CONTINUOUS):
        super(StateMachineContinuous, self).__init__(logger, timetable, name)

    @property
    def run_on_active_timeperiod(self):
        """ :return: True, as we allow multiple runs on a given timeperiod """
        return True

    def notify(self, uow):
        """ callback for a completed unit_of_work: finalizes the Job iff it is in STATE_FINAL_RUN
        :param uow: completed unit_of_work whose associated Job may need a state change """
        tree = self.timetable.get_tree(uow.process_name)
        node = tree.get_node(uow.process_name, uow.timeperiod)
        job_record = node.job_record
        if not job_record.is_final_run:
            self.logger.info('Suppressing state change for Job {0}@{1}, since it is not in STATE_FINAL_RUN'
                             .format(uow.process_name, uow.timeperiod))
            return
        self._process_state_final_run(job_record)
        self.mq_transmitter.publish_job_status(job_record)

    def _compute_next_job_state(self, job_record):
        """ maps the Job's timeperiod vs the wall clock onto its next state
        :return: job.STATE_IN_PROGRESS or job.STATE_FINAL_RUN
        :raise ValueError: if the Job's timeperiod lies in the future """
        time_qualifier = context.process_context[job_record.process_name].time_qualifier
        actual_timeperiod = time_helper.actual_timeperiod(time_qualifier)
        is_job_finalizable = self.timetable.is_job_record_finalizable(job_record)
        if job_record.timeperiod == actual_timeperiod or is_job_finalizable is False:
            return job.STATE_IN_PROGRESS
        elif job_record.timeperiod < actual_timeperiod and is_job_finalizable is True:
            return job.STATE_FINAL_RUN
        else:
            msg = 'Job {0} has timeperiod {1} from the future vs current timeperiod {2}' \
                .format(job_record.db_id, job_record.timeperiod, actual_timeperiod)
            self._log_message(ERROR, job_record.process_name, job_record.timeperiod, msg)
            raise ValueError(msg)

    def _process_state_embryo(self, job_record):
        """ method that takes care of processing job records in STATE_EMBRYO state"""
        uow, is_duplicate = self.insert_and_publish_uow(job_record, 0, 0)
        try:
            target_state = self._compute_next_job_state(job_record)
            self.update_job(job_record, uow, target_state)
        except ValueError:
            # do no processing for the future timeperiods
            pass

    def _process_state_in_progress(self, job_record):
        """ method that takes care of processing job records in STATE_IN_PROGRESS state"""

        def _process_state(target_state, uow):
            # local helper: spawn a follow-up UOW once the current one has finished
            if uow.is_active:
                # Large Job processing takes more than 1 tick of the Scheduler
                # Let the Job processing complete - do no updates to Scheduler records
                pass
            elif uow.is_finished:
                # create new UOW to cover new inserts
                new_uow, is_duplicate = self.insert_and_publish_uow(job_record, 0, int(uow.end_id) + 1)
                self.update_job(job_record, new_uow, target_state)

        uow = self.uow_dao.get_one(job_record.related_unit_of_work)
        try:
            target_state = self._compute_next_job_state(job_record)
            _process_state(target_state, uow)
        except ValueError:
            # do no processing for the future timeperiods
            pass
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/state_machine_continuous.py",
"copies": "1",
"size": "3754",
"license": "bsd-3-clause",
"hash": -3075468334046331000,
"line_mean": 44.7804878049,
"line_max": 107,
"alpha_frac": 0.6574320725,
"autogenerated": false,
"ratio": 3.9063475546305932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018828692103428947,
"num_lines": 82
} |
__author__ = 'Bohdan Mushkevych'
from logging import INFO
from synergy.db.model import job
from synergy.system import time_helper
from synergy.system.immutable_dict import ImmutableDict
from synergy.conf import context
class DependentOnSummary(object):
    """ This structure is compiled to represent a composite state of dependent_on TreeNodes """

    def __init__(self, tree_node):
        # the TreeNode whose dependent_on nodes are being summarized
        self.tree_node = tree_node
        # contains TreeNodes whose Job are not in finished state
        self.unfinished = []
        # contains TreeNodes whose Job are not in [STATE_PROCESSED, STATE_NOOP]
        self.unprocessed = []
        # contains TreeNodes whose Job neither active nor in [STATE_PROCESSED, STATE_NOOP]
        self.unhealthy = []
        # contains TreeNodes whose Job are in STATE_SKIPPED
        self.skipped = []

    def enlist(self, tree_node):
        """ classifies a dependent_on node into the summary buckets (a node may land in several) """
        assert isinstance(tree_node, TreeNode)
        if not tree_node.job_record.is_finished:
            self.unfinished.append(tree_node)
        if not (tree_node.job_record.is_processed or tree_node.job_record.is_noop):
            self.unprocessed.append(tree_node)
        if not (tree_node.job_record.is_active or tree_node.job_record.is_processed or tree_node.job_record.is_noop):
            self.unhealthy.append(tree_node)
        if tree_node.job_record.is_skipped:
            self.skipped.append(tree_node)

    @property
    def all_finished(self):
        # True if all dependent_on Jobs are finished
        return len(self.unfinished) == 0

    @property
    def all_processed(self):
        # True if all dependent_on Jobs are in [STATE_PROCESSED, STATE_NOOP]
        return len(self.unprocessed) == 0

    @property
    def all_healthy(self):
        # True if all dependent_on Jobs are either active or in [STATE_PROCESSED, STATE_NOOP]
        return len(self.unhealthy) == 0

    @property
    def skipped_present(self):
        # True if among dependent_on periods are some in STATE_SKIPPED
        return len(self.skipped) != 0

    def _write_log(self, level: int, msg: str):
        """ logs *msg* via the Scheduler logger and mirrors it into the TreeNode's log """
        from synergy.system.system_logger import get_logger
        from synergy.scheduler.scheduler_constants import PROCESS_SCHEDULER
        logger = get_logger(PROCESS_SCHEDULER)
        logger.log(level, msg)
        self.tree_node.add_log_entry(msg)

    def _build_str(self, collection: list, description: str):
        """ renders 'TreeNode <node> <description>: <comma-separated blockers>' """
        blockers = ','.join([str(e) for e in collection])
        return f'TreeNode {self.tree_node} {description}: {blockers}'

    def log_unfinished(self, level: int):
        # bug fix: descriptions no longer carry a trailing ':', since _build_str appends
        # one itself - the original rendered a doubled colon ('...unfinished:: a,b')
        _summary = self._build_str(self.unfinished, 'is blocked by unfinished')
        self._write_log(level, _summary)

    def log_unprocessed(self, level: int):
        _summary = self._build_str(self.unprocessed, 'is blocked by unprocessed')
        self._write_log(level, _summary)

    def log_skipped(self, level: int):
        _summary = self._build_str(self.skipped, 'has skipped among its dependent_on')
        self._write_log(level, _summary)
class AbstractTreeNode(object):
    """ Base class for nodes of a Timetable process tree.
    A node binds a process to a timeperiod and holds the Job record reflecting
    the processing state of that (process_name, timeperiod) pair. """
    def __init__(self, tree, parent, process_name, timeperiod, job_record):
        """
        :param tree: the tree this node belongs to
        :param parent: parent TreeNode, or None for the root
        :param process_name: name of the process governed by this node
        :param timeperiod: timeperiod covered by this node
        :param job_record: associated db.model.job record, or None if not yet assigned
        """
        self.tree = tree
        self.parent = parent
        self.process_name = process_name
        self.timeperiod = timeperiod
        self.job_record = job_record
        # fields self.time_qualifier and self.children are properly set in the child class
        self.time_qualifier = None
        self.children = ImmutableDict({})
    def is_finalizable(self):
        """method checks whether:
        - all counterpart of this node in dependent_on trees are finished
        - all direct children of the node are finished
        - the node itself is in active state"""
        depon_summary = self.dependent_on_summary()
        if not depon_summary.all_finished:
            depon_summary.log_unfinished(INFO)
            return False
        if self.job_record is None:
            # lazily request a Job record before inspecting its state below
            self.tree.timetable.assign_job_record(self)
        children_processed = all([child.job_record.is_finished for child in self.children.values()])
        return children_processed and self.job_record.is_active
    def validate(self):
        """method traverse tree and performs following activities:
        * requests a job record in STATE_EMBRYO if no job record is currently assigned to the node
        * requests nodes for reprocessing, if STATE_PROCESSED node relies on unfinalized nodes
        * requests node for skipping if it is daily node and all 24 of its Hourly nodes are in STATE_SKIPPED state"""
        # step 1: request Job record if current one is not set
        if self.job_record is None:
            self.tree.timetable.assign_job_record(self)
        # step 2: define if current node has a younger sibling
        next_timeperiod = time_helper.increment_timeperiod(self.time_qualifier, self.timeperiod)
        has_younger_sibling = next_timeperiod in self.parent.children
        # step 3: define if all children are done and if perhaps they all are in STATE_SKIPPED
        # children are validated depth-first before their states are inspected
        all_children_skipped = True
        all_children_finished = True
        for timeperiod, child in self.children.items():
            child.validate()
            if child.job_record.is_active:
                all_children_finished = False
            if not child.job_record.is_skipped:
                all_children_skipped = False
        # step 4: request this node's reprocessing if it is enroute to STATE_PROCESSED
        # while some of its children are still performing processing
        if all_children_finished is False and self.job_record.is_finished:
            self.tree.timetable.reprocess_tree_node(self)
        # step 5: verify if this node should be transferred to STATE_SKIPPED
        # algorithm is following:
        # point a: node must have children
        # point b: existence of a younger sibling means that the tree contains another node of the same level
        # thus - should the tree.build_timeperiod be not None - the children level of this node is fully constructed
        # point c: if all children of this node are in STATE_SKIPPED then we will set this node state to STATE_SKIPPED
        if len(self.children) != 0 \
                and all_children_skipped \
                and self.tree.build_timeperiod is not None \
                and has_younger_sibling is True \
                and not self.job_record.is_skipped:
            self.tree.timetable.skip_tree_node(self)
    def add_log_entry(self, entry):
        """ :db.model.job record holds event log, that can be accessed by MX
        this method adds a record and removes oldest one if necessary """
        event_log = self.job_record.event_log
        # newest entries sit at index 0; the oldest entry (tail) is evicted
        # NOTE(review): trimming triggers only when the size already exceeds
        # EVENT_LOG_MAX_SIZE, so the log can hold MAX+1 entries - confirm intended
        if len(event_log) > job.EVENT_LOG_MAX_SIZE:
            del event_log[-1]
        event_log.insert(0, entry)
    def find_counterpart_in(self, tree_b):
        """ Finds a TreeNode counterpart for this node in tree_b
        :param tree_b: target tree that hosts counterpart to this node
        :return: TreeNode from tree_b that has the same timeperiod as self.timeperiod,
            or None if no counterpart ware found
        """
        tree_b_hierarchy_entry = tree_b.process_hierarchy.get_by_qualifier(self.time_qualifier)
        if not tree_b_hierarchy_entry:
            # special case when tree with more levels depends on the tree with smaller amount of levels
            # for example ThreeLevel Financial tree depends on TwoLevel Google Channel
            # in this case - we just verify time-periods that matches in both trees;
            # for levels that have no match, we assume that dependency does not exists
            # for example Financial Monthly has no counterpart in Google Daily Report -
            # so we assume that its not blocked
            node_b = None
        else:
            node_b = tree_b.get_node(tree_b_hierarchy_entry.process_entry.process_name, self.timeperiod)
        return node_b
    def dependent_on_summary(self):
        """ method iterates over all nodes that provide dependency to the current node,
        and compile composite state of them all
        :return instance of <tree_node.DependencySummary>
        """
        dependency_summary = DependentOnSummary(self)
        for dependent_on in self.tree.dependent_on:
            node_b = self.find_counterpart_in(dependent_on)
            if node_b is None:
                # special case when counterpart tree has no process with corresponding time_qualifier
                # for example Financial Monthly has no counterpart in Third-party Daily Report -
                # so we assume that it's not blocked
                continue
            dependency_summary.enlist(node_b)
        return dependency_summary
class TreeNode(AbstractTreeNode):
    """ A regular (non-root) node of a process tree, bound to a concrete process and timeperiod """

    def __init__(self, tree, parent, process_name, timeperiod, job_record):
        super(TreeNode, self).__init__(tree, parent, process_name, timeperiod, job_record)
        self.time_qualifier = context.process_context[process_name].time_qualifier
        # bottom-level processes have no child level in the hierarchy and hold no children
        if tree.process_hierarchy.get_child_by_qualifier(self.time_qualifier):
            self.children = dict()
        else:
            self.children = ImmutableDict({})

    def __str__(self) -> str:
        if self.job_record:
            job_state = self.job_record.state
        else:
            job_state = 'unknown'
        return f'{self.tree.tree_name}.{self.process_name}@{self.timeperiod}({job_state})'
class RootNode(AbstractTreeNode):
    """ The artificial top-level node of a process tree: no process, no timeperiod, no job record """

    def __init__(self, tree):
        super(RootNode, self).__init__(tree, None, None, None, None)
        self.time_qualifier = None
        self.children = {}

    def __str__(self) -> str:
        return f'{self.tree.tree_name}.root'
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/scheduler/tree_node.py",
"copies": "1",
"size": "9847",
"license": "bsd-3-clause",
"hash": 5602275472174949000,
"line_mean": 41.4439655172,
"line_max": 118,
"alpha_frac": 0.6525845435,
"autogenerated": false,
"ratio": 4.065648224607762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002252975077852574,
"num_lines": 232
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, DictField, ListField
from synergy.scheduler.scheduler_constants import EXCHANGE_UTILS
class DaemonProcessEntry(BaseDocument):
    """ Non-persistent model. This class presents Process Context Entry record """
    # name of the process; serves as the document key
    process_name = StringField()
    # class to instantiate for this daemon - presumably a fully qualified name; confirm against callers
    classname = StringField()
    # short token used to derive queue/routing/pid/log names (see daemon_context_entry)
    token = StringField()
    # MQ wiring for the process
    mq_queue = StringField()
    mq_exchange = StringField()
    mq_routing_key = StringField()
    # process-level arguments
    arguments = DictField()
    present_on_boxes = ListField(null=True)  # list of boxes where this process is monitored by the Supervisor
    pid_filename = StringField()
    log_filename = StringField()
    @classmethod
    def key_fields(cls):
        """ the entry is keyed by the process name alone """
        return cls.process_name.name
def daemon_context_entry(process_name,
                         classname,
                         token,
                         exchange=EXCHANGE_UTILS,
                         present_on_boxes=None,
                         arguments=None,
                         queue=None,
                         routing=None,
                         pid_file=None,
                         log_file=None):
    """ forms process context entry; queue, routing key, pid and log file names
        default to values derived from the process token """
    if arguments is not None:
        assert isinstance(arguments, dict)
    routing_prefix = 'routing_'
    queue_prefix = 'queue_'
    suffix = '_daemon'
    # derive any omitted settings from the token
    if queue is None:
        queue = queue_prefix + token + suffix
    if routing is None:
        routing = routing_prefix + token + suffix
    if log_file is None:
        log_file = token + suffix + '.log'
    if pid_file is None:
        pid_file = token + suffix + '.pid'
    return DaemonProcessEntry(
        process_name=process_name,
        classname=classname,
        token=token,
        mq_queue=queue,
        mq_routing_key=routing,
        mq_exchange=exchange,
        present_on_boxes=present_on_boxes,
        arguments=arguments if arguments is not None else dict(),
        log_filename=log_file,
        pid_filename=pid_file)
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/model/daemon_process_entry.py",
"copies": "1",
"size": "2032",
"license": "bsd-3-clause",
"hash": 6181570959778046000,
"line_mean": 34.649122807,
"line_max": 110,
"alpha_frac": 0.6146653543,
"autogenerated": false,
"ratio": 4.360515021459228,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5475180375759228,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, IntegerField, BooleanField, DateTimeField
# DB field names shared by the raw-data documents
TIMEPERIOD = 'timeperiod'
TIMESTAMP = 'timestamp'
CREATION_TIME = 'creation_time'
SESSION_ID = 'session_id'
IP = 'ip'
OS = 'os'
BROWSER = 'browser'
USER_ID = 'user_id'
LANGUAGE = 'language'
COUNTRY = 'country'
PAGE = 'page'
USER = 'user'
DOMAIN_NAME = 'domain'
PAGE_VIEW = 'page_view'
SCREEN_X = 'screen_x'
SCREEN_Y = 'screen_y'
TOTAL_DURATION = 'total_duration'
NUMBER_OF_UNIQUE_VISITORS = 'number_of_uniques'
NUMBER_OF_VISITS = 'number_of_visits'
NUMBER_OF_ENTRIES = 'number_of_entries'
NUMBER_OF_PAGEVIEWS = 'number_of_pageviews'
# FAMILIES
# NOTE(review): FAMILY_* look like column-family / grouping names for aggregated
# site statistics - confirm against the consumers of this module
FAMILY_STAT = 'stat'
FAMILY_SITES = 'site'
FAMILY_USER_PROFILE = 'user_profile'
FAMILY_SITE_PROFILE = 'site_profile'
FAMILY_BROWSING_HISTORY = 'browsing_history'
FAMILY_ENTRIES = 'entries'
FAMILY_VISITS = 'visit'
FAMILY_PAGEVIEWS = 'pageview'
FAMILY_DURATION = 'duration'
FAMILY_SESSIONS = 'session'
FAMILY_COUNTRIES = 'country'
FAMILY_OS = 'os'
FAMILY_BROWSERS = 'browser'
FAMILY_SCREEN_RESOLUTIONS = 'screen_resolution'
FAMILY_LANGUAGES = 'language'
class RawData(BaseDocument):
    """ Non-persistent model. Instance of this class presents single message to the SingleSessionWorker """
    # identity fields: together they form the composite key of the message
    domain_name = StringField(DOMAIN_NAME)
    timestamp = DateTimeField(TIMESTAMP)
    session_id = StringField(SESSION_ID)
    # visitor attributes captured with the hit
    ip = StringField(IP)
    screen_x = IntegerField(SCREEN_X)
    screen_y = IntegerField(SCREEN_Y)
    os = StringField(OS)
    browser = StringField(BROWSER)
    language = StringField(LANGUAGE)
    country = StringField(COUNTRY)
    is_page_view = BooleanField(PAGE_VIEW)
    @BaseDocument.key.getter
    def key(self):
        """ composite key: (domain_name, timestamp, session_id) """
        return self.domain_name, self.timestamp, self.session_id
    @key.setter
    def key(self, value):
        """ :param value: tuple (domain_name, timestamp, session_id) """
        self.domain_name = value[0]
        self.timestamp = value[1]
        self.session_id = value[2]
    @property
    def screen_res(self):
        """ screen resolution as a (screen_x, screen_y) tuple """
        return self.screen_x, self.screen_y
    @screen_res.setter
    def screen_res(self, value):
        """ :param value: tuple (screen_x, screen_y) """
        self.screen_x = value[0]
        self.screen_y = value[1]
| {
"repo_name": "eggsandbeer/scheduler",
"path": "db/model/raw_data.py",
"copies": "1",
"size": "2161",
"license": "bsd-3-clause",
"hash": -5514135305181858000,
"line_mean": 26.0125,
"line_max": 107,
"alpha_frac": 0.6992133272,
"autogenerated": false,
"ratio": 2.9765840220385673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41757973492385675,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ListField
class TimetableTreeEntry(BaseDocument):
    """ Non-persistent model. Class presents single process tree (an atomic entry for the Timetable) """
    # unique name of the tree; serves as the document key
    tree_name = StringField()
    # trees this one depends on - presumably a list of tree names; confirm against callers
    dependent_on = ListField()
    # processes enclosed by this tree
    enclosed_processes = ListField()
    # display name and hosting page for the MX interface
    mx_name = StringField()
    mx_page = StringField()
    @classmethod
    def key_fields(cls):
        """ the entry is keyed by the tree name alone """
        return cls.tree_name.name
def timetable_tree_entry(tree_name,
                         enclosed_processes,
                         dependent_on=None,
                         mx_name=None,
                         mx_page=None):
    """ creates timetable context entry

    :param tree_name: name of the process tree
    :param enclosed_processes: non-string collection of process names enclosed by this tree
    :param dependent_on: optional non-string collection of tree names this tree depends on;
                         None (the default) is treated as "no dependencies"
    :param mx_name: optional display name for the MX interface
    :param mx_page: optional MX page hosting this tree
    :return: TimetableTreeEntry instance
    """
    assert enclosed_processes is not None and not isinstance(enclosed_processes, str)
    # BUGFIX: the original `assert dependent_on is not None` contradicted the
    # declared default of None and made every default-argument call fail;
    # treat None as an empty dependency list instead
    if dependent_on is None:
        dependent_on = []
    assert not isinstance(dependent_on, str)
    timetable_entry = TimetableTreeEntry(tree_name=tree_name,
                                         enclosed_processes=enclosed_processes,
                                         dependent_on=dependent_on,
                                         mx_name=mx_name,
                                         mx_page=mx_page)
    return timetable_entry
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/model/timetable_tree_entry.py",
"copies": "1",
"size": "1261",
"license": "bsd-3-clause",
"hash": -5698079545335060000,
"line_mean": 35.0285714286,
"line_max": 104,
"alpha_frac": 0.5804916733,
"autogenerated": false,
"ratio": 4.602189781021898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5682681454321898,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ListField
# DB field names of the TimetableTreeEntry document
TREE_NAME = 'tree_name'
DEPENDENT_ON = 'dependent_on'
ENCLOSED_PROCESSES = 'enclosed_processes'
MX_PAGE = 'mx_page'
MX_NAME = 'mx_name'
class TimetableTreeEntry(BaseDocument):
    """ Non-persistent model. Class presents single process tree (an atomic entry for the Timetable) """
    # unique name of the tree; serves as the document key
    tree_name = StringField(TREE_NAME)
    # trees this one depends on - presumably a list of tree names; confirm against callers
    dependent_on = ListField(DEPENDENT_ON)
    # processes enclosed by this tree
    enclosed_processes = ListField(ENCLOSED_PROCESSES)
    # display name and hosting page for the MX interface
    mx_name = StringField(MX_NAME)
    mx_page = StringField(MX_PAGE)
    @BaseDocument.key.getter
    def key(self):
        """ the entry is keyed by the tree name alone """
        return self.tree_name
    @key.setter
    def key(self, value):
        """ :param value: name of the tree """
        self.tree_name = value
def timetable_tree_entry(tree_name,
                         enclosed_processes,
                         dependent_on=None,
                         mx_name=None,
                         mx_page=None):
    """ creates timetable context entry

    :param tree_name: name of the process tree
    :param enclosed_processes: non-string collection of process names enclosed by this tree
    :param dependent_on: optional non-string collection of tree names this tree depends on;
                         None (the default) is treated as "no dependencies"
    :param mx_name: optional display name for the MX interface
    :param mx_page: optional MX page hosting this tree
    :return: TimetableTreeEntry instance
    """
    assert enclosed_processes is not None and not isinstance(enclosed_processes, str)
    # BUGFIX: the original `assert dependent_on is not None` contradicted the
    # declared default of None and made every default-argument call fail;
    # treat None as an empty dependency list instead
    if dependent_on is None:
        dependent_on = []
    assert not isinstance(dependent_on, str)
    timetable_entry = TimetableTreeEntry(tree_name=tree_name,
                                         enclosed_processes=enclosed_processes,
                                         dependent_on=dependent_on,
                                         mx_name=mx_name,
                                         mx_page=mx_page)
    return timetable_entry
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/model/timetable_tree_entry.py",
"copies": "1",
"size": "1527",
"license": "bsd-3-clause",
"hash": 5522360935908390000,
"line_mean": 32.9333333333,
"line_max": 104,
"alpha_frac": 0.6018336608,
"autogenerated": false,
"ratio": 4.0611702127659575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5163003873565958,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField
# field names of the QueueContextEntry document
MQ_QUEUE = 'mq_queue'
MQ_EXCHANGE = 'mq_exchange'
MQ_ROUTING_KEY = 'mq_routing_key'
class QueueContextEntry(BaseDocument):
    """ Non-persistent model. This class presents Queue Context Entry record """
    # name of the MQ queue; serves as the document key
    mq_queue = StringField(MQ_QUEUE)
    mq_exchange = StringField(MQ_EXCHANGE)
    mq_routing_key = StringField(MQ_ROUTING_KEY)
    @BaseDocument.key.getter
    def key(self):
        """ the entry is keyed by the queue name alone """
        return self.mq_queue
    @key.setter
    def key(self, value):
        """ :param value: name of the mq queue """
        self.mq_queue = value
def queue_context_entry(exchange,
                        queue_name,
                        routing=None):
    """ forms queue's context entry; the routing key defaults to the queue name """
    effective_routing = queue_name if routing is None else routing
    return QueueContextEntry(mq_queue=queue_name,
                             mq_exchange=exchange,
                             mq_routing_key=effective_routing)
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/model/queue_context_entry.py",
"copies": "1",
"size": "1049",
"license": "bsd-3-clause",
"hash": -8475873796806015000,
"line_mean": 26.6052631579,
"line_max": 80,
"alpha_frac": 0.6081982841,
"autogenerated": false,
"ratio": 3.9141791044776117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003248862897985705,
"num_lines": 38
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, DateTimeField
# Life-cycle states of a Step record
# Step record created in the DB
# Next valid states: STATE_IN_PROGRESS
STATE_EMBRYO = 'state_embryo'
# Step execution started by a worker
STATE_IN_PROGRESS = 'state_in_progress'
# Step was successfully processed by the worker
STATE_PROCESSED = 'state_processed'
# Job has been manually marked as SKIPPED via MX
# all non-completed Steps are marked as STATE_CANCELED
STATE_CANCELED = 'state_canceled'
# Step has failed with an exception during the execution
STATE_INVALID = 'state_invalid'
# Step was completed, but no data was found to process
STATE_NOOP = 'state_noop'
class Step(BaseDocument):
    """ Module represents persistent Model for a single step in a flow """
    db_id = ObjectIdField(name='_id', null=True)
    # identity: (flow_name, step_name, timeperiod) form the composite key
    flow_name = StringField()
    step_name = StringField()
    timeperiod = StringField(null=True)
    state = StringField(choices=[STATE_INVALID, STATE_EMBRYO, STATE_IN_PROGRESS,
                                 STATE_PROCESSED, STATE_CANCELED, STATE_NOOP])
    # life-cycle timestamps
    created_at = DateTimeField()
    started_at = DateTimeField()
    finished_at = DateTimeField()
    # db_id of the Flow record this step belongs to
    related_flow = ObjectIdField()
    @classmethod
    def key_fields(cls):
        """ composite key: (flow_name, step_name, timeperiod) """
        return cls.flow_name.name, cls.step_name.name, cls.timeperiod.name
    @property
    def is_failed(self):
        # True for terminal failure states
        return self.state in [STATE_INVALID, STATE_CANCELED]
    @property
    def is_processed(self):
        return self.state == STATE_PROCESSED
    @property
    def is_in_progress(self):
        return self.state == STATE_IN_PROGRESS
# commonly used Step field names, resolved from the field descriptors
TIMEPERIOD = Step.timeperiod.name
FLOW_NAME = Step.flow_name.name
STEP_NAME = Step.step_name.name
RELATED_FLOW = Step.related_flow.name
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/db/model/step.py",
"copies": "1",
"size": "1800",
"license": "bsd-3-clause",
"hash": -7769299203127902000,
"line_mean": 27.5714285714,
"line_max": 80,
"alpha_frac": 0.7066666667,
"autogenerated": false,
"ratio": 3.592814371257485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4799481037957485,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, DateTimeField
# Flow execution modes (see Flow.run_mode for the override rules)
RUN_MODE_NOMINAL = 'run_mode_nominal'
RUN_MODE_RECOVERY = 'run_mode_recovery'
# Life-cycle states of a Flow record
# Flow can get into STATE_INVALID if:
# a. related Job was marked for reprocessing via MX
# b. have failed with an exception at the step level
# NOTICE: FlowDriver changes STATE_INVALID -> STATE_IN_PROGRESS during re-posting
STATE_INVALID = 'state_invalid'
# given Flow was successfully executed
# This is a final state
STATE_PROCESSED = 'state_processed'
# given Flow had no steps to process
# This is a final state
STATE_NOOP = 'state_noop'
# FlowDriver triggers the flow execution.
# Next valid states: STATE_NOOP, STATE_PROCESSED, STATE_INVALID
STATE_IN_PROGRESS = 'state_in_progress'
# Flow record created in the DB
# Next valid states: STATE_IN_PROGRESS
STATE_EMBRYO = 'state_embryo'
class Flow(BaseDocument):
    """ class presents status for a Flow run """
    db_id = ObjectIdField(name='_id', null=True)
    # identity: (flow_name, timeperiod) form the composite key
    flow_name = StringField()
    timeperiod = StringField()
    # boundaries of the timeperiod range covered by this run
    start_timeperiod = StringField()
    end_timeperiod = StringField()
    state = StringField(choices=[STATE_EMBRYO, STATE_IN_PROGRESS, STATE_PROCESSED, STATE_NOOP, STATE_INVALID])
    # run_mode override rules:
    # - default value is read from ProcessEntry.arguments['run_mode']
    # - if the ProcessEntry.arguments['run_mode'] is None then run_mode is assumed `run_mode_nominal`
    # - Flow.run_mode, if specified, overrides ProcessEntry.arguments['run_mode']
    # - UOW.arguments['run_mode'] overrides Flow.run_mode
    run_mode = StringField(choices=[RUN_MODE_NOMINAL, RUN_MODE_RECOVERY])
    # life-cycle timestamps
    created_at = DateTimeField()
    started_at = DateTimeField()
    finished_at = DateTimeField()
    @classmethod
    def key_fields(cls):
        """ composite key: (flow_name, timeperiod) """
        return cls.flow_name.name, cls.timeperiod.name
# commonly used Flow field names, resolved from the field descriptors
TIMEPERIOD = Flow.timeperiod.name
FLOW_NAME = Flow.flow_name.name
| {
"repo_name": "mushkevych/synergy_flow",
"path": "flow/db/model/flow.py",
"copies": "1",
"size": "1967",
"license": "bsd-3-clause",
"hash": -7517891078223204000,
"line_mean": 32.3389830508,
"line_max": 110,
"alpha_frac": 0.7269954245,
"autogenerated": false,
"ratio": 3.4691358024691357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4696131226969135,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, IntegerField, DictField, DateTimeField
from synergy.scheduler.scheduler_constants import TYPE_FREERUN, TYPE_MANAGED
# DB field names of the UnitOfWork document
TIMEPERIOD = 'timeperiod'
START_TIMEPERIOD = 'start_timeperiod'  # lower boundary (as Synergy date) of the period that needs to be processed
END_TIMEPERIOD = 'end_timeperiod'  # upper boundary (as Synergy date) of the period that needs to be processed
START_OBJ_ID = 'start_obj_id'  # lower boundary (as DB _id) of the period that needs to be processed
END_OBJ_ID = 'end_obj_id'  # upper boundary (as DB _id) of the period that needs to be processed
STATE = 'state'
CREATED_AT = 'created_at'
STARTED_AT = 'started_at'
FINISHED_AT = 'finished_at'
NUMBER_OF_AGGREGATED_DOCUMENTS = 'number_of_aggregated_documents'
NUMBER_OF_PROCESSED_DOCUMENTS = 'number_of_processed_documents'
NUMBER_OF_RETRIES = 'number_of_retries'
PROCESS_NAME = 'process_name'  # process name of the aggregator/alarm/etc that processed the range
SOURCE = 'source'  # defines source of data for the computation
SINK = 'sink'  # defines sink where the aggregated data will be inserted
PROCESSED_LOG = 'processed_log'  # log contains list of processed files or other artifacts
FILE_NAME = 'file_name'  # Name of processed file
MD5 = 'md5'  # MD5 tag for the hash of the file
ARGUMENTS = 'arguments'  # task-level arguments that could supplement or override process-level ones
UNIT_OF_WORK_TYPE = 'unit_of_work_type'  # whether the unit_of_work is TYPE_MANAGED or TYPE_FREERUN
# Life-cycle states of a UnitOfWork record
# UOW was successfully processed by the worker
STATE_PROCESSED = 'state_processed'
# UOW was received by the worker and it started the processing
STATE_IN_PROGRESS = 'state_in_progress'
# UOW was instantiated and send to the worker
STATE_REQUESTED = 'state_requested'
# Job has been manually marked as SKIPPED via MX
# and so the associated UOW got cancelled
# or the life-support threshold has been crossed for failing UOW
STATE_CANCELED = 'state_canceled'
# UOW can get into STATE_INVALID if:
# a. related Job was marked for reprocessing via MX
# b. have failed with an exception at the worker level
# NOTICE: GarbageCollector changes STATE_INVALID -> STATE_REQUESTED during re-posting
STATE_INVALID = 'state_invalid'
# UOW was received by a worker,
# but no data was found to process
STATE_NOOP = 'state_noop'
class UnitOfWork(BaseDocument):
    """ Module represents persistent Model for atomic unit of work performed by the system.
        UnitOfWork Instances are stored in the <unit_of_work> collection """
    db_id = ObjectIdField('_id', null=True)
    # identity: (process_name, timeperiod, start_id, end_id) form the composite key
    process_name = StringField(PROCESS_NAME)
    timeperiod = StringField(TIMEPERIOD)
    # timeperiod and DB-id boundaries of the range to process
    start_timeperiod = StringField(START_TIMEPERIOD)
    end_timeperiod = StringField(END_TIMEPERIOD)
    start_id = ObjectIdField(START_OBJ_ID)
    end_id = ObjectIdField(END_OBJ_ID)
    # data source and sink for the computation
    source = StringField(SOURCE)
    sink = StringField(SINK)
    # task-level arguments that could supplement or override process-level ones
    arguments = DictField(ARGUMENTS)
    state = StringField(STATE, choices=[STATE_INVALID, STATE_REQUESTED, STATE_IN_PROGRESS,
                                        STATE_PROCESSED, STATE_CANCELED, STATE_NOOP])
    # life-cycle timestamps
    created_at = DateTimeField(CREATED_AT)
    started_at = DateTimeField(STARTED_AT)
    finished_at = DateTimeField(FINISHED_AT)
    # processing statistics
    number_of_aggregated_documents = IntegerField(NUMBER_OF_AGGREGATED_DOCUMENTS)
    number_of_processed_documents = IntegerField(NUMBER_OF_PROCESSED_DOCUMENTS)
    number_of_retries = IntegerField(NUMBER_OF_RETRIES, default=0)
    # log of processed files or other artifacts
    processed_log = DictField(PROCESSED_LOG)
    unit_of_work_type = StringField(UNIT_OF_WORK_TYPE, choices=[TYPE_MANAGED, TYPE_FREERUN])
    @property
    def key(self):
        """ composite key: (process_name, timeperiod, start_id, end_id) """
        return self.process_name, self.timeperiod, self.start_id, self.end_id
    @property
    def is_active(self):
        # UOW is still subject to processing or re-posting
        return self.state in [STATE_REQUESTED, STATE_IN_PROGRESS, STATE_INVALID]
    @property
    def is_finished(self):
        # UOW has reached a terminal state
        return self.state in [STATE_PROCESSED, STATE_CANCELED, STATE_NOOP]
    @property
    def is_processed(self):
        return self.state == STATE_PROCESSED
    @property
    def is_noop(self):
        return self.state == STATE_NOOP
    @property
    def is_canceled(self):
        return self.state == STATE_CANCELED
    @property
    def is_invalid(self):
        return self.state == STATE_INVALID
    @property
    def is_requested(self):
        return self.state == STATE_REQUESTED
    @property
    def is_in_progress(self):
        return self.state == STATE_IN_PROGRESS
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/model/unit_of_work.py",
"copies": "1",
"size": "4693",
"license": "bsd-3-clause",
"hash": -7704751275372359000,
"line_mean": 40.1666666667,
"line_max": 114,
"alpha_frac": 0.7048796079,
"autogenerated": false,
"ratio": 3.663544106167057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.986018583319952,
"avg_score": 0.001647576173507687,
"num_lines": 114
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, IntegerField, DictField, DateTimeField
# unit_of_work kinds
TYPE_MANAGED = 'type_managed'  # identifies UOW created by Abstract State Machine child for Managed Process
TYPE_FREERUN = 'type_freerun'  # identifies UOW created by FreerunStateMachine for ad-hock processing
# Life-cycle states of a UnitOfWork record
# UOW was successfully processed by the worker
STATE_PROCESSED = 'state_processed'
# UOW was received by the worker and it started the processing
STATE_IN_PROGRESS = 'state_in_progress'
# UOW was instantiated and send to the worker
STATE_REQUESTED = 'state_requested'
# Job has been manually marked as SKIPPED via MX
# and so the associated UOW got cancelled
# or the life-support threshold has been crossed for failing UOW
STATE_CANCELED = 'state_canceled'
# UOW can get into STATE_INVALID if:
# a. related Job was marked for reprocessing via MX
# b. have failed with an exception at the worker level
# NOTICE: GarbageCollector changes STATE_INVALID -> STATE_REQUESTED during re-posting
STATE_INVALID = 'state_invalid'
# UOW was received by a worker,
# but no data was found to process
STATE_NOOP = 'state_noop'
class UnitOfWork(BaseDocument):
    """ Module represents persistent Model for atomic unit of work performed by the system.
        UnitOfWork Instances are stored in the <unit_of_work> collection """
    db_id = ObjectIdField(name='_id', null=True)
    # identity: (process_name, timeperiod, start_id, end_id) form the composite key
    process_name = StringField()
    timeperiod = StringField(null=True)
    start_timeperiod = StringField(null=True)  # [synergy date] lower boundary of the period that needs to be processed
    end_timeperiod = StringField(null=True)  # [synergy date] upper boundary of the period that needs to be processed
    start_id = ObjectIdField(name='start_obj_id')  # [DB _id] lower boundary of the period that needs to be processed
    end_id = ObjectIdField(name='end_obj_id')  # [DB _id] upper boundary of the period that needs to be processed
    source = StringField(null=True)  # defines source of data for the computation
    sink = StringField(null=True)  # defines sink where the aggregated data will be saved
    arguments = DictField()  # task-level arguments that could supplement or override process-level ones
    state = StringField(choices=[STATE_INVALID, STATE_REQUESTED, STATE_IN_PROGRESS,
                                 STATE_PROCESSED, STATE_CANCELED, STATE_NOOP])
    # life-cycle timestamps
    created_at = DateTimeField()
    submitted_at = DateTimeField()
    started_at = DateTimeField()
    finished_at = DateTimeField()
    # processing statistics
    number_of_aggregated_documents = IntegerField()
    number_of_processed_documents = IntegerField()
    number_of_retries = IntegerField(default=0)
    unit_of_work_type = StringField(choices=[TYPE_MANAGED, TYPE_FREERUN])
    @classmethod
    def key_fields(cls):
        """ composite key: (process_name, timeperiod, start_id, end_id) """
        return (cls.process_name.name,
                cls.timeperiod.name,
                cls.start_id.name,
                cls.end_id.name)
    @property
    def is_active(self):
        # UOW is still subject to processing or re-posting
        return self.state in [STATE_REQUESTED, STATE_IN_PROGRESS, STATE_INVALID]
    @property
    def is_finished(self):
        # UOW has reached a terminal state
        return self.state in [STATE_PROCESSED, STATE_CANCELED, STATE_NOOP]
    @property
    def is_processed(self):
        return self.state == STATE_PROCESSED
    @property
    def is_noop(self):
        return self.state == STATE_NOOP
    @property
    def is_canceled(self):
        return self.state == STATE_CANCELED
    @property
    def is_invalid(self):
        return self.state == STATE_INVALID
    @property
    def is_requested(self):
        return self.state == STATE_REQUESTED
    @property
    def is_in_progress(self):
        return self.state == STATE_IN_PROGRESS
# commonly used UnitOfWork field names, resolved from the field descriptors
PROCESS_NAME = UnitOfWork.process_name.name
TIMEPERIOD = UnitOfWork.timeperiod.name
START_TIMEPERIOD = UnitOfWork.start_timeperiod.name
END_TIMEPERIOD = UnitOfWork.end_timeperiod.name
START_ID = UnitOfWork.start_id.name
END_ID = UnitOfWork.end_id.name
STATE = UnitOfWork.state.name
UNIT_OF_WORK_TYPE = UnitOfWork.unit_of_work_type.name
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/model/unit_of_work.py",
"copies": "1",
"size": "4120",
"license": "bsd-3-clause",
"hash": -3814641550784682000,
"line_mean": 37.1481481481,
"line_max": 120,
"alpha_frac": 0.7065533981,
"autogenerated": false,
"ratio": 3.7694419030192132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9969557037283931,
"avg_score": 0.0012876527670564256,
"num_lines": 108
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, ListField, IntegerField
# upper bound for the number of entries kept in a Job's historic log
MAX_NUMBER_OF_LOG_ENTRIES = 32
# DB field names of the Job document
TIMEPERIOD = 'timeperiod'
PROCESS_NAME = 'process_name'
START_OBJ_ID = 'start_obj_id'
END_OBJ_ID = 'end_obj_id'
STATE = 'state'
RELATED_UNIT_OF_WORK = 'related_unit_of_work'
NUMBER_OF_FAILURES = 'number_of_failures'
# contains list of MAX_NUMBER_OF_LOG_ENTRIES last log messages
HISTORIC_LOG = 'historic_log'
# Life-cycle states of a Job record
# given Job was _not_ processed by aggregator because of multiple errors/missing data
# this state allows to mute current Job and allow other timeperiods/Jobs to be processed
# only manual "re-processing" can re-run the skipped Job
STATE_SKIPPED = 'state_skipped'
# given Job was successfully processed by an aggregator
# no further processing for this Job is performed
STATE_PROCESSED = 'state_processed'
# no processing was performed for this Job
# no further processing for this Job is performed
STATE_NOOP = 'state_noop'
# Scheduler assumes that all timeperiod data is in the database, and asks an aggregator to run a "final" aggregation
# Job will be marked as STATE_PROCESSED afterwards if the processing succeed
STATE_FINAL_RUN = 'state_final_run'
# Aggregator is asked to perform a routine aggregation.
# Further state of the Job depends on the governing state machine:
# it could be either STATE_PROCESSED, STATE_IN_PROGRESS, STATE_NOOP, STATE_FINAL_RUN or STATE_SKIPPED
STATE_IN_PROGRESS = 'state_in_progress'
# Given timetable record serves as place-holder in the Tree
# TimeRecord can move to STATE_IN_PROGRESS
STATE_EMBRYO = 'state_embryo'
class Job(BaseDocument):
    """Status record for one (process_name, timeperiod) pair.

    Tracks which state-machine state the Job is in, plus bookkeeping:
    the boundary object ids of the aggregated range, the related unit
    of work, a failure counter, and a bounded historic log.
    """
    db_id = ObjectIdField('_id', null=True)
    process_name = StringField(PROCESS_NAME)
    timeperiod = StringField(TIMEPERIOD)
    start_id = ObjectIdField(START_OBJ_ID)
    end_id = ObjectIdField(END_OBJ_ID)
    state = StringField(STATE, choices=[STATE_IN_PROGRESS, STATE_PROCESSED, STATE_FINAL_RUN,
                                        STATE_EMBRYO, STATE_SKIPPED, STATE_NOOP])
    related_unit_of_work = ObjectIdField(RELATED_UNIT_OF_WORK)
    log = ListField(HISTORIC_LOG)
    number_of_failures = IntegerField(NUMBER_OF_FAILURES, default=0)

    @BaseDocument.key.getter
    def key(self):
        """Composite key: (process name, timeperiod)."""
        return self.process_name, self.timeperiod

    @key.setter
    def key(self, value):
        """:param value: tuple (name of the process, timeperiod as string in Synergy Data format)"""
        self.process_name, self.timeperiod = value[0], value[1]

    @property
    def is_active(self):
        """True while the state machine may still advance this Job."""
        return self.state in (STATE_FINAL_RUN, STATE_IN_PROGRESS, STATE_EMBRYO)

    @property
    def is_finished(self):
        """True once no further processing will be performed."""
        return self.state in (STATE_PROCESSED, STATE_SKIPPED, STATE_NOOP)

    @property
    def is_processed(self):
        """True iff the aggregator completed this Job successfully."""
        return self.state == STATE_PROCESSED

    @property
    def is_noop(self):
        """True iff no processing was performed for this Job."""
        return self.state == STATE_NOOP

    @property
    def is_skipped(self):
        """True iff the Job was muted and requires manual re-processing."""
        return self.state == STATE_SKIPPED

    @property
    def is_embryo(self):
        """True iff the Job is still a place-holder in the Tree."""
        return self.state == STATE_EMBRYO

    @property
    def is_in_progress(self):
        """True iff a routine aggregation is underway."""
        return self.state == STATE_IN_PROGRESS

    @property
    def is_final_run(self):
        """True iff the "final" aggregation was requested."""
        return self.state == STATE_FINAL_RUN
| {
"repo_name": "eggsandbeer/scheduler",
"path": "synergy/db/model/job.py",
"copies": "1",
"size": "3409",
"license": "bsd-3-clause",
"hash": -6139948443617114000,
"line_mean": 33.4343434343,
"line_max": 116,
"alpha_frac": 0.7095922558,
"autogenerated": false,
"ratio": 3.5180598555211557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9723482061204498,
"avg_score": 0.0008340100233314464,
"num_lines": 99
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField, ListField, IntegerField
# number of job events to be stored in Job.event_log, such as emission of the UOW
EVENT_LOG_MAX_SIZE = 128
# given Job was _not_ processed by aggregator because of multiple errors/missing data
# this state mutes the current Job and allows other timeperiods/Jobs to be processed
# only manual "re-processing" can re-run the skipped Job
STATE_SKIPPED = 'state_skipped'
# given Job was successfully processed by an aggregator
# no further processing for this Job is performed
STATE_PROCESSED = 'state_processed'
# no processing was performed for this Job
# no further processing for this Job is performed
STATE_NOOP = 'state_noop'
# Scheduler assumes that all timeperiod data is in the database, and asks an aggregator to run a "final" aggregation
# Job will be marked as STATE_PROCESSED afterwards if the processing succeeds
STATE_FINAL_RUN = 'state_final_run'
# Aggregator is asked to perform a routine aggregation.
# Further state of the Job depends on the governing state machine:
# it could be either STATE_PROCESSED, STATE_IN_PROGRESS, STATE_NOOP, STATE_FINAL_RUN or STATE_SKIPPED
STATE_IN_PROGRESS = 'state_in_progress'
# Given timetable record serves as place-holder in the Tree
# TimeRecord can move to STATE_IN_PROGRESS
STATE_EMBRYO = 'state_embryo'
class Job(BaseDocument):
    """Status record for one (process_name, timeperiod) pair.

    Tracks which state-machine state the Job is in, plus bookkeeping:
    the related unit of work, a failure counter, and a bounded event log.
    """
    db_id = ObjectIdField(name='_id', null=True)
    process_name = StringField()
    timeperiod = StringField()
    state = StringField(choices=[STATE_IN_PROGRESS, STATE_PROCESSED, STATE_FINAL_RUN,
                                 STATE_EMBRYO, STATE_SKIPPED, STATE_NOOP])
    related_unit_of_work = ObjectIdField()
    event_log = ListField()
    number_of_failures = IntegerField(default=0)

    @classmethod
    def key_fields(cls):
        """Field names forming the composite document key."""
        return cls.process_name.name, cls.timeperiod.name

    @property
    def is_active(self):
        """True while the state machine may still advance this Job."""
        return self.state in (STATE_FINAL_RUN, STATE_IN_PROGRESS, STATE_EMBRYO)

    @property
    def is_finished(self):
        """True once no further processing will be performed."""
        return self.state in (STATE_PROCESSED, STATE_SKIPPED, STATE_NOOP)

    @property
    def is_processed(self):
        """True iff the aggregator completed this Job successfully."""
        return self.state == STATE_PROCESSED

    @property
    def is_noop(self):
        """True iff no processing was performed for this Job."""
        return self.state == STATE_NOOP

    @property
    def is_skipped(self):
        """True iff the Job was muted and requires manual re-processing."""
        return self.state == STATE_SKIPPED

    @property
    def is_embryo(self):
        """True iff the Job is still a place-holder in the Tree."""
        return self.state == STATE_EMBRYO

    @property
    def is_in_progress(self):
        """True iff a routine aggregation is underway."""
        return self.state == STATE_IN_PROGRESS

    @property
    def is_final_run(self):
        """True iff the "final" aggregation was requested."""
        return self.state == STATE_FINAL_RUN
# module-level aliases exposing the Job field names as constants,
# derived from the field descriptors so they stay in sync with the model
TIMEPERIOD = Job.timeperiod.name
PROCESS_NAME = Job.process_name.name
STATE = Job.state.name
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/model/job.py",
"copies": "1",
"size": "2896",
"license": "bsd-3-clause",
"hash": -7586497326456092000,
"line_mean": 31.9090909091,
"line_max": 116,
"alpha_frac": 0.716160221,
"autogenerated": false,
"ratio": 3.6751269035532994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9887097722664071,
"avg_score": 0.0008378803778458715,
"num_lines": 88
} |
__author__ = 'Bohdan Mushkevych'
from odm.document import BaseDocument
from odm.fields import StringField, ObjectIdField
class MqTransmission(BaseDocument):
    """Non-persistent message model. An instance represents either:
    - a single request from the Synergy Scheduler to any worker
    - a response/report from the worker back to the Synergy Scheduler
    """
    process_name = StringField()            # name of the process to handle the schedulables
    entry_name = StringField(null=True)     # name of the schedulable, if applicable
    record_db_id = ObjectIdField()          # associated with either UOW.db_id or Job.db_id

    @BaseDocument.key.getter
    def key(self):
        """Composite key: (process name, entry name)."""
        return self.process_name, self.entry_name

    @key.setter
    def key(self, value):
        """Accept either a (process_name, entry_name) pair or a bare process name."""
        if isinstance(value, (list, tuple)):
            self.process_name, self.entry_name = value[0], value[1]
        else:
            self.process_name, self.entry_name = value, None

    def __str__(self):
        return f'{self.process_name}::{self.entry_name}#{self.record_db_id}'
| {
"repo_name": "mushkevych/scheduler",
"path": "synergy/db/model/mq_transmission.py",
"copies": "1",
"size": "1083",
"license": "bsd-3-clause",
"hash": -7772328595199005000,
"line_mean": 35.1,
"line_max": 92,
"alpha_frac": 0.6528162512,
"autogenerated": false,
"ratio": 3.8268551236749118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49796713748749116,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.