import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('murra')
mobileTemplate.setLevel(28)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Herbivore Meat")
mobileTemplate.setMeatAmount(55)
mobileTemplate.setHideType("Bristly Hide")
mobileTemplate.setHideAmount(35)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(32)
mobileTemplate.setSocialGroup("murra")
mobileTemplate.setAssistRange(2)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_murra.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_charge_2')
attacks.add('bm_defensive_2')
attacks.add('bm_slash_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('murra', mobileTemplate)
return
|
import sys
from java.util import Vector
def addSpawnArea(core):
dynamicGroups = Vector()
dynamicGroups.add('yavin4_black_sun')
dynamicGroups.add('yavin4_choku')
dynamicGroups.add('yavin4_skreeg')
dynamicGroups.add('yavin4_klinik')
core.spawnService.addDynamicSpawnArea(dynamicGroups, 5500, 0, 3500, 'yavin4')
return
|
"""Test function :func:`iris._lazy data.is_lazy_data`."""
import iris.tests as tests
import dask.array as da
import numpy as np
from iris._lazy_data import is_lazy_data
class Test_is_lazy_data(tests.IrisTest):
def test_lazy(self):
values = np.arange(30).reshape((2, 5, 3))
lazy_array = da.from_array(values, chunks="auto")
self.assertTrue(is_lazy_data(lazy_array))
def test_real(self):
real_array = np.arange(24).reshape((2, 3, 4))
self.assertFalse(is_lazy_data(real_array))
if __name__ == "__main__":
tests.main()
|
import sys
def setup(core, actor, buff):
return
def add(core, actor, buff):
core.skillModService.addSkillMod(actor, 'dot_bleed', 90)
return
def remove(core, actor, buff):
core.skillModService.deductSkillMod(actor, 'dot_bleed', 90)
return
|
from weboob.browser import AbstractBrowser, URL
from .pages import LoginPage
class HumanisBrowser(AbstractBrowser):
PARENT = 'cmes'
login = URL('epsens/(?P<client_space>.*)fr/identification/authentification.html', LoginPage)
client_space = ''
def __init__(self, login, password, baseurl, subsite, *args, **kwargs):
self.weboob = kwargs['weboob']
super(HumanisBrowser, self).__init__(login, password, baseurl, subsite, *args, **kwargs)
|
import heapq
import threading
import itertools
class PriorityQueue(object):
"""
Threadsafe priority queue based on the python heapq module.
Ties are resolved by popping the element that was added first. If
the elements are tuples (p1, p2, ..., element), the elements are
never considered for comparison!
"""
def __init__(self):
self._heap = []
self._lock = threading.Lock()
self._count = itertools.count()
def push(self, item):
item = self._expandItem(item)
with self._lock:
heapq.heappush(self._heap, item)
def pop(self):
with self._lock:
item = heapq.heappop(self._heap)
return self._reduceItem(item)
def __len__(self):
return len(self._heap)
def _expandItem(self, item):
"""
convert to internal tuple format, if necessary
"""
        c = next(self._count)
if isinstance(item, tuple) and len(item) > 1:
new = list(item)
new.insert(-1, c)
else:
new = [item, c]
return tuple(new)
def _reduceItem(self, item):
"""
restore item to original format
"""
# if the tuple is of length 2, it was created by the else case
if len(item) == 2:
return item[0]
else:
item = list(item)
item.pop(-2)
return tuple(item)
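# --- Usage sketch (illustrative, not part of the original module) ---
# Shows the FIFO tie-breaking described in the class docstring: payload
# elements are never compared, because the insertion counter always differs.
if __name__ == "__main__":
    pq = PriorityQueue()
    pq.push((1, "first"))
    pq.push((1, "second"))  # same priority as "first"
    pq.push((0, "urgent"))
    assert pq.pop() == (0, "urgent")   # lowest priority value first
    assert pq.pop() == (1, "first")    # tie resolved by insertion order
    assert pq.pop() == (1, "second")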
|
try:
    import ujson as json
except ImportError:
    # fall back to the standard library json module if ujson is unavailable
    import json
import logging
import threading
import time
import zmq
from sensorika.tools import getLocalIp
class Worker(threading.Thread):
def __init__(self, name, configFile=None, *args, **kwargs):
self.Estop = threading.Event()
        threading.Thread.__init__(self, args=args, kwargs=kwargs)
self.src = ""
self.command = []
time.sleep(0.1)
self.name = name
self.dt = 0.001
self.ns_ip = getLocalIp()
self.canGo = True
self.lastSSend = time.time()
self.data = [(time.time(), 0)]
self.wcontext = zmq.Context()
self.sync_socket = self.wcontext.socket(zmq.REP)
self.async_socket = self.wcontext.socket(zmq.PUB)
self._configFile = "." + name
self.ptimer = threading.Timer(2.0, self.populate)
if configFile:
self._configFile = configFile
        load_existing = True
try:
f = open(self._configFile, "r")
except IOError as e:
            print('Creating config file with random ports')
            self.sync_port = self.sync_socket.bind_to_random_port("tcp://*")
            self.async_port = self.async_socket.bind_to_random_port("tcp://*")
self.params = {}
self.params['port'] = self.sync_port
self.params['sync_port'] = self.sync_port
self.params['async_port'] = self.async_port
self.params['frequency'] = 10
self.params['name'] = name
f = open(self._configFile, "w")
f.write(json.dumps(self.params))
f.close()
            load_existing = False
        if load_existing:
try:
self.params = json.load(open(self._configFile, "r"))
self.sync_socket.bind("tcp://*:{0}".format(self.params['port']))
self.async_socket.bind("tcp://*:{0}".format(self.params['async_port']))
except Exception as e:
print(e)
return
self.ptimer.start()
print("Serving at sync {0} and async {1}".format(self.params['sync_port'], self.params['async_port']))
self.start()
def populate(self):
logging.debug('populating')
ctx = zmq.Context()
sock = ctx.socket(zmq.REQ)
s = "tcp://" + self.ns_ip + ":15701"
poller = zmq.Poller()
poller.register(sock, zmq.POLLIN | zmq.POLLOUT)
sock.connect(s)
        if poller.poll(3 * 1000):  # 3 s timeout (poll takes milliseconds)
sock.send_json(dict(action='register', name=self.name, port=self.params['port'],
async_port=self.params['async_port'],
ip=self.ns_ip, params=self.params))
else:
logging.error("No locator on {0}:{1}".format(self.ns_ip, 15701))
        if poller.poll(3 * 1000):  # 3 s timeout (poll takes milliseconds)
sock.recv_json()
else:
logging.error("No locator on {0}:{1}".format(self.ns_ip, 15701))
sock.close()
ctx.term()
if not self.Estop.is_set():
self.ptimer = threading.Timer(10.0, self.populate)
self.ptimer.start()
def add(self, data):
self.data.append((time.time(), data))
self.async_socket.send_json(self.data[-1])
if len(self.data) > 100:
self.data = self.data[-100:]
return data
def get(self, cnt=1):
try:
return self.command[-cnt:]
except:
return None
def run(self):
self.canGo = True
self.command = []
cnt = 0
try:
while True:
if self.Estop.is_set():
break
try:
data = self.sync_socket.recv(zmq.DONTWAIT).decode("utf8")
                except zmq.ZMQError:
time.sleep(0.001)
continue
data = json.loads(data)
senddata = None
try:
if data['action'] == 'call':
pass
if data['action'] == 'source':
if self.src:
senddata = dict(source=self.src)
if data['action'] == 'line':
if self.src:
senddata = dict(line=self.src_line)
if data['action'] == 'get':
try:
count = data['count']
except KeyError:
count = 1
if count == 1:
senddata = self.data[-1]
else:
senddata = self.data[-count:]
if data['action'] == 'set':
self.command.append((time.time(), data['data']))
if len(self.command) > 100:
self.command = self.command[-100:]
senddata = dict(status='ok')
except Exception as e:
self.sync_socket.send_json(dict(status='wrong params'))
continue
try:
self.sync_socket.send_json(senddata)
except Exception as e:
print(e)
time.sleep(self.dt)
self.sync_socket.close()
self.wcontext.term()
except Exception as e:
print(e)
self.sync_socket.send_json(dict(status='error', error=str(e)))
def stop(self):
self.sync_socket.setsockopt(zmq.LINGER, 0)
self.async_socket.setsockopt(zmq.LINGER, 0)
self.Estop.set()
self.ptimer.cancel()
self.sync_socket.close()
self.async_socket.close()
def mkPeriodicWorker(name, function, params={}, configFile=None):
w = Worker(name, configFile)
w.params.update(params)
spd = 1.0 / w.params['frequency']
def W():
while not w.Estop.is_set():
t0 = time.time()
result = function()
w.add(result)
tt = time.time() - t0
            time.sleep(max(0.0, spd - tt))
t = threading.Thread(target=W)
t.start()
return w
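# --- Usage sketch (illustrative; the port and payloads are hypothetical) ---
# A client talks JSON to the worker's sync REP socket from a REQ socket; the
# 'get' and 'set' actions below mirror the handlers in Worker.run():
#
#   import zmq
#   ctx = zmq.Context()
#   req = ctx.socket(zmq.REQ)
#   req.connect("tcp://127.0.0.1:5555")           # the worker's sync_port
#   req.send_json({"action": "set", "data": 42})
#   print(req.recv_json())                        # -> {"status": "ok"}
#   req.send_json({"action": "get", "count": 5})
#   print(req.recv_json())                        # -> last 5 (timestamp, value) pairs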
|
from .module import OpenEDXModule
__all__ = ['OpenEDXModule']
|
"""Memory watchdog: periodically read the memory usage of the main test process
and print it out, until terminated."""
import os
import sys
import time
try:
page_size = os.sysconf('SC_PAGESIZE')
except (ValueError, AttributeError):
try:
page_size = os.sysconf('SC_PAGE_SIZE')
except (ValueError, AttributeError):
page_size = 4096
while True:
sys.stdin.seek(0)
statm = sys.stdin.read()
data = int(statm.split()[5])
sys.stdout.write(" ... process data size: {data:.1f}G\n"
.format(data=data * page_size / (1024 ** 3)))
sys.stdout.flush()
time.sleep(1)
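# Usage sketch (illustrative): the parent process is expected to start this
# script with stdin bound to its own /proc/<pid>/statm, e.g.:
#
#   import subprocess, sys
#   with open("/proc/self/statm") as statm:
#       subprocess.Popen([sys.executable, "memory_watchdog.py"], stdin=statm)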
|
"""Test for the BigQuery tornadoes example."""
import logging
import unittest
import apache_beam as beam
from apache_beam.examples.cookbook import bigquery_tornadoes
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class BigQueryTornadoesTest(unittest.TestCase):
def test_basics(self):
p = TestPipeline()
rows = (p | 'create' >> beam.Create([
{'month': 1, 'day': 1, 'tornado': False},
{'month': 1, 'day': 2, 'tornado': True},
{'month': 1, 'day': 3, 'tornado': True},
{'month': 2, 'day': 1, 'tornado': True}]))
results = bigquery_tornadoes.count_tornadoes(rows)
assert_that(results, equal_to([{'month': 1, 'tornado_count': 2},
{'month': 2, 'tornado_count': 1}]))
p.run().wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
"""Triggers."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
import logging
from typing import Any
import voluptuous as vol
from homeassistant.const import CONF_ID, CONF_PLATFORM
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import IntegrationNotFound, async_get_integration
from .typing import ConfigType, TemplateVarsType
_PLATFORM_ALIASES = {
"device_automation": ("device",),
"homeassistant": ("event", "numeric_state", "state", "time_pattern", "time"),
}
async def _async_get_trigger_platform(hass: HomeAssistant, config: ConfigType) -> Any:
platform_and_sub_type = config[CONF_PLATFORM].split(".")
platform = platform_and_sub_type[0]
for alias, triggers in _PLATFORM_ALIASES.items():
if platform in triggers:
platform = alias
break
try:
integration = await async_get_integration(hass, platform)
except IntegrationNotFound:
raise vol.Invalid(f"Invalid platform '{platform}' specified") from None
try:
return integration.get_platform("trigger")
except ImportError:
raise vol.Invalid(
f"Integration '{platform}' does not provide trigger support"
) from None
async def async_validate_trigger_config(
hass: HomeAssistant, trigger_config: list[ConfigType]
) -> list[ConfigType]:
"""Validate triggers."""
config = []
for conf in trigger_config:
platform = await _async_get_trigger_platform(hass, conf)
if hasattr(platform, "async_validate_trigger_config"):
conf = await platform.async_validate_trigger_config(hass, conf)
else:
conf = platform.TRIGGER_SCHEMA(conf)
config.append(conf)
return config
async def async_initialize_triggers(
hass: HomeAssistant,
trigger_config: list[ConfigType],
action: Callable,
domain: str,
name: str,
log_cb: Callable,
home_assistant_start: bool = False,
variables: TemplateVarsType = None,
) -> CALLBACK_TYPE | None:
"""Initialize triggers."""
triggers = []
for idx, conf in enumerate(trigger_config):
platform = await _async_get_trigger_platform(hass, conf)
trigger_id = conf.get(CONF_ID, f"{idx}")
trigger_idx = f"{idx}"
trigger_data = {"id": trigger_id, "idx": trigger_idx}
info = {
"domain": domain,
"name": name,
"home_assistant_start": home_assistant_start,
"variables": variables,
"trigger_data": trigger_data,
}
triggers.append(platform.async_attach_trigger(hass, conf, action, info))
attach_results = await asyncio.gather(*triggers, return_exceptions=True)
removes = []
for result in attach_results:
if isinstance(result, HomeAssistantError):
log_cb(logging.ERROR, f"Got error '{result}' when setting up triggers for")
elif isinstance(result, Exception):
log_cb(logging.ERROR, "Error setting up trigger", exc_info=result)
elif result is None:
log_cb(
logging.ERROR, "Unknown error while setting up trigger (empty result)"
)
else:
removes.append(result)
if not removes:
return None
log_cb(logging.INFO, "Initialized trigger")
@callback
def remove_triggers(): # type: ignore
"""Remove triggers."""
for remove in removes:
remove()
return remove_triggers
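# Illustrative example (an assumption, not part of the original module): a
# YAML trigger such as
#
#   trigger:
#     - platform: state
#       entity_id: light.kitchen
#
# arrives here as [{"platform": "state", "entity_id": "light.kitchen"}];
# "state" is resolved to the "homeassistant" integration via the
# _PLATFORM_ALIASES table above before its trigger platform is loaded.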
|
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import shlex
import subprocess
import sys
try:
from shlex import quote as shellquote
except ImportError:
from pipes import quote as shellquote
class BuildOptions(object):
def __init__(self, num_jobs, external_dir, install_dir):
self.num_jobs = num_jobs
if not self.num_jobs:
import multiprocessing
self.num_jobs = multiprocessing.cpu_count()
self.external_dir = external_dir
if install_dir is None:
install_dir = os.path.join(self.external_dir, "install")
self.install_dir = install_dir
def project_dir(self, name, *paths):
return os.path.join(self.external_dir, name, *paths)
class Project(object):
def __init__(self, name, opts, updater, builder):
self.name = name
self.opts = opts
self.updater = updater
self.builder = builder
self.path = self.opts.project_dir(self.name)
def update(self):
self.updater.update(self)
def ensure_checkedout(self):
self.updater.ensure_checkedout(self)
def build(self):
self.builder.build(self)
def clean(self):
self.updater.clean(self)
class GitUpdater(object):
def __init__(self, repo, branch="master"):
self.origin_repo = repo
self.branch = branch
def ensure_checkedout(self, project):
if not os.path.exists(project.path):
self._checkout(project)
def update(self, project):
if os.path.exists(project.path):
print("Updating %s..." % project.name)
run_cmd(["git", "-C", project.path, "fetch", "origin"])
run_cmd(
[
"git",
"-C",
project.path,
"merge",
"--ff-only",
"origin/%s" % self.branch,
]
)
else:
self._checkout(project)
def _checkout(self, project):
print("Cloning %s..." % project.name)
run_cmd(
[
"git",
"clone",
"--depth=100",
self.origin_repo,
project.path,
"--branch",
self.branch,
]
)
def clean(self, project):
run_cmd(["git", "-C", project.path, "clean", "-fxd"])
def homebrew_prefix():
""" returns the homebrew installation prefix """
return subprocess.check_output(["brew", "--prefix"]).strip()
def fixup_env_for_darwin(env):
def add_flag(name, item, separator, append=True):
val = env.get(name, "").split(separator)
if append:
val.append(item)
else:
val.insert(0, item)
env[name] = separator.join(val)
# The brew/openssl installation situation is a bit too weird for vanilla
# cmake logic to find, and most packages don't deal with this correctly,
# so inject these into the environment to give them a hand
p = homebrew_prefix()
add_flag("PKG_CONFIG_PATH", "%s/opt/openssl/lib/pkgconfig" % p, ":")
add_flag("LDFLAGS", "-L%s/opt/openssl/lib" % p, " ")
add_flag("CPPFLAGS", "-I%s/opt/openssl/include" % p, " ")
# system bison is ancient, so ensure that the brew installed one takes
    # precedence. Brew refuses to install or link bison into /usr/local/bin,
# so we have to insert this opt path instead. Likewise for flex.
add_flag("PATH", "%s/opt/bison/bin" % p, ":", append=False)
add_flag("PATH", "%s/opt/flex/bin" % p, ":", append=False)
# flex generates code that sprinkles the `register` keyword liberally
# and the thrift compilation flags hate that in C++17 code. Disable
# the warning that promotes this to an error.
add_flag("CXXFLAGS", "-Wno-register", " ")
class BuilderBase(object):
def __init__(self, subdir=None, env=None, build_dir=None):
self.env = os.environ.copy()
if sys.platform == "darwin":
fixup_env_for_darwin(self.env)
if env:
self.env.update(env)
self.subdir = subdir
self.build_dir = build_dir
self._build_path = None
def _run_cmd(self, cmd):
run_cmd(cmd=cmd, env=self.env, cwd=self._build_path)
def build(self, project):
print("Building %s..." % project.name)
if self.subdir:
build_path = os.path.join(project.path, self.subdir)
else:
build_path = project.path
if self.build_dir is not None:
build_path = os.path.join(build_path, self.build_dir)
if not os.path.isdir(build_path):
os.mkdir(build_path)
self._build_path = build_path
try:
self._build(project)
finally:
self._build_path = None
class MakeBuilder(BuilderBase):
def __init__(self, subdir=None, env=None, args=None):
super(MakeBuilder, self).__init__(subdir=subdir, env=env)
self.args = args or []
def _build(self, project):
cmd = ["make", "-j%s" % project.opts.num_jobs] + self.args
self._run_cmd(cmd)
install_cmd = ["make", "install", "PREFIX=" + project.opts.install_dir]
self._run_cmd(install_cmd)
class AutoconfBuilder(BuilderBase):
def __init__(self, subdir=None, env=None, args=None):
super(AutoconfBuilder, self).__init__(subdir=subdir, env=env)
self.args = args or []
def _build(self, project):
configure_path = os.path.join(self._build_path, "configure")
if not os.path.exists(configure_path):
self._run_cmd(["autoreconf", "--install"])
configure_cmd = [
configure_path,
"--prefix=" + project.opts.install_dir,
] + self.args
self._run_cmd(configure_cmd)
self._run_cmd(["make", "-j%s" % project.opts.num_jobs])
self._run_cmd(["make", "install"])
class CMakeBuilder(BuilderBase):
def __init__(self, subdir=None, env=None, defines=None):
super(CMakeBuilder, self).__init__(subdir=subdir, env=env, build_dir="_build")
self.defines = defines or {}
def _build(self, project):
defines = {
"CMAKE_INSTALL_PREFIX": project.opts.install_dir,
"BUILD_SHARED_LIBS": "OFF",
# Some of the deps (rsocket) default to UBSAN enabled if left
# unspecified. Some of the deps fail to compile in release mode
# due to warning->error promotion. RelWithDebInfo is the happy
# medium.
"CMAKE_BUILD_TYPE": "RelWithDebInfo",
}
# If any of these env vars are set, set the corresponding cmake def.
for e in [
"OPENSSL_ROOT_DIR",
"BOOST_ROOT",
"LIBEVENT_INCLUDE_DIR",
"LIBEVENT_LIB",
"CMAKE_SYSTEM_PREFIX_PATH",
]:
var = os.environ.get(e, None)
if var:
defines[e] = var
if is_win():
defines["CMAKE_TOOLCHAIN_FILE"] = (
vcpkg_dir() + "/scripts/buildsystems/vcpkg.cmake"
)
defines.update(self.defines)
define_args = ["-D%s=%s" % (k, v) for (k, v) in defines.items()]
if is_win():
define_args += ["-G", "Visual Studio 15 2017 Win64"]
self._run_cmd(["cmake", "configure", ".."] + define_args)
if is_win():
self._run_cmd(
[
"cmake",
"--build",
self._build_path,
"--target",
"install",
"--config",
"Release",
# With a sufficiently new cmake available, we could
# ask for concurrency, but for now we don't have it.
# "-j", str(project.opts.num_jobs),
]
)
else:
# The only thing stopping us from using the same cmake --build
# approach as above is that the cmake that ships with ubuntu 16
# is too old and doesn't know about the -j flag, so we do this
# bit the old fashioned way
self._run_cmd(["make", "-j%s" % project.opts.num_jobs])
self._run_cmd(["make", "install"])
def run_cmd(cmd, env=None, cwd=None, allow_fail=False):
cmd_str = " ".join(shellquote(arg) for arg in cmd)
print("+ " + cmd_str)
if allow_fail:
subprocess.call(cmd, env=env, cwd=cwd)
else:
subprocess.check_call(cmd, env=env, cwd=cwd)
def install_apt(pkgs):
cmd = ["sudo", "apt-get", "install", "-yq"] + pkgs
run_cmd(cmd)
def vcpkg_dir():
""" Figure out where vcpkg is installed.
vcpkg-exported is populated in some flavors of FB internal builds.
C:/tools/vcpkg is the appveyor location.
C:/open/vcpkg is my local location.
"""
for p in ["vcpkg-exported", "C:/tools/vcpkg", "C:/open/vcpkg"]:
if os.path.isdir(p):
return os.path.realpath(p)
raise Exception("cannot find vcpkg")
def install_vcpkg(pkgs):
vcpkg = os.path.join(vcpkg_dir(), "vcpkg")
run_cmd([vcpkg, "install", "--triplet", "x64-windows"] + pkgs)
def get_projects(opts):
projects = [
Project(
"mstch",
opts,
GitUpdater("https://github.com/no1msd/mstch.git"),
CMakeBuilder(),
),
Project(
"googletest",
opts,
GitUpdater("https://github.com/google/googletest.git"),
CMakeBuilder(),
),
]
if not is_win():
# Ubuntu 16 has an old version of zstd, so build our own.
# We can't use the MakeBuilder on windows, but we can get zstd
# from vcpkg so we're ok there.
projects += [
Project(
"zstd",
opts,
GitUpdater("https://github.com/facebook/zstd.git"),
MakeBuilder(),
)
]
projects += [
        # TODO: see if we can get a faster and/or static build working
# by building things ourselves.
# Project(
# "libevent",
# opts,
# GitUpdater("https://github.com/libevent/libevent.git",
# branch="release-2.1.8-stable"),
# CMakeBuilder(defines={
# 'EVENT__DISABLE_BENCHMARK': 'ON',
# 'EVENT__DISABLE_TESTS': 'ON',
# 'EVENT__DISABLE_REGRESS': 'ON',
# 'EVENT__DISABLE_SAMPLES': 'ON',
# }),
# ),
Project(
"folly",
opts,
GitUpdater("https://github.com/facebook/folly.git"),
CMakeBuilder(),
)
]
need_thrift = not is_win()
if need_thrift:
projects += [
Project(
"libsodium",
opts,
GitUpdater("https://github.com/jedisct1/libsodium.git"),
AutoconfBuilder(args=["--disable-shared"]),
),
Project(
"fizz",
opts,
GitUpdater("https://github.com/facebookincubator/fizz.git"),
CMakeBuilder(
subdir="fizz",
defines={"BUILD_EXAMPLES": "OFF", "BUILD_TESTS": "OFF"},
),
),
Project(
"fmt",
opts,
GitUpdater("https://github.com/fmtlib/fmt.git"),
CMakeBuilder(),
),
Project(
"wangle",
opts,
GitUpdater("https://github.com/facebook/wangle.git"),
CMakeBuilder(subdir="wangle", defines={"BUILD_TESTS": "OFF"}),
),
Project(
"rsocket-cpp",
opts,
GitUpdater("https://github.com/rsocket/rsocket-cpp.git"),
CMakeBuilder(
defines={"BUILD_EXAMPLES": "OFF", "BUILD_BENCHMARKS": "OFF"}
),
),
Project(
"fbthrift",
opts,
GitUpdater("https://github.com/facebook/fbthrift.git"),
CMakeBuilder(),
),
]
return projects
def get_linux_type():
try:
with open("/etc/os-release") as f:
data = f.read()
except EnvironmentError:
return (None, None)
os_vars = {}
for line in data.splitlines():
parts = line.split("=", 1)
if len(parts) != 2:
continue
key = parts[0].strip()
value_parts = shlex.split(parts[1].strip())
if not value_parts:
value = ""
else:
value = value_parts[0]
os_vars[key] = value
return os_vars.get("NAME"), os_vars.get("VERSION_ID")
def get_os_type():
if sys.platform.startswith("linux"):
return get_linux_type()
elif sys.platform.startswith("darwin"):
return ("darwin", None)
elif sys.platform.startswith("win"):
return ("windows", sys.getwindowsversion().major)
else:
return (None, None)
def is_win():
return get_os_type()[0] == "windows"
def install_platform_deps():
os_name, os_version = get_os_type()
if os_name is None:
raise Exception("unable to detect OS type")
elif os_name == "Centos" or os_name == "Fedora":
print("Untested! Installing necessary packages...")
# FIXME: make this work. Contributions welcomed!
pkgs = (
"autoconf automake libdouble-conversion-dev "
"libssl-dev make zip git libtool g++ libboost-all-dev "
"libevent-dev flex bison libgoogle-glog-dev libkrb5-dev "
"libsnappy-dev libsasl2-dev libnuma-dev libcurl4-gnutls-dev "
"libpcap-dev libdb5.3-dev cmake pkg-config python-dev "
).split()
run_cmd(["yum", "install"] + pkgs)
raise Exception("implement me")
elif os_name == "Ubuntu" or os_name.startswith("Debian"):
# These dependencies have been tested on Ubuntu 16.04
print("Installing necessary Ubuntu packages...")
ubuntu_pkgs = (
"autoconf automake libdouble-conversion-dev "
"libssl-dev make zip git libtool g++ libboost-all-dev "
"libevent-dev flex bison libgoogle-glog-dev libkrb5-dev "
"libsnappy-dev libsasl2-dev libnuma-dev libcurl4-gnutls-dev "
"libpcap-dev libdb5.3-dev cmake pkg-config python-dev "
"libpcre3-dev "
).split()
install_apt(ubuntu_pkgs)
elif os_name == "windows":
install_vcpkg(
[
"boost-chrono",
"boost-context",
"boost-conversion",
"boost-crc",
"boost-date-time",
"boost-filesystem",
"boost-multi-index",
"boost-program-options",
"boost-regex",
"boost-system",
"boost-thread",
"boost-variant",
"double-conversion",
"gflags",
"glog",
"libevent",
"openssl",
]
)
elif os_name == "darwin":
print("Installing necessary packages via Homebrew...")
run_cmd(
[
"brew",
"install",
"autoconf",
"automake",
"bison",
"boost",
"boost-python",
"cmake",
"curl",
"double-conversion",
"flex",
"gflags",
"glog",
"libevent",
"libgit2",
"libtool",
"lz4",
"openssl",
"snappy",
"xz",
"zstd",
],
allow_fail=True,
)
else:
raise Exception(
"installing OS dependencies on %s is not supported yet" % (os_name,)
)
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"-o",
"--external-dir",
help="The directory where external projects should be "
'created (default="external")',
)
ap.add_argument(
"-u",
"--update",
action="store_true",
default=False,
help="Updates the external projects repositories before building them",
)
ap.add_argument(
"-C",
"--clean",
action="store_true",
default=None,
help="Cleans the external project repositories before "
"building them (defaults to on when updating projects)",
)
ap.add_argument(
"--no-clean",
action="store_false",
default=None,
dest="clean",
help="Do not clean the external project repositories "
"even after updating them.",
)
ap.add_argument(
"-j",
"--jobs",
dest="num_jobs",
type=int,
default=None,
help="The number of jobs to run in parallel when building",
)
ap.add_argument(
"--install-dir",
help="Directory where external projects should be "
"installed (default=<external-dir>/install)",
)
ap.add_argument(
"--install-deps",
action="store_true",
default=False,
help="Install necessary system packages",
)
args = ap.parse_args()
if args.external_dir is None:
script_dir = os.path.abspath(os.path.dirname(__file__))
args.external_dir = os.path.join(script_dir, "external")
if args.clean is None:
args.clean = args.update
opts = BuildOptions(args.num_jobs, args.external_dir, args.install_dir)
if args.install_deps:
install_platform_deps()
if not os.path.isdir(opts.external_dir):
os.makedirs(opts.external_dir)
projects = get_projects(opts)
for project in projects:
if args.update:
project.update()
else:
project.ensure_checkedout()
if args.clean:
for project in projects:
project.clean()
for project in projects:
project.build()
if __name__ == "__main__":
main()
|
import datetime
import sys
import unittest
import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components import auth
from components import config
from support import test_case
class ConfigTest(test_case.TestCase):
def setUp(self):
super(ConfigTest, self).setUp()
# Disable in-memory NDB cache, it messes with cache related test cases.
ndb.get_context().set_cache_policy(lambda _: False)
def test_bootstrap(self):
class Config(config.GlobalConfig):
param = ndb.StringProperty()
def set_defaults(self):
self.param = 'abc'
conf = Config.cached()
self.assertEqual('abc', conf.param)
self.assertEqual(conf.to_dict(), conf.fetch().to_dict())
def test_fetch_store(self):
class Config(config.GlobalConfig):
param = ndb.StringProperty()
conf = Config.fetch()
self.assertIsNone(conf)
conf = Config.cached()
self.assertIsNotNone(conf)
conf.param = '1234'
now = self.mock_now(datetime.datetime(2010, 1, 1))
conf.store(updated_by=auth.Anonymous)
self.mock_now(datetime.datetime(2010, 1, 1), 100)
conf = Config.fetch()
self.assertEqual('1234', conf.param)
self.assertEqual(now, conf.updated_ts)
def test_expiration(self):
self.mock_now(datetime.datetime(2014, 1, 2, 3, 4, 5, 6))
class Config(config.GlobalConfig):
param = ndb.StringProperty(default='default')
# Bootstrap the config.
Config.cached()
# fetch-update cycle, necessary to avoid modifying cached copy in-place.
conf = Config.fetch()
conf.param = 'new-value'
conf.store(updated_by=auth.Anonymous)
# Right before expiration.
self.mock_now(datetime.datetime(2014, 1, 2, 3, 4, 5, 6), 59)
self.assertEqual('default', Config.cached().param)
# After expiration.
self.mock_now(datetime.datetime(2014, 1, 2, 3, 4, 5, 6), 61)
self.assertEqual('new-value', Config.cached().param)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.core.amaas_model import AMaaSModel
class Link(AMaaSModel):
def __init__(self, linked_asset_id, *args, **kwargs):
self.linked_asset_id = linked_asset_id
super(Link, self).__init__(*args, **kwargs)
|
from tkinter import *
from factory import Factory
Config = Factory.get('Config')
from ark.thread_handler import ThreadHandler
from ark.gui.tasks import GuiTasks
from ark.gui.control import Control
import time
class PyArcGui(Frame):
gui_title = "pyarc - Rcon for Ark Survival"
gui_size = "1200x700"
def __init__(self, master):
Frame.__init__(self,master)
self.pack(fill=BOTH, expand=1)
self.create_widgets()
ThreadHandler.create_thread(GuiTasks.loop)
def create_widgets(self):
self.feedback = Text(self,width=100,height=40,wrap=WORD)
self.feedback.place(x=0,y=0)
self.feedback_scrollbar = Scrollbar(self.feedback)
self.feedback_scrollbar.place(x=785,y=0,height=640)
self.feedback.config(yscrollcommand=self.feedback_scrollbar.set)
self.feedback_scrollbar.config(command=self.feedback.yview)
Label(self,text="Command:", width=10).place(y=650,x=0)
self.command = Entry(self, width=120)
self.command.bind('<Return>',Control.process_input)
self.command.place(y=650,x=80)
Label(self,text="Server version:", width=20, anchor=W).place(y=0,x=810)
self.server_version = Label(self,text="[Unknown]", width=20, anchor=W, relief=GROOVE)
self.server_version.place(y=0,x=960)
Label(self,text="Server address:", width=20, anchor=W).place(y=25,x=810)
self.server_info = Label(self,text=Config.rcon_host, width=20, anchor=W, relief=GROOVE)
self.server_info.place(y=25,x=960)
Label(self,text="Config file:", width=20, anchor=W).place(y=50,x=810)
self.config_file = Label(self,text=Config.filename, width=20, anchor=W, relief=GROOVE)
self.config_file.place(y=50,x=960)
Label(self,text="Last keepalive:", width=20, anchor=W).place(y=75,x=810)
self.last_keepalive = Label(self,text="Never", width=20, anchor=W, relief=GROOVE)
self.last_keepalive.place(y=75,x=960)
Label(self,text="Last server response:", width=20, anchor=W).place(y=100,x=810)
self.last_serverresponse = Label(self,text="Never", width=20, anchor=W, relief=GROOVE)
self.last_serverresponse.place(y=100,x=960)
Label(self,text="Last player activity:", width=20, anchor=W).place(y=125,x=810)
self.last_player_activity = Label(self,text="Never", width=20, anchor=W, relief=GROOVE)
self.last_player_activity.place(y=125,x=960)
Label(self,text="Active threads:", width=20, anchor=W).place(y=150,x=810)
self.active_threads = Label(self,text="", width=20, anchor=W, relief=GROOVE)
self.active_threads.place(y=150,x=960)
Label(self,text="List of players:").place(y=400,x=810)
self.player_list = Listbox(self, relief=SUNKEN, height=10, width=40)
self.player_list.insert(END,'[Not available]')
self.player_list.place(y=425,x=810)
Button(text='Restart Now',command=self.ev_restart_now, bg='#666', fg="#EEE").place(y=600,x=810)
Button(text='Restart 60min',command=lambda:self.ev_restart_min(60), bg='#666', fg="#EEE").place(y=600,x=900)
Button(text='Restart 30min',command=lambda:self.ev_restart_min(30), bg='#666', fg="#EEE").place(y=600,x=990)
Button(text='Restart 10min',command=lambda:self.ev_restart_min(10), bg='#666', fg="#EEE").place(y=600,x=1080)
def write(self,message):
self.feedback.insert(END,message)
self.feedback.see('end')
def log(self,message):
self.feedback.insert(END,message + "\n")
self.feedback.see('end')
def is_online(self):
#self.log('Not connected to RCON')
return True
def ev_restart_min(self,minutes):
if not self.is_online():
return False
from ark.rcon import Rcon
Rcon.delayed_restart(minutes)
def ev_restart_now(self):
if not self.is_online():
return False
self.log('Restart button pressed')
from ark.rcon import Rcon
Rcon.callback_restart()
|
from openstack import resource
class SecurityGroupRule(resource.Resource, resource.TagMixin):
resource_key = 'security_group_rule'
resources_key = 'security_group_rules'
base_path = '/security-group-rules'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = False
allow_delete = True
allow_list = True
_query_mapping = resource.QueryParameters(
'description', 'direction', 'protocol',
'remote_group_id', 'security_group_id',
'port_range_max', 'port_range_min',
'remote_ip_prefix', 'revision_number',
'project_id', 'tenant_id',
'sort_dir', 'sort_key',
ether_type='ethertype',
**resource.TagMixin._tag_query_parameters
)
# Properties
#: Timestamp when the security group rule was created.
created_at = resource.Body('created_at')
#: The security group rule description.
description = resource.Body('description')
#: ``ingress`` or ``egress``: The direction in which the security group
#: rule is applied. For a compute instance, an ingress security group
#: rule is applied to incoming ingress traffic for that instance.
#: An egress rule is applied to traffic leaving the instance.
direction = resource.Body('direction')
#: Must be IPv4 or IPv6, and addresses represented in CIDR must match
#: the ingress or egress rules.
ether_type = resource.Body('ethertype')
#: The maximum port number in the range that is matched by the
#: security group rule. The port_range_min attribute constrains
#: the port_range_max attribute. If the protocol is ICMP, this
#: value must be an ICMP type.
port_range_max = resource.Body('port_range_max', type=int)
#: The minimum port number in the range that is matched by the
#: security group rule. If the protocol is TCP or UDP, this value
#: must be less than or equal to the value of the port_range_max
#: attribute. If the protocol is ICMP, this value must be an ICMP type.
port_range_min = resource.Body('port_range_min', type=int)
#: The ID of the project this security group rule is associated with.
project_id = resource.Body('project_id')
#: The protocol that is matched by the security group rule.
#: Valid values are ``null``, ``tcp``, ``udp``, and ``icmp``.
protocol = resource.Body('protocol')
#: The remote security group ID to be associated with this security
#: group rule. You can specify either ``remote_group_id`` or
#: ``remote_ip_prefix`` in the request body.
remote_group_id = resource.Body('remote_group_id')
#: The remote IP prefix to be associated with this security group rule.
#: You can specify either ``remote_group_id`` or ``remote_ip_prefix``
#: in the request body. This attribute matches the specified IP prefix
#: as the source IP address of the IP packet.
remote_ip_prefix = resource.Body('remote_ip_prefix')
#: Revision number of the security group rule. *Type: int*
revision_number = resource.Body('revision_number', type=int)
#: The security group ID to associate with this security group rule.
security_group_id = resource.Body('security_group_id')
#: The ID of the project this security group rule is associated with.
tenant_id = resource.Body('tenant_id')
#: Timestamp when the security group rule was last updated.
updated_at = resource.Body('updated_at')
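# --- Usage sketch (illustrative; assumes an openstacksdk Connection and an
# existing security group; the cloud name and group ID are hypothetical) ---
if __name__ == "__main__":
    import openstack

    conn = openstack.connect(cloud="mycloud")
    # Allow inbound SSH from anywhere on the given security group.
    rule = conn.network.create_security_group_rule(
        security_group_id="SECURITY_GROUP_ID",
        direction="ingress",
        ether_type="IPv4",
        protocol="tcp",
        port_range_min=22,
        port_range_max=22,
        remote_ip_prefix="0.0.0.0/0",
    )
    print(rule)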
|
from neutron_lib import context
from neutron_lib.db import api as db_api
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
from neutron.api import converters as n_converters
from neutron.objects import ports as port_obj
from neutron.objects.qos import binding as qos_binding_obj
from neutron.objects.qos import rule as qos_rule_obj
LOG = logging.getLogger(__name__)
def setup_conf():
conf = cfg.CONF
db_group, neutron_db_opts = db_options.list_opts()[0]
cfg.CONF.register_cli_opts(neutron_db_opts, db_group)
conf()
def main():
"""Main method for sanitizing "ml2_port_bindings.profile" column.
This script will sanitize "ml2_port_bindings.profile" columns existing in
the database. In Yoga release the format of this column has changed from:
{'allocation': '<rp_uuid>'}
to:
{'allocation': {'<group_uuid>': '<rp_uuid>'}}
where group_uuid is generated based on port_id and ID of QoS rules
belonging to that group.
"""
setup_conf()
admin_ctx = context.get_admin_context()
with db_api.CONTEXT_WRITER.using(admin_ctx):
for port_binding in port_obj.PortBinding.get_objects(admin_ctx):
# NOTE(przszc): Before minimum packet rate rule was introduced,
# binding-profile.allocation attribute could contain only a single
# RP UUID, responsible for providing minimum bandwidth resources.
# Because of that, whenever we find allocation attribute that still
# uses old format, we can safely assume that we need to generate
# minimum bandwidth group UUID.
allocation = port_binding.profile.get('allocation')
if (not allocation or isinstance(allocation, dict)):
continue
qos_port_binding = qos_binding_obj.QosPolicyPortBinding.get_object(
admin_ctx, port_id=port_binding.port_id)
if not qos_port_binding:
LOG.error(
'Failed to sanitize binding-profile.allocation attribute '
'%s for port %s: Did not find associated QoS policy.',
allocation, port_binding.port_id)
continue
min_bw_rules = qos_rule_obj.QosMinimumBandwidthRule.get_objects(
admin_ctx, qos_policy_id=qos_port_binding.policy_id)
if not min_bw_rules:
LOG.error(
'Failed to sanitize binding-profile.allocation attribute '
'%s for port %s: Associated QoS policy %s has no minimum '
'bandwidth rules.', allocation, port_binding.port_id,
qos_port_binding.policy_id)
continue
port_binding.profile = {'allocation':
n_converters.convert_to_sanitized_binding_profile_allocation(
allocation, port_binding.port_id, min_bw_rules)}
LOG.info('Port %s updated, New binding-profile.allocation format: '
'%s', port_binding.port_id, port_binding.profile)
port_binding.update()
|
import os
import subprocess
import time
import client
import swagger_client
import v2_swagger_client
try:
from urllib import getproxies
except ImportError:
from urllib.request import getproxies
class Server:
def __init__(self, endpoint, verify_ssl):
self.endpoint = endpoint
self.verify_ssl = verify_ssl
class Credential:
def __init__(self, type, username, password):
self.type = type
self.username = username
self.password = password
def get_endpoint():
harbor_server = os.environ.get("HARBOR_HOST", "localhost:8080")
    return os.environ.get("HARBOR_HOST_SCHEMA", "https") + "://" + harbor_server + "/api/v2.0"
def _create_client(server, credential, debug, api_type="products"):
cfg = None
if api_type in ('projectv2', 'artifact', 'repository', 'scanner', 'scan', 'scanall', 'preheat', 'quota',
'replication', 'registry', 'robot', 'gc', 'retention', 'immutable', 'system_cve_allowlist',
'configure', 'user', 'member', 'health', 'label', 'webhook'):
cfg = v2_swagger_client.Configuration()
else:
cfg = swagger_client.Configuration()
cfg.host = server.endpoint
cfg.verify_ssl = server.verify_ssl
# support basic auth only for now
cfg.username = credential.username
cfg.password = credential.password
cfg.debug = debug
proxies = getproxies()
proxy = proxies.get('http', proxies.get('all', None))
if proxy:
cfg.proxy = proxy
if cfg.username is None and cfg.password is None:
# returns {} for auth_settings for anonymous access
import types
cfg.auth_settings = types.MethodType(lambda self: {}, cfg)
return {
"chart": client.ChartRepositoryApi(client.ApiClient(cfg)),
"products": swagger_client.ProductsApi(swagger_client.ApiClient(cfg)),
"projectv2":v2_swagger_client.ProjectApi(v2_swagger_client.ApiClient(cfg)),
"artifact": v2_swagger_client.ArtifactApi(v2_swagger_client.ApiClient(cfg)),
"preheat": v2_swagger_client.PreheatApi(v2_swagger_client.ApiClient(cfg)),
"quota": v2_swagger_client.QuotaApi(v2_swagger_client.ApiClient(cfg)),
"repository": v2_swagger_client.RepositoryApi(v2_swagger_client.ApiClient(cfg)),
"scan": v2_swagger_client.ScanApi(v2_swagger_client.ApiClient(cfg)),
"scanall": v2_swagger_client.ScanAllApi(v2_swagger_client.ApiClient(cfg)),
"scanner": v2_swagger_client.ScannerApi(v2_swagger_client.ApiClient(cfg)),
"replication": v2_swagger_client.ReplicationApi(v2_swagger_client.ApiClient(cfg)),
"registry": v2_swagger_client.RegistryApi(v2_swagger_client.ApiClient(cfg)),
"robot": v2_swagger_client.RobotApi(v2_swagger_client.ApiClient(cfg)),
"gc": v2_swagger_client.GcApi(v2_swagger_client.ApiClient(cfg)),
"retention": v2_swagger_client.RetentionApi(v2_swagger_client.ApiClient(cfg)),
"immutable": v2_swagger_client.ImmutableApi(v2_swagger_client.ApiClient(cfg)),
"system_cve_allowlist": v2_swagger_client.SystemCVEAllowlistApi(v2_swagger_client.ApiClient(cfg)),
"configure": v2_swagger_client.ConfigureApi(v2_swagger_client.ApiClient(cfg)),
"label": v2_swagger_client.LabelApi(v2_swagger_client.ApiClient(cfg)),
"user": v2_swagger_client.UserApi(v2_swagger_client.ApiClient(cfg)),
"member": v2_swagger_client.MemberApi(v2_swagger_client.ApiClient(cfg)),
"health": v2_swagger_client.HealthApi(v2_swagger_client.ApiClient(cfg)),
"webhook": v2_swagger_client.WebhookApi(v2_swagger_client.ApiClient(cfg))
}.get(api_type,'Error: Wrong API type')
def _assert_status_code(expect_code, return_code, err_msg=r"HTTP status code is not as expected. Expected {}, while the actual status code is {}."):
if str(return_code) != str(expect_code):
raise Exception(err_msg.format(expect_code, return_code))
def _assert_status_body(expect_status_body, returned_status_body):
if str(returned_status_body.strip()).lower().find(expect_status_body.lower()) < 0:
raise Exception(r"HTTPS status body s not as we expected. Expected {}, while actual HTTPS status body is {}.".format(expect_status_body, returned_status_body))
def _random_name(prefix):
return "%s-%d" % (prefix, int(round(time.time() * 1000)))
def _get_id_from_header(header):
try:
location = header["Location"]
return int(location.split("/")[-1])
except Exception:
return None
def _get_string_from_unicode(udata):
    # build a plain string, stripping newlines/tabs from each chunk
    result = ''
    for u in udata:
        result += u.strip('\n\r\t')
    return result
def restart_process(process):
if process == "dockerd":
full_process_name = process
elif process == "containerd":
full_process_name = "/usr/local/bin/containerd"
else:
raise Exception("Please input dockerd or containerd for process retarting.")
run_command_with_popen("ps aux |grep " + full_process_name)
    for i in range(10):
        pid = run_command_with_popen("pidof " + full_process_name)
        if pid in [None, ""]:
            break
        run_command_with_popen("kill " + str(pid))
        time.sleep(3)
    run_command_with_popen("ps aux |grep " + full_process_name)
    run_command_with_popen("rm -rf /var/lib/" + process + "/*")
    run_command_with_popen(full_process_name + " > ./daemon-local.log 2>&1 &")
    time.sleep(3)
    pid = run_command_with_popen("pidof " + full_process_name)
    if pid in [None, ""]:
        raise Exception("Failed to start process {}.".format(full_process_name))
run_command_with_popen("ps aux |grep " + full_process_name)
def run_command_with_popen(command):
    print("Command: ", command)
    output = None
    try:
        proc = subprocess.Popen(command, universal_newlines=True, shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, errors = proc.communicate()
    except Exception as e:
        print("Run command caught exception:", e)
    else:
        print(proc.returncode, errors, output)
        proc.stdout.close()
    print("output: ", output)
    return output
def run_command(command, expected_error_message = None):
print("Command: ", subprocess.list2cmdline(command))
try:
output = subprocess.check_output(command,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as e:
print("Run command error:", str(e))
print("expected_error_message:", expected_error_message)
if expected_error_message is not None:
if str(e.output).lower().find(expected_error_message.lower()) < 0:
raise Exception(r"Error message {} is not as expected {}".format(str(e.output), expected_error_message))
else:
raise Exception('Error: Exited with error code: %s. Output:%s'% (e.returncode, e.output))
else:
print("output:", output)
return output
class Base(object):
def __init__(self, server=None, credential=None, debug=True, api_type="products"):
if server is None:
server = Server(endpoint=get_endpoint(), verify_ssl=False)
if not isinstance(server.verify_ssl, bool):
server.verify_ssl = server.verify_ssl == "True"
if credential is None:
credential = Credential(type="basic_auth", username="admin", password="Harbor12345")
self.server = server
self.credential = credential
self.debug = debug
self.api_type = api_type
self.client = _create_client(server, credential, debug, api_type=api_type)
def _get_client(self, **kwargs):
if len(kwargs) == 0:
return self.client
server = self.server
if "endpoint" in kwargs:
server.endpoint = kwargs.get("endpoint")
if "verify_ssl" in kwargs:
if not isinstance(kwargs.get("verify_ssl"), bool):
server.verify_ssl = kwargs.get("verify_ssl") == "True"
else:
server.verify_ssl = kwargs.get("verify_ssl")
credential = Credential(
kwargs.get("type", self.credential.type),
kwargs.get("username", self.credential.username),
kwargs.get("password", self.credential.password),
)
return _create_client(server, credential, self.debug, kwargs.get('api_type', self.api_type))
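# --- Usage sketch (illustrative; the credentials below are hypothetical) ---
if __name__ == "__main__":
    base = Base(api_type="projectv2")  # admin client against HARBOR_HOST
    # Derive a client for another API type with different credentials:
    artifact_client = base._get_client(username="alice", password="secret",
                                       api_type="artifact")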
|
import sys
import os
import ast
from pprint import pprint
from collections import defaultdict
from collections import OrderedDict
from operator import itemgetter
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib
stage_names = ["CreateMemoryDeltalist", "CreateDiskDeltalist", "DeltaDedup", "CompressProc"]
class ProfilingError(Exception):
pass
class Experiment(object):
def __init__(self):
pass
def __repr__(self):
return "%s,R:%s,P:%s" % (self.workload, self.get_total_R(), self.get_total_P())
def get_total_P(self):
# get processing time per block using total time of stages
#memory_in_size = self.stage_size_in['CreateMemoryDeltalist']
#disk_in_size = self.stage_size_in['CreateDiskDeltalist']
#alpha = float(memory_in_size)/(memory_in_size+disk_in_size)
disk_diff = self.stage_time['CreateMemoryDeltalist']
memory_diff = self.stage_time['CreateDiskDeltalist']
delta = self.stage_time['DeltaDedup']
comp = self.stage_time['CompressProc']
# should not weight using alpha
# calculate using p of all stages --> use alpha
# calculate using time at all stage --> do not use alpha
total_processing_time = memory_diff+disk_diff+delta+comp
total_block = self.block['CreateDiskDeltalist'] + self.block['CreateMemoryDeltalist']
total_p_block = float(total_processing_time) / total_block
return total_p_block*1000
def estimate_total_P(self):
# estimate processing time per block using block processing time of each stage
# calculate using p of all stages --> use alpha
# calculate using time at all stage --> do not use alpha
memory_in_size = self.stage_size_in['CreateMemoryDeltalist']
disk_in_size = self.stage_size_in['CreateDiskDeltalist']
alpha = float(memory_in_size)/(memory_in_size+disk_in_size)
total_P_from_each_stage = (self.block_time['CreateMemoryDeltalist']*alpha + self.block_time['CreateDiskDeltalist']*(1-alpha))\
+ self.block_time['DeltaDedup'] + self.block_time['CompressProc']
return total_P_from_each_stage
def get_total_R(self):
disk_diff = self.stage_size_in['CreateMemoryDeltalist']
memory_diff = self.stage_size_in['CreateDiskDeltalist']
comp = self.stage_size_out['CompressProc']
total_R = float(comp)/(disk_diff+memory_diff)
return round(total_R, 4)
def estimate_total_R(self):
# weight using input size
memory_in_size = self.stage_size_in['CreateMemoryDeltalist']
disk_in_size = self.stage_size_in['CreateDiskDeltalist']
alpha = float(memory_in_size)/(memory_in_size+disk_in_size)
total_R_from_each_stage = (self.block_size_ratio['CreateMemoryDeltalist']*alpha +\
self.block_size_ratio['CreateDiskDeltalist']*(1-alpha))\
* self.block_size_ratio['DeltaDedup']\
* self.block_size_ratio['CompressProc']
return total_R_from_each_stage
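    # Worked example (illustrative numbers): with memory_in_size = 300 MB and
    # disk_in_size = 100 MB, alpha = 300/400 = 0.75. If the per-block times
    # are 2 ms (memory) and 4 ms (disk), the diff stages contribute
    # 2*0.75 + 4*0.25 = 2.5 ms, to which the DeltaDedup and CompressProc
    # per-block times are added (see estimate_total_P above).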
@staticmethod
def mode_diff_str(exp1, exp2):
mode1 = exp1.mode
mode2 = exp2.mode
set_mode1, set_mode2= set(mode1.keys()), set(mode2.keys())
intersect = set_mode1.intersection(set_mode2)
changed_keys = [o for o in intersect if mode1[o] != mode2[o]]
changed_list = list()
for key in changed_keys:
value1 = mode1[key]
value2 = mode2[key]
changed = "%s: %s->%s" % (key, value1, value2)
changed_list.append(changed)
changed_str = ", ".join(changed_list)
return changed_str
def parse_each_experiement(lines):
# get configuration
config_lines = ""
is_start_config_line = False
workload = lines[0].split(" ")[-1]
migration_total_time = 0
migration_downtime = 0
for line in lines[1:]:
if line.find("* Overlay creation mode start") != -1:
is_start_config_line = True
continue
if is_start_config_line == True:
config_lines += line
if line.find("}") != -1:
break
config_dict = ast.literal_eval(config_lines)
# filter out only profiling log
profile_lines = list()
for line in lines:
# see only DEBUG message
if line.find("DEBUG") == -1:
continue
if line.find("profiling") != -1:
# see only profiling message
log = line.split("profiling")[1].strip()
profile_lines.append(log)
elif line.find("Time for finishing transferring") != -1:
log = line.split(":")[-1]
migration_total_time = float(log.strip())
elif line.find("Finish migration") != -1:
log = line.split(":")[-1]
migration_total_time = float(log.strip())
elif line.find("migration downtime") != -1:
log = line.split(":")[-1]
migration_downtime = float(log.strip())
# process filtered log data
exp = Experiment()
workload = lines[0].split(" ")[-1]
setattr(exp, 'workload', os.path.basename(workload))
setattr(exp, 'migration_total_time', migration_total_time)
setattr(exp, 'migration_downtime', migration_downtime)
setattr(exp, 'mode', config_dict)
setattr(exp, 'stage_size_in', dict.fromkeys(stage_names, 0))
setattr(exp, 'stage_size_out', dict.fromkeys(stage_names, 0))
setattr(exp, 'stage_size_ratio', dict.fromkeys(stage_names, 0))
setattr(exp, 'stage_time', dict.fromkeys(stage_names, 0))
setattr(exp, 'block', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_size_in', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_size_ratio', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_size_out', dict.fromkeys(stage_names, 0))
setattr(exp, 'block_time', dict.fromkeys(stage_names, 0))
for line in profile_lines:
log = line.split("\t")
stage_name = log[0]
profile_type = str(log[1])
if stage_name not in stage_names:
continue
if profile_type == "size":
in_size = long(log[2])
out_size = long(log[3])
ratio = float(log[4])
exp.stage_size_in[stage_name] = in_size
exp.stage_size_out[stage_name] = out_size
exp.stage_size_ratio[stage_name] = ratio
if profile_type == "block-size":
in_size = float(log[2])
out_size = float(log[3])
block_count = long(log[4])
exp.block[stage_name] = block_count
exp.block_size_in[stage_name] = in_size
exp.block_size_out[stage_name] = out_size
exp.block_size_ratio[stage_name] = out_size/float(in_size)
if profile_type == "time":
duration = float(log[-1])
exp.stage_time[stage_name] = duration
if profile_type == "block-time":
duration = round(float(log[-1])*1000, 6)
exp.block_time[stage_name] = duration
return exp
def parsing(inputfile):
lines = open(inputfile, "r").read().split("\n")
test_list = list()
new_log = list()
for line in lines:
if line.find("==========================================") != -1:
if len(new_log) > 0:
test_list.append(new_log)
new_log = list()
else:
new_log.append(line)
test_list.append(new_log)
test_ret_list = list()
for each_exp_log in test_list:
test_ret = parse_each_experiement(each_exp_log)
test_ret_list.append(test_ret)
return test_ret_list
def _split_experiment(test_ret_list):
moped_exps = list()
speech_exps = list()
fluid_exps = list()
face_exps = list()
mar_exps = list()
random_exps = list()
delta_exps = list()
for each_exp in test_ret_list:
if each_exp.workload.find("moped") != -1:
moped_exps.append(each_exp)
elif each_exp.workload.find("fluid") != -1:
fluid_exps.append(each_exp)
elif each_exp.workload.find("face") != -1:
face_exps.append(each_exp)
elif each_exp.workload.find("mar") != -1:
mar_exps.append(each_exp)
elif each_exp.workload.find("speech") != -1:
speech_exps.append(each_exp)
elif each_exp.workload.find("random") != -1:
random_exps.append(each_exp)
elif each_exp.workload.find("delta") != -1:
delta_exps.append(each_exp)
else:
msg = "Invalid workload %s" % each_exp['workload']
print msg
sys.exit(1)
raise ProfilingError(msg)
#if (len(moped_exps) == len(fluid_exps) == len(face_exps) == len(mar_exps)) == False:
# msg = "workloads have different experiement size"
# print msg
# sys.exit(1)
# raise ProfilingError(msg)
return moped_exps, speech_exps, fluid_exps, face_exps, mar_exps, random_exps, delta_exps
def multikeysort(items, columns):
comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
if result:
return mult * result
else:
return 0
return sorted(items, cmp=comparer)
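# Example (illustrative; the column names are hypothetical): sort a list of
# dicts by descending total time, then ascending name:
#   multikeysort(rows, ['-total_time', 'name'])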
def print_bw(exps):
# sort by compression algorithm gzip 1, .., gzip9, .., lzma1, .., lzma9
def compare_comp_algorithm(a):
d = {"xdelta3":3,
"bsdiff":4,
"xor":2,
"none":1}
return (d[a.mode['DISK_DIFF_ALGORITHM']], -a.mode['COMPRESSION_ALGORITHM_TYPE'], a.mode['COMPRESSION_ALGORITHM_SPEED'])
exps.sort(key=compare_comp_algorithm)
result_dict = OrderedDict()
for each_exp in exps:
in_data_size = each_exp.stage_size_in['CreateMemoryDeltalist'] + each_exp.stage_size_in['CreateDiskDeltalist']
in_data_disk = each_exp.stage_size_in['CreateDiskDeltalist']
in_data_mem = each_exp.stage_size_in['CreateMemoryDeltalist']
alpha = float(in_data_mem)/in_data_size
out_data_size = each_exp.stage_size_out['CompressProc']
duration = each_exp.migration_total_time
est_duration1 = each_exp.stage_time['CreateMemoryDeltalist'] + each_exp.stage_time['CreateDiskDeltalist']+\
each_exp.stage_time['DeltaDedup'] + each_exp.stage_time['CompressProc']
key = "%s,%d,%d" % (each_exp.mode['DISK_DIFF_ALGORITHM'], each_exp.mode['COMPRESSION_ALGORITHM_TYPE'],each_exp.mode['COMPRESSION_ALGORITHM_SPEED'])
value = (in_data_size, out_data_size, duration, 8*float(out_data_size)/1024.0/1024/duration)
item_list = result_dict.get(key, list())
item_list.append(value)
result_dict[key] = item_list
#print "%s,%d,%d\t%ld\t%ld\t%f,%f,%f\t%f" %\
# (each_exp.mode['DISK_DIFF_ALGORITHM'],\
# each_exp.mode['COMPRESSION_ALGORITHM_TYPE'],\
# each_exp.mode['COMPRESSION_ALGORITHM_SPEED'],\
# in_data_size, out_data_size, duration, est_duration1, float(duration)/est_duration1,
# 8*float(out_data_size)/1024.0/1024/duration)
# chose the median throughput value
for (key, value_list) in result_dict.iteritems():
value_list.sort(key=itemgetter(3))
value_len = len(value_list)
value = value_list[value_len/2]
print "%s\t%s\t%s\t%s\t%s" % ("\t".join(key.split(",")), value[0], value[1], value[2], value[3])
def sort_experiment_by_diff_compress(exps):
def compare_comp_algorithm(a):
d = {"xdelta3":3,
"bsdiff":4,
"xor":2,
"none":1}
return (d[a.mode['DISK_DIFF_ALGORITHM']], -a.mode['COMPRESSION_ALGORITHM_TYPE'], a.mode['COMPRESSION_ALGORITHM_SPEED'])
exps.sort(key=compare_comp_algorithm)
return exps
def print_bw_block(exps):
# sort by compression algorithm gzip 1, .., gzip9, .., lzma1, .., lzma9
exps = sort_experiment_by_diff_compress(exps)
result_dict = OrderedDict()
for each_exp in exps:
in_data_size = each_exp.stage_size_in['CreateMemoryDeltalist'] + each_exp.stage_size_in['CreateDiskDeltalist']
in_data_disk = each_exp.stage_size_in['CreateDiskDeltalist']
in_data_mem = each_exp.stage_size_in['CreateMemoryDeltalist']
alpha = float(in_data_mem)/in_data_size
out_data_size = each_exp.stage_size_out['CompressProc']
duration = each_exp.migration_total_time
est_duration = each_exp.stage_time['CreateMemoryDeltalist'] + each_exp.stage_time['CreateDiskDeltalist']+\
each_exp.stage_time['DeltaDedup'] + each_exp.stage_time['CompressProc']
        est_duration += 14  # serial part
total_r = each_exp.get_total_R()
total_p = each_exp.get_total_P()
total_r_est = each_exp.estimate_total_R()
total_p_est = each_exp.estimate_total_P()
key = "%s,%d,%d" % (each_exp.mode['DISK_DIFF_ALGORITHM'], each_exp.mode['COMPRESSION_ALGORITHM_TYPE'],each_exp.mode['COMPRESSION_ALGORITHM_SPEED'])
value = (in_data_size, out_data_size, duration, est_duration,
8*float(out_data_size)/1024.0/1024/duration,
total_p, total_r, total_p_est, total_r_est)
item_list = result_dict.get(key, list())
item_list.append(value)
result_dict[key] = item_list
#print "%s,%d,%d\t%ld\t%ld\t%f,%f,%f\t%f\t%f,%f\t%f,%f" %\
# (each_exp.mode['DISK_DIFF_ALGORITHM'],\
# each_exp.mode['COMPRESSION_ALGORITHM_TYPE'],\
# each_exp.mode['COMPRESSION_ALGORITHM_SPEED'],\
# in_data_size, out_data_size, duration, est_duration, float(duration)/est_duration1,
# 8*float(out_data_size)/1024.0/1024/duration,\
# total_p, total_p_est,
# total_r, total_r_est)
    # choose the median-throughput run for each mode
    for (key, value_list) in result_dict.iteritems():
        value_list.sort(key=itemgetter(4))  # index 4 is the throughput (Mbps)
value_len = len(value_list)
(insize, outsize, duration, est_duration, bw, total_p, total_r, total_p_est, total_r_est) = value_list[value_len/2]
print "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % ("\t".join(key.split(",")),
insize, outsize, duration,
est_duration,
float(duration)/float(est_duration),
bw, total_p, total_r)
def averaged_pr(measure_history, duration):
    """Average the P and R samples taken within the last `duration` seconds."""
avg_p = float(0)
avg_r = float(0)
counter = 0
(cur_time, cur_p, cur_r) = measure_history[-1]
for (measured_time, p, r) in reversed(measure_history):
if cur_time - measured_time > duration:
break
avg_p += p
avg_r += r
counter += 1
#print "%f measure last %d/%d" % (cur_time, counter, len(measure_history))
return avg_p/counter, avg_r/counter
def averaged_value(measure_hist, cur_time, avg_time=1):
    """Average the values sampled within `avg_time` seconds before `cur_time`."""
avg_value = float(0)
counter = 0
for (measured_time, value) in reversed(measure_hist):
if cur_time - measured_time > avg_time:
break
avg_value += float(value)
counter += 1
#LOG.debug("coutning for avg : %d" % counter)
return float(avg_value)/counter
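# Illustrative behaviour of averaged_value (worked example, not part of the
# script's logic): with samples [(0.0, 10), (0.5, 20), (1.4, 30)] and
# cur_time=1.5, only the samples taken within the last avg_time=1 second
# (values 20 and 30) are averaged, giving 25.0.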
def print_p_r_over_time(inputfile):
lines = open(inputfile, "r").read().split("\n")
adaptation_log_lines = list()
mode_test_log_lines = list()
mode_change_log_lines = list()
for line in lines:
# see only DEBUG message
if line.find("DEBUG") == -1:
continue
if line.find("adaptation") != -1:
# see only profiling message
log = line.split("adaptation")[1].strip()
adaptation_log_lines.append(log)
if line.find("mode-change-test") != -1:
log = line.split("mode-change-test")[1].strip()
mode_test_log_lines.append(log)
if line.find("mode-change") != -1 and line.find("mode-change-test") == -1:
log = line.split("mode-change")[1].strip()
mode_change_log_lines.append(log)
migration_start_time = 0
system_in_hist = list()
system_out_hist = list()
iteration_time_list = list()
p_and_r_list = list()
measure_history = list()
time_list = list()
p_list = list()
p_list_cur = list()
r_list = list()
r_list_cur = list()
network_bw_list = list()
system_in_bw_list = list()
system_out_bw_list = list()
system_out_bw_potential_list = list()
for line in adaptation_log_lines:
if line.startswith("start time"):
migration_start_time = float(line.split(":")[-1])
elif line.startswith("new iteration"):
iter_time, iter_seq_old, iter_seq_new, iter_mem_size = line.split("\t")[1:]
iteration_time_list.append((iter_seq_old, iter_time))
else:
(time_measured, duration_measured, network_bw_mbps,\
system_out_bw_actual, system_in_bw_actual,\
system_out_bw_cur_est, system_in_bw_cur_est,\
total_p, total_r,\
total_p_cur, total_r_cur) = line.split("\t")
p_and_r_list.append((duration_measured, total_p, total_p_cur, total_r, total_r_cur))
measure_history.append((float(duration_measured), float(total_p_cur), float(total_r_cur)))
network_bw_list.append(network_bw_mbps)
# data for plot
duration = float(time_measured)-float(migration_start_time)
time_list.append(duration)
#time_list.append(duration_measured)
p_list.append(float(total_p))
p_list_cur.append(float(total_p_cur))
r_list.append(float(total_r))
r_list_cur.append(float(total_r_cur))
system_out_bw_potential_list.append(float(system_out_bw_cur_est))
system_in_hist.append((float(time_measured), system_in_bw_actual))
system_out_hist.append((float(time_measured), system_out_bw_actual))
system_in_actual_avg = averaged_value(system_in_hist, float(time_measured), avg_time=2)
system_out_actual_avg = averaged_value(system_out_hist, float(time_measured), avg_time=2)
#system_in_actual_avg = system_in_bw_actual
#system_out_actual_avg = system_out_bw_actual
system_in_bw_list.append(system_in_actual_avg)
system_out_bw_list.append(system_out_actual_avg)
# averaged p and r over time window
p_list_avg1 = list()
p_list_avg2 = list()
r_list_avg1 = list()
r_list_avg2 = list()
    for index, (duration_measured, total_p_cur, total_r_cur) in enumerate(measure_history):
        avg_p1, avg_r1 = averaged_pr(measure_history[0:index+1], 1)
        avg_p2, avg_r2 = averaged_pr(measure_history[0:index+1], 5)
        p_list_avg1.append(avg_p1)
        p_list_avg2.append(avg_p2)
        r_list_avg1.append(avg_r1)
        r_list_avg2.append(avg_r2)
# print result
print "iter #\tduration\ttime"
for index, (iter_seq_old, iter_time) in enumerate(iteration_time_list):
print "%s\t%f\t%s" % (iter_seq_old, (float(iter_time)-float(migration_start_time)), iter_time)
print "\n\n"
print "duration\ttotal_p\tcur_p\ttotal_r\tcur_r"
for values in p_and_r_list:
(duration_measured, total_p, total_p_cur, total_r, total_r_cur) = values
print "%s\t%s\t%s\t%s\t%s" % (duration_measured, total_p, total_p_cur, total_r, total_r_cur)
# figure plot
fig, ax = plt.subplots()
font = {'family' : 'Times New Roman',
'weight' : 'normal',
'size' : 22}
matplotlib.rc('font', **font)
# plot p and r
f1, (p_plot, r_plot)= plt.subplots(2, 1, sharex=True)
p_plot.set_title("P - " + inputfile)
r_plot.set_title("R - " + inputfile)
p_plot.plot(time_list, p_list, 'r-', time_list, p_list_cur, 'b-')
r_plot.plot(time_list, r_list, 'r-', time_list, r_list_cur, 'b-')
cur_xlim = f1.gca().get_xlim()[1]
p_plot.set_xlim([0, cur_xlim])
(y_min, y_max) = p_plot.get_ylim()
p_plot.set_ylim([0, max(1, y_max)])
r_plot.set_ylim([0, 1])
plt.savefig(inputfile + "-pr" + '.png')
# plot performance
f2, (bw_plot)= plt.subplots(1, 1, sharex=True)
f2.set_size_inches(12,5)
bw_plot.set_ylabel("Throughput (MBps)")
bw_plot.set_xlabel("Time (s)")
bw_plot.set_title("Trace - " + inputfile)
LINE_WIDTH = 3
bw_plot.plot(time_list, system_in_bw_list, 'r-o', label="input throughput", linewidth=LINE_WIDTH)
bw_plot.plot(time_list, system_out_bw_list, 'b-+', label="Output throughput", linewidth=LINE_WIDTH)
bw_plot.plot(time_list, system_out_bw_potential_list, 'b--', label="Potential output throughput", linewidth=LINE_WIDTH)
bw_plot.plot(time_list, network_bw_list, 'g-', label="Network throughput", linewidth=LINE_WIDTH-1)
bw_plot.set_xlim([0, cur_xlim])
#bw_plot.legend(shadow=True, bbox_to_anchor=(0., 1.02, 1., .102), loc=3)
bw_plot.legend(shadow=True, loc="best", prop={'size':18})
# plot mode change if it exists
for each_mode_change in mode_change_log_lines:
if each_mode_change.lower().find("current mode is the best") != -1:
continue
mode_change_time = float(each_mode_change.split("\t")[0]) - migration_start_time
#mode_change_time = float(each_mode_change.split("\t")[1])
bw_plot.axvline(x=mode_change_time, linewidth=2, color='k', linestyle='-.')
f2.gca().grid(True)
#bw_plot.axhline(y=network_bw, linewidth=1, color='g', linestyle='--')
plt.gcf().subplots_adjust(bottom=0.15)
#plt.gcf().subplots_adjust(left=0.01)
plt.savefig(inputfile + "-bw" + '.png')
if __name__ == "__main__":
command_list = ["profiling", "over-time", "summary"]
if len(sys.argv) != 3:
sys.stderr.write("Need command(%s) and input filename\n" % (','.join(command_list)))
sys.exit(1)
command = sys.argv[1]
inputfile = sys.argv[2]
if command == "profiling":
test_ret_list = parsing(inputfile)
moped_exps, speech_exps, fluid_exps, face_exps, mar_exps, random_exps, delta_exps = _split_experiment(test_ret_list)
#print_bw(face_exps)
print_bw_block(moped_exps)
elif command == "over-time":
print_p_r_over_time(inputfile)
elif command == "summary":
exp_list = parsing(inputfile)
exp = exp_list[0]
workload = exp.workload
in_size = exp.stage_size_in['CreateMemoryDeltalist'] + exp.stage_size_in['CreateDiskDeltalist']
        out_size = exp.stage_size_out['CompressProc']  # compressed output size
handoff_time = exp.migration_total_time
migration_downtime = exp.migration_downtime
total_ratio = (float(out_size)/in_size)
total_cpu_time = sum(exp.stage_time.values())
system_in_bw = 8*float(in_size)/handoff_time/1024.0/1024.0
system_out_bw = 8*float(out_size)/handoff_time/1024.0/1024.0
print "workload: %s (%s)" % (inputfile, workload)
print "total\tdown\tcpu\tin-size\tout\tin-bw\tout-bw"
print "%4.2f\t%4.2f\t%4.2f\t%6.2f\t%6.2f\t%6.2f\t%6.2f" % (handoff_time, migration_downtime, total_cpu_time, in_size/1024.0/1024, out_size/1024.0/1024, system_in_bw, system_out_bw)
else:
pass
'''
# how change in mode will affect system performance?
comparison = defaultdict(list)
pivot_mode = moped_exps[0]
pivot_R = pivot_mode.get_total_R()
pivot_P = pivot_mode.get_total_P()
for other_mode in moped_exps:
other_r = other_mode.get_total_R()
other_p = other_mode.get_total_P()
ratio_r = round(other_r/pivot_R, 4)
ratio_p = round(other_p/pivot_P, 4)
mode_diff_str = Experiment.mode_diff_str(pivot_mode, other_mode)
if len(mode_diff_str) == 0:
mode_diff_str = "original"
comparison[mode_diff_str].append((ratio_r, ratio_p))
#print "%s\t%s %s" % (mode_diff_str, ratio_r, ratio_p)
'''
|
import next.utils as utils
from datetime import datetime,timedelta
import celery
from next.broker.celery_app import tasks as tasks
from next.broker.celery_app.celery_broker import app
import os
import next.constants
import redis
import json
import time
class JobBroker:
# Initialization method for the broker
def __init__(self):
self.hostname = None
# location of hashes
self.r = redis.StrictRedis(host=next.constants.RABBITREDIS_HOSTNAME, port=next.constants.RABBITREDIS_PORT, db=0)
def applyAsync(self, app_id, exp_uid, task_name, args, ignore_result=False):
"""
Run a task (task_name) on a set of args with a given app_id, and exp_uid.
Waits for computation to finish and returns the answer unless ignore_result=True in which case its a non-blocking call.
No guarantees about order of execution.
Inputs: ::\n
(string) app_id, (string) exp_id, (string) task_name, (json) args
Outputs: ::\n
task_name(app_id, exp_id, args)
"""
submit_timestamp = utils.datetimeNow('string')
domain = self.__get_domain_for_job(app_id+"_"+exp_uid)
if next.constants.CELERY_ON:
result = tasks.apply.apply_async(args=[app_id,
exp_uid,
task_name,
args,
submit_timestamp],
exchange='async@'+domain,
routing_key='async@'+domain)
if ignore_result:
return True
else:
return result.get(interval=0.001)
else:
result = tasks.apply(app_id,exp_uid,task_name, args, submit_timestamp)
if ignore_result:
return True
else:
return result
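    # Illustrative usage sketch (the identifiers below are hypothetical, not
    # part of this module):
    #
    #   broker = JobBroker()
    #   answer = broker.applyAsync('MyApp', 'exp_uid_123', 'getQuery',
    #                              json.dumps({'participant_uid': 'p1'}))
    #
    # With ignore_result=True the call returns True immediately instead of
    # blocking on result.get().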
def dashboardAsync(self, app_id, exp_uid, args, ignore_result=False):
"""
Run a task (task_name) on a set of args with a given app_id, and exp_uid.
Waits for computation to finish and returns the answer unless ignore_result=True in which case its a non-blocking call.
No guarantees about order of execution.
Inputs: ::\n
(string) app_id, (string) exp_id, (string) task_name, (json) args
Outputs: ::\n
task_name(app_id, exp_id, args)
"""
submit_timestamp = utils.datetimeNow('string')
domain = self.__get_domain_for_job(app_id+"_"+exp_uid)
if next.constants.CELERY_ON:
result = tasks.apply_dashboard.apply_async(args=[app_id,
exp_uid,
args,
submit_timestamp],
exchange='dashboard@'+domain,
routing_key='dashboard@'+domain)
if ignore_result:
return True
else:
return result.get(interval=0.001)
else:
result = tasks.apply_dashboard(app_id,exp_uid, args, submit_timestamp)
if ignore_result:
return True
else:
return result
def applySyncByNamespace(self, app_id, exp_uid, alg_id, alg_label, task_name, args, namespace=None, ignore_result=False,time_limit=0):
"""
Run a task (task_name) on a set of args with a given app_id, and exp_uid asynchronously.
Waits for computation to finish and returns the answer unless ignore_result=True in which case its a non-blocking call.
If this method is called a sequence of times with the same namespace (defaults to exp_uid if not set) it is guaranteed that they will execute in order, each job finishing before the next begins
Inputs: ::\n
(string) app_id, (string) exp_id, (string) task_name, (json) args
"""
submit_timestamp = utils.datetimeNow('string')
if namespace==None:
namespace=exp_uid
domain = self.__get_domain_for_job(app_id+"_"+exp_uid)
num_queues = next.constants.CELERY_SYNC_WORKER_COUNT
        # assign namespaces to queues (each served by a worker of concurrency 1) in round-robin
        try:
            namespace_cnt = int(self.r.get(namespace+"_cnt"))
        except (TypeError, ValueError):
            # key missing: atomically assign the next counter value with a
            # WATCH/MULTI/EXEC compare-and-set loop
            pipe = self.r.pipeline(True)
while 1:
try:
pipe.watch(namespace+"_cnt","namespace_counter")
if not pipe.exists(namespace+"_cnt"):
if not pipe.exists('namespace_counter'):
namespace_counter = 0
else:
namespace_counter = pipe.get('namespace_counter')
pipe.multi()
pipe.set(namespace+"_cnt",int(namespace_counter)+1)
pipe.set('namespace_counter',int(namespace_counter)+1)
pipe.execute()
else:
pipe.unwatch()
break
except redis.exceptions.WatchError:
continue
finally:
pipe.reset()
namespace_cnt = int(self.r.get(namespace+"_cnt"))
queue_number = (namespace_cnt % num_queues) + 1
queue_name = 'sync_queue_'+str(queue_number)+'@'+domain
job_uid = utils.getNewUID()
if time_limit == 0:
soft_time_limit = None
hard_time_limit = None
else:
soft_time_limit = time_limit
hard_time_limit = time_limit + .01
if next.constants.CELERY_ON:
result = tasks.apply_sync_by_namespace.apply_async(args=[app_id,exp_uid,
alg_id,alg_label,
task_name, args,
namespace, job_uid,
submit_timestamp, time_limit],
queue=queue_name,
soft_time_limit=soft_time_limit,
time_limit=hard_time_limit)
if ignore_result:
return True
else:
return result.get(interval=.001)
else:
result = tasks.apply_sync_by_namespace(app_id,exp_uid,alg_id,alg_label,task_name, args, namespace, job_uid, submit_timestamp, time_limit)
if ignore_result:
return True
else:
return result
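    # Illustrative usage sketch (hypothetical identifiers): two calls that
    # share a namespace are guaranteed to run in submission order, each one
    # finishing before the next starts:
    #
    #   broker.applySyncByNamespace('MyApp', 'exp1', 'alg1', 'label',
    #                               'processAnswer', args1)  # runs first
    #   broker.applySyncByNamespace('MyApp', 'exp1', 'alg1', 'label',
    #                               'processAnswer', args2)  # runs second
    #
    # Both default to namespace=exp_uid, so they land on the same sync queue.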
def __get_domain_for_job(self, job_id):
"""
Computes which domain to run a given job_id on.
Git Commit: c1e4f8aacaa42fae80e111979e3f450965643520 has support
for multiple worker nodes. See the code in broker.py, cluster_monitor.py, and the docker-compose
file in that commit to see how to get that up and running. It uses
a simple circular hashing scheme to load balance getQuery/processAnswer calls.
This implementation assumes just a single master node and no workers
so only a single hostname (e.g. localhost) has celery workers.
"""
if self.r.exists('MINIONWORKER_HOSTNAME'):
self.hostname = self.r.get('MINIONWORKER_HOSTNAME')
utils.debug_print('Found hostname: {} (Redis)'.format(self.hostname))
else:
with open('/etc/hosts', 'r') as fid:
for line in fid:
if 'MINIONWORKER' in line:
self.hostname = line.split('\t')[1].split(' ')[1]
                        self.r.set('MINIONWORKER_HOSTNAME', self.hostname, ex=360) # expire after 6 minutes
utils.debug_print('Found hostname: {} (/etc/hosts)'.format(self.hostname))
break
if self.hostname is None:
import socket
self.hostname = socket.gethostname()
            self.r.set('MINIONWORKER_HOSTNAME', self.hostname, ex=360) # expire after 6 minutes
utils.debug_print('Found hostname: {} (socket.gethostname())'.format(self.hostname))
return self.hostname
|
"""Run memtier_benchmark against Redis.
memtier_benchmark is a load generator created by RedisLabs to benchmark
Redis.
Redis homepage: http://redis.io/
memtier_benchmark homepage: https://github.com/RedisLabs/memtier_benchmark
"""
import logging
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.packages import redis_server
flags.DEFINE_integer('redis_numprocesses', 1, 'Number of Redis processes to '
'spawn per processor.')
flags.DEFINE_integer('redis_clients', 5, 'Number of redis loadgen clients')
flags.DEFINE_string('redis_setgetratio', '1:0', 'Ratio of writes to reads '
                    'performed by the memtier benchmark; the default '
                    '\'1:0\' means writes only.')
MEMTIER_COMMIT = '1.2.0'
FIRST_PORT = 6379
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'redis'
BENCHMARK_CONFIG = """
redis:
description: >
Run memtier_benchmark against Redis.
Specify the number of client VMs with --redis_clients.
vm_groups:
default:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['default']['vm_count'] = 1 + FLAGS.redis_clients
return config
def PrepareLoadgen(load_vm):
load_vm.Install('memtier')
def Prepare(benchmark_spec):
"""Install Redis on one VM and memtier_benchmark on another.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
redis_vm = vms[0]
# Install latest redis on the 1st machine.
redis_vm.Install('redis_server')
sed_cmd = (r"sed -i -e '/save 900/d' -e '/save 300/d' -e '/save 60/d' -e 's/#"
" save \"\"/save \"\"/g' %s/redis.conf")
redis_vm.RemoteCommand(sed_cmd % redis_server.REDIS_DIR)
for i in range(redis_vm.num_cpus * FLAGS.redis_numprocesses):
port = FIRST_PORT + i
redis_vm.RemoteCommand(
'cp %s/redis.conf %s/redis-%d.conf' %
(redis_server.REDIS_DIR, redis_server.REDIS_DIR, port))
redis_vm.RemoteCommand(
r'sed -i -e "s/port 6379/port %d/g" %s/redis-%d.conf' %
(port, redis_server.REDIS_DIR, port))
redis_vm.RemoteCommand(
'nohup sudo %s/src/redis-server %s/redis-%d.conf &> /dev/null &' %
(redis_server.REDIS_DIR, redis_server.REDIS_DIR, port))
args = [((vm,), {}) for vm in vms[1:]]
vm_util.RunThreaded(PrepareLoadgen, args)
def RunLoad(redis_vm, load_vm, threads, port, test_id, results):
  """Spawn a memtier_benchmark on the load_vm against the redis_vm:port.
  Args:
    redis_vm: The target of the memtier_benchmark.
    load_vm: The vm that will run the memtier_benchmark.
    threads: The number of threads to run in this memtier_benchmark process.
    port: The port to target on the redis_vm.
    test_id: A number unique to this load_vm for this iteration.
    results: A dictionary in which the results of the run will be stored,
      keyed by test_id. Each value is a tuple of
      (throughput achieved, average latency).
  """
if threads == 0:
return
base_cmd = ('memtier_benchmark -s %s -p %d -d 128 '
'--ratio %s --key-pattern S:S -x 1 -c 1 -t %d '
'--test-time=%d --random-data > %s ;')
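  # The benchmark is chained three times back to back: a 10 s warm-up and a
  # 10 s cool-down whose output is discarded to /dev/null, and a 20 s
  # measurement run in between whose output is captured in outfile-<test_id>.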
final_cmd = (base_cmd % (redis_vm.internal_ip, port,
FLAGS.redis_setgetratio, threads, 10,
'/dev/null') +
base_cmd % (redis_vm.internal_ip, port,
FLAGS.redis_setgetratio, threads, 20,
'outfile-%d' % test_id) +
base_cmd % (redis_vm.internal_ip, port,
FLAGS.redis_setgetratio, threads, 10,
'/dev/null'))
load_vm.RemoteCommand(final_cmd)
output, _ = load_vm.RemoteCommand('cat outfile-%d | grep Totals | '
'tr -s \' \' | cut -d \' \' -f 2' % test_id)
throughput = float(output)
output, _ = load_vm.RemoteCommand('cat outfile-%d | grep Totals | '
'tr -s \' \' | cut -d \' \' -f 5' % test_id)
latency = float(output)
output, _ = load_vm.RemoteCommand('cat outfile-%d' % test_id)
logging.info(output)
results[test_id] = (throughput, latency)
def Run(benchmark_spec):
"""Run memtier_benchmark against Redis.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
redis_vm = vms[0]
load_vms = vms[1:]
latency = 0.0
latency_threshold = 1000000.0
threads = 0
results = []
num_servers = redis_vm.num_cpus * FLAGS.redis_numprocesses
max_throughput_for_completion_latency_under_1ms = 0.0
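  # Ramp up the number of memtier threads by ~15% per iteration (at least 1)
  # until the throughput-weighted average latency crosses latency_threshold,
  # which is set to 20x the single-thread latency after the first iteration.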
while latency < latency_threshold:
iteration_results = {}
threads += max(1, int(threads * .15))
num_loaders = len(load_vms) * num_servers
args = [((redis_vm, load_vms[i % len(load_vms)], threads / num_loaders +
(0 if (i + 1) > threads % num_loaders else 1),
FIRST_PORT + i % num_servers, i, iteration_results),
{}) for i in range(num_loaders)]
    logging.debug('RunLoad args: %s', args)
vm_util.RunThreaded(RunLoad, args)
throughput = 0.0
latency = 0.0
    logging.debug('%s', iteration_results)
for result in iteration_results.values():
throughput += result[0]
for result in iteration_results.values():
latency += result[1] * result[0] / throughput
if latency < 1.0:
max_throughput_for_completion_latency_under_1ms = max(
max_throughput_for_completion_latency_under_1ms,
throughput)
results.append(sample.Sample('throughput', throughput, 'req/s',
{'latency': latency, 'threads': threads}))
logging.info('Threads : %d (%f, %f) < %f', threads, throughput, latency,
latency_threshold)
if threads == 1:
latency_threshold = latency * 20
results.append(sample.Sample(
'max_throughput_for_completion_latency_under_1ms',
max_throughput_for_completion_latency_under_1ms,
'req/s'))
return results
def Cleanup(benchmark_spec):
"""Remove Redis and YCSB.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
|
import os
try:
import pty
except ImportError:
# to support --cover-inclusive on Windows
if os.name not in ['nt']:
raise
from subprocess import Popen
from subprocess import STDOUT
import time
from .execute_process_nopty import _close_fds
from .execute_process_nopty import _yield_data
def _execute_process_pty(cmd, cwd, env, shell, stderr_to_stdout=True):
    stdout_master, stdout_slave = None, None
    stderr_master, stderr_slave = None, None
    fds_to_close = []
    try:
        stdout_master, stdout_slave = pty.openpty()
        # Track the descriptors actually opened so they can be closed later;
        # building this list before openpty() would only capture None values.
        fds_to_close = [stdout_master, stdout_slave]
        if stderr_to_stdout:
            stderr_master, stderr_slave = stdout_master, stdout_slave
        else:
            stderr_master, stderr_slave = pty.openpty()
            fds_to_close += [stderr_master, stderr_slave]
p = None
while p is None:
try:
p = Popen(
cmd,
stdin=stdout_slave, stdout=stderr_slave, stderr=STDOUT,
cwd=cwd, env=env, shell=shell, close_fds=False)
except OSError as exc:
# This can happen if a file you are trying to execute is being
# written to simultaneously on Linux
# (doesn't appear to happen on OS X)
# It seems like the best strategy is to just try again later
# Worst case is that the file eventually gets deleted, then a
# different OSError would occur.
if 'Text file busy' in '{0}'.format(exc):
# This is a transient error, try again shortly
time.sleep(0.01)
continue
raise
# This causes the below select to exit when the subprocess closes.
# On Linux, this sometimes causes Errno 5 OSError's when os.read
# is called from within _yield_data, so on Linux _yield_data
# catches and passes on that particular OSError.
        os.close(stdout_slave)
        fds_to_close.remove(stdout_slave)
        if not stderr_to_stdout:
            os.close(stderr_slave)
            fds_to_close.remove(stderr_slave)
left_overs = {stdout_master: b'', stderr_master: b''}
fds = [stdout_master]
if stderr_master != stdout_master:
fds.append(stderr_master)
    except Exception:
        # Make sure we don't leak file descriptors on error. On success the
        # remaining (master) descriptors are handed to _yield_data below,
        # which closes them once the subprocess output is exhausted.
        _close_fds(fds_to_close)
        raise
# The linesep with pty's always seems to be "\r\n", even on OS X
return _yield_data(p, fds, left_overs, "\r\n", fds_to_close)
|
from .EngineApiClient import EngineApiClient
|
"""
The program reads an existing model file and generates models for different amounts of occlusions
"""
from __future__ import print_function
from detector_model_pb2 import DetectorModel
import detector_model_pb2 as dm
import detections_pb2 as det
import os, os.path#, glob
from optparse import OptionParser
from plot_detector_model import read_cascade, read_model
def add_feature_to_channels(channel_index, box, weight):
    # NOTE: relies on module-level `channels` (a numpy array) and
    # `print_the_features` (a bool) being defined by the caller before use.
#for y in range(box.min_corner.y, box.max_corner.y+1):
# for x in range(box.min_corner.x, box.max_corner.x+1):
# channels[channel_index, y, x] += weight
slice_y = slice(box.min_corner.y, box.max_corner.y+1)
slice_x = slice(box.min_corner.x, box.max_corner.x+1)
channels[channel_index, slice_y, slice_x] += weight
if print_the_features:
print("box (min x,y) (max x,y) ==",
(box.min_corner.x, box.min_corner.y),
(box.max_corner.x, box.max_corner.y),
"\tweight ==", weight)
return
def get_stump_box(stump):
feature = stump.feature
return feature.box
def get_node_boxes(node):
if node.decision_stump:
return get_stump_box(node.decision_stump)
def getMaxXY_tree(tree):
nodes = []
for node in tree.nodes:
nodes.append(get_node_boxes(node))
    # find the maximal x and y positions over all node boxes
    maxy = -1
    maxx = -1
    for node in nodes:
        x = node.max_corner.x
        y = node.max_corner.y
        if x > maxx:
            maxx = x
        if y > maxy:
            maxy = y
return [maxx, maxy]
def update_cascade(old_cascade, new_cascade, yThresh):
new_cascade.Clear()
    for i, stage in enumerate(old_cascade.stages):
if stage.feature_type == stage.Level2DecisionTree:
maxx, maxy = getMaxXY_tree(stage.level2_decision_tree)
else:
print("stage.feature_type ==", stage.feature_type)
raise Exception("Received an unhandled stage.feature_type")
        if maxy < yThresh:
new_stage = new_cascade.stages.add()
new_stage.CopyFrom(stage)
return
def update_cascade_left(old_cascade, new_cascade, xThresh):
new_cascade.Clear()
    for i, stage in enumerate(old_cascade.stages):
if stage.feature_type == stage.Level2DecisionTree:
maxx, maxy = getMaxXY_tree(stage.level2_decision_tree)
else:
print("stage.feature_type ==", stage.feature_type)
raise Exception("Received an unhandled stage.feature_type")
        if maxx < xThresh:
new_stage = new_cascade.stages.add()
new_stage.CopyFrom(stage)
return
def generate_occlusionClassifier(input_model):
    # vertical extent of the model window in feature coordinates
    height = 32
    half_height = 16
    model = read_model(input_model)
    for i in range(1 + half_height):
        yThresh = half_height - i
        new_model = DetectorModel()
        new_model.CopyFrom(model)
        if model.model_window_size:
            model_width = model.model_window_size.x
            model_height = model.model_window_size.y
        print("model.detector_type", model.detector_type)
        if model.detector_type == model.SoftCascadeOverIntegralChannels:
            old_cascade = model.soft_cascade_model
            new_cascade = new_model.soft_cascade_model
            print("Model has %i stages" % len(old_cascade.stages))
            update_cascade(old_cascade, new_cascade, height - yThresh)
            output_path = input_model + "_artificial_crop_" + str(yThresh*4)
            out_file = open(output_path, "wb")
            out_file.write(new_model.SerializeToString())
            out_file.close()
            print("Created output model file", output_path)
def generate_occlusionClassifier_left(input_model):
    # horizontal extent of the model window in feature coordinates
    width = 16
    half_width = 8
    model = read_model(input_model)
    for i in range(1 + half_width):
        xThresh = half_width - i
        new_model = DetectorModel()
        new_model.CopyFrom(model)
        if model.model_window_size:
            model_width = model.model_window_size.x
            model_height = model.model_window_size.y
        print("model.detector_type", model.detector_type)
        if model.detector_type == model.SoftCascadeOverIntegralChannels:
            old_cascade = model.soft_cascade_model
            new_cascade = new_model.soft_cascade_model
            print("Model has %i stages" % len(old_cascade.stages))
            update_cascade_left(old_cascade, new_cascade, width - xThresh)
            output_path = input_model + "_artificial_crop_" + str(xThresh*4)
            out_file = open(output_path, "wb")
            out_file.write(new_model.SerializeToString())
            out_file.close()
            print("Created output model file", output_path)
def parse_arguments():
parser = OptionParser()
parser.description = \
"The program reads an existing model file and generates models for different amounts of occlusions"
parser.add_option("-i", "--input_model", dest="input_model",
type="string",
help="path to the trained model.")
parser.add_option("-c", "--classifier_type", dest="classifier_type",
type="string",
help="this option is required and denotes the type of the classifier: \"up\" or \"left\"")
(options, args) = parser.parse_args()
#print (options, args)
if not options.classifier_type:
parser.error("'classifier_type' has to be specified")
if not options.input_model:
parser.error("'input' option is required to run this program")
else:
if not os.path.exists(options.input_model):
parser.error("Could not find the input file %s" % options.input_model)
return options
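# Illustrative invocation (the script and model file names are examples only):
#
#   python create_occluded_models.py -i trained_model.proto.bin -c up
#
# This writes a series of cropped models named
# trained_model.proto.bin_artificial_crop_<N> next to the input model.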
def main():
options = parse_arguments()
if options.classifier_type == "up":
generate_occlusionClassifier(options.input_model)
elif options.classifier_type =="left":
generate_occlusionClassifier_left(options.input_model)
    else:
        raise Exception("classifier type must be either 'up' or 'left'")
    print("End of game, have a nice day!")
    return
if __name__ == "__main__":
main()
|
"""一個字符集過濾的程式,篩選輸出指定字符集的行
輸入input.txt,輸出output.txt
"""
INPUT_NAME = "input.txt" #輸入input.txt
OUTPUT_NAME = "output.txt" #輸出output.txt
ENC = "utf-8"
ENCODINGS = ["utf-8-sig", "utf-16", "gbk", "gb18030"] #嘗試編碼列表
def open_file(filename):
    "Try each candidate encoding until the whole file decodes cleanly"
    for enc in ENCODINGS:
        try:
            input_file = open(filename, encoding=enc)
            input_file.read()  # force a full decode now; open() by itself is lazy
            input_file.seek(0)
            return input_file
        except UnicodeError:
            input_file.close()
            continue
    return None
def is_visible(char):
"判斷是否爲可顯示字符"
code_point = ord(char)
if (
#(0x3400 <= code_point <= 0x4DBF) or # CJK Unified Ideographs Extension A
(0x20000 <= code_point <= 0x2A6DF) or # CJK Unified Ideographs Extension B
(0x2A700 <= code_point <= 0x2B73F) or # CJK Unified Ideographs Extension C
(0x2B740 <= code_point <= 0x2B81F) or # CJK Unified Ideographs Extension D
(0x2B820 <= code_point <= 0x2CEAF) or # CJK Unified Ideographs Extension E
(0x2F800 <= code_point <= 0x2FA1F) # CJK Compatibility Ideographs Supplement
):
return False
return True
def in_charset(line, charset="visible"):
"檢查是否屬於charset字符集"
if charset == "visible":
return all(map(is_visible, line))
try:
line.encode(charset)
return True
except UnicodeError:
return False
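# Illustrative behaviour of in_charset (values checked by hand):
#   in_charset("abc", "ascii")  -> True   ("abc" encodes as ASCII)
#   in_charset("中文", "ascii")  -> False  (encoding raises UnicodeError)
#   in_charset("中文")           -> True   (BMP ideographs count as visible)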
def main():
    "Read the input file and write out the lines that pass the charset filter"
    input_file = open_file(INPUT_NAME)
    if input_file:
        # only the text before the last tab of each line is checked
        lines = [line for line in input_file if in_charset(line.strip().rsplit("\t", 1)[0])]
        input_file.close()
        if lines:
            with open(OUTPUT_NAME, "w", encoding=ENC) as output_file:
                output_file.writelines(lines)
if __name__ == "__main__":
main()
|
import time
import sys
import logging
import signal
import getopt
from simulation import *
from statistics import *
from system import *
def usage(arg):
print arg, ": -h [--help] -l [--log] -m <mission_time> [--mission_time <mission_time>]"
print "-i <num_iterations> [--iterations <num_iterations>] -r <raid_type> [--raid <raid_type>]"
print "-n <num_raids> [--raid_num <num_raids>] -c <capacity_factor> [--capacity <capacity_factor>]"
print "-F <disk_fail_dist> [--disk_fail_dist <disk_fail_dist>]"
print "-R <disk_repair_dist> [--disk_repair_dist <disk_repair_dist>]"
print "-L <disk_lse_dist> [--disk_lse_dist <disk_lse_dist>]"
print "-S <disk_scrubbing_dist> [--disk_scrubbing_dist <disk_scrubbing_dist>]"
print "-a <required_re> [--accuracy <required_re>]"
# file system model;
print "-t <trace> [--trace <trace_file>]"
print "-f [--filelevel]"
print "-d [--dedup]"
print "-w [--weighted]"
print ""
print "Detail:"
print "mission_time = simulation end time in hours, default is 87600"
print ""
print "num_iterations = number of simulation runs, default is 10000"
print ""
print "raid_type = the raid configuration , 14_2_mds by default"
print ""
print "num_raids = number of raids in the system, defaut is 1"
print ""
print "capacity_factor = the disk capacity factor, defaut is 1 (2*1024*1024*1024 sectors (1TB)),"
print ""
print "disk_fail_dist = \"(shape = 1.2, scale = 461386 by default)\" OR"
print " \"(scale)\" OR"
print " \"(shape, scale)\" OR"
print " \"(shape, scale, location)\""
print "disk_repair_dist = \"(shape = 2.0, scale = 12, location = 6 by default)\" OR"
print " \"(scale)\" OR"
print " \"(shape, scale)\" OR"
print " \"(shape, scale, location)\""
print "disk_scrubbing_dist = \"(shape = 3.0, scale = 168, location = 6 by default)\" OR"
print " \"(scale)\" OR"
print " \"(shape, scale)\" OR"
print " \"(shape, scale, location)\""
print " shape = shape parameter of a Weibull (1 for Exponential)"
print " scale = scale parameter of a Weibull"
print " location = location parameter of a Weibull"
print "disk_lse_dist = \"(rate = 1.08/10000 by default)\""
print ""
print "required_re = the required relative error, disable by default"
print ""
print "Samples:"
print arg, "-i 10000 -r \"mds_5_1\" -a 0.05"
sys.exit(2)
def get_parms():
logging.basicConfig(level = getattr(logging, "WARNING"))
# 87600 hours, for 10 years
mission_time = 87600
# more iterations, more accurate estimate
iterations = 10000L
# the data/parity configuration
# such as mds_7_1
raid_type = "mds_14_2"
# the number of raid
raid_num = 1
# the number of sectors in each disk
# 512 bytes for each sector
# So the default is 1TB
disk_capacity = 2*1024*1024*1024L
capacity_factor = 1.0
parms = "Elerath2014A"
disk_fail_parms = None
disk_repair_parms = None
disk_lse_parms = None
disk_scrubbing_parms = None
# This indicates the simulation will not end until reach a required relative error
force_re = False
required_re = 0.05
# file system trace
fs_trace = None
filelevel = False
dedup = False
weighted = False
# output all data loss events
output_events = None
try:
(opts, args) = getopt.getopt(sys.argv[1:], "hl:m:i:r:n:c:p:F:R:L:S:a:t:fdwo:", ["help", "log", "mission_time",
"iterations",
"raid", "raid_num",
"capacity",
"parameters",
"disk_fail_dist",
"disk_repair_dist",
"disk_lse_dist",
"disk_scrubbing_dist",
"accuracy",
"trace",
"filelevel"
"dedup",
"weighted",
"output",
])
    except getopt.GetoptError:
        # report the problem, then usage() prints the help text and exits
        print "failed to parse command line options"
        usage(sys.argv[0])
for o, a in opts:
if o in ("-h", "--help"):
print usage(sys.argv[0])
sys.exit(0)
if o in ("-l", "--log"):
logger = logging.getLogger("sim")
logger.setLevel(getattr(logging, a.upper()))
if o in ("-F", "--disk_fail_dist"):
if len(eval(a)) == 1:
disk_fail_parms = (1, eval(a), 0)
elif len(eval(a)) == 2:
(shape, scale) = eval(a)
disk_fail_parms = (shape, scale, 0)
elif len(eval(a)) == 3:
(shape, scale, location) = eval(a)
disk_fail_parms = (shape, scale, location)
else:
bad_opt = o + " : " + a
break
elif o in ("-R", "--disk_repair_dist"):
if len(eval(a)) == 1:
disk_repair_parms = (1, eval(a), 0)
elif len(eval(a)) == 2:
(shape, scale) = eval(a)
disk_repair_parms = (shape, scale, 0)
elif len(eval(a)) == 3:
(shape, scale, location) = eval(a)
disk_repair_parms = (shape, scale, location)
else:
bad_opt = o + " : " + a
break
elif o in ("-L", "--disk_lse_dist"):
if len(eval(a)) == 1: # the lse rate
disk_lse_parms = eval(a)
else:
bad_opt = o + " : num args must be 1"
break
elif o in ("-m", "--mission_time"):
mission_time = float(a)
elif o in ("-i", "--iterations"):
iterations = long(a)
elif o in ("-r", "--raid"):
raid_type = a
elif o in ("-n", "--raid_num"):
raid_num = int(a)
elif o in ("-c", "--capacity"):
capacity_factor = float(a)
elif o in ("-p", "--parameters"):
parms = a
elif o in ("-a", "--accuracy"):
force_re = True
required_re = float(a)
elif o in ("-t", "--trace"):
fs_trace = a
elif o in ("-f", "--filelevel"):
filelevel = True
elif o in ("-d", "--dedup"):
dedup = True
elif o in ("-w", "--weighted"):
weighted = True
elif o in ("-o", "--output"):
output_events = a
# TO-DO: We should verify these numbers
# We assume larger disks will have longer repair and scrubbing time
disk_capacity *= capacity_factor
# The following parameters may change with disk capacity
# For failure, restore, and scrubbing, the parameters are (shape, scale, location)
    if disk_fail_parms != None and disk_repair_parms != None and disk_lse_parms != None and disk_scrubbing_parms != None:
parms = None
if parms == "Elerath2009":
# data from [Elerath2009]
disk_fail_parms = (1.2, 461386.0, 0)
disk_repair_parms = (2.0, 12.0 * capacity_factor, 6.0 * capacity_factor)
disk_lse_parms = (1.08/10000)
disk_scrubbing_parms = (3, 168 * capacity_factor, 6 * capacity_factor)
elif parms == "Elerath2014A":
#data from [Elerath2014], SATA Disk A
disk_fail_parms = (1.13, 302016.0, 0)
disk_repair_parms = (1.65, 22.7 * capacity_factor, 0)
disk_lse_parms = (1.0/12325)
disk_scrubbing_parms = (1, 186 * capacity_factor, 0)
elif parms == "Elerath2014B":
#data from [Elerath2014], SATA Disk B
disk_fail_parms = (0.576, 4833522.0, 0)
disk_repair_parms = (1.15, 20.25 * capacity_factor, 0)
disk_lse_parms = (1.0/42857)
disk_scrubbing_parms = (0.97, 160 * capacity_factor, 0)
else:
        if parms != None:
            print "Invalid parms"
            usage(sys.argv[0])
return (mission_time, iterations, raid_type, raid_num, disk_capacity,
disk_fail_parms, disk_repair_parms, disk_lse_parms, disk_scrubbing_parms, force_re, required_re,
fs_trace, filelevel, dedup, weighted, output_events)
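# Illustrative sketch (not part of the simulator): with the (shape, scale,
# location) convention used above, one way to draw a Weibull-distributed
# lifetime in hours is with the standard library:
#
#   import random
#   def sample_weibull(shape, scale, location=0.0):
#       # random.weibullvariate(alpha, beta) takes alpha=scale, beta=shape
#       return location + random.weibullvariate(scale, shape)
#
# so sample_weibull(*disk_fail_parms) would give one time-to-failure draw.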
def print_result(model, raid_failure_samples, lse_samples, systems_with_data_loss,
systems_with_raid_failures, systems_with_lse, iterations, raid_type, raid_num, disk_capacity, df):
(type, d, p) = raid_type.split("_");
data_fragments = int(d)
total_capacity = data_fragments * disk_capacity * raid_num * 512/1024/1024/1024/1024 * df
localtime = time.asctime(time.localtime(time.time()))
print "**************************************"
print "System (%s): %.2fTB data, D/F = %.4f, %d of %s RAID, %ld iterations" % (localtime, total_capacity, df, raid_num, raid_type, iterations)
print "Filelevel =", model.filelevel, ", Dedup =", model.dedup, ", Weighted =", model.weighted
print "Summary: %d of systems with data loss events (%d by raid failures, %d by lse)" % (systems_with_data_loss, systems_with_raid_failures, systems_with_lse)
prob_result = (raid_failure_samples.prob_mean, 100*raid_failure_samples.prob_re, raid_failure_samples.prob_mean - raid_failure_samples.prob_ci,
raid_failure_samples.prob_mean + raid_failure_samples.prob_ci, raid_failure_samples.prob_dev)
value_result = (raid_failure_samples.value_mean, 100*raid_failure_samples.value_re, raid_failure_samples.value_mean - raid_failure_samples.value_ci,
raid_failure_samples.value_mean + raid_failure_samples.value_ci, raid_failure_samples.value_dev)
print "******** RAID Failure Part ***********"
print "Probability of RAID Failures: %e +/- %f Percent , CI (%e,%e), StdDev: %e" % prob_result
if model.filelevel == False:
print "Fraction of Blocks/Chunks Lost in the Failed Disk: %e +/- %f Percent, CI (%e,%e), StdDev: %e" % value_result
elif model.weighted == False:
print "Fraction of Files Lost: %e +/- %f Percent, CI (%e,%e), StdDev: %e" % value_result
else:
print "Fraction of Files Lost Weighted by Bytes: %e +/- %f Percent, CI (%e,%e), StdDev: %e" % value_result
prob_result = (lse_samples.prob_mean, 100*lse_samples.prob_re, lse_samples.prob_mean - lse_samples.prob_ci,
lse_samples.prob_mean + lse_samples.prob_ci, lse_samples.prob_dev)
value_result = (lse_samples.value_mean, 100*lse_samples.value_re, lse_samples.value_mean - lse_samples.value_ci,
lse_samples.value_mean + lse_samples.value_ci, lse_samples.value_dev)
print "************* LSE Part ***************"
print "Probability of LSEs: %e +/- %f Percent , CI (%e,%e), StdDev: %e" % prob_result
NOMDL = value_result[0]/total_capacity
if model.filelevel == False:
if model.weighted == False:
print "# of Blocks/Chunks Lost: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
print "NOMDL (Normalized Magnitude of Data Loss): %e chunks per TB" % NOMDL
else:
print "Bytes of Blocks/Chunks Lost: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
print "NOMDL (Normalized Magnitude of Data Loss): %e bytes per TB" % NOMDL
else:
if model.weighted == False:
print "# of Corrupted Files: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
print "NOMDL (Normalized Magnitude of Data Loss): %e files per TB" % NOMDL
else:
print "Size of Corrupted Files: %e +/- %f Percent, CI (%f,%f), StdDev: %e" % value_result
print "NOMDL (Normalized Magnitude of Data Loss): %e bytes per TB" % NOMDL
print "**************************************"
def do_it():
parms = get_parms()
simulation = Simulation(*parms)
(model, raid_failure_samples, lse_samples, systems_with_data_loss,
systems_with_raid_failures, systems_with_lse, iterations, df) = simulation.simulate()
raid_type = parms[2]
raid_num = parms[3]
disk_capacity = parms[4]
print_result(model, raid_failure_samples, lse_samples, systems_with_data_loss,
systems_with_raid_failures, systems_with_lse, iterations, raid_type, raid_num, disk_capacity, df)
def sig_quit(sig, frame):
    # walk up the stack to find the running Simulation object
    sim = frame.f_locals.get("self", None)
    while not isinstance(sim, Simulation):
        frame = frame.f_back
        sim = frame.f_locals.get("self", None)
    print >>sys.stderr, "\nThe simulation is interrupted!"
    sim.raid_failure_samples.calcResults("0.95")
    sim.lse_samples.calcResults("0.95")
    iterations = sim.iterations - sim.more_iterations + sim.cur_i
    print_result(sim.system.dedup_model, sim.raid_failure_samples, sim.lse_samples, sim.systems_with_data_loss,
                 sim.systems_with_raid_failures, sim.systems_with_lse,
                 iterations, sim.raid_type, sim.raid_num, sim.disk_capacity, sim.system.get_df())
    if sim.output is not None:
        print >>sim.output, "I=%d" % iterations
        sim.output.close()
    sys.exit(1)
if __name__ == "__main__":
simulation = None
signal.signal(signal.SIGINT, sig_quit)
do_it()
|
import logging
import os
import fixtures
import testtools
from diskimage_builder import element_dependencies
logger = logging.getLogger(__name__)
data_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'test-elements'))
def _populate_element(element_dir, element_name, element_deps=[], provides=[]):
logger.debug("Populate %s <%s>", element_name, element_dir)
element_home = os.path.join(element_dir, element_name)
os.mkdir(element_home)
deps_path = os.path.join(element_home, 'element-deps')
with open(deps_path, 'w') as deps_file:
deps_file.write("\n".join(element_deps))
provides_path = os.path.join(element_home, 'element-provides')
with open(provides_path, 'w') as provides_file:
provides_file.write("\n".join(provides))
class TestElementDeps(testtools.TestCase):
def setUp(self):
super(TestElementDeps, self).setUp()
self.element_root_dir = self.useFixture(fixtures.TempDir()).path
self.element_dir = os.path.join(self.element_root_dir, 'elements')
self.element_override_dir = os.path.join(self.element_root_dir,
'element-override')
os.mkdir(self.element_dir)
os.mkdir(self.element_override_dir)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(level=logging.DEBUG))
_populate_element(self.element_dir, 'requires-foo', ['foo'])
_populate_element(self.element_dir,
'foo',
[],
['operating-system'])
_populate_element(self.element_dir,
'requires-requires-foo',
['requires-foo'])
_populate_element(self.element_dir, 'self', ['self'])
_populate_element(self.element_dir,
'provides_virtual',
[],
['virtual'])
_populate_element(self.element_dir,
'also_provides_virtual',
[],
['virtual'])
_populate_element(self.element_dir,
'requires_virtual',
['virtual'],
['operating-system'])
_populate_element(self.element_dir, 'virtual', ['extra_dependency'])
_populate_element(self.element_dir, 'extra_dependency', [])
_populate_element(self.element_dir,
'circular1',
['circular2'],
['operating-system'])
_populate_element(self.element_dir, 'circular2', ['circular1'])
_populate_element(self.element_dir,
'provides_new_virtual',
[],
['new_virtual', 'operating-system'])
_populate_element(self.element_dir,
'requires_new_virtual',
['new_virtual'])
# second element should override the first one here
_populate_element(self.element_dir, 'override_element', [])
_populate_element(self.element_override_dir, 'override_element', [])
# This simulates $ELEMENTS_PATH
self.element_dirs = "%s:%s" % (self.element_override_dir,
self.element_dir)
# helper to return an (element, path) tuple from the standard dir
def _e(self, element):
return (element, os.path.join(self.element_dir, element))
# helper to return an (element, path) tuple from the override dir
def _eo(self, element):
return (element, os.path.join(self.element_override_dir, element))
def test_non_transitive_deps(self):
result = element_dependencies.get_elements(['requires-foo'],
self.element_dirs)
self.assertCountEqual([self._e('foo'), self._e('requires-foo')],
result)
def test_missing_deps(self):
e = self.assertRaises(element_dependencies.MissingElementException,
element_dependencies.get_elements,
['fake'],
self.element_dirs)
self.assertIn("Element 'fake' not found", str(e))
def test_invalid_element_dir(self):
e = self.assertRaises(element_dependencies.InvalidElementDir,
element_dependencies.get_elements,
['fake'],
self.element_dirs + ":/not/a/dir")
self.assertIn("ELEMENTS_PATH entry '/not/a/dir' is not a directory",
str(e))
def test_transitive_deps(self):
result = element_dependencies.get_elements(
['requires-requires-foo'], self.element_dirs)
self.assertCountEqual([self._e('requires-requires-foo'),
self._e('requires-foo'),
self._e('foo')], result)
def test_no_deps(self):
result = element_dependencies.get_elements(['foo'], self.element_dirs)
self.assertEqual([self._e('foo')], result)
def test_self(self):
result = element_dependencies.get_elements(['self', 'foo'],
self.element_dirs)
self.assertCountEqual([self._e('self'),
self._e('foo')], result)
def test_circular(self):
result = element_dependencies.get_elements(['circular1'],
self.element_dirs)
self.assertCountEqual([self._e('circular1'),
self._e('circular2')], result)
def test_provide(self):
result = element_dependencies.get_elements(
['provides_virtual', 'requires_virtual'],
self.element_dirs)
self.assertCountEqual([self._e('requires_virtual'),
self._e('provides_virtual')], result)
def test_provide_conflict(self):
self.assertRaises(element_dependencies.AlreadyProvidedException,
element_dependencies.get_elements,
['virtual', 'provides_virtual'],
self.element_dirs)
def test_provide_virtual_ordering(self):
result = element_dependencies.get_elements(
['requires_new_virtual', 'provides_new_virtual'],
self.element_dirs)
self.assertCountEqual(
[self._e('requires_new_virtual'),
self._e('provides_new_virtual')], result)
def test_elements_provide_same(self):
msg = r"virtual: already provided by \['provides_virtual'\]"
self.assertRaisesRegex(element_dependencies.AlreadyProvidedException,
msg,
element_dependencies.get_elements,
['provides_virtual', 'also_provides_virtual'],
self.element_dirs)
def test_no_os_element(self):
self.assertRaises(element_dependencies.MissingOSException,
element_dependencies.get_elements,
['provides_virtual'],
self.element_dirs)
def test_duplicated_os_passed_as_element(self):
self.assertRaises(
element_dependencies.AlreadyProvidedException,
element_dependencies.get_elements,
['circular1', 'operating-system'],
self.element_dirs)
# ensure we get the error message about what's providing the
# conflicting package
self.assertIn("operating-system : already provided by ['circular1']",
self.log_fixture.output)
def test_element_override(self):
# make sure we picked up "override_element" from the override dir,
# not the base dir
result = element_dependencies.get_elements(['override_element', 'foo'],
self.element_dirs)
self.assertCountEqual([self._e('foo'),
self._eo('override_element')],
result)
def test_expand_dependencies_deprecated(self):
# test the deprecated expand_dependencies call
result = element_dependencies.expand_dependencies(
['foo', 'requires-foo'], self.element_dirs)
self.assertCountEqual(['foo', 'requires-foo'], result)
def test_output_sanity(self):
# very basic output sanity test
elements = element_dependencies._get_elements(['foo', 'requires-foo'],
self.element_dirs)
element_dependencies._output_env_vars(elements)
class TestElements(testtools.TestCase):
def test_depends_on_env(self):
self.useFixture(
fixtures.EnvironmentVariable('ELEMENTS_PATH', '/foo/bar'))
self.assertEqual('/foo/bar',
element_dependencies._get_elements_dir())
def test_env_not_set(self):
self.useFixture(fixtures.EnvironmentVariable('ELEMENTS_PATH', ''))
self.assertRaises(Exception,
element_dependencies._get_elements_dir, ())
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_stateful_set_spec import V1beta1StatefulSetSpec
class TestV1beta1StatefulSetSpec(unittest.TestCase):
""" V1beta1StatefulSetSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1StatefulSetSpec(self):
"""
Test V1beta1StatefulSetSpec
"""
model = kubernetes.client.models.v1beta1_stateful_set_spec.V1beta1StatefulSetSpec()
if __name__ == '__main__':
unittest.main()
|
"""A library of basic combiner PTransform subclasses."""
from __future__ import absolute_import
import operator
import random
from apache_beam.transforms import core
from apache_beam.transforms import cy_combiners
from apache_beam.transforms import ptransform
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import KV
from apache_beam.typehints import Any
from apache_beam.typehints import Dict
from apache_beam.typehints import List
from apache_beam.typehints import Tuple
from apache_beam.typehints import TypeVariable
from apache_beam.typehints import Union
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
try:
long # Python 2
except NameError:
long = int # Python 3
__all__ = [
'Count',
'Mean',
'Sample',
'Top',
'ToDict',
'ToList',
]
T = TypeVariable('T')
K = TypeVariable('K')
V = TypeVariable('V')
class Mean(object):
"""Combiners for computing arithmetic means of elements."""
class Globally(ptransform.PTransform):
"""combiners.Mean.Globally computes the arithmetic mean of the elements."""
def expand(self, pcoll):
return pcoll | core.CombineGlobally(MeanCombineFn())
class PerKey(ptransform.PTransform):
"""combiners.Mean.PerKey finds the means of the values for each key."""
def expand(self, pcoll):
return pcoll | core.CombinePerKey(MeanCombineFn())
@with_input_types(Union[float, int, long])
@with_output_types(float)
class MeanCombineFn(core.CombineFn):
"""CombineFn for computing an arithmetic mean."""
def create_accumulator(self):
return (0, 0)
def add_input(self, sum_count, element):
(sum_, count) = sum_count
return sum_ + element, count + 1
def merge_accumulators(self, accumulators):
sums, counts = zip(*accumulators)
return sum(sums), sum(counts)
def extract_output(self, sum_count):
(sum_, count) = sum_count
if count == 0:
return float('NaN')
return sum_ / float(count)
def for_input_type(self, input_type):
if input_type is int:
return cy_combiners.MeanInt64Fn()
elif input_type is float:
return cy_combiners.MeanFloatFn()
return self
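# Illustrative usage sketch (assumes a pipeline `p` and the conventional
# `import apache_beam as beam` alias):
#
#   p | beam.Create([1, 2, 3, 4]) | beam.combiners.Mean.Globally()
#   # -> a PCollection holding the single float 2.5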
class Count(object):
"""Combiners for counting elements."""
class Globally(ptransform.PTransform):
"""combiners.Count.Globally counts the total number of elements."""
def expand(self, pcoll):
return pcoll | core.CombineGlobally(CountCombineFn())
class PerKey(ptransform.PTransform):
"""combiners.Count.PerKey counts how many elements each unique key has."""
def expand(self, pcoll):
return pcoll | core.CombinePerKey(CountCombineFn())
class PerElement(ptransform.PTransform):
"""combiners.Count.PerElement counts how many times each element occurs."""
def expand(self, pcoll):
paired_with_void_type = KV[pcoll.element_type, Any]
return (pcoll
| ('%s:PairWithVoid' % self.label >> core.Map(lambda x: (x, None))
.with_output_types(paired_with_void_type))
| core.CombinePerKey(CountCombineFn()))
@with_input_types(Any)
@with_output_types(int)
class CountCombineFn(core.CombineFn):
"""CombineFn for computing PCollection size."""
def create_accumulator(self):
return 0
def add_input(self, accumulator, element):
return accumulator + 1
def add_inputs(self, accumulator, elements):
return accumulator + len(list(elements))
def merge_accumulators(self, accumulators):
return sum(accumulators)
def extract_output(self, accumulator):
return accumulator
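# Illustrative usage sketch (hypothetical pipeline `p`):
#
#   p | beam.Create(['a', 'b', 'a']) | beam.combiners.Count.PerElement()
#   # -> a PCollection of ('a', 2) and ('b', 1)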
class Top(object):
"""Combiners for obtaining extremal elements."""
# pylint: disable=no-self-argument
@staticmethod
@ptransform.ptransform_fn
def Of(pcoll, n, compare=None, *args, **kwargs):
"""Obtain a list of the compare-most N elements in a PCollection.
This transform will retrieve the n greatest elements in the PCollection
to which it is applied, where "greatest" is determined by the comparator
function supplied as the compare argument.
compare should be an implementation of "a < b" taking at least two arguments
(a and b). Additional arguments and side inputs specified in the apply call
become additional arguments to the comparator. Defaults to the natural
ordering of the elements.
The arguments 'key' and 'reverse' may instead be passed as keyword
arguments, and have the same meaning as for Python's sort functions.
Args:
pcoll: PCollection to process.
n: number of elements to extract from pcoll.
compare: as described above.
*args: as described above.
**kwargs: as described above.
"""
key = kwargs.pop('key', None)
reverse = kwargs.pop('reverse', False)
return pcoll | core.CombineGlobally(
TopCombineFn(n, compare, key, reverse), *args, **kwargs)
@staticmethod
@ptransform.ptransform_fn
def PerKey(pcoll, n, compare=None, *args, **kwargs):
"""Identifies the compare-most N elements associated with each key.
This transform will produce a PCollection mapping unique keys in the input
PCollection to the n greatest elements with which they are associated, where
"greatest" is determined by the comparator function supplied as the compare
argument.
compare should be an implementation of "a < b" taking at least two arguments
(a and b). Additional arguments and side inputs specified in the apply call
become additional arguments to the comparator. Defaults to the natural
ordering of the elements.
The arguments 'key' and 'reverse' may instead be passed as keyword
arguments, and have the same meaning as for Python's sort functions.
Args:
pcoll: PCollection to process.
n: number of elements to extract from pcoll.
compare: as described above.
*args: as described above.
**kwargs: as described above.
Raises:
TypeCheckError: If the output type of the input PCollection is not
compatible with KV[A, B].
"""
key = kwargs.pop('key', None)
reverse = kwargs.pop('reverse', False)
return pcoll | core.CombinePerKey(
TopCombineFn(n, compare, key, reverse), *args, **kwargs)
@staticmethod
@ptransform.ptransform_fn
def Largest(pcoll, n):
"""Obtain a list of the greatest N elements in a PCollection."""
return pcoll | Top.Of(n)
@staticmethod
@ptransform.ptransform_fn
def Smallest(pcoll, n):
"""Obtain a list of the least N elements in a PCollection."""
return pcoll | Top.Of(n, reverse=True)
@staticmethod
@ptransform.ptransform_fn
def LargestPerKey(pcoll, n):
"""Identifies the N greatest elements associated with each key."""
return pcoll | Top.PerKey(n)
  @staticmethod
  @ptransform.ptransform_fn
  def SmallestPerKey(pcoll, n):
    """Identifies the N least elements associated with each key."""
    return pcoll | Top.PerKey(n, reverse=True)
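# Illustrative usage sketch (hypothetical pipeline `p`):
#
#   p | beam.Create([5, 1, 9, 3]) | beam.combiners.Top.Largest(2)
#   # -> a PCollection holding the single list [9, 5]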
@with_input_types(T)
@with_output_types(List[T])
class TopCombineFn(core.CombineFn):
"""CombineFn doing the combining for all of the Top transforms.
This CombineFn uses a key or comparison operator to rank the elements.
Args:
compare: (optional) an implementation of "a < b" taking at least two
arguments (a and b). Additional arguments and side inputs specified
in the apply call become additional arguments to the comparator.
key: (optional) a mapping of elements to a comparable key, similar to
the key argument of Python's sorting methods.
reverse: (optional) whether to order things smallest to largest, rather
than largest to smallest
"""
_MIN_BUFFER_OVERSIZE = 100
_MAX_BUFFER_OVERSIZE = 1000
# TODO(robertwb): Allow taking a key rather than a compare.
def __init__(self, n, compare=None, key=None, reverse=False):
self._n = n
self._buffer_size = max(
min(2 * n, n + TopCombineFn._MAX_BUFFER_OVERSIZE),
n + TopCombineFn._MIN_BUFFER_OVERSIZE)
if compare is operator.lt:
compare = None
elif compare is operator.gt:
compare = None
reverse = not reverse
if compare:
self._compare = (
(lambda a, b, *args, **kwargs: not compare(a, b, *args, **kwargs))
if reverse
else compare)
else:
self._compare = operator.gt if reverse else operator.lt
self._key_fn = key
self._reverse = reverse
def _sort_buffer(self, buffer, lt):
if lt in (operator.gt, operator.lt):
buffer.sort(key=self._key_fn, reverse=self._reverse)
else:
buffer.sort(cmp=lambda a, b: (not lt(a, b)) - (not lt(b, a)),
key=self._key_fn)
def display_data(self):
return {'n': self._n,
'compare': DisplayDataItem(self._compare.__name__
if hasattr(self._compare, '__name__')
else self._compare.__class__.__name__)
.drop_if_none()}
# The accumulator type is a tuple (threshold, buffer), where threshold
# is the smallest element [key] that could possibly be in the top n based
# on the elements observed so far, and buffer is a (periodically sorted)
# list of candidates of bounded size.
def create_accumulator(self, *args, **kwargs):
return None, []
def add_input(self, accumulator, element, *args, **kwargs):
if args or kwargs:
lt = lambda a, b: self._compare(a, b, *args, **kwargs)
else:
lt = self._compare
threshold, buffer = accumulator
element_key = self._key_fn(element) if self._key_fn else element
if len(buffer) < self._n:
if not buffer:
return element_key, [element]
buffer.append(element)
if lt(element_key, threshold): # element_key < threshold
return element_key, buffer
return accumulator # with mutated buffer
elif lt(threshold, element_key): # threshold < element_key
buffer.append(element)
if len(buffer) < self._buffer_size:
return accumulator
self._sort_buffer(buffer, lt)
min_element = buffer[-self._n]
threshold = self._key_fn(min_element) if self._key_fn else min_element
return threshold, buffer[-self._n:]
return accumulator
def merge_accumulators(self, accumulators, *args, **kwargs):
accumulators = list(accumulators)
if args or kwargs:
add_input = lambda accumulator, element: self.add_input(
accumulator, element, *args, **kwargs)
else:
add_input = self.add_input
total_accumulator = None
for accumulator in accumulators:
if total_accumulator is None:
total_accumulator = accumulator
else:
for element in accumulator[1]:
total_accumulator = add_input(total_accumulator, element)
return total_accumulator
def extract_output(self, accumulator, *args, **kwargs):
if args or kwargs:
lt = lambda a, b: self._compare(a, b, *args, **kwargs)
else:
lt = self._compare
_, buffer = accumulator
self._sort_buffer(buffer, lt)
return buffer[:-self._n-1:-1] # tail, reversed
class Largest(TopCombineFn):
def default_label(self):
return 'Largest(%s)' % self._n
class Smallest(TopCombineFn):
def __init__(self, n):
super(Smallest, self).__init__(n, reverse=True)
def default_label(self):
return 'Smallest(%s)' % self._n
class Sample(object):
"""Combiners for sampling n elements without replacement."""
# pylint: disable=no-self-argument
@staticmethod
@ptransform.ptransform_fn
def FixedSizeGlobally(pcoll, n):
return pcoll | core.CombineGlobally(SampleCombineFn(n))
@staticmethod
@ptransform.ptransform_fn
def FixedSizePerKey(pcoll, n):
return pcoll | core.CombinePerKey(SampleCombineFn(n))
@with_input_types(T)
@with_output_types(List[T])
class SampleCombineFn(core.CombineFn):
"""CombineFn for all Sample transforms."""
def __init__(self, n):
super(SampleCombineFn, self).__init__()
# Most of this combiner's work is done by a TopCombineFn. We could just
# subclass TopCombineFn to make this class, but since sampling is not
# really a kind of Top operation, we use a TopCombineFn instance as a
# helper instead.
self._top_combiner = TopCombineFn(n)
def create_accumulator(self):
return self._top_combiner.create_accumulator()
def add_input(self, heap, element):
# Before passing elements to the Top combiner, we pair them with random
# numbers. The elements with the n largest random number "keys" will be
# selected for the output.
return self._top_combiner.add_input(heap, (random.random(), element))
def merge_accumulators(self, heaps):
return self._top_combiner.merge_accumulators(heaps)
def extract_output(self, heap):
# Here we strip off the random number keys we added in add_input.
return [e for _, e in self._top_combiner.extract_output(heap)]
class _TupleCombineFnBase(core.CombineFn):
def __init__(self, *combiners):
self._combiners = [core.CombineFn.maybe_from_callable(c) for c in combiners]
self._named_combiners = combiners
def display_data(self):
combiners = [c.__name__ if hasattr(c, '__name__') else c.__class__.__name__
for c in self._named_combiners]
return {'combiners': str(combiners)}
def create_accumulator(self):
return [c.create_accumulator() for c in self._combiners]
def merge_accumulators(self, accumulators):
return [c.merge_accumulators(a)
for c, a in zip(self._combiners, zip(*accumulators))]
def extract_output(self, accumulator):
return tuple([c.extract_output(a)
for c, a in zip(self._combiners, accumulator)])
class TupleCombineFn(_TupleCombineFnBase):
"""A combiner for combining tuples via a tuple of combiners.
Takes as input a tuple of N CombineFns and combines N-tuples by
combining the k-th element of each tuple with the k-th CombineFn,
outputting a new N-tuple of combined values.
"""
def add_input(self, accumulator, element):
return [c.add_input(a, e)
for c, a, e in zip(self._combiners, accumulator, element)]
def with_common_input(self):
return SingleInputTupleCombineFn(*self._combiners)
class SingleInputTupleCombineFn(_TupleCombineFnBase):
"""A combiner for combining a single value via a tuple of combiners.
Takes as input a tuple of N CombineFns and combines elements by
applying each CombineFn to each input, producing an N-tuple of
the outputs corresponding to each of the N CombineFn's outputs.
"""
def add_input(self, accumulator, element):
return [c.add_input(a, element)
for c, a in zip(self._combiners, accumulator)]
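# Illustrative contrast (Sum and Max stand in for hypothetical CombineFns):
# TupleCombineFn(Sum, Max) combines pairs element-wise, so (1, 10) and (2, 20)
# yield (3, 20); SingleInputTupleCombineFn(Sum, Max) feeds each single input
# to every combiner, so inputs 1 and 2 yield (3, 2).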
class ToList(ptransform.PTransform):
"""A global CombineFn that condenses a PCollection into a single list."""
def __init__(self, label='ToList'): # pylint: disable=useless-super-delegation
super(ToList, self).__init__(label)
def expand(self, pcoll):
return pcoll | self.label >> core.CombineGlobally(ToListCombineFn())
@with_input_types(T)
@with_output_types(List[T])
class ToListCombineFn(core.CombineFn):
"""CombineFn for to_list."""
def create_accumulator(self):
return []
def add_input(self, accumulator, element):
accumulator.append(element)
return accumulator
def merge_accumulators(self, accumulators):
return sum(accumulators, [])
def extract_output(self, accumulator):
return accumulator
class ToDict(ptransform.PTransform):
"""A global CombineFn that condenses a PCollection into a single dict.
PCollections should consist of 2-tuples, notionally (key, value) pairs.
If multiple values are associated with the same key, only one of the values
will be present in the resulting dict.
"""
def __init__(self, label='ToDict'): # pylint: disable=useless-super-delegation
super(ToDict, self).__init__(label)
def expand(self, pcoll):
return pcoll | self.label >> core.CombineGlobally(ToDictCombineFn())
@with_input_types(Tuple[K, V])
@with_output_types(Dict[K, V])
class ToDictCombineFn(core.CombineFn):
"""CombineFn for to_dict."""
def create_accumulator(self):
return dict()
def add_input(self, accumulator, element):
key, value = element
accumulator[key] = value
return accumulator
def merge_accumulators(self, accumulators):
result = dict()
for a in accumulators:
result.update(a)
return result
def extract_output(self, accumulator):
return accumulator
class _CurriedFn(core.CombineFn):
"""Wrapped CombineFn with extra arguments."""
def __init__(self, fn, args, kwargs):
self.fn = fn
self.args = args
self.kwargs = kwargs
def create_accumulator(self):
return self.fn.create_accumulator(*self.args, **self.kwargs)
def add_input(self, accumulator, element):
return self.fn.add_input(accumulator, element, *self.args, **self.kwargs)
def merge_accumulators(self, accumulators):
return self.fn.merge_accumulators(accumulators, *self.args, **self.kwargs)
def extract_output(self, accumulator):
return self.fn.extract_output(accumulator, *self.args, **self.kwargs)
def apply(self, elements):
return self.fn.apply(elements, *self.args, **self.kwargs)
def curry_combine_fn(fn, args, kwargs):
if not args and not kwargs:
return fn
else:
return _CurriedFn(fn, args, kwargs)
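# Example (illustrative): with no extra arguments curry_combine_fn returns the
# CombineFn unchanged; otherwise the wrapper forwards the extra args/kwargs to
# every phase (create/add/merge/extract/apply) of the combine lifecycle.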
class PhasedCombineFnExecutor(object):
"""Executor for phases of combine operations."""
def __init__(self, phase, fn, args, kwargs):
self.combine_fn = curry_combine_fn(fn, args, kwargs)
if phase == 'all':
self.apply = self.full_combine
elif phase == 'add':
self.apply = self.add_only
elif phase == 'merge':
self.apply = self.merge_only
elif phase == 'extract':
self.apply = self.extract_only
else:
raise ValueError('Unexpected phase: %s' % phase)
def full_combine(self, elements): # pylint: disable=invalid-name
return self.combine_fn.apply(elements)
def add_only(self, elements): # pylint: disable=invalid-name
return self.combine_fn.add_inputs(
self.combine_fn.create_accumulator(), elements)
def merge_only(self, accumulators): # pylint: disable=invalid-name
return self.combine_fn.merge_accumulators(accumulators)
def extract_only(self, accumulator): # pylint: disable=invalid-name
return self.combine_fn.extract_output(accumulator)
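# --- Illustrative usage sketch (comments only; not part of the module) ---
# A minimal walk through TopCombineFn's accumulator lifecycle, assuming the
# names defined above; the input values are made up:
#   fn = TopCombineFn(3)
#   acc = fn.create_accumulator()
#   for value in [5, 1, 9, 7, 3]:
#     acc = fn.add_input(acc, value)
#   fn.extract_output(acc)  # -> [9, 7, 5]: the top 3, largest first
# PhasedCombineFnExecutor runs a single named phase of that same lifecycle,
# which is how a runner splits combining across workers:
#   executor = PhasedCombineFnExecutor('add', fn, (), {})
#   partial = executor.apply([5, 1, 9])  # an accumulator, not a final result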
|
import json
import logging
import os
import re
import sys
import tempfile
import unittest
BOT_DIR = os.path.dirname(os.path.abspath(__file__))
# The bot's test environment helpers live next to this file; make them
# importable before loading them.
sys.path.insert(0, BOT_DIR)
import test_env_bot
test_env_bot.setup_test_env()
from depot_tools import auto_stub
from depot_tools import fix_encoding
from utils import file_path
from utils import subprocess42
from utils import tools
import swarmingserver_bot_fake
from bot_code import bot_main
class TestCase(auto_stub.TestCase):
def setUp(self):
super(TestCase, self).setUp()
tools.clear_cache_all()
self._tmpdir = tempfile.mkdtemp(prefix='swarming_main')
self._zip_file = os.path.join(self._tmpdir, 'swarming_bot.zip')
code, _ = swarmingserver_bot_fake.gen_zip(self.url)
with open(self._zip_file, 'wb') as f:
f.write(code)
def tearDown(self):
try:
file_path.rmtree(self._tmpdir)
finally:
super(TestCase, self).tearDown()
class SimpleMainTest(TestCase):
@property
def url(self):
return 'http://localhost:1'
def test_attributes(self):
actual = json.loads(
subprocess42.check_output(
[sys.executable, self._zip_file, 'attributes'],
stderr=subprocess42.PIPE))
# get_config() doesn't work when called outside of a zip, so patch the
# server_version manually with the default value in config/config.json.
expected = bot_main.get_attributes(None)
self.assertEqual(['N/A'], expected['dimensions']['server_version'])
expected['dimensions']['server_version'] = ['1']
NON_DETERMINISTIC = ('cwd', 'disks', 'nb_files_in_temp', 'pid',
'running_time', 'started_ts', 'uptime')
for key in NON_DETERMINISTIC:
del actual['state'][key]
del expected['state'][key]
actual['state'].pop('temp', None)
expected['state'].pop('temp', None)
del actual['version']
del expected['version']
self.assertAlmostEqual(
actual['state'].pop('cost_usd_hour'),
expected['state'].pop('cost_usd_hour'),
places=5)
self.assertEqual(expected, actual)
def test_version(self):
version = subprocess42.check_output(
[sys.executable, self._zip_file, 'version'], stderr=subprocess42.PIPE)
lines = version.strip().split()
self.assertEqual(1, len(lines), lines)
self.assertTrue(re.match(br'^[0-9a-f]{64}$', lines[0]), lines[0])
class MainTest(TestCase):
def setUp(self):
self._server = swarmingserver_bot_fake.Server()
super(MainTest, self).setUp()
def tearDown(self):
try:
self._server.close()
finally:
super(MainTest, self).tearDown()
@property
def url(self):
return self._server.url
@unittest.skipIf(
sys.platform == 'win32',
'TODO(crbug.com/1017545): It gets stuck at proc.communicate()')
def test_run_bot_signal(self):
# Test SIGTERM signal handling. Run it as an external process to not mess
# things up.
proc = subprocess42.Popen([sys.executable, self._zip_file, 'start_slave'],
stdout=subprocess42.PIPE,
stderr=subprocess42.STDOUT,
detached=True)
# Wait for the grand-child process to poll the server.
self._server.has_polled.wait(60)
self.assertEqual(True, self._server.has_polled.is_set())
proc.terminate()
out, _ = proc.communicate()
if proc.returncode:
print('ERROR LOG:')
print(out)
self.assertEqual(0, proc.returncode)
events = self._server.get_bot_events()
for event in events:
event.pop('dimensions')
event.pop('state')
event.pop('version')
expected = [
{
'event': 'bot_shutdown',
'message': 'Signal was received',
},
]
if sys.platform == 'win32':
      # Sadly, the signal handler generates an error.
# TODO(maruel): Fix one day.
self.assertEqual('bot_error', events.pop(0)['event'])
self.assertEqual(expected, events)
if __name__ == '__main__':
fix_encoding.fix_encoding()
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
unittest.main()
|
"""Utilities for serializing Python objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import wrapt
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.keras.utils import generic_utils
def get_json_type(obj):
"""Serializes any object to a JSON-serializable structure.
Arguments:
obj: the object to serialize
Returns:
JSON-serializable structure representing `obj`.
Raises:
TypeError: if `obj` cannot be serialized.
"""
# if obj is a serializable Keras class instance
# e.g. optimizer, layer
if hasattr(obj, 'get_config'):
return {
'class_name': generic_utils.get_registered_name(obj.__class__),
'config': obj.get_config()
}
# if obj is any numpy type
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
# misc functions (e.g. loss function)
if callable(obj):
return obj.__name__
# if obj is a python 'type'
if type(obj).__name__ == type.__name__:
return obj.__name__
if isinstance(obj, tensor_shape.Dimension):
return obj.value
if isinstance(obj, tensor_shape.TensorShape):
return obj.as_list()
if isinstance(obj, collections_abc.Mapping):
return dict(obj)
if isinstance(obj, wrapt.ObjectProxy):
return obj.__wrapped__
raise TypeError('Not JSON Serializable:', obj)
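# Illustrative usage: this function is meant to be passed as the `default`
# hook of `json.dumps`, so unserializable objects are converted on the fly:
#   json.dumps({'shape': tensor_shape.TensorShape([2, 3])},
#              default=get_json_type)  # -> '{"shape": [2, 3]}'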
|
"""Alexa HTTP interface."""
import logging
from homeassistant import core
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
ENTITY_CATEGORY_CONFIG,
ENTITY_CATEGORY_DIAGNOSTIC,
)
from homeassistant.helpers import entity_registry as er
from .auth import Auth
from .config import AbstractConfig
from .const import CONF_ENDPOINT, CONF_ENTITY_CONFIG, CONF_FILTER, CONF_LOCALE
from .smart_home import async_handle_message
from .state_report import async_enable_proactive_mode
_LOGGER = logging.getLogger(__name__)
SMART_HOME_HTTP_ENDPOINT = "/api/alexa/smart_home"
class AlexaConfig(AbstractConfig):
"""Alexa config."""
def __init__(self, hass, config):
"""Initialize Alexa config."""
super().__init__(hass)
self._config = config
if config.get(CONF_CLIENT_ID) and config.get(CONF_CLIENT_SECRET):
self._auth = Auth(hass, config[CONF_CLIENT_ID], config[CONF_CLIENT_SECRET])
else:
self._auth = None
@property
def supports_auth(self):
"""Return if config supports auth."""
return self._auth is not None
@property
def should_report_state(self):
"""Return if we should proactively report states."""
return self._auth is not None
@property
def endpoint(self):
"""Endpoint for report state."""
return self._config.get(CONF_ENDPOINT)
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def locale(self):
"""Return config locale."""
return self._config.get(CONF_LOCALE)
@core.callback
def user_identifier(self):
"""Return an identifier for the user that represents this config."""
return ""
def should_expose(self, entity_id):
"""If an entity should be exposed."""
if not self._config[CONF_FILTER].empty_filter:
return self._config[CONF_FILTER](entity_id)
entity_registry = er.async_get(self.hass)
if registry_entry := entity_registry.async_get(entity_id):
auxiliary_entity = registry_entry.entity_category in (
ENTITY_CATEGORY_CONFIG,
ENTITY_CATEGORY_DIAGNOSTIC,
)
else:
auxiliary_entity = False
return not auxiliary_entity
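    # Note: a non-empty configured filter takes precedence and is consulted
    # exclusively; otherwise entities are exposed unless the entity registry
    # marks them as config or diagnostic (auxiliary) entities.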
@core.callback
def async_invalidate_access_token(self):
"""Invalidate access token."""
self._auth.async_invalidate_access_token()
async def async_get_access_token(self):
"""Get an access token."""
return await self._auth.async_get_access_token()
async def async_accept_grant(self, code):
"""Accept a grant."""
return await self._auth.async_do_auth(code)
async def async_setup(hass, config):
"""Activate Smart Home functionality of Alexa component.
This is optional, triggered by having a `smart_home:` sub-section in the
alexa configuration.
Even if that's disabled, the functionality in this module may still be used
by the cloud component which will call async_handle_message directly.
"""
smart_home_config = AlexaConfig(hass, config)
hass.http.register_view(SmartHomeView(smart_home_config))
if smart_home_config.should_report_state:
await async_enable_proactive_mode(hass, smart_home_config)
class SmartHomeView(HomeAssistantView):
"""Expose Smart Home v3 payload interface via HTTP POST."""
url = SMART_HOME_HTTP_ENDPOINT
name = "api:alexa:smart_home"
def __init__(self, smart_home_config):
"""Initialize."""
self.smart_home_config = smart_home_config
async def post(self, request):
"""Handle Alexa Smart Home requests.
The Smart Home API requires the endpoint to be implemented in AWS
Lambda, which will need to forward the requests to here and pass back
the response.
"""
hass = request.app["hass"]
user = request["hass_user"]
message = await request.json()
_LOGGER.debug("Received Alexa Smart Home request: %s", message)
response = await async_handle_message(
hass, self.smart_home_config, message, context=core.Context(user_id=user.id)
)
_LOGGER.debug("Sending Alexa Smart Home response: %s", response)
return b"" if response is None else self.json(response)
|
"""Windows Registry plugin for parsing the last shutdown time of a system."""
import os
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import dtfabric_helper
from plaso.lib import errors
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class ShutdownWindowsRegistryEventData(events.EventData):
"""Shutdown Windows Registry event data.
Attributes:
key_path (str): Windows Registry key path.
value_name (str): name of the Windows Registry value.
"""
DATA_TYPE = 'windows:registry:shutdown'
def __init__(self):
"""Initializes event data."""
super(ShutdownWindowsRegistryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.key_path = None
self.value_name = None
class ShutdownWindowsRegistryPlugin(
interface.WindowsRegistryPlugin, dtfabric_helper.DtFabricHelper):
"""Windows Registry plugin for parsing the last shutdown time of a system."""
NAME = 'windows_shutdown'
DATA_FORMAT = 'Windows last shutdown Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Control\\Windows')])
_DEFINITION_FILE = os.path.join(
os.path.dirname(__file__), 'filetime.yaml')
def _ParseFiletime(self, byte_stream):
"""Parses a FILETIME date and time value from a byte stream.
Args:
byte_stream (bytes): byte stream.
Returns:
dfdatetime.DateTimeValues: a FILETIME date and time values or a semantic
date and time values if the FILETIME date and time value is not set.
Raises:
ParseError: if the FILETIME could not be parsed.
"""
filetime_map = self._GetDataTypeMap('filetime')
try:
filetime = self._ReadStructureFromByteStream(
byte_stream, 0, filetime_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse FILETIME value with error: {0!s}'.format(
exception))
if filetime == 0:
return dfdatetime_semantic_time.NotSet()
try:
return dfdatetime_filetime.Filetime(timestamp=filetime)
except ValueError:
raise errors.ParseError('Invalid FILETIME value: 0x{0:08x}'.format(
filetime))
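  # Note: FILETIME counts 100-nanosecond intervals since January 1, 1601
  # (UTC); a raw value of 0 conventionally means "not set", which is why it
  # is mapped to a semantic NotSet value above.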
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a ShutdownTime Windows Registry value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
shutdown_value = registry_key.GetValueByName('ShutdownTime')
if shutdown_value:
try:
date_time = self._ParseFiletime(shutdown_value.data)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to determine shutdown timestamp with error: {0!s}'.format(
exception))
date_time = None
if date_time:
event_data = ShutdownWindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.value_name = shutdown_value.name
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_SHUTDOWN)
parser_mediator.ProduceEventWithEventData(event, event_data)
self._ProduceDefaultWindowsRegistryEvent(
parser_mediator, registry_key, names_to_skip=['ShutdownTime'])
winreg_parser.WinRegistryParser.RegisterPlugin(ShutdownWindowsRegistryPlugin)
|
from typing import Any
import pytest
from aiohttp import web
def test_entry_func_empty(mocker: Any) -> None:
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
argv = [""]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_module(mocker: Any) -> None:
argv = ["test"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_function(mocker: Any) -> None:
argv = [":test"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_separator(mocker: Any) -> None:
argv = [":"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_relative_module(mocker: Any) -> None:
argv = [".a.b:c"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("relative module names not supported")
def test_entry_func_non_existent_module(mocker: Any) -> None:
argv = ["alpha.beta:func"]
mocker.patch("aiohttp.web.import_module", side_effect=ImportError("Test Error"))
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("unable to import alpha.beta: Test Error")
def test_entry_func_non_existent_attribute(mocker: Any) -> None:
argv = ["alpha.beta:func"]
import_module = mocker.patch("aiohttp.web.import_module")
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
module = import_module("alpha.beta")
del module.func
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"module {!r} has no attribute {!r}".format("alpha.beta", "func")
)
def test_path_when_unsupported(mocker: Any, monkeypatch: Any) -> None:
argv = "--path=test_path.sock alpha.beta:func".split()
mocker.patch("aiohttp.web.import_module")
monkeypatch.delattr("socket.AF_UNIX", raising=False)
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"file system paths not supported by your" " operating environment"
)
def test_entry_func_call(mocker: Any) -> None:
mocker.patch("aiohttp.web.run_app")
import_module = mocker.patch("aiohttp.web.import_module")
argv = (
"-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
"--extra-optional-zwei extra positional args"
).split()
module = import_module("alpha.beta")
with pytest.raises(SystemExit):
web.main(argv)
module.func.assert_called_with(
("--extra-optional-eins --extra-optional-zwei extra positional " "args").split()
)
def test_running_application(mocker: Any) -> None:
run_app = mocker.patch("aiohttp.web.run_app")
import_module = mocker.patch("aiohttp.web.import_module")
exit = mocker.patch("aiohttp.web.ArgumentParser.exit", side_effect=SystemExit)
argv = (
"-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
"--extra-optional-zwei extra positional args"
).split()
module = import_module("alpha.beta")
app = module.func()
with pytest.raises(SystemExit):
web.main(argv)
run_app.assert_called_with(app, host="testhost", port=6666, path=None)
exit.assert_called_with(message="Stopped\n")
|
import copy
import mock
from openstackclient.common import exceptions
from openstackclient.image.v1 import image
from openstackclient.tests import fakes
from openstackclient.tests.image.v1 import fakes as image_fakes
class TestImage(image_fakes.TestImagev1):
def setUp(self):
super(TestImage, self).setUp()
# Get a shortcut to the ServerManager Mock
self.images_mock = self.app.client_manager.image.images
self.images_mock.reset_mock()
class TestImageCreate(TestImage):
def setUp(self):
super(TestImageCreate, self).setUp()
self.images_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
# This is the return value for utils.find_resource()
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
self.images_mock.update.return_value = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
# Get the command object to test
self.cmd = image.CreateImage(self.app, None)
def test_image_reserve_no_options(self):
mock_exception = {
'find.side_effect': exceptions.CommandError('x'),
'get.side_effect': exceptions.CommandError('x'),
}
self.images_mock.configure_mock(**mock_exception)
arglist = [
image_fakes.image_name,
]
verifylist = [
('container_format', image.DEFAULT_CONTAINER_FORMAT),
('disk_format', image.DEFAULT_DISK_FORMAT),
('name', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ImageManager.create(name=, **)
self.images_mock.create.assert_called_with(
name=image_fakes.image_name,
container_format=image.DEFAULT_CONTAINER_FORMAT,
disk_format=image.DEFAULT_DISK_FORMAT,
data=mock.ANY,
)
# Verify update() was not called, if it was show the args
self.assertEqual(self.images_mock.update.call_args_list, [])
self.assertEqual(image_fakes.IMAGE_columns, columns)
self.assertEqual(image_fakes.IMAGE_data, data)
def test_image_reserve_options(self):
mock_exception = {
'find.side_effect': exceptions.CommandError('x'),
'get.side_effect': exceptions.CommandError('x'),
}
self.images_mock.configure_mock(**mock_exception)
arglist = [
'--container-format', 'ovf',
'--disk-format', 'fs',
'--min-disk', '10',
'--min-ram', '4',
'--protected',
'--private',
image_fakes.image_name,
]
verifylist = [
('container_format', 'ovf'),
('disk_format', 'fs'),
('min_disk', 10),
('min_ram', 4),
('protected', True),
('unprotected', False),
('public', False),
('private', True),
('name', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# ImageManager.create(name=, **)
self.images_mock.create.assert_called_with(
name=image_fakes.image_name,
container_format='ovf',
disk_format='fs',
min_disk=10,
min_ram=4,
protected=True,
is_public=False,
data=mock.ANY,
)
# Verify update() was not called, if it was show the args
self.assertEqual(self.images_mock.update.call_args_list, [])
self.assertEqual(image_fakes.IMAGE_columns, columns)
self.assertEqual(image_fakes.IMAGE_data, data)
@mock.patch('openstackclient.image.v1.image.io.open', name='Open')
def test_image_create_file(self, mock_open):
mock_file = mock.MagicMock(name='File')
mock_open.return_value = mock_file
mock_open.read.return_value = image_fakes.image_data
mock_exception = {
'find.side_effect': exceptions.CommandError('x'),
'get.side_effect': exceptions.CommandError('x'),
}
self.images_mock.configure_mock(**mock_exception)
arglist = [
'--file', 'filer',
'--unprotected',
'--public',
'--property', 'Alpha=1',
'--property', 'Beta=2',
image_fakes.image_name,
]
verifylist = [
('file', 'filer'),
('protected', False),
('unprotected', True),
('public', True),
('private', False),
('properties', {'Alpha': '1', 'Beta': '2'}),
('name', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# Ensure input file is opened
mock_open.assert_called_with('filer', 'rb')
# Ensure the input file is closed
mock_file.close.assert_called_with()
# ImageManager.get(name)
self.images_mock.get.assert_called_with(image_fakes.image_name)
# ImageManager.create(name=, **)
self.images_mock.create.assert_called_with(
name=image_fakes.image_name,
container_format=image.DEFAULT_CONTAINER_FORMAT,
disk_format=image.DEFAULT_DISK_FORMAT,
protected=False,
is_public=True,
properties={
'Alpha': '1',
'Beta': '2',
},
data=mock_file,
)
# Verify update() was not called, if it was show the args
self.assertEqual(self.images_mock.update.call_args_list, [])
self.assertEqual(image_fakes.IMAGE_columns, columns)
self.assertEqual(image_fakes.IMAGE_data, data)
def test_image_create_volume(self):
# Set up VolumeManager Mock
volumes_mock = self.app.client_manager.volume.volumes
volumes_mock.reset_mock()
volumes_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy({'id': 'vol1', 'name': 'volly'}),
loaded=True,
)
response = {
"id": 'volume_id',
"updated_at": 'updated_at',
"status": 'uploading',
"display_description": 'desc',
"size": 'size',
"volume_type": 'volume_type',
"image_id": 'image1',
"container_format": image.DEFAULT_CONTAINER_FORMAT,
"disk_format": image.DEFAULT_DISK_FORMAT,
"image_name": image_fakes.image_name,
}
full_response = {"os-volume_upload_image": response}
volumes_mock.upload_to_image.return_value = (201, full_response)
arglist = [
'--volume', 'volly',
image_fakes.image_name,
]
verifylist = [
('private', False),
('protected', False),
('public', False),
('unprotected', False),
('volume', 'volly'),
('force', False),
('name', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
# VolumeManager.upload_to_image(volume, force, image_name,
# container_format, disk_format)
volumes_mock.upload_to_image.assert_called_with(
'vol1',
False,
image_fakes.image_name,
'bare',
'raw',
)
# ImageManager.update(image_id, remove_props=, **)
self.images_mock.update.assert_called_with(
image_fakes.image_id,
name=image_fakes.image_name,
container_format=image.DEFAULT_CONTAINER_FORMAT,
disk_format=image.DEFAULT_DISK_FORMAT,
properties=image_fakes.image_properties,
volume='volly',
)
self.assertEqual(image_fakes.IMAGE_columns, columns)
self.assertEqual(image_fakes.IMAGE_data, data)
class TestImageDelete(TestImage):
def setUp(self):
super(TestImageDelete, self).setUp()
# This is the return value for utils.find_resource()
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
self.images_mock.delete.return_value = None
# Get the command object to test
self.cmd = image.DeleteImage(self.app, None)
def test_image_delete_no_options(self):
arglist = [
image_fakes.image_id,
]
verifylist = [
('image', image_fakes.image_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
self.images_mock.delete.assert_called_with(
image_fakes.image_id,
)
class TestImageSet(TestImage):
def setUp(self):
super(TestImageSet, self).setUp()
# This is the return value for utils.find_resource()
self.images_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
self.images_mock.update.return_value = fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
)
# Get the command object to test
self.cmd = image.SetImage(self.app, None)
def test_image_set_no_options(self):
arglist = [
image_fakes.image_name,
]
verifylist = [
('image', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
# Verify update() was not called, if it was show the args
self.assertEqual(self.images_mock.update.call_args_list, [])
def test_image_set_options(self):
arglist = [
'--name', 'new-name',
'--owner', 'new-owner',
'--min-disk', '2',
'--min-ram', '4',
image_fakes.image_name,
]
verifylist = [
('name', 'new-name'),
('owner', 'new-owner'),
('min_disk', 2),
('min_ram', 4),
('image', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
kwargs = {
'name': 'new-name',
'owner': 'new-owner',
'min_disk': 2,
'min_ram': 4,
}
# ImageManager.update(image, **kwargs)
self.images_mock.update.assert_called_with(
image_fakes.image_id,
**kwargs
)
self.assertEqual(image_fakes.IMAGE_columns, columns)
self.assertEqual(image_fakes.IMAGE_data, data)
def test_image_set_bools1(self):
arglist = [
'--protected',
'--private',
image_fakes.image_name,
]
verifylist = [
('protected', True),
('unprotected', False),
('public', False),
('private', True),
('image', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
kwargs = {
'protected': True,
'is_public': False,
}
# ImageManager.update(image, **kwargs)
self.images_mock.update.assert_called_with(
image_fakes.image_id,
**kwargs
)
def test_image_set_bools2(self):
arglist = [
'--unprotected',
'--public',
image_fakes.image_name,
]
verifylist = [
('protected', False),
('unprotected', True),
('public', True),
('private', False),
('image', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
kwargs = {
'protected': False,
'is_public': True,
}
# ImageManager.update(image, **kwargs)
self.images_mock.update.assert_called_with(
image_fakes.image_id,
**kwargs
)
def test_image_set_properties(self):
arglist = [
'--property', 'Alpha=1',
'--property', 'Beta=2',
image_fakes.image_name,
]
verifylist = [
('properties', {'Alpha': '1', 'Beta': '2'}),
('image', image_fakes.image_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
self.cmd.take_action(parsed_args)
kwargs = {
'properties': {
'Alpha': '1',
'Beta': '2',
'Gamma': 'g',
},
}
# ImageManager.update(image, **kwargs)
self.images_mock.update.assert_called_with(
image_fakes.image_id,
**kwargs
)
class TestImageList(TestImage):
def setUp(self):
super(TestImageList, self).setUp()
# This is the return value for utils.find_resource()
self.images_mock.list.return_value = [
fakes.FakeResource(
None,
copy.deepcopy(image_fakes.IMAGE),
loaded=True,
),
]
# Get the command object to test
self.cmd = image.ListImage(self.app, None)
def test_image_list_long_option(self):
arglist = [
'--long',
]
verifylist = [
('long', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.images_mock.list.assert_called_with()
collist = ('ID', 'Name', 'Disk Format', 'Container Format',
'Size', 'Status')
self.assertEqual(columns, collist)
datalist = ((
image_fakes.image_id,
image_fakes.image_name,
'',
'',
'',
'',
), )
self.assertEqual(datalist, tuple(data))
|
import unittest
from azure import (
DEV_ACCOUNT_NAME,
DEV_ACCOUNT_KEY,
)
from azure.storage import AccessPolicy
from azure.storage.sharedaccesssignature import (
SharedAccessPolicy,
SharedAccessSignature,
QueryStringConstants,
ResourceType,
)
from util import (
AzureTestCase,
credentials,
getUniqueName,
)
class SharedAccessSignatureTest(AzureTestCase):
def setUp(self):
self.sas = SharedAccessSignature(account_name=DEV_ACCOUNT_NAME,
account_key=DEV_ACCOUNT_KEY)
def tearDown(self):
return super(SharedAccessSignatureTest, self).tearDown()
def test_generate_signed_query_dict_container_with_access_policy(self):
accss_plcy = AccessPolicy()
accss_plcy.start = '2011-10-11'
accss_plcy.expiry = '2011-10-12'
accss_plcy.permission = 'r'
query = self.sas._generate_signed_query_dict(
'images',
ResourceType.RESOURCE_CONTAINER,
SharedAccessPolicy(accss_plcy),
)
self.assertEqual(query[QueryStringConstants.SIGNED_START], '2011-10-11')
self.assertEqual(query[QueryStringConstants.SIGNED_EXPIRY], '2011-10-12')
self.assertEqual(query[QueryStringConstants.SIGNED_RESOURCE], ResourceType.RESOURCE_CONTAINER)
self.assertEqual(query[QueryStringConstants.SIGNED_PERMISSION], 'r')
self.assertEqual(query[QueryStringConstants.SIGNED_SIGNATURE],
'CxLWN56cjXidpI9em7RDgSN2QIgLggTqrnzudH2XsOY=')
def test_generate_signed_query_dict_container_with_signed_identifier(self):
signed_identifier = 'YWJjZGVmZw=='
query = self.sas._generate_signed_query_dict(
'images',
ResourceType.RESOURCE_CONTAINER,
SharedAccessPolicy(signed_identifier=signed_identifier),
)
self.assertEqual(query[QueryStringConstants.SIGNED_RESOURCE], ResourceType.RESOURCE_CONTAINER)
self.assertEqual(query[QueryStringConstants.SIGNED_IDENTIFIER], signed_identifier)
self.assertEqual(query[QueryStringConstants.SIGNED_SIGNATURE],
'BbzpLHe+JxNAsW/v6LttP5x9DdGMvXsZpm2chKblr3s=')
def test_generate_signed_query_dict_blob_with_access_policy_and_headers(self):
accss_plcy = AccessPolicy()
accss_plcy.start = '2011-10-11T11:03:40Z'
accss_plcy.expiry = '2011-10-12T11:53:40Z'
accss_plcy.permission = 'r'
query = self.sas._generate_signed_query_dict(
'images/pic1.png',
ResourceType.RESOURCE_BLOB,
SharedAccessPolicy(accss_plcy),
content_disposition='file; attachment',
content_type='binary',
)
self.assertEqual(query[QueryStringConstants.SIGNED_START], '2011-10-11T11:03:40Z')
self.assertEqual(query[QueryStringConstants.SIGNED_EXPIRY], '2011-10-12T11:53:40Z')
self.assertEqual(query[QueryStringConstants.SIGNED_RESOURCE], ResourceType.RESOURCE_BLOB)
self.assertEqual(query[QueryStringConstants.SIGNED_PERMISSION], 'r')
self.assertEqual(query[QueryStringConstants.SIGNED_CONTENT_DISPOSITION], 'file; attachment')
self.assertEqual(query[QueryStringConstants.SIGNED_CONTENT_TYPE], 'binary')
self.assertEqual(query[QueryStringConstants.SIGNED_SIGNATURE],
'uHckUC6T+BwUsc+DgrreyIS1k6au7uUd7LSSs/z+/+w=')
def test_generate_signed_query_dict_blob_with_access_policy(self):
accss_plcy = AccessPolicy()
accss_plcy.start = '2011-10-11'
accss_plcy.expiry = '2011-10-12'
accss_plcy.permission = 'w'
query = self.sas._generate_signed_query_dict(
'images/pic1.png',
ResourceType.RESOURCE_BLOB,
SharedAccessPolicy(accss_plcy),
)
self.assertEqual(query[QueryStringConstants.SIGNED_START], '2011-10-11')
self.assertEqual(query[QueryStringConstants.SIGNED_EXPIRY], '2011-10-12')
self.assertEqual(query[QueryStringConstants.SIGNED_RESOURCE], ResourceType.RESOURCE_BLOB)
self.assertEqual(query[QueryStringConstants.SIGNED_PERMISSION], 'w')
self.assertEqual(query[QueryStringConstants.SIGNED_SIGNATURE],
'Fqt8tNcyUOp30qYRtSFNcImrRMcxlk6IF17O4l96KT8=')
def test_generate_signed_query_dict_blob_with_signed_identifier(self):
signed_identifier = 'YWJjZGVmZw=='
query = self.sas._generate_signed_query_dict(
'images',
ResourceType.RESOURCE_CONTAINER,
SharedAccessPolicy(signed_identifier=signed_identifier),
)
self.assertEqual(query[QueryStringConstants.SIGNED_RESOURCE], ResourceType.RESOURCE_CONTAINER)
self.assertEqual(query[QueryStringConstants.SIGNED_IDENTIFIER], signed_identifier)
self.assertEqual(query[QueryStringConstants.SIGNED_SIGNATURE],
'BbzpLHe+JxNAsW/v6LttP5x9DdGMvXsZpm2chKblr3s=')
if __name__ == '__main__':
unittest.main()
|
from datetime import timedelta, datetime, date
class ReadConf(object):
def __init__(self, sc, split_size=None, fetch_size=None, consistency_level=None, metrics_enabled=None):
self.jvm = sc._jvm
self.split_size = split_size
self.fetch_size = fetch_size
self.consistency_level = consistency_level
self.metrics_enabled = metrics_enabled
def to_java_conf(self):
''' Create the com.datastax.spark.connector.rdd.ReadConf JVM object'''
split_size = self.split_size or self.jvm.ReadConf.DefaultSplitSize()
fetch_size = self.fetch_size or self.jvm.ReadConf.DefaultFetchSize()
consistency_level = self.jvm.ConsistencyLevel.values()[self.consistency_level] \
if self.consistency_level else self.jvm.ReadConf.DefaultConsistencyLevel()
# TODO metrics_enabled = jvm.ReadConf.DefaultReadTaskMetricsEnabled() \
# if metrics_enabled is None else metrics_enabled
metrics_enabled = False if self.metrics_enabled is None else self.metrics_enabled
return self.jvm.ReadConf(
split_size,
fetch_size,
consistency_level,
metrics_enabled,
)
class WriteConf(object):
def __init__(self, sc, batch_size=None, batch_buffer_size=None, batch_grouping_key=None, consistency_level=None,
parallelism_level=None, throughput_mibps=None, ttl=None, timestamp=None, metrics_enabled=None):
'''
@param sc(SparkContext):
The spark context used to build the WriteConf object
@param batch_size(int):
The size in bytes to batch up in an unlogged batch of CQL inserts.
If None given the default size of 16*1024 is used or spark.cassandra.output.batch.size.bytes if set.
@param batch_buffer_size(int):
The maximum number of batches which are 'pending'.
If None given the default of 1000 is used.
@param batch_grouping_key(string):
The way batches are formed:
* all: any row can be added to any batch
* replicaset: rows are batched for replica sets
* partition: rows are batched by their partition key
* None: defaults to "partition"
@param consistency_level(cassandra.ConsistencyLevel):
The consistency level used in writing to Cassandra.
If None defaults to LOCAL_ONE or spark.cassandra.output.consistency.level if set.
@param parallelism_level(int):
The maximum number of batches written in parallel.
If None defaults to 8 or spark.cassandra.output.concurrent.writes if set.
        @param throughput_mibps(int):
            The maximum write throughput in MiB per second.
            If None given the write throughput is not limited.
@param ttl(int or timedelta):
            The time to live in seconds, or as a timedelta, to use for the values.
            If None given no TTL is used.
@param timestamp(int, date or datetime):
The timestamp in milliseconds, date or datetime to use for the values.
If None given the Cassandra nodes determine the timestamp.
@param metrics_enabled(bool):
Whether to enable task metrics updates.
'''
self.batch_size = batch_size
self.batch_buffer_size = batch_buffer_size
self.batch_grouping_key = batch_grouping_key
self.consistency_level = consistency_level
self.parallelism_level = parallelism_level
self.throughput_mibps = throughput_mibps
        # convert a timedelta ttl to whole seconds
        if ttl and isinstance(ttl, timedelta):
            ttl = int(ttl.total_seconds())
        self.ttl = ttl
# convert date or datetime objects to a timestamp in milliseconds since the UNIX epoch
if timestamp and (isinstance(timestamp, datetime) or isinstance(timestamp, date)):
timestamp = int((timestamp - timestamp.__class__(1970, 1, 1)).total_seconds() * 1000)
self.timestamp = timestamp
self.metrics_enabled = metrics_enabled
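# Illustrative usage (assumes a live SparkContext `sc`):
#   conf = WriteConf(sc, batch_size=32 * 1024, ttl=timedelta(hours=1))
#   conf.ttl        # -> 3600: the timedelta is converted to whole seconds
#   conf.timestamp  # -> None: the Cassandra nodes assign timestamps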
|
"""This example demonstrates how to authenticate using OAuth2.
This example is intended for users who wish to use the oauth2client library
directly. Using a workflow similar to the example here, you can take advantage
of the oauth2client in a broader range of contexts than caching your refresh
token using the config.py scripts allows.
You can avoid having to use the oauth2client library directly by using the Ads
Python Client Library's config.py script to cache a client ID, client secret,
and refresh token for reuse.
This example is intended to be run from the command line as it takes user input.
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
import httplib2
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
from adspygoogle import DfpClient
from oauth2client.client import FlowExchangeError
from oauth2client.client import OAuth2WebServerFlow
APPLICATION_NAME = 'INSERT_APPLICATION_NAME_HERE'
NETWORK_CODE = 'INSERT_NETWORK_CODE_HERE'
OAUTH2_CLIENT_ID = 'INSERT_OAUTH2_CLIENT_ID_HERE'
OAUTH2_CLIENT_SECRET = 'INSERT_OAUTH2_CLIENT_SECRET_HERE'
def main(application_name, network_code, oauth2_client_id,
oauth2_client_secret):
# We're using the oauth2client library:
# http://code.google.com/p/google-api-python-client/downloads/list
flow = OAuth2WebServerFlow(
client_id=oauth2_client_id,
client_secret=oauth2_client_secret,
scope='https://www.google.com/apis/ads/publisher',
user_agent='oauth2 code example',
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
# Get the authorization URL to direct the user to.
authorize_url = flow.step1_get_authorize_url()
print ('Log in to your DFP account and open the following URL: \n%s\n' %
authorize_url)
print 'After approving the token enter the verification code (if specified).'
code = raw_input('Code: ').strip()
credential = None
try:
credential = flow.step2_exchange(code)
except FlowExchangeError, e:
sys.exit('Authentication has failed: %s' % e)
# Create the DfpClient and set the OAuth2 credentials.
client = DfpClient(headers={
'networkCode': network_code,
'applicationName': application_name,
'userAgent': 'OAuth2 Example',
'oauth2credentials': credential
})
# OAuth2 credentials objects can be reused
credentials = client.oauth2credentials
print 'OAuth2 authorization successful!'
# OAuth2 credential objects can be refreshed via credentials.refresh() - the
# access token expires after 1 hour.
credentials.refresh(httplib2.Http())
# Note: you could simply set the credentials as below and skip the previous
# steps once access has been granted.
client.oauth2credentials = credentials
network_service = client.GetService('NetworkService', version='v201311')
# Get all networks that you have access to with the current login credentials.
networks = network_service.GetAllNetworks()
for network in networks:
print ('Network with network code \'%s\' and display name \'%s\' was found.'
% (network['networkCode'], network['displayName']))
print
print 'Number of results found: %s' % len(networks)
if __name__ == '__main__':
main(APPLICATION_NAME, NETWORK_CODE, OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET)
|
"""Tests for the Sonos Media Player platform."""
from unittest.mock import PropertyMock
import pytest
from soco.exceptions import NotSupportedException
from homeassistant.components.sonos import DATA_SONOS, DOMAIN, media_player
from homeassistant.const import STATE_IDLE
from homeassistant.core import Context
from homeassistant.exceptions import Unauthorized
from homeassistant.helpers import device_registry as dr
from homeassistant.setup import async_setup_component
async def setup_platform(hass, config_entry, config):
"""Set up the media player platform for testing."""
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
async def test_async_setup_entry_hosts(hass, config_entry, config, soco):
"""Test static setup."""
await setup_platform(hass, config_entry, config)
speakers = list(hass.data[DATA_SONOS].discovered.values())
speaker = speakers[0]
assert speaker.soco == soco
media_player = hass.states.get("media_player.zone_a")
assert media_player.state == STATE_IDLE
async def test_async_setup_entry_discover(hass, config_entry, discover):
"""Test discovery setup."""
await setup_platform(hass, config_entry, {})
speakers = list(hass.data[DATA_SONOS].discovered.values())
speaker = speakers[0]
assert speaker.soco.uid == "RINCON_test"
media_player = hass.states.get("media_player.zone_a")
assert media_player.state == STATE_IDLE
async def test_discovery_ignore_unsupported_device(hass, config_entry, soco, caplog):
"""Test discovery setup."""
message = f"GetVolume not supported on {soco.ip_address}"
type(soco).volume = PropertyMock(side_effect=NotSupportedException(message))
await setup_platform(hass, config_entry, {})
assert message in caplog.text
assert not hass.data[DATA_SONOS].discovered
async def test_services(hass, config_entry, config, hass_read_only_user):
"""Test join/unjoin requires control access."""
await setup_platform(hass, config_entry, config)
with pytest.raises(Unauthorized):
await hass.services.async_call(
DOMAIN,
media_player.SERVICE_JOIN,
{"master": "media_player.bla", "entity_id": "media_player.blub"},
blocking=True,
context=Context(user_id=hass_read_only_user.id),
)
async def test_device_registry(hass, config_entry, config, soco):
"""Test sonos device registered in the device registry."""
await setup_platform(hass, config_entry, config)
device_registry = dr.async_get(hass)
reg_device = device_registry.async_get_device(
identifiers={("sonos", "RINCON_test")}
)
assert reg_device.model == "Model Name"
assert reg_device.sw_version == "13.1"
assert reg_device.connections == {
(dr.CONNECTION_NETWORK_MAC, "00:11:22:33:44:55"),
(dr.CONNECTION_UPNP, "uuid:RINCON_test"),
}
assert reg_device.manufacturer == "Sonos"
assert reg_device.suggested_area == "Zone A"
assert reg_device.name == "Zone A"
async def test_entity_basic(hass, config_entry, discover):
"""Test basic state and attributes."""
await setup_platform(hass, config_entry, {})
state = hass.states.get("media_player.zone_a")
assert state.state == STATE_IDLE
attributes = state.attributes
assert attributes["friendly_name"] == "Zone A"
assert attributes["is_volume_muted"] is False
assert attributes["volume_level"] == 0.19
|
import sublime
import logging
import os
from FSharp.fsac import server
from FSharp.fsac.client import FsacClient
from FSharp.fsac.request import CompilerLocationRequest
from FSharp.fsac.request import ProjectRequest
from FSharp.fsac.request import ParseRequest
from FSharp.lib.project import FSharpFile
from FSharp.lib.project import FSharpProjectFile
from FSharp.lib import response_processor
from FSharp.lib.response_processor import ON_COMPILER_PATH_AVAILABLE
_logger = logging.getLogger(__name__)
class Editor(object):
"""Global editor state.
"""
def __init__(self, resp_proc):
        _logger.info('starting fsac server...')
self.fsac = FsacClient(server.start(), resp_proc)
self.compilers_path = None
self.project_file = None
        self.fsac.send_request(CompilerLocationRequest())
response_processor.add_listener(ON_COMPILER_PATH_AVAILABLE,
self.on_compiler_path_available)
def on_compiler_path_available(self, data):
self.compilers_path = data['response'].compilers_path
@property
def compiler_path(self):
if self.compilers_path is None:
return None
return os.path.join(self.compilers_path, 'fsc.exe')
@property
def interpreter_path(self):
if self.compilers_path is None:
return None
return os.path.join(self.compilers_path, 'fsi.exe')
def refresh(self, fs_file):
assert isinstance(fs_file, FSharpFile), 'wrong argument: %s' % fs_file
# todo: run in alternate thread
if not self.project_file:
self.project_file = FSharpProjectFile.from_path(fs_file.path)
self.set_project()
return
if not self.project_file.governs(fs_file.path):
new_project_file = FSharpProjectFile.from_path(fs_file.path)
self.project_file = new_project_file
self.set_project()
return
def set_project(self):
self.fsac.send_request(ProjectRequest(self.project_file.path))
def parse_file(self, fs_file, content):
self.fsac.send_request(ParseRequest(fs_file.path, content))
def parse_view(self, view):
# todo: what about unsaved files?
fs_file = FSharpFile(view)
if not fs_file.is_fsharp_file:
return
self.refresh(fs_file)
        # todo: very inefficient
        if fs_file.is_code:
            content = view.substr(sublime.Region(0, view.size()))
self.parse_file(fs_file, content)
|
from __future__ import (print_function)
import os
import sys
import configparser
if sys.version_info <= (3, 0):
print("Error: Zulip is a Python 3 project, and cannot be run with Python 2.")
print("Use e.g. `/path/to/manage.py` not `python /path/to/manage.py`.")
sys.exit(1)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
from scripts.lib.zulip_tools import assert_not_running_as_root
if __name__ == "__main__":
assert_not_running_as_root()
config_file = configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
PRODUCTION = config_file.has_option('machine', 'deploy_type')
HAS_SECRETS = os.access('/etc/zulip/zulip-secrets.conf', os.R_OK)
if PRODUCTION and not HAS_SECRETS:
# The best way to detect running manage.py as another user in
# production before importing anything that would require that
# access is to check for access to /etc/zulip/zulip.conf (in
# which case it's a production server, not a dev environment)
# and lack of access for /etc/zulip/zulip-secrets.conf (which
# should be only readable by root and zulip)
print("Error accessing Zulip secrets; manage.py in production must be run as the zulip user.")
sys.exit(1)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
from django.conf import settings
from django.core.management import execute_from_command_line
from django.core.management.base import CommandError
from scripts.lib.zulip_tools import log_management_command
log_management_command(" ".join(sys.argv), settings.MANAGEMENT_LOG_PATH)
os.environ.setdefault("PYTHONSTARTUP", os.path.join(BASE_DIR, "scripts/lib/pythonrc.py"))
if "--no-traceback" not in sys.argv and len(sys.argv) > 1:
sys.argv.append("--traceback")
try:
execute_from_command_line(sys.argv)
except CommandError as e:
print(e, file=sys.stderr)
sys.exit(1)
|
"""Pocketwalk context manager."""
import hashlib
import pathlib
import time
from runaway import signals
import pytoml as toml
def get_context_manager():
"""Get the context_manager plugin."""
return ContextManager()
class ContextManager:
"""Context manager plugin for pocketwalk."""
# [ API ]
def get_tools_unchanged_since_last_results(self, contexts):
"""Get the tools whose contexts are unchanged since the last results."""
return {tool: value['context'] for tool, value in self._tagged_contexts(contexts).items() if not value['changed']}
def get_tools_changed_since_last_save(self, contexts):
"""Get the tools whose contexts have changed since the last save."""
return {tool: value['context'] for tool, value in self._tagged_contexts(contexts).items() if value['changed']}
async def get_tool_context_data(self, config):
"""Get tool context data."""
last_context_per_tool_map = await signals.call(self._get_last_contexts_for, config)
current_context_per_tool_map = await signals.call(self._get_contexts_for, config=config)
return {
'last_saved': last_context_per_tool_map,
'current_state': current_context_per_tool_map,
}
@staticmethod
def contexts_in_a_and_not_b(*, a_context, b_context):
"""Return the contexts in a and not in b."""
to_return = {}
for tool, context in a_context.items():
if tool not in b_context:
to_return[tool] = context
return to_return
@staticmethod
async def save_context(tool, *, context):
"""Save the current context for the given tool."""
context = context.copy()
if 'affected files' in context:
del context['affected files']
        (pathlib.Path.cwd() / '.pocketwalk.cache').mkdir(parents=True, exist_ok=True)
(pathlib.Path.cwd() / '.pocketwalk.cache' / tool).with_suffix('.context').write_text(toml.dumps(context))
# [ Internal ]
@staticmethod
def _tagged_contexts(contexts):
"""Reduce the contexts to the latest, and tag them as changed/unchanged since the last save."""
tagged_contexts = {}
for tool, current_context in contexts['current_state'].items():
last_context = contexts['last_saved'].get(tool, None)
this_tagged_context = current_context.copy()
changed = last_context != current_context
this_tagged_context['affected files'] = []
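            # Narrow 'affected files' to just the changed targets only when a
            # previous context exists and the change is isolated to target
            # files; if triggers, config, or preconditions changed, every
            # target file is treated as affected.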
if (
last_context and
changed and
last_context['trigger files'] == this_tagged_context['trigger files'] and
last_context['config'] == this_tagged_context['config'] and
last_context['preconditions'] == this_tagged_context['preconditions']
):
for file_name, file_hash in list(this_tagged_context['target files'].items()):
if last_context['target files'].get(file_name, None) != file_hash:
this_tagged_context['affected files'].append(file_name)
else:
this_tagged_context['affected files'] = this_tagged_context['target files']
tagged = {'context': this_tagged_context, 'changed': changed}
tagged_contexts[tool] = tagged
return tagged_contexts
def _get_contexts_for(self, config):
"""Get the current contexts for the given tools."""
contexts = {}
for this_tool in config['tools']:
target_files = config[f'{this_tool}_targets']
trigger_files = config[f'{this_tool}_triggers']
args = config[f'{this_tool}_args']
hashed_target_files = self._get_hashes_for(target_files)
hashed_trigger_files = self._get_hashes_for(trigger_files)
contexts[this_tool] = {
'target files': hashed_target_files,
'trigger files': hashed_trigger_files,
'config': args,
'preconditions': config[f'{this_tool}_preconditions'],
}
return contexts
@staticmethod
def _get_last_contexts_for(config):
"""Get the last contexts for the given tools."""
tools = [t for t in config['tools'] if (pathlib.Path.cwd() / '.pocketwalk.cache' / t).with_suffix('.context').exists()]
loaded_contexts = {t: toml.loads(
(pathlib.Path.cwd() / '.pocketwalk.cache' / t).with_suffix('.context').read_text(),
) for t in tools}
for context in loaded_contexts.values():
context['config'] = context.get('config', [])
context['preconditions'] = context.get('preconditions', [])
context['trigger files'] = context.get('trigger files', {})
context['target files'] = context.get('target files', {})
return loaded_contexts
@staticmethod
def _get_hashes_for(path_strings):
"""Return sha hashes for the path strings."""
tries = 0
max_tries = 3
while True:
try:
return {s: hashlib.sha1(pathlib.Path(s).read_bytes()).hexdigest() for s in path_strings}
except FileNotFoundError:
# Can happen if file is being written while we try to read
if tries < max_tries:
time.sleep(0.1)
tries += 1
continue
raise
assert all((
get_context_manager,
))
|
try:
import json
except ImportError:
import simplejson as json
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class SkypeEnFiSpider(BaseSpider):
store_ids = {
'17699',
'17712',
'17675',
'17705',
'17676',
'17651',
'17700',
'17710',
'17722',
'17659',
'17643',
'17642',
'17718',
'17638',
'17644',
'17702',
'17649',
'17641',
'17666',
'17648',
'17645',
'17679',
'17653',
'17715',
'17664',
'17671',
'17711',
'17656',
'17662',
'17672',
'17669',
'17650',
'17707',
'17663',
'17719',
'17701',
'17721',
'17657',
'17640',
'17668',
'29665'
}
products = [
"/headsets/mini-jack/iss-talk-8120-freetalk-handsfree/",
"/headsets/usb/iss-talk-5115-everyman/",
"/headsets/usb/iss-talk-5204-freetalk-mono/",
"/headsets/wireless/iss-talk-5195-everyman-wireless/",
"/headsets/wireless/iss-talk-5192-freetalk/",
"/phones/cordless-router/rtx-dualphone-4088-black/",
"/phones/cordless-router/rtx-dualphone-4088-white/",
"/phones/plug-in/iss-talk-3000-freetalk-office-phone/",
"/phones/speakerphones/clear-chat-60/",
"/phones/speakerphones/clear-chat-160/",
"/phones/speakerphones/yamaha-projectphone-psg-01s/",
"/webcams/hd-capable/iss-talk-7140/",
"/webcams/hd-capable/fv-touchcam-n1/",
"/webcams/hd-capable/iss-talk-7182-freetalk-conference-hd-camera/",
"/webcams/standard-quality/iss-talk-7002/",
"/webcams/tvwebcams/iss-talk-7182/",
]
name = 'SkypeEnFi'
allowed_domains = ['skype.com']
start_urls = ()
site_name = 'http://shop.skype.com/intl/en-fi'
ajax_url = "http://shop.skype.com/proxy/proxy.php?productid=%%prod_id%%&shopid=%%shop_id%%"
def start_requests(self):
for product in self.products:
url = self.site_name + product
yield Request(url, self.parse)
def parse(self, response):
hxs = HtmlXPathSelector(response)
name = hxs.select("//div[@class='prodMain']/h1/text()").extract()
if not name:
logging.error("NO NAME! %s" % response.url)
return
name = name[0].strip()
items = hxs.select("//span[@class='buy_now_compare']")
i = 0
for item in items:
prod_id = item.select("@prodid").extract()[0]
shop_id = item.select("@shopid").extract()[0]
if shop_id in self.store_ids:
url = self.ajax_url.replace("%%prod_id%%", prod_id).replace("%%shop_id%%", shop_id)
yield Request(url, callback=self.parse_ajax, meta={'name': name, 'url': response.url})
i += 1
logging.error("Processing product %s" % i)
def parse_ajax(self, response):
content = response.body.strip("()")
result = json.loads(content)[0]
price = result['Promotion']
if price:
name = response.meta['name']
url = response.meta['url']
product = Product()
loader = ProductLoader(item=product, response=response)
loader.add_value('url', url)
loader.add_value('name', name)
loader.add_value('price', price)
yield loader.load_item()
else:
logging.error("No price %s" % response.meta['url'])
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import FilterRegistry
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.utils import local_session, type_schema
log = logging.getLogger('custodian.cfn')
filters = FilterRegistry('cfn.filters')
actions = ActionRegistry('cfn.actions')
@resources.register('cfn')
class CloudFormation(QueryResourceManager):
class resource_type(object):
service = 'cloudformation'
type = 'stack'
enum_spec = ('describe_stacks', 'Stacks[]', None)
id = 'StackName'
filter_name = 'StackName'
filter_type = 'scalar'
name = 'StackName'
date = 'CreationTime'
dimension = None
action_registry = actions
filter_registry = filters
@actions.register('delete')
class Delete(BaseAction):
"""Action to delete cloudformation stacks
It is recommended to use a filter to avoid unwanted deletion of stacks
:example:
    .. code-block:: yaml
policies:
- name: cloudformation-delete-failed-stacks
resource: cfn
filters:
- StackStatus: ROLLBACK_COMPLETE
actions:
- delete
"""
schema = type_schema('delete')
permissions = ("cloudformation:DeleteStack",)
def process(self, stacks):
with self.executor_factory(max_workers=10) as w:
list(w.map(self.process_stacks, stacks))
def process_stacks(self, stack):
client = local_session(
self.manager.session_factory).client('cloudformation')
client.delete_stack(StackName=stack['StackName'])
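# --- Illustrative usage (hypothetical file name) ---
# The delete action above is driven from a policy file via the c7n CLI, e.g.
# with the YAML from the Delete docstring saved as cfn-policy.yml:
#   custodian run --output-dir out cfn-policy.yml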
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from resource_management import *
from stacks.utils.RMFTestCase import *
import getpass
import json
@patch.object(getpass, "getuser", new = MagicMock(return_value='some_user'))
@patch.object(Hook, "run_custom_hook", new = MagicMock())
class TestHookBeforeInstall(RMFTestCase):
def test_hook_default(self):
self.executeScript("before-INSTALL/scripts/hook.py",
classname="BeforeInstallHook",
target=RMFTestCase.TARGET_STACK_HOOKS,
command="hook",
config_file="default.json"
)
self.assertResourceCalled('Repository', 'HDP-2.6-repo-1',
base_url = 'http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.6.4.0-60',
action = ['prepare'],
components = [u'HDP', 'main'],
repo_template = '[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
repo_file_name = None,
mirror_list = None,
)
self.assertResourceCalled('Repository', 'HDP-2.6-GPL-repo-1',
base_url = 'http://s3.amazonaws.com/dev.hortonworks.com/HDP-GPL/centos6/2.x/BUILDS/2.6.4.0-60',
action = ['prepare'],
components = [u'HDP-GPL', 'main'],
repo_template = '[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
repo_file_name = None,
mirror_list = None,
)
self.assertResourceCalled('Repository', 'HDP-UTILS-1.1.0.22-repo-1',
base_url = 'http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.22/repos/centos6',
action = ['prepare'],
components = [u'HDP-UTILS', 'main'],
repo_template = '[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
repo_file_name = None,
mirror_list = None,
)
self.assertResourceCalled('Repository', None,
action=['create'],
)
self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
self.assertResourceCalled('Package', 'curl', retry_count=5, retry_on_repo_unavailability=False)
self.assertNoMoreResources()
def test_hook_no_repos(self):
config_file = self.get_src_folder() + "/test/python/stacks/configs/default.json"
with open(config_file, "r") as f:
command_json = json.load(f)
command_json['repositoryFile']['repositories'] = []
self.executeScript("before-INSTALL/scripts/hook.py",
classname="BeforeInstallHook",
command="hook",
target=RMFTestCase.TARGET_STACK_HOOKS,
config_dict=command_json)
self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
self.assertResourceCalled('Package', 'curl', retry_count=5, retry_on_repo_unavailability=False)
self.assertNoMoreResources()
def test_hook_default_repository_file(self):
self.executeScript("before-INSTALL/scripts/hook.py",
classname="BeforeInstallHook",
command="hook",
target=RMFTestCase.TARGET_STACK_HOOKS,
config_file="repository_file.json"
)
self.assertResourceCalled('Repository', 'HDP-2.2-repo-4',
action=['prepare'],
base_url='http://repo1/HDP/centos5/2.x/updates/2.2.0.0',
components=['HDP', 'main'],
mirror_list=None,
repo_file_name='ambari-hdp-4',
repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
)
self.assertResourceCalled('Repository', 'HDP-UTILS-1.1.0.20-repo-4',
action=['prepare'],
base_url='http://repo1/HDP-UTILS/centos5/2.x/updates/2.2.0.0',
components=['HDP-UTILS', 'main'],
mirror_list=None,
repo_file_name='ambari-hdp-4',
repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
)
self.assertResourceCalled('Repository', None,
action=['create'],
)
self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
self.assertResourceCalled('Package', 'curl', retry_count=5, retry_on_repo_unavailability=False)
self.assertNoMoreResources()
|
"""
Copyright (c) 2014-2015 F-Secure
See LICENSE for details
"""
import json
from datetime import datetime
import isodate
import requests
from resource_api.errors import ValidationError, DoesNotExist, AuthorizationError, DataConflictError, Forbidden
EXCEPTION_MAP = {
400: ValidationError,
403: AuthorizationError,
404: DoesNotExist,
405: Forbidden,
409: DataConflictError,
501: NotImplementedError
}
class Response(object):
def __init__(self, status_code, data):
self.status_code, self.data = status_code, data
class HttpClient(object):
def __init__(self, auth_headers=None, session=None):
self._auth_headers = auth_headers or {}
self._session = session or requests.Session()
def open(self, path, method="GET", content_type="application/json", query_string=None, data=None):
headers = {'content-type': content_type}
headers.update(self._auth_headers)
resp = self._session.request(url=path, method=method.lower(), params=query_string, data=data, headers=headers)
if resp.content:
data = resp.text
else:
data = None
return Response(resp.status_code, data)
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
        # Delegate to the base class, which raises TypeError for unsupported
        # types instead of silently encoding them as null.
        return super(JSONEncoder, self).default(obj)
class JSONDecoder(json.JSONDecoder):
def __init__(self, schema=None, *args, **kwargs):
super(JSONDecoder, self).__init__(*args, **kwargs)
self.schema = schema
def decode(self, string):
json_obj = super(JSONDecoder, self).decode(string)
return self._decode_obj(json_obj)
def _decode_obj(self, json_obj):
if self.schema and isinstance(json_obj, dict):
py_obj = self._decode_dict(json_obj, self.schema)
elif self.schema and isinstance(json_obj, list):
py_obj = map(self._decode_obj, json_obj)
else:
py_obj = json_obj
return py_obj
def _decode_dict(self, json_dict, schema=None):
py_dict = {}
for name, value in json_dict.items():
field_schema = schema.get(name, {}) if schema else {}
if isinstance(value, dict):
py_dict[name] = self._decode_dict(value, field_schema)
elif field_schema.get("type") == "datetime":
py_dict[name] = isodate.parse_datetime(value)
else:
py_dict[name] = value
return py_dict
class JsonClient(object):
def __init__(self, http_client):
self._http_client = http_client
def open(self, url, method="GET", params=None, data=None, schema=None):
if data is not None:
data = json.dumps(data, cls=JSONEncoder)
if params is not None:
for key, value in params.iteritems():
if isinstance(value, list) or isinstance(value, dict):
params[key] = json.dumps(value, cls=JSONEncoder)
if not url:
url += "/"
resp = self._http_client.open(
path=url,
method=method,
content_type="application/json",
query_string=params,
data=data
)
rval = json.loads(resp.data, cls=JSONDecoder, schema=schema) if resp.data else None
if resp.status_code > 199 and resp.status_code < 400:
return rval
exception_class = EXCEPTION_MAP.get(resp.status_code, Exception)
raise exception_class(rval)
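# --- Illustrative sketch (not part of the original module; no real server) ---
# The schema-aware decoding above turns ISO 8601 strings into datetime objects
# whenever the schema marks a field as "datetime"; the payload is hypothetical.
if __name__ == '__main__':
    schema = {'created': {'type': 'datetime'}}
    payload = '{"created": "2015-01-02T03:04:05"}'
    decoded = json.loads(payload, cls=JSONDecoder, schema=schema)
    print(repr(decoded['created']))  # -> datetime.datetime(2015, 1, 2, 3, 4, 5)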
|
"""Creates the embedded_tools.zip that is part of the Bazel binary."""
import contextlib
import fnmatch
import os
import os.path
import re
import sys
import zipfile
from src.create_embedded_tools_lib import copy_tar_to_zip
from src.create_embedded_tools_lib import copy_zip_to_zip
from src.create_embedded_tools_lib import is_executable
output_paths = [
('*tools/jdk/BUILD.tools', lambda x: 'tools/jdk/BUILD'),
('*tools/build_defs/repo/BUILD.repo',
lambda x: 'tools/build_defs/repo/BUILD'),
('*tools/j2objc/BUILD.tools', lambda x: 'tools/j2objc/BUILD'),
('*tools/platforms/BUILD.tools', lambda x: 'platforms/BUILD'),
('*tools/platforms/*', lambda x: 'platforms/' + os.path.basename(x)),
('*tools/cpp/BUILD.tools', lambda x: 'tools/cpp/BUILD'),
('*tools/cpp/runfiles/generated_*',
lambda x: 'tools/cpp/runfiles/' + os.path.basename(x)[len('generated_'):]),
('*BUILD.java_langtools', lambda x: 'third_party/java/jdk/langtools/BUILD'),
('*launcher.exe', lambda x: 'tools/launcher/launcher.exe'),
('*def_parser.exe', lambda x: 'tools/def_parser/def_parser.exe'),
('*zipper.exe', lambda x: 'tools/zip/zipper/zipper.exe'),
('*zipper', lambda x: 'tools/zip/zipper/zipper'),
('*xcode*make_hashed_objlist.py',
lambda x: 'tools/objc/make_hashed_objlist.py'),
('*xcode*xcode-locator', lambda x: 'tools/objc/xcode-locator'),
('*src/tools/xcode/*', lambda x: 'tools/objc/' + os.path.basename(x)),
# --experimental_sibling_repository_layout=false
('*external/openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
('*external/openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
# --experimental_sibling_repository_layout=true
('*openjdk_*/file/*.tar.gz', lambda x: 'jdk.tar.gz'),
('*openjdk_*/file/*.zip', lambda x: 'jdk.zip'),
('*src/minimal_jdk.tar.gz', lambda x: 'jdk.tar.gz'),
('*src/minimal_jdk.zip', lambda x: 'jdk.zip'),
('*.bzl.tools', lambda x: x[:-6]),
('*', lambda x: re.sub(r'^.*bazel-out/[^/]*/bin/', '', x, count=1)),
]
def get_output_path(path):
for pattern, transformer in output_paths:
if fnmatch.fnmatch(path.replace('\\', '/'), pattern):
# BUILD.tools are stored as BUILD files.
return transformer(path).replace('/BUILD.tools', '/BUILD')
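# --- Illustrative sketch (not part of the original script; never called) ---
# Shows how the table above maps a couple of hypothetical input paths: the
# first hits the '*tools/cpp/BUILD.tools' rule, the second the catch-all rule.
def _example_output_paths():
  assert get_output_path('foo/tools/cpp/BUILD.tools') == 'tools/cpp/BUILD'
  assert get_output_path('bazel-out/k8-opt/bin/foo/bar.txt') == 'foo/bar.txt'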
def get_input_files(argsfile):
"""Returns a dict of archive_file to input_file.
This describes the files that should be put into the generated archive.
Args:
argsfile: The file containing the list of input files.
Raises:
ValueError: When two input files map to the same output file.
"""
with open(argsfile, 'r') as f:
input_files = sorted(set(x.strip() for x in f.readlines()))
result = {}
for input_file in input_files:
# If we have both a BUILD and a BUILD.tools file, take the latter only.
if (os.path.basename(input_file) == 'BUILD' and
input_file + '.tools' in input_files):
continue
# It's an error to have two files map to the same output file, because the
# result is hard to predict and can easily be wrong.
output_path = get_output_path(input_file)
if output_path in result:
raise ValueError(
'Duplicate output file: Both {} and {} map to {}'.format(
result[output_path], input_file, output_path))
result[output_path] = input_file
return result
def copy_jdk_into_archive(output_zip, archive_file, input_file):
"""Extract the JDK and adds it to the archive under jdk/*."""
def _replace_dirname(filename):
# Rename the first folder to 'jdk', because Bazel looks for a
# bundled JDK in the embedded tools using that folder name.
return 'jdk/' + '/'.join(filename.split('/')[1:])
# The JDK is special - it's extracted instead of copied.
if archive_file.endswith('.tar.gz'):
copy_tar_to_zip(output_zip, input_file, _replace_dirname)
elif archive_file.endswith('.zip'):
copy_zip_to_zip(output_zip, input_file, _replace_dirname)
def main():
output_zip = os.path.join(os.getcwd(), sys.argv[1])
input_files = get_input_files(sys.argv[2])
# Copy all the input_files into output_zip.
# Adding contextlib.closing to be python 2.6 (for centos 6.7) compatible
with contextlib.closing(
zipfile.ZipFile(output_zip, 'w', zipfile.ZIP_DEFLATED)) as output_zip:
zipinfo = zipfile.ZipInfo('WORKSPACE', (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o644 << 16
output_zip.writestr(zipinfo, 'workspace(name = "bazel_tools")\n')
# By sorting the file list, the resulting ZIP file will be reproducible and
# deterministic.
for archive_file, input_file in sorted(input_files.items()):
if os.path.basename(archive_file) in ('jdk.tar.gz', 'jdk.zip'):
copy_jdk_into_archive(output_zip, archive_file, input_file)
else:
zipinfo = zipfile.ZipInfo(archive_file, (1980, 1, 1, 0, 0, 0))
zipinfo.external_attr = 0o755 << 16 if is_executable(
input_file) else 0o644 << 16
zipinfo.compress_type = zipfile.ZIP_DEFLATED
with open(input_file, 'rb') as f:
output_zip.writestr(zipinfo, f.read())
if __name__ == '__main__':
main()
|
"""
Created on September, 2017
Restructured on April, 2020
@author: wangc
"""
import copy
import numpy as np
from numpy import linalg
import time
from .HybridModelBase import HybridModelBase
import Files
from utils import InputData, InputTypes, mathUtils
from utils import utils
from Runners import Error as rerror
class HybridModel(HybridModelBase):
"""
    HybridModel Class. This class automatically selects which model to run among the provided models
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for the class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for specifying input of cls.
"""
inputSpecification = super(HybridModel, cls).getInputSpecification()
romInput = InputData.parameterInputFactory("ROM", contentType=InputTypes.StringType)
romInput.addParam("class", InputTypes.StringType)
romInput.addParam("type", InputTypes.StringType)
inputSpecification.addSub(romInput)
targetEvaluationInput = InputData.parameterInputFactory("TargetEvaluation", contentType=InputTypes.StringType)
targetEvaluationInput.addParam("class", InputTypes.StringType)
targetEvaluationInput.addParam("type", InputTypes.StringType)
inputSpecification.addSub(targetEvaluationInput)
# add settings block
tolInput = InputData.parameterInputFactory("tolerance", contentType=InputTypes.FloatType)
maxTrainStepInput = InputData.parameterInputFactory("maxTrainSize", contentType=InputTypes.IntegerType)
initialTrainStepInput = InputData.parameterInputFactory("minInitialTrainSize", contentType=InputTypes.IntegerType)
settingsInput = InputData.parameterInputFactory("settings", contentType=InputTypes.StringType)
settingsInput.addSub(tolInput)
settingsInput.addSub(maxTrainStepInput)
settingsInput.addSub(initialTrainStepInput)
inputSpecification.addSub(settingsInput)
# add validationMethod block
threshold = InputData.parameterInputFactory("threshold", contentType=InputTypes.FloatType)
validationMethodInput = InputData.parameterInputFactory("validationMethod", contentType=InputTypes.StringType)
validationMethodInput.addParam("name", InputTypes.StringType)
validationMethodInput.addSub(threshold)
inputSpecification.addSub(validationMethodInput)
return inputSpecification
@classmethod
def specializeValidateDict(cls):
"""
This method describes the types of input accepted with a certain role by the model class specialization
@ In, None
@ Out, None
"""
pass
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
self.modelInstance = None # instance of given model
self.targetEvaluationInstance = None # Instance of data object used to store the inputs and outputs of HybridModel
self.tempTargetEvaluation = None # Instance of data object that are used to store the training set
self.romsDictionary = {} # dictionary of models that is going to be employed, i.e. {'romName':Instance}
self.romTrainStartSize = 10 # the initial size of training set
self.romTrainMaxSize = 1.0e6 # the maximum size of training set
self.romValidateSize = 10 # the size of rom validation set
self.romTrained = False # True if all roms are trained
self.romConverged = False # True if all roms are converged
self.romValid = False # True if all roms are valid for given input data
self.romConvergence = 0.01 # The criterion used to check ROM convergence
self.validationMethod = {} # dict used to store the validation methods and their settings
self.existTrainSize = 0 # The size of existing training set in the provided data object via 'TargetEvaluation'
self.printTag = 'HYBRIDMODEL MODEL' # print tag
self.tempOutputs = {} # Indicators used to collect model inputs/outputs for rom training
self.oldTrainingSize = 0 # The size of training set that is previous used to train the rom
self.modelIndicator = {} # a dict i.e. {jobPrefix: 1 or 0} used to indicate the runs: model or rom. '1' indicates ROM run, and '0' indicates Code run
self.crowdingDistance = None
self.metricCategories = {'find_min':['explained_variance_score', 'r2_score'], 'find_max':['median_absolute_error', 'mean_squared_error', 'mean_absolute_error']}
# assembler objects to be requested
self.addAssemblerObject('ROM', InputData.Quantity.one_to_infinity)
self.addAssemblerObject('TargetEvaluation', InputData.Quantity.one)
def localInputAndChecks(self,xmlNode):
"""
Function to read the portion of the xml input that belongs to this specialized class
and initialize some stuff based on the inputs got
@ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
@ Out, None
"""
HybridModelBase.localInputAndChecks(self, xmlNode)
paramInput = HybridModel.getInputSpecification()()
paramInput.parseNode(xmlNode)
for child in paramInput.subparts:
if child.getName() == 'TargetEvaluation':
self.targetEvaluationInstance = child.value.strip()
if child.getName() == 'ROM':
romName = child.value.strip()
self.romsDictionary[romName] = {'Instance': None, 'Converged': False, 'Valid': False}
if child.getName() == 'settings':
for childChild in child.subparts:
if childChild.getName() == 'maxTrainSize':
self.romTrainMaxSize = utils.intConversion(childChild.value)
if childChild.getName() == 'minInitialTrainSize':
self.romTrainStartSize = utils.intConversion(childChild.value)
if childChild.getName() == 'tolerance':
self.romConvergence = utils.floatConversion(childChild.value)
if child.getName() == 'validationMethod':
name = child.parameterValues['name']
self.validationMethod[name] = {}
for childChild in child.subparts:
if childChild.getName() == 'threshold':
self.validationMethod[name]['threshold'] = utils.floatConversion(childChild.value)
if name != 'CrowdingDistance':
self.raiseAnError(IOError, "Validation method ", name, " is not implemented yet!")
def initialize(self,runInfo,inputs,initDict=None):
"""
Method to initialize this model class
@ In, runInfo, dict, is the run info from the jobHandler
@ In, inputs, list, is a list containing whatever is passed with an input role in the step
@ In, initDict, dict, optional, dictionary of all objects available in the step is using this model
@ Out, None
"""
HybridModelBase.initialize(self,runInfo,inputs,initDict)
self.targetEvaluationInstance = self.retrieveObjectFromAssemblerDict('TargetEvaluation', self.targetEvaluationInstance)
if len(self.targetEvaluationInstance):
self.raiseAWarning("The provided TargetEvaluation data object is not empty, the existing data will also be used to train the ROMs!")
self.existTrainSize = len(self.targetEvaluationInstance)
self.tempTargetEvaluation = copy.deepcopy(self.targetEvaluationInstance)
if len(self.modelInstances) != 1:
self.raiseAnError(IOError, '"HybridModel" can only accept one "Model" XML subnode!',
'The following "Models" are provided "{}"'.format(','.join(list(self.modelInstances.keys()))))
self.modelInstance = list(self.modelInstances.values())[0]
if self.targetEvaluationInstance is None:
self.raiseAnError(IOError, 'TargetEvaluation XML block needs to be inputted!')
for romName, romInfo in self.romsDictionary.items():
romInfo['Instance'] = self.retrieveObjectFromAssemblerDict('ROM', romName)
if romInfo['Instance'] is None:
self.raiseAnError(IOError, 'ROM XML block needs to be inputted!')
modelInputs = self.targetEvaluationInstance.getVars("input")
modelOutputs = self.targetEvaluationInstance.getVars("output")
modelName = self.modelInstance.name
totalRomOutputs = []
for romInfo in self.romsDictionary.values():
romIn = romInfo['Instance']
if romIn.amITrained:
self.raiseAWarning("The provided rom ", romIn.name, " is already trained, we will reset it!")
romIn.reset()
romIn.initialize(runInfo, inputs, initDict)
romInputs = romIn.getInitParams()['Features']
romOutputs = romIn.getInitParams()['Target']
totalRomOutputs.extend(romOutputs)
unknownList = utils.checkIfUnknowElementsinList(modelInputs, romInputs)
if unknownList:
        self.raiseAnError(IOError, 'Input Parameters: "', ','.join(str(e) for e in unknownList), '" used in ROM ', romIn.name, ' cannot be found in Model ', modelName)
unknownList = utils.checkIfUnknowElementsinList(romInputs, modelInputs)
if unknownList:
self.raiseAnError(IOError, 'Input Parameters: "', ','.join(str(e) for e in unknownList), '" used in Model ', modelName, ', but not used in ROM ', romIn.name)
unknownList = utils.checkIfUnknowElementsinList(modelOutputs, romOutputs)
if unknownList:
        self.raiseAnError(IOError, 'Output Parameters: "', ','.join(str(e) for e in unknownList), '" used in ROM ', romIn.name, ' cannot be found in Model ', modelName)
if romIn.amITrained:
# Only untrained roms are allowed
self.raiseAnError(IOError,'HybridModel only accepts untrained ROM, but rom "', romIn.name, '" is already trained')
# check: we require that the union of ROMs outputs is the same as the paired model in order to use the ROM
# to replace the paired model.
if len(set(totalRomOutputs)) != len(totalRomOutputs):
dup = []
for elem in set(totalRomOutputs):
if totalRomOutputs.count(elem) > 1:
dup.append(elem)
      # we assume there are no duplicate outputs among the roms
self.raiseAnError(IOError, 'The following outputs ', ','.join(str(e) for e in dup), "are found in the outputs of multiple roms!")
unknownList = utils.checkIfUnknowElementsinList(totalRomOutputs,modelOutputs)
if unknownList:
self.raiseAnError(IOError, "The following outputs: ", ','.join(str(e) for e in unknownList), " used in Model: ", modelName, "but not used in the paired ROMs.")
self.tempOutputs['uncollectedJobIds'] = []
def getInitParams(self):
"""
Method used to export to the printer in the base class the additional PERMANENT your local class have
@ In, None
@ Out, tempDict, dict, dictionary to be updated. {'attribute name':value}
"""
tempDict = HybridModelBase.getInitParams(self)
tempDict['ROMs contained in HybridModel are '] = self.romsDictionary.keys()
return tempDict
def getAdditionalInputEdits(self,inputInfo):
"""
Collects additional edits for the sampler to use when creating a new input. In this case, it calls all the getAdditionalInputEdits methods
of the sub-models
@ In, inputInfo, dict, dictionary in which to add edits
@ Out, None.
"""
HybridModelBase.getAdditionalInputEdits(self,inputInfo)
def __selectInputSubset(self,romName, kwargs):
"""
Method aimed to select the input subset for a certain model
@ In, romName, string, the rom name
@ In, kwargs , dict, the kwarded dictionary where the sampled vars are stored
      @ Out, selectedKwargs , dict, the subset of variables (in a shallow copy of the kwargs dict)
"""
selectedKwargs = copy.copy(kwargs)
selectedKwargs['SampledVars'], selectedKwargs['SampledVarsPb'] = {}, {}
featsList = self.romsDictionary[romName]['Instance'].getInitParams()['Features']
selectedKwargs['SampledVars'] = {key: kwargs['SampledVars'][key] for key in featsList}
if 'SampledVarsPb' in kwargs.keys():
selectedKwargs['SampledVarsPb'] = {key: kwargs['SampledVarsPb'][key] for key in featsList}
else:
selectedKwargs['SampledVarsPb'] = {key: 1.0 for key in featsList}
return selectedKwargs
def createNewInput(self,myInput,samplerType,**kwargs):
"""
This function will return a new input to be submitted to the model, it is called by the sampler.
@ In, myInput, list, the inputs (list) to start from to generate the new one
@ In, samplerType, string, is the type of sampler that is calling to generate a new input
@ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler,
        a mandatory key is 'SampledVars', which contains a dictionary {'name variable':value}
@ Out, newInputs, dict, dict that returns the new inputs for each sub-model
"""
self.raiseADebug("Create New Input")
useROM = kwargs['useROM']
if useROM:
identifier = kwargs['prefix']
newKwargs = {'prefix':identifier, 'useROM':useROM}
for romName in self.romsDictionary.keys():
newKwargs[romName] = self.__selectInputSubset(romName, kwargs)
newKwargs[romName]['prefix'] = romName+utils.returnIdSeparator()+identifier
newKwargs[romName]['uniqueHandler'] = self.name+identifier
else:
newKwargs = copy.deepcopy(kwargs)
if self.modelInstance.type == 'Code':
codeInput = []
romInput = []
for elem in myInput:
if isinstance(elem, Files.File):
codeInput.append(elem)
elif elem.type in ['PointSet', 'HistorySet']:
romInput.append(elem)
else:
self.raiseAnError(IOError, "The type of input ", elem.name, " can not be accepted!")
if useROM:
return (romInput, samplerType, newKwargs)
else:
return (codeInput, samplerType, newKwargs)
return (myInput, samplerType, newKwargs)
def trainRom(self, samplerType, kwargs):
"""
This function will train all ROMs if they are not converged
@ In, samplerType, string, the type of sampler
@ In, kwargs, dict, is a dictionary that contains the information coming from the sampler,
        a mandatory key is 'SampledVars', which contains a dictionary {'name variable':value}
@ Out, None
"""
self.raiseADebug("Start to train roms")
for romInfo in self.romsDictionary.values():
cvMetrics = romInfo['Instance'].convergence(self.tempTargetEvaluation)
if cvMetrics is not None:
converged = self.isRomConverged(cvMetrics)
romInfo['Converged'] = converged
if converged:
romInfo['Instance'].reset()
romInfo['Instance'].train(self.tempTargetEvaluation)
self.raiseADebug("ROM ", romInfo['Instance'].name, " is converged!")
else:
self.raiseAMessage("Minimum initial training size is met, but the training size is not enough to be used to perform cross validation")
self.oldTrainingSize = len(self.tempTargetEvaluation)
def isRomConverged(self, outputDict):
"""
This function will check the convergence of rom
@ In, outputDict, dict, dictionary contains the metric information
e.g. {targetName:{metricName:List of metric values}}, this dict is coming from results of cross validation
@ Out, converged, bool, True if the rom is converged
"""
converged = True
# very temporary solution
for romName, metricInfo in outputDict.items():
converged = self.checkErrors(metricInfo[0], metricInfo[1])
return converged
def checkErrors(self, metricType, metricResults):
"""
This function is used to compare the metric outputs with the tolerance for the rom convergence
@ In, metricType, list, the list of metrics
@ In, metricResults, list or dict
@ Out, converged, bool, True if the metric outputs are less than the tolerance
"""
    if isinstance(metricResults, (list, np.ndarray)):
      errorList = np.atleast_1d(metricResults)
    elif isinstance(metricResults, dict):
      # list() is required: wrapping a dict view directly yields a 0-d object
      # array rather than an array of metric values.
      errorList = np.atleast_1d(list(metricResults.values()))
else:
self.raiseAnError(IOError, "The outputs generated by the cross validation '", self.cvInstance.name, "' can not be processed by HybridModel '", self.name, "'!")
converged = False
error = None
# we only allow to define one metric in the cross validation PP
for key, metricList in self.metricCategories.items():
if metricType[1] in metricList:
if key == 'find_min':
# use displacement from the optimum to indicate tolerance
error = 1.0 - np.amin(errorList)
elif key == 'find_max':
error = np.amax(errorList)
converged = True if error <= self.romConvergence else False
break
if error is None:
self.raiseAnError(IOError, "Metric %s used for cross validation can not be handled by the HybridModel." %metricType[1])
    if not converged:
      self.raiseADebug("The current error: ", str(error), " does not meet the given tolerance ", str(self.romConvergence))
    else:
      self.raiseADebug("The current error: ", str(error), " meets the given tolerance ", str(self.romConvergence))
return converged
def checkRomConvergence(self):
"""
This function will check the convergence of all ROMs
@ In, None
@ Out, bool, True if all ROMs are converged
"""
converged = True
for romInfo in self.romsDictionary.values():
if not romInfo['Converged']:
converged = False
if converged:
self.raiseADebug("All ROMs are converged")
return converged
def checkRomValidity(self, kwargs):
"""
This function will check the validity of all roms
@ In, kwargs, dict, is a dictionary that contains the information coming from the sampler,
        a mandatory key is 'SampledVars', which contains a dictionary {'name variable':value}
@ Out, None
"""
allValid = False
for selectionMethod, params in self.validationMethod.items():
if selectionMethod == 'CrowdingDistance':
allValid = self.crowdingDistanceMethod(params, kwargs['SampledVars'])
else:
self.raiseAnError(IOError, "Unknown model selection method ", selectionMethod, " is given!")
if allValid:
self.raiseADebug("ROMs are all valid for given model ", self.modelInstance.name)
return allValid
def crowdingDistanceMethod(self, settingDict, varDict):
"""
This function will check the validity of all roms based on the crowding distance method
@ In, settingDict, dict, stores the setting information for the crowding distance method
@ In, varDict, dict, is a dictionary that contains the information coming from the sampler,
i.e. {'name variable':value}
@ Out, allValid, bool, True if the given sampled point is valid for all roms, otherwise False
"""
allValid = True
for romInfo in self.romsDictionary.values():
valid = False
# generate the data for input parameters
paramsList = romInfo['Instance'].getInitParams()['Features']
trainInput = self._extractInputs(romInfo['Instance'].trainingSet, paramsList)
currentInput = self._extractInputs(varDict, paramsList)
if self.crowdingDistance is None:
self.crowdingDistance = mathUtils.computeCrowdingDistance(trainInput)
sizeCD = len(self.crowdingDistance)
if sizeCD != trainInput.shape[1]:
self.crowdingDistance = self.updateCrowdingDistance(trainInput[:,0:sizeCD], trainInput[:,sizeCD:], self.crowdingDistance)
crowdingDistance = self.updateCrowdingDistance(trainInput, currentInput, self.crowdingDistance)
maxDist = np.amax(crowdingDistance)
minDist = np.amin(crowdingDistance)
if maxDist == minDist:
coeffCD = 1.0
else:
coeffCD = (maxDist - crowdingDistance[-1])/(maxDist - minDist)
self.raiseADebug("Crowding Distance Coefficient: ", coeffCD)
if coeffCD >= settingDict['threshold']:
valid = True
romInfo['Valid'] = valid
if valid:
self.raiseADebug("ROM ",romInfo['Instance'].name, " is valid")
else:
allValid = False
return allValid
def updateCrowdingDistance(self, oldSet, newSet, crowdingDistance):
"""
This function will compute the Crowding distance coefficients among the input parameters
@ In, oldSet, numpy.array, array contains values of input parameters that have been already used
@ In, newSet, numpy.array, array contains values of input parameters that will be used for computing the
@ In, crowdingDistance, numpy.array, the crowding distances for oldSet
@ Out, newCrowdingDistance, numpy.array, the updated crowding distances for both oldSet and newSet
"""
oldSize = oldSet.shape[1]
newSize = newSet.shape[1]
totSize = oldSize + newSize
if oldSize != crowdingDistance.size:
self.raiseAnError(IOError, "The old crowding distances is not match the old data set!")
newCrowdingDistance = np.zeros(totSize)
distMatAppend = np.zeros((oldSize,newSize))
for i in range(oldSize):
for j in range(newSize):
distMatAppend[i,j] = linalg.norm(oldSet[:,i] - newSet[:,j])
distMatNew = mathUtils.computeCrowdingDistance(newSet)
for i in range(oldSize):
newCrowdingDistance[i] = crowdingDistance[i] + np.sum(distMatAppend[i,:])
for i in range(newSize):
newCrowdingDistance[i+oldSize] = distMatNew[i] + np.sum(distMatAppend[:,i])
return newCrowdingDistance
def amIReadyToTrainROM(self):
"""
      This will check the status of the training data object; if the data object
      has been updated, this function will return True
@ In, None
@ Out, ready, bool, is this HybridModel ready to retrain the ROM?
"""
ready = False
newGeneratedTrainingSize = len(self.tempTargetEvaluation) - self.existTrainSize
if newGeneratedTrainingSize > self.romTrainMaxSize:
self.raiseAMessage("Maximum training size is reached, ROMs will not be trained anymore!")
return ready
trainingStepSize = len(self.tempTargetEvaluation) - self.oldTrainingSize
if newGeneratedTrainingSize >= self.romTrainStartSize and trainingStepSize > 0:
ready = True
return ready
def submit(self,myInput,samplerType,jobHandler,**kwargs):
"""
This will submit an individual sample to be evaluated by this model to a
specified jobHandler as a client job. Note, some parameters are needed
by createNewInput and thus descriptions are copied from there.
@ In, myInput, list, the inputs (list) to start from to generate the new
one
@ In, samplerType, string, is the type of sampler that is calling to
generate a new input
@ In, jobHandler, JobHandler instance, the global job handler instance
@ In, **kwargs, dict, is a dictionary that contains the information
        coming from the sampler; a mandatory key is 'SampledVars', which
        contains a dictionary {'name variable':value}
@ Out, None
"""
prefix = kwargs['prefix']
self.counter = prefix
self.tempOutputs['uncollectedJobIds'].append(prefix)
if self.amIReadyToTrainROM():
self.trainRom(samplerType, kwargs)
self.romConverged = self.checkRomConvergence()
if self.romConverged:
self.romValid = self.checkRomValidity(kwargs)
else:
self.romValid = False
if self.romValid:
self.modelIndicator[prefix] = 1
else:
self.modelIndicator[prefix] = 0
kwargs['useROM'] = self.romValid
self.raiseADebug("Submit job with job identifier: {}, Runing ROM: {} ".format(kwargs['prefix'], self.romValid))
HybridModelBase.submit(self,myInput,samplerType,jobHandler,**kwargs)
def _externalRun(self,inRun, jobHandler):
"""
Method that performs the actual run of the hybrid model (separated from run method for parallelization purposes)
@ In, inRun, tuple, tuple of Inputs (inRun[0] actual input, inRun[1] type of sampler,
inRun[2] dictionary that contains information coming from sampler)
@ In, jobHandler, instance, instance of jobHandler
@ Out, exportDict, dict, dict of results from this hybrid model
"""
self.raiseADebug("External Run")
originalInput = inRun[0]
samplerType = inRun[1]
inputKwargs = inRun[2]
identifier = inputKwargs.pop('prefix')
useROM = inputKwargs.pop('useROM')
uniqueHandler = self.name + identifier
if useROM:
# run roms
exportDict = {}
self.raiseADebug("Switch to ROMs")
# submit all the roms
for romName, romInfo in self.romsDictionary.items():
inputKwargs[romName]['prefix'] = romName+utils.returnIdSeparator()+identifier
nextRom = False
while not nextRom:
if jobHandler.availability() > 0:
romInfo['Instance'].submit(originalInput, samplerType, jobHandler, **inputKwargs[romName])
self.raiseADebug("Job ", romName, " with identifier ", identifier, " is submitted")
nextRom = True
else:
time.sleep(self.sleepTime)
# collect the outputs from the runs of ROMs
while True:
finishedJobs = jobHandler.getFinished(uniqueHandler=uniqueHandler)
for finishedRun in finishedJobs:
self.raiseADebug("collect job with identifier ", identifier)
evaluation = finishedRun.getEvaluation()
if isinstance(evaluation, rerror):
self.raiseAnError(RuntimeError, "The job identified by "+finishedRun.identifier+" failed!")
# collect output in temporary data object
tempExportDict = evaluation
exportDict = self._mergeDict(exportDict, tempExportDict)
if jobHandler.areTheseJobsFinished(uniqueHandler=uniqueHandler):
self.raiseADebug("Jobs with uniqueHandler ", uniqueHandler, "are collected!")
break
time.sleep(self.sleepTime)
exportDict['prefix'] = identifier
else:
# run model
inputKwargs['prefix'] = self.modelInstance.name+utils.returnIdSeparator()+identifier
inputKwargs['uniqueHandler'] = self.name + identifier
moveOn = False
while not moveOn:
if jobHandler.availability() > 0:
self.modelInstance.submit(originalInput, samplerType, jobHandler, **inputKwargs)
self.raiseADebug("Job submitted for model ", self.modelInstance.name, " with identifier ", identifier)
moveOn = True
else:
time.sleep(self.sleepTime)
while not jobHandler.isThisJobFinished(self.modelInstance.name+utils.returnIdSeparator()+identifier):
time.sleep(self.sleepTime)
self.raiseADebug("Job finished ", self.modelInstance.name, " with identifier ", identifier)
finishedRun = jobHandler.getFinished(jobIdentifier = inputKwargs['prefix'], uniqueHandler = uniqueHandler)
evaluation = finishedRun[0].getEvaluation()
if isinstance(evaluation, rerror):
self.raiseAnError(RuntimeError, "The model "+self.modelInstance.name+" identified by "+finishedRun[0].identifier+" failed!")
# collect output in temporary data object
exportDict = evaluation
self.raiseADebug("Create exportDict")
# used in the collectOutput
exportDict['useROM'] = useROM
return exportDict
def collectOutput(self,finishedJob,output):
"""
Method that collects the outputs from the previous run
@ In, finishedJob, ClientRunner object, instance of the run just finished
@ In, output, "DataObjects" object, output where the results of the calculation needs to be stored
@ Out, None
"""
evaluation = finishedJob.getEvaluation()
useROM = evaluation['useROM']
try:
jobIndex = self.tempOutputs['uncollectedJobIds'].index(finishedJob.identifier)
self.tempOutputs['uncollectedJobIds'].pop(jobIndex)
except ValueError:
jobIndex = None
if jobIndex is not None and not useROM:
self.tempTargetEvaluation.addRealization(evaluation)
self.raiseADebug("ROM is invalid, collect ouptuts of Model with job identifier: {}".format(finishedJob.identifier))
HybridModelBase.collectOutput(self, finishedJob, output)
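# --- Illustrative sketch (not part of the class above) ---
# The bookkeeping in updateCrowdingDistance assumes that a sample's crowding
# distance is the sum of its Euclidean distances to every other sample (the
# samples are the columns of the array). The helper below restates that
# assumed definition for a toy data set; mathUtils.computeCrowdingDistance is
# the real implementation.
def _demoCrowdingDistance(points):
  nSamples = points.shape[1]
  return np.array([sum(linalg.norm(points[:, i] - points[:, j])
                       for j in range(nSamples) if j != i)
                   for i in range(nSamples)])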
|
import _random
import unittest
from iptest import run_test
class _RandomTest(unittest.TestCase):
def test_getrandbits(self):
#the argument is a random int value
rand = _random.Random()
for i1 in xrange(1, 1984, 6):
self.assertTrue(rand.getrandbits(i1) < (2**i1))
self.assertTrue(rand.getrandbits(i1) < (2**i1))
self.assertTrue(rand.getrandbits(i1+1) < (2**(i1+1)))
self.assertTrue(rand.getrandbits(i1+1) < (2**(i1+1)))
temp_list = [ 63, #maxvalue
32, #bits less than 32
50, #bits greater than 32 and less than 64
100 #bits greater than 64
]
for x in temp_list:
self.assertTrue(rand.getrandbits(x) < (2**x))
rand = _random.Random()
self.assertRaises(ValueError, rand.getrandbits, 0)
self.assertRaises(ValueError, rand.getrandbits, -50)
# might raise OverflowError, might not, but shouldn't raise anything else.
try:
rand.getrandbits(2147483647)
except OverflowError:
pass
def test_jumpahead(self):
rand = _random.Random()
old_state = rand.getstate()
rand.jumpahead(100)
self.assertTrue(old_state != rand.getstate())
def test_random(self):
rand = _random.Random()
result = rand.random()
flag = result<1.0 and result >= 0.0
self.assertTrue(flag,
"Result is not the value as expected,expected the result between 0.0 to 1.0,but the actual is not")
def test_setstate(self):
        # state is an opaque object returned by getstate()
random = _random.Random()
state1 = random.getstate()
random.setstate(state1)
state2 = random.getstate()
self.assertEqual(state1,state2)
random.jumpahead(1)
self.assertTrue(state1 != random.getstate())
random.setstate(state1)
self.assertEqual(state1, random.getstate())
        # state is an int object
a = 1
self.assertRaises(Exception,random.setstate,a)
#state is a string object
b = "stete"
self.assertRaises(Exception,random.setstate,b)
#state is a random object
c = _random.Random()
self.assertRaises(Exception,random.setstate,c)
def test_getstate(self):
random = _random.Random()
a = random.getstate()
self.assertEqual(a, random.getstate())
i = 2
random = _random.Random(i)
b = random.getstate()
self.assertEqual(b, random.getstate())
str = "state"
random = _random.Random(str)
c = random.getstate()
self.assertEqual(c, random.getstate())
def test_seed(self):
i= 2
random = _random.Random(i)
a = random.getstate()
# parameter is None
random.seed()
        b = random.getstate()
if a == b:
self.fail("seed() method can't change the current internal state of the generator.")
# parameter is int
x = 1
random.seed(x)
c = random.getstate()
        if b == c or a == c:
            self.fail("seed(x) method can't change the current internal state of the generator when x is int type.")
# parameter is string
x = "seed"
random.seed(x)
d = random.getstate()
        if d == c or b == d or a == d:
            self.fail("seed(x) method can't change the current internal state of the generator when x is string type.")
run_test(__name__)
|
'''Unit tests for the 'grit build' tool.
'''
import os
import sys
import tempfile
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
from grit import util
from grit.tool import build
class BuildUnittest(unittest.TestCase):
def testFindTranslationsWithSubstitutions(self):
# This is a regression test; we had a bug where GRIT would fail to find
# messages with substitutions e.g. "Hello [IDS_USER]" where IDS_USER is
# another <message>.
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
builder.Run(DummyOpts(), ['-o', output_dir])
def testGenerateDepFile(self):
output_dir = tempfile.mkdtemp()
builder = build.RcBuilder()
class DummyOpts(object):
def __init__(self):
self.input = util.PathFromRoot('grit/testdata/substitute.grd')
self.verbose = False
self.extra_verbose = False
expected_dep_file = os.path.join(output_dir, 'substitute.grd.d')
builder.Run(DummyOpts(), ['-o', output_dir,
'--depdir', output_dir,
'--depfile', expected_dep_file])
self.failUnless(os.path.isfile(expected_dep_file))
with open(expected_dep_file) as f:
line = f.readline()
(dep_file_name, deps_string) = line.split(': ')
deps = deps_string.split(' ')
self.failUnlessEqual(os.path.abspath(expected_dep_file),
os.path.abspath(os.path.join(output_dir, dep_file_name)),
"depfile should refer to itself as the depended upon file")
self.failUnlessEqual(1, len(deps))
self.failUnlessEqual(deps[0],
util.PathFromRoot('grit/testdata/substitute.xmb'))
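    # For reference, the depfile checked above uses the usual make-style
    # "target: prerequisites" syntax, e.g. (paths illustrative):
    #   substitute.grd.d: grit/testdata/substitute.xmb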
if __name__ == '__main__':
unittest.main()
|
"""
App that can be used to generate errors on the Python and JS side. These
errors should show tracebacks in the correct manner (and not crash the app
as in #164).
"""
from flexx import app, event, ui
class ErrorsPy(app.PyComponent):
def init(self):
self.js = ErrorsJS(self)
@event.action
def do_something_stupid(self):
self.raise_error()
def raise_error(self):
raise RuntimeError('Deliberate error')
@event.reaction('!js.b4_pointer_click')
def error_in_Py_reaction(self, *events):
self.raise_error()
class ErrorsJS(ui.Widget):
def init(self, pycomponent):
self.py = pycomponent
with ui.VBox():
self.b1 = ui.Button(text='Raise error in JS action')
self.b2 = ui.Button(text='Raise error in JS reaction')
self.b3 = ui.Button(text='Raise error in Python action')
self.b4 = ui.Button(text='Raise error in Python reaction')
ui.Widget(flex=1) # spacer
@event.action
def do_something_stupid(self):
        self.raise_error()
def raise_error(self):
raise RuntimeError('Deliberate error')
# Handlers for four buttons
@event.reaction('b1.pointer_click')
def error_in_JS_action(self, *events):
self.do_something_stupid()
@event.reaction('b2.pointer_click')
def error_in_JS_reaction(self, *events):
self.raise_error()
@event.reaction('b3.pointer_click')
def error_in_Py_action(self, *events):
self.py.do_something_stupid()
@event.reaction('b4.pointer_click')
def error_in_Py_reaction(self, *events):
self.emit('b4_pointer_click')
if __name__ == '__main__':
m = app.launch(ErrorsPy, 'browser')
app.run()
|
import os
os.environ['REDASH_REDIS_URL'] = "redis://localhost:6379/5"
os.environ['REDASH_CELERY_BROKER'] = "redis://localhost:6379/6"
os.environ['REDASH_GOOGLE_CLIENT_ID'] = "dummy"
os.environ['REDASH_GOOGLE_CLIENT_SECRET'] = "dummy"
os.environ['REDASH_MULTI_ORG'] = "true"
import logging
from unittest import TestCase
import datetime
from redash import settings
from factories import Factory
settings.DATABASE_CONFIG = {
'name': 'circle_test',
'threadlocals': True
}
from redash import redis_connection
import redash.models
from tests.handlers import make_request
logging.disable("INFO")
logging.getLogger("metrics").setLevel("ERROR")
logging.getLogger('peewee').setLevel(logging.INFO)
class BaseTestCase(TestCase):
def setUp(self):
redash.models.create_db(True, True)
self.factory = Factory()
def tearDown(self):
redash.models.db.close_db(None)
redash.models.create_db(False, True)
redis_connection.flushdb()
def make_request(self, method, path, org=None, user=None, data=None, is_json=True):
if user is None:
user = self.factory.user
if org is None:
org = self.factory.org
if org is not False:
path = "/{}{}".format(org.slug, path)
return make_request(method, path, user, data, is_json)
def assertResponseEqual(self, expected, actual):
for k, v in expected.iteritems():
if isinstance(v, datetime.datetime) or isinstance(actual[k], datetime.datetime):
continue
if isinstance(v, list):
continue
if isinstance(v, dict):
self.assertResponseEqual(v, actual[k])
continue
self.assertEqual(v, actual[k], "{} not equal (expected: {}, actual: {}).".format(k, v, actual[k]))
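# --- Illustrative note (not part of the original module) ---
# assertResponseEqual above compares nested dicts while skipping datetime and
# list values, so these hypothetical payloads would compare as equal:
#   expected = {'id': 1, 'created_at': datetime.datetime(2014, 1, 1), 'tags': ['a']}
#   actual   = {'id': 1, 'created_at': datetime.datetime(2015, 2, 2), 'tags': ['b']}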
|
import functools
import json
from django.conf import settings
from django.db import transaction
import commonware.log
import happyforms
from piston.handler import AnonymousBaseHandler, BaseHandler
from piston.utils import rc
from tower import ugettext as _
import waffle
import amo
from access import acl
from addons.forms import AddonForm
from addons.models import Addon, AddonUser
from amo.utils import paginate
from devhub.forms import LicenseForm, NewManifestForm
from devhub import tasks
from files.models import FileUpload, Platform
from users.models import UserProfile
from versions.forms import XPIForm
from versions.models import Version, ApplicationsVersions
from mkt.webapps.models import Webapp
log = commonware.log.getLogger('z.api')
def check_addon_and_version(f):
"""
    Decorator that checks that an addon (and, if given, a version) exists and
    belongs to the request user.
"""
@functools.wraps(f)
def wrapper(*args, **kw):
request = args[1]
addon_id = kw['addon_id']
try:
addon = Addon.objects.id_or_slug(addon_id).get()
except:
return rc.NOT_HERE
if not acl.check_addon_ownership(request, addon, viewer=True):
return rc.FORBIDDEN
if 'version_id' in kw:
try:
version = Version.objects.get(addon=addon, pk=kw['version_id'])
except Version.DoesNotExist:
return rc.NOT_HERE
return f(*args, addon=addon, version=version)
else:
return f(*args, addon=addon)
return wrapper
def _form_error(f):
resp = rc.BAD_REQUEST
error = ','.join(['%s (%s)' % (v[0], k) for k, v in f.errors.iteritems()])
resp.write(': ' +
# L10n: {0} is comma separated data errors.
_(u'Invalid data provided: {0}').format(error))
log.debug(error)
return resp
def _xpi_form_error(f, request):
resp = rc.BAD_REQUEST
error = ','.join([e[0] for e in f.errors.values()])
resp.write(': ' + _('Add-on did not validate: %s') % error)
log.debug('Add-on did not validate (%s) for %s'
% (error, request.amo_user))
return resp
class UserHandler(BaseHandler):
allowed_methods = ('GET',)
model = UserProfile
fields = ('email', 'id', 'username', 'display_name', 'homepage',
'created', 'modified', 'location', 'occupation')
def read(self, request):
email = request.GET.get('email')
if email:
if acl.action_allowed(request, 'API.Users', 'View'):
try:
return UserProfile.objects.get(email=email, deleted=False)
except UserProfile.DoesNotExist:
return rc.NOT_FOUND
else:
return rc.FORBIDDEN
return request.amo_user
class AddonsHandler(BaseHandler):
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
model = Addon
fields = ('id', 'name', 'eula', 'guid', 'status', 'slug')
exclude = ('highest_status', 'icon_type')
# Custom handler so translated text doesn't look weird
@classmethod
def name(cls, addon):
return addon.name.localized_string if addon.name else ''
# We need multiple validation, so don't use @validate decorators.
@transaction.commit_on_success
def create(self, request):
new_file_form = XPIForm(request, request.POST, request.FILES)
if not new_file_form.is_valid():
return _xpi_form_error(new_file_form, request)
# License can be optional.
license = None
if 'builtin' in request.POST:
license_form = LicenseForm(request.POST)
if not license_form.is_valid():
return _form_error(license_form)
license = license_form.save()
addon = new_file_form.create_addon(license=license)
if not license:
# If there is no license, we push you to step
# 5 so that you can pick one.
addon.submitstep_set.create(step=5)
return addon
@check_addon_and_version
def update(self, request, addon):
form = AddonForm(request.PUT, instance=addon)
if not form.is_valid():
return _form_error(form)
a = form.save()
return a
@check_addon_and_version
def delete(self, request, addon):
addon.delete(msg='Deleted via API')
return rc.DELETED
def read(self, request, addon_id=None):
"""
Returns authors who can update an addon (not Viewer role) for addons
that have not been admin disabled. Optionally provide an addon id.
"""
if not request.user.is_authenticated():
return rc.BAD_REQUEST
ids = (AddonUser.objects.values_list('addon_id', flat=True)
.filter(user=request.amo_user,
role__in=[amo.AUTHOR_ROLE_DEV,
amo.AUTHOR_ROLE_OWNER]))
qs = (Addon.objects.filter(id__in=ids)
.exclude(status=amo.STATUS_DISABLED)
.no_transforms())
if addon_id:
try:
return qs.get(id=addon_id)
except Addon.DoesNotExist:
                return rc.NOT_HERE
paginator = paginate(request, qs)
return {'objects': paginator.object_list,
'num_pages': paginator.paginator.num_pages,
'count': paginator.paginator.count}
class AppsHandler(AddonsHandler):
allowed_methods = ('GET', 'POST')
model = Webapp
fields = ('id', 'name', 'manifest_url', 'status', 'app_slug')
exclude = ('highest_status', 'icon_type')
@transaction.commit_on_success
def create(self, request):
form = NewManifestForm(request.POST)
if form.is_valid():
# This feels like an awful lot of work.
# But first upload the file and do the validation.
upload = FileUpload.objects.create()
tasks.fetch_manifest(form.cleaned_data['manifest'], upload.pk)
            # We must re-fetch the object here since the task above has
            # saved changes to it.
upload = FileUpload.objects.get(pk=upload.pk)
# Check it validated correctly.
if settings.VALIDATE_ADDONS:
validation = json.loads(upload.validation)
if validation['errors']:
response = rc.BAD_REQUEST
response.write(validation)
return response
# Fetch the addon, the icon and set the user.
addon = Addon.from_upload(upload,
[Platform.objects.get(id=amo.PLATFORM_ALL.id)])
if addon.has_icon_in_manifest():
tasks.fetch_icon(addon)
AddonUser(addon=addon, user=request.amo_user).save()
addon.update(status=amo.WEBAPPS_UNREVIEWED_STATUS)
else:
return _form_error(form)
return addon
class ApplicationsVersionsHandler(AnonymousBaseHandler):
model = ApplicationsVersions
allowed_methods = ('GET', )
fields = ('application', 'max', 'min')
@classmethod
def application(cls, av):
return unicode(av.application)
@classmethod
def max(cls, av):
return av.max.version
@classmethod
def min(cls, av):
return av.min.version
class BaseVersionHandler(object):
# Custom handler so translated text doesn't look weird
@classmethod
def release_notes(cls, version):
if version.releasenotes:
return version.releasenotes.localized_string
@classmethod
def license(cls, version):
if version.license:
return unicode(version.license)
@classmethod
def current(cls, version):
return (version.id == version.addon._current_version_id)
class AnonymousVersionsHandler(AnonymousBaseHandler, BaseVersionHandler):
model = Version
allowed_methods = ('GET',)
fields = ('id', 'addon_id', 'created', 'release_notes', 'version',
'license', 'current', 'apps')
def read(self, request, addon_id, version_id=None):
if version_id:
try:
return Version.objects.get(pk=version_id)
except:
return rc.NOT_HERE
try:
addon = Addon.objects.id_or_slug(addon_id).get()
except:
return rc.NOT_HERE
return addon.versions.all()
class VersionsHandler(BaseHandler, BaseVersionHandler):
allowed_methods = ('POST', 'PUT', 'DELETE', 'GET')
model = Version
fields = AnonymousVersionsHandler.fields + ('statuses',)
exclude = ('approvalnotes', )
anonymous = AnonymousVersionsHandler
@check_addon_and_version
def create(self, request, addon):
new_file_form = XPIForm(request, request.POST, request.FILES,
addon=addon)
if not new_file_form.is_valid():
return _xpi_form_error(new_file_form, request)
license = None
if 'builtin' in request.POST:
license_form = LicenseForm(request.POST)
if not license_form.is_valid():
return _form_error(license_form)
license = license_form.save()
v = new_file_form.create_version(license=license)
return v
@check_addon_and_version
def update(self, request, addon, version):
new_file_form = XPIForm(request, request.PUT, request.FILES,
version=version)
if not new_file_form.is_valid():
return _xpi_form_error(new_file_form, request)
license = None
if 'builtin' in request.POST:
license_form = LicenseForm(request.POST)
if not license_form.is_valid():
return _form_error(license_form)
license = license_form.save()
v = new_file_form.update_version(license)
return v
@check_addon_and_version
def delete(self, request, addon, version):
version.delete()
return rc.DELETED
@check_addon_and_version
def read(self, request, addon, version=None):
return version if version else addon.versions.all()
class AMOBaseHandler(BaseHandler):
"""
A generic Base Handler that automates create, delete, read and update.
For list, we use a pagination handler rather than just returning all.
For list, if an id is given, only one object is returned.
For delete and update the id of the record is required.
"""
def get_form(self, *args, **kw):
class Form(happyforms.ModelForm):
class Meta:
model = self.model
return Form(*args, **kw)
def delete(self, request, id):
try:
return self.model.objects.get(pk=id).delete()
except self.model.DoesNotExist:
return rc.NOT_HERE
def create(self, request):
form = self.get_form(request.POST)
if form.is_valid():
return form.save()
return _form_error(form)
def read(self, request, id=None):
if id:
try:
return self.model.objects.get(pk=id)
except self.model.DoesNotExist:
return rc.NOT_HERE
else:
paginator = paginate(request, self.model.objects.all())
return {'objects': paginator.object_list,
'num_pages': paginator.paginator.num_pages,
'count': paginator.paginator.count}
def update(self, request, id):
try:
obj = self.model.objects.get(pk=id)
except self.model.DoesNotExist:
return rc.NOT_HERE
form = self.get_form(request.POST, instance=obj)
if form.is_valid():
form.save()
return rc.ALL_OK
return _form_error(form)
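class PlatformHandler(AMOBaseHandler):
    """
    A minimal sketch (added for illustration, not part of the original API)
    of how AMOBaseHandler is meant to be reused: set ``model`` and the
    generic create/read/update/delete above do the rest. ``Platform`` is
    simply a model already imported in this module; any model works.
    """
    model = Platform
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')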
|
import textwrap
import sys
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the '3d' projection
lib_path = os.path.abspath(r'E:\Tamuz\Utils\RobotQAUtils')
sys.path.append(lib_path)
from Utils.RobotQAUtils.plateReader import *
from Utils.RobotQAUtils.classes import *
width = 0.35
def printRobotDeviationPercents(robotDeviationPercentTuppleUp,robotDeviationPercentTuppleDown,title = None):
width = 0.35
    fig = plt.figure()
    ax = fig.add_subplot(111)
rowLabels =plateReader.createRowLabels(len(robotDeviationPercentTuppleUp),'exp')
rowLabels=[textwrap.fill(text,15) for text in rowLabels]
plt.xticks(range(len(robotDeviationPercentTuppleUp)),rowLabels)
rects1 = ax.bar(range(len(robotDeviationPercentTuppleUp)), robotDeviationPercentTuppleUp, width, color='y',align='center')
rects2 = ax.bar([x+width for x in range(len(robotDeviationPercentTuppleDown))], robotDeviationPercentTuppleDown, width, color='r',align='center')
ax.set_ylabel('percent')
    ax.set_xlabel('experiments')
if not title:
ax.set_title('deviation percent')
else:
ax.set_title(title)
ax.legend( (rects1[0], rects2[0]), ('up', 'down') )
plt.show()
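def demoPrintRobotDeviationPercents():
    '''Hedged usage sketch for the 2D plot above: the tuples are invented
    per-experiment deviation percents, shown only to illustrate the call
    shape. Assumes the RobotQAUtils helpers imported above are available.'''
    up = (1.2, 3.4, 2.1)
    down = (0.8, 2.5, 1.7)
    printRobotDeviationPercents(up, down, title='demo: robot deviation')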
def printRobotDeviationPercentsLiquidClass3D(robotDeviationPercentTuppleUp,robotDeviationPercentTuppleDown,title = None ,ds=None,legendLabels = None):
    '''
    X: the experiment number
    Y: the liquid class name
    Z: the average of the deviation percents above and below the robot mean; also shows the volume'''
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
liquidClasses = plateReader.getExpLiquidClasses(ds)
#remove duplicates from liquid classes
newLiquidClasses = []
for c in liquidClasses:
if c not in newLiquidClasses:
newLiquidClasses.append(c)
liquidClasses = newLiquidClasses
rowLabels =plateReader.createRowLabels(len(robotDeviationPercentTuppleUp),'')
rowLabels=[textwrap.fill(text,15) for text in rowLabels]
plt.xticks(range(len(robotDeviationPercentTuppleUp)),rowLabels)
yLabels = []
for idx,l in enumerate(liquidClasses):# iterating through all liquid classes
#creating labels for y axis
xsUp = range(len(robotDeviationPercentTuppleUp))
xsDown = [x+width for x in range(len(robotDeviationPercentTuppleDown))]
xsVol = [x+width+0.2 for x in range(len(robotDeviationPercentTuppleDown))]
ysUp = []
ysDown = []
ysVol = []
        for i, d in enumerate(ds):#iterating through all experiments to see which has the same liquid class as l.
            if l == d.getLiquidClass():
                ysUp.append(robotDeviationPercentTuppleUp[i])
                ysDown.append(robotDeviationPercentTuppleDown[i])
                ysVol.append(d.getManualColorVolume())#appending the volume of the liquid class to show in the bar
else:
ysUp.append(0)
ysDown.append(0)
ysVol.append(0)
csUp = ['r'] * len(ysUp)
csDown = ['y'] * len(ysDown)
csVol = ['b']*len(ysVol)
upBar = ax.bar(xsUp, ysUp, zs=idx, zdir='y', color=csUp,width=width,align='center')
downBar = ax.bar(xsDown, ysDown, zs=idx, zdir='y', color=csDown,width=width,align='center')
volumeBar = ax.bar(xsVol, ysVol, zs=idx, zdir='y', color=csVol,width=0.1,align='center')
    ax.set_xlabel('X - experiment number')
ax.set_ylabel('Y - liquid class')
ax.set_zlabel('Z - deviation + volume')
if not title:
ax.set_title('deviation percent')
else:
ax.set_title(title)
red_proxy = plt.Rectangle((0, 0), 1, 1, fc="r")
yellow_proxy = plt.Rectangle((0, 0), 1, 1, fc="y")
blue_proxy = plt.Rectangle((0, 0), 1, 1, fc="b")
if legendLabels is None:
ax.legend([red_proxy,yellow_proxy,blue_proxy],['deviation percent above average','deviation percent below average','volume of color in exp'])
else:
ax.legend([red_proxy,yellow_proxy,blue_proxy],legendLabels)
yLabels=[textwrap.fill(text,15) for text in liquidClasses]
plt.yticks(range(len(yLabels)),yLabels)
def printRobotCompareToManualDeviationPercentsLiquidClass3D(robotDeviationPercentTupple,manualDeviationPercentTupple,title = None ,ds=None , legendRectangles = None,legendLabels = None):
    '''
    gets a tuple of robot deviation percent averages, a tuple of manual deviation percent averages, and a list of dilutionStatistics in the same order as the tuples.
    X: the experiment number
    Y: the liquid class name
    Z: the average of the robot and manual deviation percents; also shows the volume'''
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
liquidClasses = plateReader.getExpLiquidClasses(ds)
#remove duplicates from liquid classes
newLiquidClasses = []
for c in liquidClasses:
if c not in newLiquidClasses:
newLiquidClasses.append(c)
liquidClasses = newLiquidClasses
    rowLabels =plateReader.createRowLabels(len(robotDeviationPercentTupple),'')
    rowLabels=[textwrap.fill(text,15) for text in rowLabels]
    plt.xticks(range(len(robotDeviationPercentTupple)),rowLabels)
    yLabels = []
    for idx,l in enumerate(liquidClasses):# iterating through all liquid classes
        #creating labels for y axis
        xsUp = range(len(robotDeviationPercentTupple))
        xsDown = [x+width for x in range(len(manualDeviationPercentTupple))]
        xsVol = [x+width+0.2 for x in range(len(manualDeviationPercentTupple))]
        ysUp = []
        ysDown = []
        ysVol = []
        for i, d in enumerate(ds):#iterating through all experiments to see which has the same liquid class as l.
            if l == d.getLiquidClass():
                ysUp.append(robotDeviationPercentTupple[i])
                ysDown.append(manualDeviationPercentTupple[i])
                ysVol.append(d.getManualColorVolume())#appending the volume of the liquid class to show in the bar
            else:
                ysUp.append(0)
                ysDown.append(0)
                ysVol.append(0)
        csUp = ['r'] * len(ysUp)
        csDown = ['y'] * len(ysDown)
        csVol = ['b']*len(ysVol)
        upBar = ax.bar(xsUp, ysUp, zs=idx, zdir='y', color=csUp,width=width,align='center')
        downBar = ax.bar(xsDown, ysDown, zs=idx, zdir='y', color=csDown,width=width,align='center')
        volumeBar = ax.bar(xsVol, ysVol, zs=idx, zdir='y', color=csVol,width=0.1,align='center')
    ax.set_xlabel('X - experiment number')
ax.set_ylabel('Y - liquid class')
ax.set_zlabel('Z - deviation + volume')
if not title:
ax.set_title('deviation percent')
else:
ax.set_title(title)
red_proxy = plt.Rectangle((0, 0), 1, 1, fc="r")
yellow_proxy = plt.Rectangle((0, 0), 1, 1, fc="y")
blue_proxy = plt.Rectangle((0, 0), 1, 1, fc="b")
if legendLabels is None:
        ax.legend([red_proxy,yellow_proxy,blue_proxy],['robot deviation percent','manual deviation percent','volume of color in exp'])
else:
ax.legend([red_proxy,yellow_proxy,blue_proxy],legendLabels)
yLabels=[textwrap.fill(text,15) for text in liquidClasses]
plt.yticks(range(len(yLabels)),yLabels)
def printRobotDeviationPercentsVolume3D(robotDeviationPercentTuppleUp,robotDeviationPercentTuppleDown,title = None ,ds=None):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
rowLabels =plateReader.createRowLabels(len(robotDeviationPercentTuppleUp),'')
rowLabels=[textwrap.fill(text,15) for text in rowLabels]
plt.xticks(range(len(robotDeviationPercentTuppleUp)),rowLabels)
colorVolumes = plateReader.getExpVolumes(ds)
for idx,vol in enumerate(colorVolumes):
xsUp = range(len(robotDeviationPercentTuppleUp))
xsDown = [x+width for x in range(len(robotDeviationPercentTuppleDown))]
ysUp = []
ysDown = []
for i, d in enumerate(ds):
if vol == d.getManualColorVolume():
ysUp.append(robotDeviationPercentTuppleUp[i])
ysDown.append(robotDeviationPercentTuppleDown[i])
else:
ysUp.append(0)
ysDown.append(0)
csUp = ['r'] * len(ysUp)
csDown = ['y'] * len(ysDown)
upBar = ax.bar(xsUp, ysUp, zs=vol, zdir='y', color=csUp,width=width,align='center')
downBar = ax.bar(xsDown, ysDown, zs=vol, zdir='y', color=csDown,width=width,align='center')
    ax.set_xlabel('X - experiment number')
ax.set_ylabel('Y - volume')
ax.set_zlabel('Z - deviation')
if not title:
ax.set_title('deviation percent')
else:
ax.set_title(title)
    red_proxy = plt.Rectangle((0, 0), 1, 1, fc="r")
    yellow_proxy = plt.Rectangle((0, 0), 1, 1, fc="y")
    ax.legend([red_proxy,yellow_proxy],['deviation percent above average','deviation percent below average'])
|
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
env.hosts = ["merchant.agiliq.com"]
env.user = "agiliq"
def describe():
print "This is a fab file to automate deployments for the merchant server."
def deploy():
with cd("/home/agiliq/Work/merchant"):
run("git pull")
with cd("/home/agiliq/Work/merchant/example"):
run("python manage.py validate")
run("python manage.py syncdb")
# run("merchant-restart")
run('/home/agiliq/scripts/restart_merchant.sh')
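# Usage, from a checkout containing this fabfile (fabric 1.x CLI):
#
#   $ fab describe
#   $ fab deploy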
|
import pickle
import re
from traceback import format_exception
import pytest
from jinja2 import ChoiceLoader
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2 import TemplateSyntaxError
@pytest.fixture
def fs_env(filesystem_loader):
"""returns a new environment."""
return Environment(loader=filesystem_loader)
class TestDebug:
def assert_traceback_matches(self, callback, expected_tb):
with pytest.raises(Exception) as exc_info:
callback()
tb = format_exception(exc_info.type, exc_info.value, exc_info.tb)
m = re.search(expected_tb.strip(), "".join(tb))
assert (
m is not None
), "Traceback did not match:\n\n{''.join(tb)}\nexpected:\n{expected_tb}"
def test_runtime_error(self, fs_env):
def test():
tmpl.render(fail=lambda: 1 / 0)
tmpl = fs_env.get_template("broken.html")
self.assert_traceback_matches(
test,
r"""
File ".*?broken.html", line 2, in (top-level template code|<module>)
\{\{ fail\(\) \}\}(
\^{12})?
File ".*debug?.pyc?", line \d+, in <lambda>
tmpl\.render\(fail=lambda: 1 / 0\)(
~~\^~~)?
ZeroDivisionError: (int(eger)? )?division (or modulo )?by zero
""",
)
def test_syntax_error(self, fs_env):
# The trailing .*? is for PyPy 2 and 3, which don't seem to
# clear the exception's original traceback, leaving the syntax
# error in the middle of other compiler frames.
self.assert_traceback_matches(
lambda: fs_env.get_template("syntaxerror.html"),
"""(?sm)
File ".*?syntaxerror.html", line 4, in (template|<module>)
\\{% endif %\\}.*?
(jinja2\\.exceptions\\.)?TemplateSyntaxError: Encountered unknown tag 'endif'. Jinja \
was looking for the following tags: 'endfor' or 'else'. The innermost block that needs \
to be closed is 'for'.
""",
)
def test_regular_syntax_error(self, fs_env):
def test():
raise TemplateSyntaxError("wtf", 42)
self.assert_traceback_matches(
test,
r"""
File ".*debug.pyc?", line \d+, in test
raise TemplateSyntaxError\("wtf", 42\)(
\^{36})?
(jinja2\.exceptions\.)?TemplateSyntaxError: wtf
line 42""",
)
def test_pickleable_syntax_error(self, fs_env):
original = TemplateSyntaxError("bad template", 42, "test", "test.txt")
unpickled = pickle.loads(pickle.dumps(original))
assert str(original) == str(unpickled)
assert original.name == unpickled.name
def test_include_syntax_error_source(self, filesystem_loader):
e = Environment(
loader=ChoiceLoader(
[
filesystem_loader,
DictLoader({"inc": "a\n{% include 'syntaxerror.html' %}\nb"}),
]
)
)
t = e.get_template("inc")
with pytest.raises(TemplateSyntaxError) as exc_info:
t.render()
assert exc_info.value.source is not None
def test_local_extraction(self):
from jinja2.debug import get_template_locals
from jinja2.runtime import missing
locals = get_template_locals(
{
"l_0_foo": 42,
"l_1_foo": 23,
"l_2_foo": 13,
"l_0_bar": 99,
"l_1_bar": missing,
"l_0_baz": missing,
}
)
assert locals == {"foo": 13, "bar": 99}
def test_get_corresponding_lineno_traceback(self, fs_env):
tmpl = fs_env.get_template("test.html")
assert tmpl.get_corresponding_lineno(1) == 1
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Find-TrustedDocuments',
# list of one or more authors for the module
'Author': ['@jamcut'],
# more verbose multi-line description of the module
'Description': ('This module will enumerate the appropriate registry '
'keys to determine what, if any, trusted documents '
'exist on the host. It will also enumerate trusted locations.'
),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
# list of any references/other comments
'Comments': [
'Original .ps1 file',
'https://github.com/jamcut/one-offs/blob/master/Find-TrustedDocuments.ps1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to enumerate trusted documents from.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
def generate(self):
# the PowerShell script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/host/Find-TrustedDocuments.ps1"
try:
f = open(moduleSource, 'r')
        except Exception:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Find-TrustedDocuments"
return script
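# Hedged usage sketch (illustration only): how the framework might drive
# this module. The agent name below is a placeholder; in Empire the menu
# object supplies real listeners/agent handlers before generate() is called.
#
#   module = Module(main_menu)
#   module.options['Agent']['Value'] = 'K2KXH3C5'
#   powershell = module.generate()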
|
import sys
import copy
import logging
from StringIO import StringIO
from django.utils import six
import django
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned,\
FieldError
from django.db import models
from django.db.models import signals
from django.db.models.options import Options
from django.db.models.loading import register_models, get_model
from django.db.models.base import ModelBase, subclass_exception, \
get_absolute_url, method_get_order, method_set_order
from django.db.models.fields.related import (OneToOneField, add_lazy_relation)
from django.utils.functional import curry
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Deserializer as PythonDeserializer, _get_model
from functools import update_wrapper
from django.utils.encoding import force_unicode, smart_unicode
from rest_framework.parsers import JSONParser, XMLParser, YAMLParser
from rest_framework.renderers import JSONRenderer, XMLRenderer, YAMLRenderer
from restkit import Resource, RequestFailed, ResourceNotFound
from django_roa.db import get_roa_headers
from django_roa.db.exceptions import ROAException
logger = logging.getLogger("django_roa")
DJANGO_LT_1_7 = django.VERSION[:2] < (1, 7)
DJANGO_GT_1_4 = django.VERSION[:2] > (1, 4)
ROA_ARGS_NAMES_MAPPING = getattr(settings, 'ROA_ARGS_NAMES_MAPPING', {})
ROA_FORMAT = getattr(settings, 'ROA_FORMAT', 'json')
ROA_FILTERS = getattr(settings, 'ROA_FILTERS', {})
ROA_MODEL_NAME_MAPPING = getattr(settings, 'ROA_MODEL_NAME_MAPPING', [])
ROA_MODEL_CREATE_MAPPING = getattr(settings, 'ROA_MODEL_CREATE_MAPPING', {})
ROA_MODEL_UPDATE_MAPPING = getattr(settings, 'ROA_MODEL_UPDATE_MAPPING', {})
ROA_CUSTOM_ARGS = getattr(settings, "ROA_CUSTOM_ARGS", {})
ROA_SSL_ARGS = getattr(settings, 'ROA_SSL_ARGS', {})
DEFAULT_CHARSET = getattr(settings, 'DEFAULT_CHARSET', 'utf-8')
class ROAModelBase(ModelBase):
def __new__(cls, name, bases, attrs):
if DJANGO_LT_1_7:
return cls._new_old_django(name, bases, attrs)
else:
return cls._new_recent_django(name, bases, attrs)
@classmethod
def _new_recent_django(cls, name, bases, attrs):
"""
Exactly the same except the line with ``isinstance(b, ROAModelBase)``.
"""
import warnings
from django.apps.config import MODELS_MODULE_NAME
from django.apps import apps
from django.utils.deprecation import RemovedInDjango19Warning
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ROAModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
# If the model is imported before the configuration for its
# application is created (#21719), or isn't in an installed
# application (#21680), use the legacy logic to figure out the
# app_label by looking one level up from the package or module
# named 'models'. If no such package or module exists, fall
# back to looking one level up from the module this model is
# defined in.
# For 'django.contrib.sites.models', this would be 'sites'.
# For 'geo.models.places' this would be 'geo'.
msg = (
"Model class %s.%s doesn't declare an explicit app_label "
"and either isn't in an application in INSTALLED_APPS or "
"else was imported before its application was loaded. " %
(module, name))
if abstract:
msg += "Its app_label will be set to None in Django 1.9."
else:
msg += "This will no longer be supported in Django 1.9."
warnings.warn(msg, RemovedInDjango19Warning, stacklevel=2)
model_module = sys.modules[new_class.__module__]
package_components = model_module.__name__.split('.')
package_components.reverse() # find the last occurrence of 'models'
try:
app_label_index = package_components.index(MODELS_MODULE_NAME) + 1
except ValueError:
app_label_index = 1
kwargs = {"app_label": package_components[app_label_index]}
else:
kwargs = {"app_label": app_config.label}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = (
new_class._meta.local_fields +
new_class._meta.local_many_to_many +
new_class._meta.virtual_fields
)
field_names = set(f.name for f in new_fields)
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
parent_links[field.rel.to] = field
# Do the appropriate setup for any model parents.
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' % (field.name, name, base.__name__)
)
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in parent_links:
field = parent_links[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'abstract base class %r' % (field.name, name, base.__name__)
)
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
@classmethod
def _new_old_django(cls, name, bases, attrs):
"""
Exactly the same except the line with ``isinstance(b, ROAModelBase)`` and part delimited by 'ROA HACK'
"""
super_new = super(ModelBase, cls).__new__
# six.with_metaclass() inserts an extra class called 'NewBase' in the
# inheritance tree: Model -> NewBase -> object. But the initialization
# should be executed only once for a given model class.
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
return super_new(cls, name, bases, attrs)
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ROAModelBase) and
not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
# ROA HACK:
subclass_kwargs = {
'name': str('DoesNotExist'),
'parents': tuple(x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,),
'module': module
}
if DJANGO_GT_1_4:
subclass_kwargs['attached_to'] = new_class
new_class.add_to_class('DoesNotExist', subclass_exception(**subclass_kwargs))
subclass_kwargs = {
'name': str('MultipleObjectsReturned'),
'parents': tuple(x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,),
'module': module
}
if DJANGO_GT_1_4:
subclass_kwargs['attached_to'] = new_class
new_class.add_to_class('MultipleObjectsReturned', subclass_exception(**subclass_kwargs))
# END HACK
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
            for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in o2o_map:
field = o2o_map[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '\
'with field of similar name from '\
'abstract base class %r' % \
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
if hasattr(cls, 'get_resource_url_list'):
cls.get_resource_url_list = staticmethod(curry(get_resource_url_list,
opts, cls.get_resource_url_list))
if hasattr(cls, 'get_resource_url_count'):
cls.get_resource_url_count = update_wrapper(curry(get_resource_url_count, opts, cls.get_resource_url_count),
cls.get_resource_url_count)
if hasattr(cls, 'get_resource_url_detail'):
cls.get_resource_url_detail = update_wrapper(curry(get_resource_url_detail, opts, cls.get_resource_url_detail),
cls.get_resource_url_detail)
signals.class_prepared.send(sender=cls)
class ROAModel(models.Model):
"""
Model which access remote resources.
"""
__metaclass__ = ROAModelBase
@classmethod
def serializer(cls):
"""
        Return a Django Rest Framework-style serializer class
"""
raise NotImplementedError
def get_renderer(self):
"""
Cf from rest_framework.renderers import JSONRenderer
"""
if ROA_FORMAT == 'json':
return JSONRenderer()
elif ROA_FORMAT == 'xml':
return XMLRenderer()
        elif ROA_FORMAT == 'yaml':
return YAMLRenderer()
else:
raise NotImplementedError
@classmethod
def get_parser(cls):
"""
Cf from rest_framework.parsers import JSONParser
"""
if ROA_FORMAT == 'json':
return JSONParser()
elif ROA_FORMAT == 'xml':
return XMLParser()
        elif ROA_FORMAT == 'yaml':
return YAMLParser()
else:
raise NotImplementedError
def get_serializer_content_type(self):
if ROA_FORMAT == 'json':
return {'Content-Type' : 'application/json'}
elif ROA_FORMAT == 'xml':
return {'Content-Type' : 'application/xml'}
        elif ROA_FORMAT == 'yaml':
return {'Content-Type' : 'text/x-yaml'}
else:
raise NotImplementedError
@classmethod
def get_serializer(cls, instance=None, data=None, partial=False, **kwargs):
"""
Transform API response to Django model objects.
"""
serializer_class = cls.serializer()
serializer = None
if instance:
serializer = serializer_class(instance, partial=partial, **kwargs)
elif data:
data = data['results'] if 'results' in data else data
serializer = serializer_class(data=data, many=isinstance(data, list), **kwargs)
return serializer
@staticmethod
def get_resource_url_list():
raise Exception("Static method get_resource_url_list is not defined.")
@classmethod
def count_response(cls, data, **kwargs):
"""
Read count query response and return result
"""
if 'count' in data: # with default DRF : with pagination
count = int(data['count'])
elif isinstance(data, (list, tuple)):
count = len(data) # with default DRF : without pagination
else:
count = int(data)
return count
def get_resource_url_count(self):
        # By default this method is not compatible with standard Django Rest Framework viewset URLs.
# In this case, you just have to override it and return self.get_resource_url_list()
return u"%scount/" % (self.get_resource_url_list(),)
def get_resource_url_detail(self):
return u"%s%s/" % (self.get_resource_url_list(), self.pk)
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw', 'cls', and 'origin').
"""
assert not (force_insert and force_update)
record_exists = False
if cls is None:
cls = self.__class__
meta = cls._meta
if not meta.proxy:
origin = cls
else:
meta = cls._meta
if origin and not getattr(meta, "auto_created", False):
signals.pre_save.send(sender=origin, instance=self, raw=raw)
model_name = str(meta)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
# We also go through this process to defer the save of proxy objects
# to their actual underlying model.
if not raw or meta.proxy:
if meta.proxy:
org = cls
else:
org = None
for parent, field in meta.parents.items():
# At this point, parent's primary key field may be unknown
# (for example, from administration form which doesn't fill
# this field). If so, fill it.
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self.save_base(cls=parent, origin=org, using=using)
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
if meta.proxy:
return
if not meta.proxy:
pk_val = self._get_pk_val(meta)
pk_is_set = pk_val is not None
get_args = {}
get_args[ROA_ARGS_NAMES_MAPPING.get('FORMAT', 'format')] = ROA_FORMAT
get_args.update(ROA_CUSTOM_ARGS)
# Construct Json payload
serializer = self.get_serializer(self)
payload = self.get_renderer().render(serializer.data)
# Add serializer content_type
headers = get_roa_headers()
headers.update(self.get_serializer_content_type())
# check if resource use custom primary key
        if meta.pk.attname not in ['pk', 'id']:
            # consider it might be inserting so check it first
            # @todo: try to improve this block to check if the custom primary key is not None first
resource = Resource(self.get_resource_url_detail(),
filters=ROA_FILTERS, **ROA_SSL_ARGS)
try:
response = resource.get(payload=None, headers=headers, **get_args)
except ResourceNotFound:
# since such resource does not exist, it's actually creating
pk_is_set = False
except RequestFailed:
pk_is_set = False
        if force_update or (pk_is_set and self.pk is not None):
record_exists = True
resource = Resource(self.get_resource_url_detail(),
filters=ROA_FILTERS, **ROA_SSL_ARGS)
try:
logger.debug(u"""Modifying : "%s" through %s with payload "%s" and GET args "%s" """ % (
force_unicode(self),
force_unicode(resource.uri),
force_unicode(payload),
force_unicode(get_args)))
response = resource.put(payload=payload, headers=headers, **get_args)
except RequestFailed as e:
raise ROAException(e)
else:
record_exists = False
resource = Resource(self.get_resource_url_list(),
filters=ROA_FILTERS, **ROA_SSL_ARGS)
try:
logger.debug(u"""Creating : "%s" through %s with payload "%s" and GET args "%s" """ % (
force_unicode(self),
force_unicode(resource.uri),
force_unicode(payload),
force_unicode(get_args)))
response = resource.post(payload=payload, headers=headers, **get_args)
except RequestFailed as e:
raise ROAException(e)
response = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)
data = self.get_parser().parse(StringIO(response))
serializer = self.get_serializer(data=data)
if not serializer.is_valid():
raise ROAException(u'Invalid deserialization for %s model: %s' % (self, serializer.errors))
try:
self.pk = int(serializer.object.pk)
except ValueError:
self.pk = serializer.object.pk
self = serializer.object
if origin:
signals.post_save.send(sender=origin, instance=self,
created=(not record_exists), raw=raw)
save_base.alters_data = True
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted " \
"because its %s attribute is set to None." \
% (self._meta.object_name, self._meta.pk.attname)
# Deletion in cascade should be done server side.
resource = Resource(self.get_resource_url_detail(),
filters=ROA_FILTERS, **ROA_SSL_ARGS)
logger.debug(u"""Deleting : "%s" through %s""" % \
(unicode(self), unicode(resource.uri)))
# Add serializer content_type
headers = get_roa_headers()
headers.update(self.get_serializer_content_type())
result = resource.delete(headers=headers, **ROA_CUSTOM_ARGS)
if result.status_int in [200, 202, 204]:
self.pk = None
delete.alters_data = True
def _get_unique_checks(self, exclude=None):
"""
        We don't want to check uniqueness that way for now.
"""
unique_checks, date_checks = [], []
return unique_checks, date_checks
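class RemoteArticleSketch(ROAModel):
    """
    A minimal sketch (illustration only, not shipped with django-roa) of a
    concrete remote model: a subclass provides a DRF serializer class and
    the list URL, and the detail/count URLs are derived from it by the
    methods above. The endpoint URL and the ``ArticleSerializer`` import
    are invented for this example.
    """
    title = models.CharField(max_length=255)

    @classmethod
    def serializer(cls):
        # assumed to be a rest_framework ModelSerializer defined elsewhere
        from myapp.serializers import ArticleSerializer
        return ArticleSerializer

    @staticmethod
    def get_resource_url_list():
        return u'http://api.example.com/articles/'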
ROA_URL_OVERRIDES_LIST = getattr(settings, 'ROA_URL_OVERRIDES_LIST', {})
ROA_URL_OVERRIDES_COUNT = getattr(settings, 'ROA_URL_OVERRIDES_COUNT', {})
ROA_URL_OVERRIDES_DETAIL = getattr(settings, 'ROA_URL_OVERRIDES_DETAIL', {})
def get_resource_url_list(opts, func, *args, **kwargs):
if DJANGO_LT_1_7:
key = '%s.%s' % (opts.app_label, opts.module_name)
else:
key = '%s.%s' % (opts.app_label, opts.model_name)
overridden = ROA_URL_OVERRIDES_LIST.get(key, False)
return overridden and overridden or func(*args, **kwargs)
def get_resource_url_count(opts, func, self, *args, **kwargs):
if DJANGO_LT_1_7:
key = '%s.%s' % (opts.app_label, opts.module_name)
else:
key = '%s.%s' % (opts.app_label, opts.model_name)
return ROA_URL_OVERRIDES_COUNT.get(key, func)(self, *args, **kwargs)
def get_resource_url_detail(opts, func, self, *args, **kwargs):
if DJANGO_LT_1_7:
key = '%s.%s' % (opts.app_label, opts.module_name)
else:
key = '%s.%s' % (opts.app_label, opts.model_name)
return ROA_URL_OVERRIDES_DETAIL.get(key, func)(self, *args, **kwargs)
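# Hedged settings sketch for the three override hooks above; the app label,
# model name and URL are invented for illustration:
#
#   ROA_URL_OVERRIDES_LIST = {
#       'myapp.article': u'http://api.example.com/v2/articles/',
#   }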
|
'''
Created on Dec 4, 2012
@author: <a href="mailto:christian.grefe@cern.ch">Christian Grefe</a>
'''
from __future__ import absolute_import, unicode_literals
from pyLCIO import EVENT
from sixlcio.moves import range
from io import open
import sixlcio as six
class Reader( object ):
''' Generic reader class '''
def __init__( self, reader, fileName=None ):
''' Constructor '''
self.reader = reader
self.fileList = []
self.fileIter = iter( self.fileList )
self.isOpen = False
if fileName and isinstance(fileName, six.string_types):
self.addFile( fileName )
        elif isinstance( fileName, list ):
            self.addFiles( fileName )
def __iter__( self ):
return self
def getNumberOfEvents( self ):
''' Get the number of events in the stream '''
if len(self.fileList) == 0:
return 0
if not self.isOpen:
self.__open__( self.fileList[0] )
return self.reader.getNumberOfEvents()
def addFile( self, fileName ):
''' Add a file to the list of files to be read '''
self.fileList.append( fileName )
def addFiles( self, files ):
''' Add a list of file names to be read '''
self.fileList.extend( files )
def addFileList( self, fileListName ):
''' Add a list of files to the reader using a text file containing the names of the input files (one per line)'''
fileListFile = open( fileListName, 'r' )
for line in fileListFile:
self.fileList.append( line.strip() )
fileListFile.close()
def __next__( self ):
''' Reads the next event from the stream. '''
if not self.isOpen:
self.__open__(next(self.fileIter))
event = self.__read__()
if event:
return event
else:
self.__close__()
return next(self)
next = __next__ # for Python 2
def skip( self, nEntries ):
''' Skip entries from the stream '''
for i in range(int(nEntries)):
next(self)
def __close__( self ):
''' Close the reader and all streams '''
try:
self.reader.close()
except Exception:
pass
self.isOpen = False
def __read__( self ):
''' Reads the next entry from the stream. Needs to be implemented by the derived class '''
return
def __open__( self, fileName ):
''' Opens the given file. Needs to be implemented by the derived class '''
self.isOpen = True
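class DummyReader( Reader ):
    ''' Hedged sketch of a concrete reader (illustration only): it shows
        which hooks a subclass fills in; the real readers shipped with
        pyLCIO follow this pattern. The bodies below are placeholders. '''
    def __init__( self, fileName=None ):
        # a real implementation would pass an actual I/O reader object here
        Reader.__init__( self, None, fileName )
    def __open__( self, fileName ):
        ''' Opens the given file (placeholder) '''
        Reader.__open__( self, fileName )
    def __read__( self ):
        ''' Returns the next event, or None at the end of the stream (placeholder) '''
        return None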
|
import itertools
import random
import khmer
from khmer.khmer_args import estimate_optimal_with_K_and_f as optimal_fp
from khmer import reverse_complement as revcomp
from . import khmer_tst_utils as utils
import pytest
import screed
K = 21
class Kmer(str):
def __init__(self, value, pos=0):
self.pos = pos
def __new__(cls, value, pos=0):
if not len(value) == K:
raise ValueError('bad k-mer length')
return str.__new__(cls, value)
def mutate_base(base):
if base in 'AT':
return random.choice('GC')
elif base in 'GC':
return random.choice('AT')
else:
assert False, 'bad base'
def mutate_sequence(sequence, N=1):
sequence = list(sequence)
positions = random.sample(range(len(sequence)), N)
for i in positions:
sequence[i] = mutate_base(sequence[i])
return ''.join(sequence)
def mutate_position(sequence, pos):
sequence = list(sequence)
sequence[pos] = mutate_base(sequence[pos])
return ''.join(sequence)
def get_random_sequence(length, exclude=None):
'''Generate a random (non-looping) nucleotide sequence.
    To be non-overlapping, the sequence should not include any repeated
    k-mers of length K-1.
Args:
exclude (str): If not None, add the k-mers from this sequence to the
seen set.
Returns:
str: A random non-looping sequence.
'''
seen = set()
def add_seen(kmer):
seen.add(kmer)
seen.add(revcomp(kmer))
if exclude is not None:
for pos in range(0, len(exclude) - K):
add_seen(exclude[pos:pos + K - 1])
seq = [random.choice('ACGT') for _ in range(K - 1)] # do first K-1 bases
add_seen(''.join(seq))
    while len(seq) < length:
next_base = random.choice('ACGT')
next_kmer = ''.join(seq[-K + 2:] + [next_base])
assert len(next_kmer) == K - 1
        if next_kmer not in seen:
seq.append(next_base)
add_seen(next_kmer)
else:
continue
return ''.join(seq)
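def test_get_random_sequence_length():
    # Added sketch test: the generator should honor the requested length,
    # with or without an exclusion sequence (lengths here are arbitrary).
    seq_a = get_random_sequence(200)
    seq_b = get_random_sequence(200, exclude=seq_a)
    assert len(seq_a) == 200
    assert len(seq_b) == 200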
def reads(sequence, L=100, N=100, dbg_cover=False):
positions = list(range(len(sequence) - L))
if dbg_cover is True:
for start in range(0, len(sequence), K):
read = sequence[start:start + L]
if len(read) < K:
read = sequence[-L:]
yield read
N -= 1
if N < 0:
return
for i in range(N):
start = random.choice(positions)
yield sequence[start:start + L]
def kmers(sequence):
for i in range(len(sequence) - K + 1):
yield sequence[i:i + K]
def test_mutate_sequence():
for _ in range(100):
assert 'A' not in mutate_sequence('A' * 10, 10)
assert 'T' not in mutate_sequence('T' * 10, 10)
assert 'C' not in mutate_sequence('C' * 10, 10)
assert 'G' not in mutate_sequence('G' * 10, 10)
def test_mutate_position():
assert mutate_position('AAAA', 2) in ['AACA', 'AAGA']
assert mutate_position('TTTT', 2) in ['TTCT', 'TTGT']
assert mutate_position('CCCC', 2) in ['CCAC', 'CCTC']
assert mutate_position('GGGG', 2) in ['GGAG', 'GGTG']
def test_reads():
contigfile = utils.get_test_data('simple-genome.fa')
contig = list(screed.open(contigfile))[0].sequence
for read in reads(contig):
assert read in contig
for read in reads(contig):
assert mutate_sequence(read) not in contig
'''
These fixtures emit various graph structures with their corresponding
sequences and important nodes. They take a random sequence fixture and
a graph fixture, then consume sequence and generate k-mers accordingly.
We're using a bespoke but simple language to describe graph structures in the
docstrings of these tests. It is as follows:
o: Node
[x:y]: Node at position in sequence
[x:y]+S: Node at position in sequence with extra base (where S in ACGT)
(Name), ([x:y] Name): Named node, named node at position
→ : Edge
~~: Tandem →o→ repeats
'''
@pytest.fixture(params=['simple-genome.fa'])
def known_sequence(request):
fn = utils.get_test_data(request.param)
return list(screed.open(fn))[0].sequence
@pytest.fixture(params=list(range(500, 1600, 500)),
ids=lambda val: '(L={0})'.format(val))
def random_sequence(request):
def get(exclude=None):
return get_random_sequence(request.param, exclude=exclude)
return get
@pytest.fixture(params=[khmer.Nodegraph, khmer.Countgraph],
ids=['(Type=Nodegraph)', '(Type=Countgraph)'])
def graph(request):
num_kmers = 50000
des_fp = 0.00001
args = optimal_fp(num_kmers, des_fp)
print('Graph Params:', args)
return request.param(K, args.htable_size, args.num_htables)
def hdn_counts(sequence, graph):
'''Get the degree distribution of nodes with degree more than 2.
'''
hdns = {}
for kmer in kmers(sequence):
d = graph.kmer_degree(kmer)
if d > 2:
hdns[d] = hdns.get(d, 0) + 1
return hdns
@pytest.fixture
def linear_structure(request, graph, random_sequence):
'''Sets up a simple linear path graph structure.
sequence
[0]→o→o~~o→o→[-1]
'''
sequence = random_sequence()
graph.consume(sequence)
# Check for false positive neighbors in our graph
# Mark as an expected failure if any are found
if hdn_counts(sequence, graph):
request.applymarker(pytest.mark.xfail)
return graph, sequence
@pytest.fixture(params=[K * 2, -K * 2],
ids=['(Where={0})'.format(i) for i in ['Start', 'End']])
def right_tip_structure(request, graph, random_sequence):
'''
Sets up a graph structure like so:
([S+1:S+K]+B tip)
sequence ↗
[0]→o→o~~o→(L)→([S:S+K] HDN)→(R)→o→o→o~~o→[-1]
Where S is the start position of the high degreen node (HDN).
That is, it has a single branch at the Sth K-mer.
'''
sequence = random_sequence()
S = request.param
if S < 0:
S = len(sequence) + S
# the HDN
HDN = Kmer(sequence[S:S + K], pos=S)
# left of the HDN
L = Kmer(sequence[S - 1:S - 1 + K], pos=S - 1)
# right of the HDN
R = Kmer(sequence[S + 1:S + 1 + K], pos=S + 1)
# the branch kmer
tip = Kmer(mutate_position(R, -1),
pos=R.pos)
graph.consume(sequence)
graph.count(tip)
# Check for false positive neighbors and mark as expected failure if found
if hdn_counts(sequence, graph) != {3: 1}:
request.applymarker(pytest.mark.xfail)
return graph, sequence, L, HDN, R, tip
@pytest.fixture(params=[K * 2, -K * 2],
ids=['(Where={0})'.format(i) for i in ['Start', 'End']])
def right_double_fork_structure(request, linear_structure, random_sequence):
'''
Sets up a graph structure like so:
branch
([S+1:S+K]+B)→o~~o→o
core_sequence ↗
[0]→o→o~~o→(L)→([S:S+K] HDN)→(R)→o→o→o~~o→[-1]
    Where S is the start position of the high degree node (HDN)
and B is the mutated base starting the branch.
'''
graph, core_sequence = linear_structure
print('\nCore Len:', len(core_sequence))
branch_sequence = random_sequence(exclude=core_sequence)
print('Branch len:', len(branch_sequence))
# start position of the HDN
S = request.param
if S < 0:
S = len(core_sequence) + S
# the HDN
HDN = Kmer(core_sequence[S:S + K], pos=S)
# left of the HDN
L = Kmer(core_sequence[S - 1:S - 1 + K], pos=S - 1)
# right of the HDN
R = Kmer(core_sequence[S + 1:S + 1 + K], pos=S + 1)
# the branch sequence, mutated at position S+1
branch_start = core_sequence[:R.pos] + mutate_position(R, -1)
branch_sequence = branch_start + branch_sequence
graph.consume(core_sequence)
graph.consume(branch_sequence)
# Check for false positive neighbors and mark as expected failure if found
core_hdns = hdn_counts(core_sequence, graph)
branch_hdns = hdn_counts(branch_sequence, graph)
# the core and branch sequences should each have exactly
# ONE node of degree 3 (HDN)
if core_hdns != {3: 1} or branch_hdns != {3: 1}:
print(core_hdns, branch_hdns)
request.applymarker(pytest.mark.xfail)
return graph, core_sequence, L, HDN, R, branch_sequence
@pytest.fixture
def right_triple_fork_structure(request, right_double_fork_structure,
random_sequence):
'''
Sets up a graph structure like so:
top_branch
([:S+1]+B)→o~~o→o
core_sequence ↗
[0]→o→o~~o→(L)→([S:S+K] HDN)→(R)→o→o→o~~o→[-1]
↘
([:S+1]+B)→o~~o→o
bottom_branch
    Where S is the start position of the high degree node (HDN).
'''
graph, core_sequence, L, HDN, R, top_sequence = right_double_fork_structure
bottom_branch = random_sequence(exclude=core_sequence + top_sequence)
print(len(core_sequence), len(top_sequence), len(bottom_branch))
# the branch sequence, mutated at position S+1
# choose a base not already represented at that position
bases = {'A', 'C', 'G', 'T'}
mutated = random.choice(list(bases - {R[-1], top_sequence[R.pos + K - 1]}))
bottom_sequence = core_sequence[:HDN.pos + K] + mutated + bottom_branch
graph.consume(bottom_sequence)
# Check for false positive neighbors and mark as expected failure if found
core_hdns = hdn_counts(core_sequence, graph)
top_hdns = hdn_counts(top_sequence, graph)
bottom_hdns = hdn_counts(bottom_sequence, graph)
# the core, top, and bottom sequences should each have exactly
# ONE node of degree 4 (HDN)
if not (core_hdns == top_hdns == bottom_hdns == {4: 1}):
print(core_hdns, top_hdns, bottom_hdns)
request.applymarker(pytest.mark.xfail)
return graph, core_sequence, L, HDN, R, top_sequence, bottom_sequence
@pytest.fixture(params=[K * 2, -K * 2],
ids=['(Where={0})'.format(i) for i in ['Start', 'End']])
def left_tip_structure(request, graph, random_sequence):
'''
Sets up a graph structure like so:
branch
(B+[S:S+K-1] tip)
↘ sequence
[0]→o~~o→(L)→([S:S+K] HDN)→(R)→o→o~~o→[-1]
Where S is the start position of the HDN.
'''
sequence = random_sequence()
S = request.param
if S < 0:
S = len(sequence) + S
tip = Kmer(mutate_position(sequence[S - 1:S - 1 + K], 0),
pos=S - 1 + K)
HDN = Kmer(sequence[S:S + K], pos=S)
L = Kmer(sequence[S - 1:S - 1 + K], pos=S - 1)
R = Kmer(sequence[S + 1:S + 1 + K], pos=S + 1)
graph.consume(sequence)
graph.count(tip)
# Check for false positive neighbors and mark as expected failure if found
if hdn_counts(sequence, graph) != {3: 1}:
request.applymarker(pytest.mark.xfail)
return graph, sequence, L, HDN, R, tip
@pytest.fixture(params=[K * 2, -K * 2],
ids=['(Where={0})'.format(i) for i in ['Start', 'End']])
def left_double_fork_structure(request, linear_structure, random_sequence):
'''
Sets up a graph structure like so:
o→o~~o→(B+[S:S+K-1])
↘ core_sequence
[0]→o→o~~o→(L)→([S:S+K] HDN)→(R)→o→o→o~~o→[-1]
    Where S is the start position of the high degree node (HDN).
'''
graph, core_sequence = linear_structure
branch_sequence = random_sequence(exclude=core_sequence)
# start position of the HDN
S = request.param
if S < 0:
S = len(core_sequence) + S
# the HDN
HDN = Kmer(core_sequence[S:S + K], pos=S)
# left of the HDN
L = Kmer(core_sequence[S - 1:S - 1 + K], pos=S - 1)
# right of the HDN
R = Kmer(core_sequence[S + 1:S + 1 + K], pos=S + 1)
# the branch sequence, mutated at position 0 in L,
    # which is equivalent to the K-1 prefix of HDN prepended with a new base
branch_start = mutate_position(L, 0)
branch_sequence = branch_sequence + \
branch_start + core_sequence[L.pos + K:]
graph.consume(core_sequence)
graph.consume(branch_sequence)
# Check for false positive neighbors and mark as expected failure if found
core_hdns = hdn_counts(core_sequence, graph)
branch_hdns = hdn_counts(branch_sequence, graph)
# the core and branch sequences should each have exactly
# ONE node of degree 3 (HDN)
if not (core_hdns == branch_hdns == {3: 1}):
request.applymarker(pytest.mark.xfail)
return graph, core_sequence, L, HDN, R, branch_sequence
@pytest.fixture(params=[K * 2, (-K * 2) - 2],
ids=['(Where={0})'.format(i) for i in ['Start', 'End']])
def snp_bubble_structure(request, linear_structure):
'''
Sets up a graph structure resulting from a SNP (Single Nucleotide
Polymorphism).
(HDN_L[1:]+SNP)→o~~o→(SNP+)
↗ ↘
o~~([S:S+K] HDN_L) ([S+K+1:S+2K+1] HDN_R)~~o
↘ ↗
(HDN_L[1:]+W)→o~~o~~o→(W+)
Where S is the start position of HDN directly left of the SNP (HDN_L),
SNP is the mutated base, and W is the wildtype (original) base.
Of course, W and SNP could be interchanged here, we don't actually
know which is which ;)
Note our parameterization: we need a bit more room from the ends,
so we bring the rightmost SNP a tad left.
'''
graph, wildtype_sequence = linear_structure
S = request.param
if S < 0:
S = len(wildtype_sequence) + S
snp_sequence = mutate_position(wildtype_sequence, S + K)
HDN_L = Kmer(wildtype_sequence[S:S + K], pos=S)
HDN_R = Kmer(wildtype_sequence[S + K + 1:S + 2 * K + 1], pos=S + K + 1)
graph.consume(wildtype_sequence)
graph.consume(snp_sequence)
# Check for false positive neighbors and mark as expected failure if found
w_hdns = hdn_counts(wildtype_sequence, graph)
snp_hdns = hdn_counts(snp_sequence, graph)
if not (w_hdns == snp_hdns == {3: 2}):
print(w_hdns, snp_hdns)
print(HDN_L, HDN_R)
print(wildtype_sequence[HDN_L.pos + K + 1])
print(snp_sequence[HDN_L.pos + K + 1])
request.applymarker(pytest.mark.xfail)
return graph, wildtype_sequence, snp_sequence, HDN_L, HDN_R
@pytest.fixture(params=[2, 3, 4, 5, 6, 7, 8])
def tandem_repeat_structure(request, linear_structure):
graph, sequence = linear_structure
tandem_repeats = sequence * request.param
graph.consume(tandem_repeats)
if hdn_counts(tandem_repeats, graph):
request.applymarker(pytest.mark.xfail)
return graph, sequence, tandem_repeats
@pytest.fixture
def circular_linear_structure(request, linear_structure):
graph, sequence = linear_structure
sequence += sequence
if hdn_counts(sequence, graph):
request.applymarker(pytest.mark.xfail)
return graph, sequence
|
from __future__ import absolute_import, division, print_function
from .core import (Bag, Item, from_sequence, from_filenames, from_url,
to_textfiles, concat, from_castra, from_imperative,
from_delayed, bag_range as range, bag_zip as zip)
from .text import read_text
from ..context import set_options
from ..base import compute
|
try:
from setuptools import setup, find_packages
except ImportError:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name='django-roa',
version='1.8.1',
url='https://github.com/charles-vdulac/django-roa',
download_url='https://github.com/charles-vdulac/django-roa/archive/master.zip',
license='BSD',
description="Turn your models into remote resources that you can access through Django's ORM.",
author='David Larlet',
author_email='david@larlet.fr',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
install_requires=[
'Django',
'restkit',
'wsgiref',
'simplejson',
'djangorestframework'
],
    tests_require=[
        'django-piston',  # for the Piston-based tests
    ]
)
|
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.SetSize(200, 200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
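# Procedural test data: a 2D slice (z extent 0..0) of the RT analytic wavelet source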
wavelet = vtk.vtkRTAnalyticSource()
wavelet.SetWholeExtent(-100, 100, -100, 100, 0, 0)
wavelet.SetCenter(0, 0, 0)
wavelet.SetMaximum(255)
wavelet.SetStandardDeviation(.5)
wavelet.SetXFreq(60)
wavelet.SetYFreq(30)
wavelet.SetZFreq(40)
wavelet.SetXMag(10)
wavelet.SetYMag(18)
wavelet.SetZMag(5)
wavelet.SetSubsampleRate(1)
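# Displace the flat slice out of plane in proportion to the wavelet scalars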
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(wavelet.GetOutputPort())
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(warp.GetOutputPort())
mapper.SetScalarRange(75, 290)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(-10)
renWin.AddRenderer(renderer)
iren.Initialize()
|
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class CategoriesDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
def test_with_run_command(self):
"""Test that that file and class static variables display correctly."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case (most of these categories do not
# exist anymore, but we just make sure we delete all of them)
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type category delete Category1', check=False)
self.runCmd('type category delete Category2', check=False)
self.runCmd('type category delete NewCategory', check=False)
self.runCmd("type category delete CircleCategory", check=False)
self.runCmd(
"type category delete RectangleStarCategory",
check=False)
self.runCmd("type category delete BaseCategory", check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Add a summary to a new category and check that it works
self.runCmd(
"type summary add Rectangle --summary-string \"ARectangle\" -w NewCategory")
self.expect("frame variable r1 r2 r3", matching=False,
substrs=['r1 = ARectangle',
'r2 = ARectangle',
'r3 = ARectangle'])
self.runCmd("type category enable NewCategory")
self.expect("frame variable r1 r2 r3", matching=True,
substrs=['r1 = ARectangle',
'r2 = ARectangle',
'r3 = ARectangle'])
# Disable the category and check that the old stuff is there
self.runCmd("type category disable NewCategory")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = {',
'r2 = {',
'r3 = {'])
# Re-enable the category and check that it works
self.runCmd("type category enable NewCategory")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = ARectangle',
'r2 = ARectangle',
'r3 = ARectangle'])
# Delete the category and the old stuff should be there
self.runCmd("type category delete NewCategory")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = {',
'r2 = {',
'r3 = {'])
# Add summaries to two different categories and check that we can
# switch
self.runCmd(
"type summary add --summary-string \"Width = ${var.w}, Height = ${var.h}\" Rectangle -w Category1")
self.runCmd("type summary add --python-script \"return 'Area = ' + str( int(valobj.GetChildMemberWithName('w').GetValue()) * int(valobj.GetChildMemberWithName('h').GetValue()) );\" Rectangle -w Category2")
# check that enable A B is the same as enable B enable A
self.runCmd("type category enable Category1 Category2")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Width = ',
'r2 = Width = ',
'r3 = Width = '])
self.runCmd("type category disable Category1")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Area = ',
'r2 = Area = ',
'r3 = Area = '])
# switch again
self.runCmd("type category enable Category1")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Width = ',
'r2 = Width = ',
'r3 = Width = '])
# Re-enable the category and show that the preference is persisted
self.runCmd("type category disable Category2")
self.runCmd("type category enable Category2")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Area = ',
'r2 = Area = ',
'r3 = Area = '])
# Now delete the favorite summary
self.runCmd("type summary delete Rectangle -w Category2")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Width = ',
'r2 = Width = ',
'r3 = Width = '])
# Delete the summary from the default category (that does not have it)
self.runCmd("type summary delete Rectangle", check=False)
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Width = ',
'r2 = Width = ',
'r3 = Width = '])
# Now add another summary to another category and switch back and forth
self.runCmd("type category delete Category1 Category2")
self.runCmd(
"type summary add Rectangle -w Category1 --summary-string \"Category1\"")
self.runCmd(
"type summary add Rectangle -w Category2 --summary-string \"Category2\"")
self.runCmd("type category enable Category2")
self.runCmd("type category enable Category1")
self.runCmd("type summary list -w Category1")
self.expect("type summary list -w NoSuchCategoryHere",
substrs=['no matching results found'])
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Category1',
'r2 = Category1',
'r3 = Category1'])
self.runCmd("type category disable Category1")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Category2',
'r2 = Category2',
'r3 = Category2'])
# Check that re-enabling an enabled category works
self.runCmd("type category enable Category1")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Category1',
'r2 = Category1',
'r3 = Category1'])
self.runCmd("type category delete Category1")
self.runCmd("type category delete Category2")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = {',
'r2 = {',
'r3 = {'])
# Check that multiple summaries can go into one category
self.runCmd(
"type summary add -w Category1 --summary-string \"Width = ${var.w}, Height = ${var.h}\" Rectangle")
self.runCmd(
"type summary add -w Category1 --summary-string \"Radius = ${var.r}\" Circle")
self.runCmd("type category enable Category1")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Width = ',
'r2 = Width = ',
'r3 = Width = '])
self.expect("frame variable c1 c2 c3",
substrs=['c1 = Radius = ',
'c2 = Radius = ',
'c3 = Radius = '])
self.runCmd("type summary delete Circle -w Category1")
self.expect("frame variable c1 c2 c3",
substrs=['c1 = {',
'c2 = {',
'c3 = {'])
# Add a regex based summary to a category
self.runCmd(
"type summary add -w Category1 --summary-string \"Radius = ${var.r}\" -x Circle")
self.expect("frame variable r1 r2 r3",
substrs=['r1 = Width = ',
'r2 = Width = ',
'r3 = Width = '])
self.expect("frame variable c1 c2 c3",
substrs=['c1 = Radius = ',
'c2 = Radius = ',
'c3 = Radius = '])
# Delete it
self.runCmd("type summary delete Circle -w Category1")
self.expect("frame variable c1 c2 c3",
substrs=['c1 = {',
'c2 = {',
'c3 = {'])
# Change a summary inside a category and check that the change is
# reflected
self.runCmd(
"type summary add Circle -w Category1 --summary-string \"summary1\"")
self.expect("frame variable c1 c2 c3",
substrs=['c1 = summary1',
'c2 = summary1',
'c3 = summary1'])
self.runCmd(
"type summary add Circle -w Category1 --summary-string \"summary2\"")
self.expect("frame variable c1 c2 c3",
substrs=['c1 = summary2',
'c2 = summary2',
'c3 = summary2'])
# Check that our order of priority works. Start by clearing categories
self.runCmd("type category delete Category1")
self.runCmd(
"type summary add Shape -w BaseCategory --summary-string \"AShape\"")
self.runCmd("type category enable BaseCategory")
self.expect("print (Shape*)&c1",
substrs=['AShape'])
self.expect("print (Shape*)&r1",
substrs=['AShape'])
self.expect("print (Shape*)c_ptr",
substrs=['AShape'])
self.expect("print (Shape*)r_ptr",
substrs=['AShape'])
self.runCmd(
"type summary add Circle -w CircleCategory --summary-string \"ACircle\"")
self.runCmd(
"type summary add Rectangle -w RectangleCategory --summary-string \"ARectangle\"")
self.runCmd("type category enable CircleCategory")
self.expect("frame variable c1",
substrs=['ACircle'])
self.expect("frame variable c_ptr",
substrs=['ACircle'])
self.runCmd(
"type summary add \"Rectangle *\" -w RectangleStarCategory --summary-string \"ARectangleStar\"")
self.runCmd("type category enable RectangleStarCategory")
self.expect("frame variable c1 r1 c_ptr r_ptr",
substrs=['ACircle',
'ARectangleStar'])
self.runCmd("type category enable RectangleCategory")
self.expect("frame variable c1 r1 c_ptr r_ptr",
substrs=['ACircle',
'ACircle',
'ARectangle'])
# Check that abruptly deleting an enabled category does not crash us
self.runCmd("type category delete RectangleCategory")
self.expect("frame variable c1 r1 c_ptr r_ptr",
substrs=['ACircle',
'(Rectangle) r1 = ', 'w = 5', 'h = 6',
'ACircle',
'ARectangleStar'])
# check that list commands work
self.expect("type category list",
substrs=['RectangleStarCategory (enabled)'])
self.expect("type summary list",
substrs=['ARectangleStar'])
        # Disable a category and check that it falls back
self.runCmd("type category disable CircleCategory")
# check that list commands work
self.expect("type category list",
substrs=['CircleCategory (disabled'])
self.expect("frame variable c1 r_ptr",
substrs=['AShape',
'ARectangleStar'])
# check that filters work into categories
self.runCmd(
"type filter add Rectangle --child w --category RectangleCategory")
self.runCmd("type category enable RectangleCategory")
self.runCmd(
"type summary add Rectangle --category RectangleCategory --summary-string \" \" -e")
self.expect('frame variable r2',
substrs=['w = 9'])
self.runCmd("type summary add Rectangle --summary-string \" \" -e")
self.expect('frame variable r2', matching=False,
substrs=['h = 16'])
# Now delete all categories
self.runCmd(
"type category delete CircleCategory RectangleStarCategory BaseCategory RectangleCategory")
# check that a deleted category with filter does not blow us up
self.expect('frame variable r2',
substrs=['w = 9',
'h = 16'])
# and also validate that one can print formatters for a language
self.expect(
'type summary list -l c++',
substrs=[
'vector',
'map',
'list',
'string'])
|
"""Base TestCase classes for nbextensions tests."""
from __future__ import (
absolute_import, division, print_function, unicode_literals,
)
import logging
import os
from threading import Event, Thread
from jupyter_contrib_core.notebook_compat import serverextensions
from jupyter_contrib_core.testing_utils import (
GlobalMemoryHandler, get_wrapped_logger, wrap_logger_handlers,
)
from jupyter_contrib_core.testing_utils.jupyter_env import patch_jupyter_dirs
from nose.plugins.skip import SkipTest
from notebook.notebookapp import NotebookApp
from notebook.tests.launchnotebook import NotebookTestBase
from tornado.ioloop import IOLoop
from traitlets.config import Config
from traitlets.traitlets import default
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # py2
no_selenium = True
try:
from selenium import webdriver
except ImportError:
pass
else:
no_selenium = False
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote import remote_connection
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
# don't show selenium debug logs
remote_connection.LOGGER.setLevel(logging.INFO)
class NoseyNotebookApp(NotebookApp):
"""Wrap the regular logging handler(s). For use inside nose tests."""
@default('log')
def _log_default(self):
"""wrap loggers for this application."""
return wrap_logger_handlers(NotebookApp._log_default(self))
class NbextensionTestBase(NotebookTestBase):
"""
Base class for nbextensions test case classes.
We override the setup_class method from NotebookTestBase in order to
install things, and also to set log_level to debug.
Also split some of the setup_class method into separate methods in order to
simplify subclassing.
"""
config = Config(NotebookApp={'log_level': logging.DEBUG})
# these are added for notebook < 4.1, where url_prefix wasn't defined.
# However, due to the fact that the base_url body data attribute in the
# page template isn't passed through the urlencode jinja2 filter,
# we can't expect a base_url which would need encoding to work :(
if not hasattr(NotebookTestBase, 'url_prefix'):
url_prefix = '/ab/'
@classmethod
def base_url(cls):
return 'http://localhost:%i%s' % (cls.port, cls.url_prefix)
_install_user = False
_install_sys_prefix = False
@classmethod
def pre_server_setup(cls):
"""Setup extensions etc before running the notebook server."""
# added to install things!
cls.log.info('Enabling jupyter_nbextensions_configurator')
inst_func = serverextensions.toggle_serverextension_python
inst_funcname = '.'.join([inst_func.__module__, inst_func.__name__])
logger = get_wrapped_logger(
name=inst_funcname, log_level=logging.DEBUG)
serverextensions.toggle_serverextension_python(
'jupyter_nbextensions_configurator', enabled=True, logger=logger,
user=cls._install_user, sys_prefix=cls._install_sys_prefix)
@classmethod
def get_server_kwargs(cls, **overrides):
kwargs = dict(
port=cls.port,
port_retries=0,
open_browser=False,
runtime_dir=cls.jupyter_dirs['server']['runtime'],
notebook_dir=cls.jupyter_dirs['server']['notebook'],
base_url=cls.url_prefix,
config=cls.config,
)
# disable auth-by-default, introduced in notebook PR #1831
if 'token' in NotebookApp.class_trait_names():
kwargs['token'] = ''
kwargs.update(overrides)
return kwargs
@classmethod
def start_server_thread(cls, started_event):
"""
Start a notebook server in a separate thread.
The start is signalled using the passed Event instance.
"""
cls.log.info('Starting notebook server app thread')
app = cls.notebook = NoseyNotebookApp(**cls.get_server_kwargs())
# don't register signal handler during tests
app.init_signal = lambda: None
app.initialize(argv=[])
loop = IOLoop.current()
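        # signal the waiting setup thread once the IOLoop starts running callbacks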
loop.add_callback(started_event.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started_event.set()
# app.session_manager.close call was added after notebook 4.0
if hasattr(app.session_manager, 'close'):
app.session_manager.close()
@classmethod
def _setup_patches(cls):
(cls.jupyter_patches, cls.jupyter_dirs,
remove_jupyter_dirs) = patch_jupyter_dirs()
# store in a list to avoid confusion over bound/unbound method in pypy
cls.removal_funcs = [remove_jupyter_dirs]
try:
for ptch in cls.jupyter_patches:
ptch.start()
# patches for items called in NotebookTestBase.teardown_class
# env_patch needs a start method as well because of a typo in
# notebook 4.0 which calls it in the teardown_class method
cls.env_patch = cls.path_patch = Mock(['start', 'stop'])
cls.home_dir = cls.config_dir = cls.data_dir = Mock(['cleanup'])
cls.runtime_dir = cls.notebook_dir = Mock(['cleanup'])
cls.tmp_dir = Mock(['cleanup'])
except Exception:
for func in cls.removal_funcs:
func()
raise
@classmethod
def setup_class(cls):
"""Install things & setup a notebook server in a separate thread."""
cls.log = get_wrapped_logger(cls.__name__)
cls._setup_patches()
cls.pre_server_setup()
try:
started = Event()
cls.notebook_thread = Thread(
target=cls.start_server_thread, args=[started])
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
except Exception:
for func in cls.removal_funcs:
func()
raise
@classmethod
def teardown_class(cls):
try:
# call superclass to stop notebook server
super(NbextensionTestBase, cls).teardown_class()
finally:
try:
for ptch in cls.jupyter_patches:
ptch.stop()
finally:
for func in cls.removal_funcs:
func()
def _skip_if_no_selenium():
if no_selenium:
raise SkipTest('Selenium not installed. '
'Skipping selenium-based test.')
if os.environ.get('TRAVIS_OS_NAME') == 'osx':
raise SkipTest("Don't do selenium tests on travis osx")
class SeleniumNbextensionTestBase(NbextensionTestBase):
# browser logs from selenium aren't very useful currently, but if you want
# them, you can set the class attribute show_driver_logs to have them
# output via the GlobalMemoryHandler on test failure
show_driver_logs = False
@classmethod
def setup_class(cls):
cls.init_webdriver()
cls._failure_occurred = False # flag for logging
super(SeleniumNbextensionTestBase, cls).setup_class()
@classmethod
def init_webdriver(cls):
cls.log = get_wrapped_logger(cls.__name__)
_skip_if_no_selenium()
if hasattr(cls, 'driver'):
return cls.driver
if (os.environ.get('CI') and os.environ.get('TRAVIS') and
os.environ.get('SAUCE_ACCESS_KEY')):
cls.log.info(
'Running in CI environment. Using Sauce remote webdriver.')
username = os.environ['SAUCE_USERNAME']
access_key = os.environ['SAUCE_ACCESS_KEY']
capabilities = {
# 'platform': 'Mac OS X 10.9',
'platform': 'Linux',
'browserName': 'firefox',
'version': 'latest',
'tags': [os.environ['TOXENV'], 'CI'],
'name': cls.__name__
}
hub_url = 'http://{}:{}@ondemand.saucelabs.com:80/wd/hub'.format(
username, access_key)
if os.environ.get('TRAVIS'):
# see https://docs.travis-ci.com/user/gui-and-headless-browsers
# and https://docs.travis-ci.com/user/sauce-connect
capabilities.update({
'tunnel-identifier': os.environ['TRAVIS_JOB_NUMBER'],
'build': os.environ['TRAVIS_BUILD_NUMBER'],
})
cls.driver = webdriver.Remote(
desired_capabilities=capabilities, command_executor=hub_url)
else:
cls.log.info('Using local webdriver.')
cls.driver = webdriver.Firefox()
return cls.driver
    def run(self, results=None):
"""Run a given test. Overridden in order to access results."""
# in py2 unittest, run doesn't return the results object, so we need to
# create one in order to have a reference to it.
if results is None:
results = self.defaultTestResult()
super(SeleniumNbextensionTestBase, self).run(results)
if results.failures or results.errors:
self.__class__._failure_occurred = True
return results
@classmethod
def _print_logs_on_failure(cls):
if cls._failure_occurred:
cls.log.info('\n'.join([
'',
'\t\tFailed test!',
'\t\tCaptured logging:',
]))
GlobalMemoryHandler.rotate_buffer(1)
GlobalMemoryHandler.flush_to_target()
browser_logger = get_wrapped_logger(
name=cls.__name__ + '.driver', log_level=logging.DEBUG)
if cls.show_driver_logs:
cls.log.info('\n\t\tjavascript console logs below...\n\n')
for entry in cls.driver.get_log('browser'):
level = logging._nameToLevel.get(
entry['level'], logging.ERROR)
msg = entry['message'].strip()
browser_logger.log(level, msg)
record, target = GlobalMemoryHandler._buffer[-1]
record.ct = entry['timestamp'] / 1000.
GlobalMemoryHandler._buffer[-1] = record, target
GlobalMemoryHandler.flush_to_target()
if (not cls._failure_occurred) or os.environ.get('CI'):
cls.log.info('closing webdriver')
cls.driver.quit()
else:
cls.log.info('keeping webdriver open')
@classmethod
def teardown_class(cls):
cls._print_logs_on_failure()
super(SeleniumNbextensionTestBase, cls).teardown_class()
@classmethod
def wait_for_element(cls, presence_cond, message, timeout=5):
"""WebDriverWait for an element to appear, fail test on timeout."""
try:
return WebDriverWait(cls.driver, timeout).until(
ec.presence_of_element_located(presence_cond))
except TimeoutException:
if message:
raise cls.failureException(message)
else:
raise cls.failureException(
'{}No element matching condition {!r} found in {}s'.format(
message, presence_cond, timeout))
@classmethod
def wait_for_selector(cls, css_selector, message='', timeout=5):
"""WebDriverWait for a selector to appear, fail test on timeout."""
if message:
message += '\n'
message = '{}No element matching selector {!r} found in {}s'.format(
message, css_selector, timeout)
return cls.wait_for_element(
(By.CSS_SELECTOR, css_selector), message=message, timeout=timeout)
@classmethod
def wait_for_partial_link_text(cls, link_text, message='', timeout=5):
"""WebDriverWait for a link to appear, fail test on timeout."""
if message:
message += '\n'
message = (
'{}No element matching partial link text '
'{!r} found in {}s').format(message, link_text, timeout)
return cls.wait_for_element((By.PARTIAL_LINK_TEXT, link_text),
message=message, timeout=timeout)
@classmethod
def wait_for_xpath(cls, xpath, message='', timeout=5):
"""WebDriverWait for a selector to appear, fail test on timeout."""
if message:
message += '\n'
message = '{}No element matching xpath {!r} found in {}s'.format(
message, xpath, timeout)
return cls.wait_for_element(
(By.XPATH, xpath), message=message, timeout=timeout)
|
"""
Deploy this project in dev/stage/production.
Requires commander_ which is installed on the systems that need it.
.. _commander: https://github.com/oremj/commander
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task, hostgroups
import commander_settings as settings
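# Make shelled-out commands use the project virtualenv and the right Django settings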
venv_bin_path = os.path.join(settings.SRC_DIR, 'virtualenv', 'bin')
os.environ['PATH'] = venv_bin_path + os.pathsep + os.environ['PATH']
os.environ['DJANGO_SETTINGS_MODULE'] = 'badgus.settings'
@task
def update_code(ctx, tag):
"""Update the code to a specific git reference (tag/sha/etc)."""
with ctx.lcd(settings.SRC_DIR):
ctx.local('git fetch')
ctx.local('git checkout -f %s' % tag)
ctx.local('git submodule sync')
ctx.local('git submodule update --init --recursive')
@task
def update_info(ctx):
"""Write info about the current state to a publicly visible file."""
with ctx.lcd(settings.SRC_DIR):
ctx.local('date')
ctx.local('git branch -a')
ctx.local('git log -3')
ctx.local('git status')
ctx.local('git submodule status')
ctx.local('git rev-parse HEAD > media/revision.txt')
@task
def setup_dependencies(ctx):
with ctx.lcd(settings.SRC_DIR):
# TODO: only delete & recreate virtualenv when needed
# Maybe stash the md5 of requirements files between deploys, only
# rebuild virtualenv on mismatch?
ctx.local('rm -rf virtualenv')
ctx.local('virtualenv --no-site-packages virtualenv')
# Activate virtualenv to append to path.
activate_env = os.path.join(settings.SRC_DIR, 'virtualenv', 'bin', 'activate_this.py')
execfile(activate_env, dict(__file__=activate_env))
ctx.local('python --version')
ctx.local('python scripts/peep.py install -r requirements/prod.txt')
ctx.local('virtualenv --relocatable virtualenv')
# Fix lib64 symlink to be relative instead of absolute.
ctx.local('rm -f virtualenv/lib64')
with ctx.lcd('virtualenv'):
ctx.local('ln -s lib lib64')
@task
def update_db(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("python manage.py syncdb --noinput")
# HACK: Skip djcelery migrations. Tables already there... somehow?
# ctx.local('python manage.py migrate djcelery --fake --noinput')
# ctx.local('python manage.py migrate --noinput')
# ctx.local('python manage.py migrate --list')
@task
def update_assets(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("python manage.py collectstatic --noinput")
@task
def clean(ctx):
"""Clean .gitignore and .pyc files."""
with ctx.lcd(settings.SRC_DIR):
ctx.local("find . -type f -name '.gitignore' -or -name '*.pyc' -delete")
@task
def checkin_changes(ctx):
"""Use the local, IT-written deploy script to check in changes."""
ctx.local(settings.DEPLOY_SCRIPT)
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy_app(ctx):
"""Call the remote update script to push changes to webheads."""
ctx.remote('touch %s' % settings.REMOTE_WSGI)
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
"""1. Update code to pick up changes to this file."""
update_code(ref)
setup_dependencies()
update_info()
clean()
@task
def update(ctx):
"""2. Nothing to do here yet."""
update_assets()
update_db()
@task
def deploy(ctx):
"""3. Deploy stuff."""
checkin_changes()
deploy_app()
|
"""
Absolute position layout demo
Tested environment:
Mac OS X 10.6.8
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class Demo(QtGui.QMainWindow):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 100
self.setGeometry(x, y, w, h)
label1 = QtGui.QLabel('hello', self)
x, y = 10, 10
label1.move(x, y)
label1.resize(200, 30)
text = str(label1.frameSize())
label1.setText(text)
# PySide.QtCore.QSize(200, 30) --> x, y
print 'label1:', text
label2 = QtGui.QLabel('world', self)
x, y = 20, 40
label2.move(x, y)
label2.resize(300, 30)
text = str(label2.geometry())
label2.setText(text)
# PySide.QtCore.QRect(20, 40, 300, 30) --> x, y, w, h
print 'label2:', text
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
app.exec_()
|
import os
import sys
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import (
get_info, system_info, lapack_opt_info, blas_opt_info)
config = Configuration('linalg', parent_package, top_path)
config.add_subpackage('tests')
# Accelerate is buggy, disallow it. See also numpy/core/setup.py
for opt_order in (blas_opt_info.blas_order, lapack_opt_info.lapack_order):
if 'accelerate' in opt_order:
opt_order.remove('accelerate')
# Configure lapack_lite
src_dir = 'lapack_lite'
lapack_lite_src = [
os.path.join(src_dir, 'python_xerbla.c'),
os.path.join(src_dir, 'f2c_z_lapack.c'),
os.path.join(src_dir, 'f2c_c_lapack.c'),
os.path.join(src_dir, 'f2c_d_lapack.c'),
os.path.join(src_dir, 'f2c_s_lapack.c'),
os.path.join(src_dir, 'f2c_lapack.c'),
os.path.join(src_dir, 'f2c_blas.c'),
os.path.join(src_dir, 'f2c_config.c'),
os.path.join(src_dir, 'f2c.c'),
]
all_sources = config.paths(lapack_lite_src)
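    # Prefer an ILP64 (64-bit integer) LAPACK build when requested via the environment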
if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
lapack_info = get_info('lapack_ilp64_opt', 2)
else:
lapack_info = get_info('lapack_opt', 0) # and {}
use_lapack_lite = not lapack_info
if use_lapack_lite:
# This makes numpy.distutils write the fact that lapack_lite
# is being used to numpy.__config__
class numpy_linalg_lapack_lite(system_info):
def calc_info(self):
info = {'language': 'c'}
if sys.maxsize > 2**32:
# Build lapack-lite in 64-bit integer mode.
# The suffix is arbitrary (lapack_lite symbols follow it),
# but use the "64_" convention here.
info['define_macros'] = [
('HAVE_BLAS_ILP64', None),
('BLAS_SYMBOL_SUFFIX', '64_')
]
self.set_info(**info)
lapack_info = numpy_linalg_lapack_lite().get_info(2)
def get_lapack_lite_sources(ext, build_dir):
if use_lapack_lite:
print("### Warning: Using unoptimized lapack ###")
return all_sources
else:
if sys.platform == 'win32':
print("### Warning: python_xerbla.c is disabled ###")
return []
return [all_sources[0]]
config.add_extension(
'lapack_lite',
sources=['lapack_litemodule.c', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
)
# umath_linalg module
config.add_extension(
'_umath_linalg',
sources=['umath_linalg.c.src', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
libraries=['npymath'],
)
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
from .history import History
__all__ = ["History"]
|
from iepy.preprocess.ner.base import FoundEntity
from .factories import SentencedIEDocFactory, GazetteItemFactory
from .manager_case import ManagerTestCase
class TestDocumentInvariants(ManagerTestCase):
def test_cant_set_different_number_of_synparse_than_sentences(self):
doc = SentencedIEDocFactory()
sents = list(doc.get_sentences())
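        # deliberately provide one more parse result than there are sentences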
fake_syn_parse_items = [
'<fake parse tree %i>' % i for i in range(len(sents) + 1)]
with self.assertRaises(ValueError):
doc.set_syntactic_parsing_result(fake_syn_parse_items)
class TestSetNERResults(ManagerTestCase):
def _f_eo(self, key='something', kind_name='ABC', alias='The dog', offset=0,
offset_end=2, from_gazette=False):
# constructs and returns a simple FoundEntity with the args given
return FoundEntity(key=key, kind_name=kind_name, alias=alias, offset=offset,
offset_end=offset_end, from_gazette=from_gazette)
def setUp(self):
self.doc = SentencedIEDocFactory(text="The dog is dead. Long live the dog.")
def test_simple(self):
f_eo = self._f_eo()
self.doc.set_ner_result([f_eo])
self.assertEqual(self.doc.entity_occurrences.count(), 1)
eo = self.doc.entity_occurrences.first()
self.assertEqual(eo.entity.key, f_eo.key)
self.assertEqual(eo.entity.kind.name, f_eo.kind_name)
self.assertEqual(eo.entity.gazette, None)
self.assertEqual(eo.offset, f_eo.offset)
self.assertEqual(eo.offset_end, f_eo.offset_end)
self.assertEqual(eo.alias, f_eo.alias)
def test_offsets_are_checked(self):
f_eo = self._f_eo(offset=-1) # negative offset
self.assertRaises(ValueError, self.doc.set_ner_result, [f_eo])
        f_eo = self._f_eo(offset=2, offset_end=2)  # end equal to start
        self.assertRaises(ValueError, self.doc.set_ner_result, [f_eo])
        f_eo = self._f_eo(offset=2, offset_end=1)  # end before start
self.assertRaises(ValueError, self.doc.set_ner_result, [f_eo])
doc_tkns = len(self.doc.tokens)
f_eo = self._f_eo(offset=doc_tkns + 1,
offset_end=doc_tkns + 3) # bigger than doc tokens
self.assertRaises(ValueError, self.doc.set_ner_result, [f_eo])
def test_if_from_gazette_is_enabled_gazetteitem_is_set(self):
f_eo = self._f_eo(from_gazette=True)
gz_item = GazetteItemFactory(kind__name=f_eo.kind_name,
text=f_eo.key)
self.doc.set_ner_result([f_eo])
eo = self.doc.entity_occurrences.first()
self.assertEqual(eo.entity.gazette, gz_item)
def test_sending_again_same_found_entity_is_idempotent(self):
f_eo = self._f_eo()
self.doc.set_ner_result([f_eo])
self.doc.set_ner_result([f_eo])
self.assertEqual(self.doc.entity_occurrences.count(), 1)
def test_sending_twice_same_found_entity_doesnt_crash(self):
f_eo = self._f_eo()
self.doc.set_ner_result([f_eo, f_eo])
self.assertEqual(self.doc.entity_occurrences.count(), 1)
    def test_different_eos_with_same_offsets_and_kind_are_not_allowed(self):
        f_eo = self._f_eo()
        f_eo_2 = self._f_eo(key=f_eo.key + ' and more')  # to be sure it's a different key
self.doc.set_ner_result([f_eo, f_eo_2])
self.assertEqual(self.doc.entity_occurrences.count(), 1)
eo = self.doc.entity_occurrences.first()
# the one that is saved is the first one
self.assertEqual(eo.entity.key, f_eo.key)
|
"""
This file contains the high-level functions to read a
VOTable file.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...extern import six
import io
import os
import sys
import textwrap
import warnings
from . import exceptions
from . import tree
from ...utils.xml import iterparser
from ...utils import data
__all__ = ['parse', 'parse_single_table', 'from_table', 'writeto', 'validate',
'reset_vo_warnings']
def parse(source, columns=None, invalid='exception', pedantic=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE, table_number=None,
table_id=None, filename=None, unit_format=None,
datatype_mapping=None, _debug_python_based_parser=False):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : str or readable file-like object
Path or file object containing a VOTABLE_ xml file.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
pedantic : bool, optional
When `True`, raise an error when the file violates the spec,
otherwise issue a warning. Warnings may be controlled using
the standard Python mechanisms. See the `warnings`
module in the Python standard library for more information.
When not provided, uses the configuration setting
``astropy.io.votable.pedantic``, which defaults to False.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.UnitBase` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.2 of
VOTable, and (probably) ``vounit`` in future versions of the
spec).
datatype_mapping : dict of str to str, optional
A mapping of datatype names to valid VOTable datatype names.
For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the
mapping ``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
from . import conf
invalid = invalid.lower()
assert invalid in ('exception', 'mask')
if pedantic is None:
pedantic = conf.pedantic
if datatype_mapping is None:
datatype_mapping = {}
config = {
'columns' : columns,
'invalid' : invalid,
'pedantic' : pedantic,
'chunk_size' : chunk_size,
'table_number' : table_number,
'filename' : filename,
'unit_format' : unit_format,
'datatype_mapping' : datatype_mapping
}
if filename is None and isinstance(source, six.string_types):
config['filename'] = source
with iterparser.get_xml_iterator(
source,
_debug_python_based_parser=_debug_python_based_parser) as iterator:
return tree.VOTableFile(
config=config, pos=(1, 1)).parse(iterator, config)
def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.Table`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.Table` object
"""
if kwargs.get('table_number') is None:
kwargs['table_number'] = 0
votable = parse(source, **kwargs)
return votable.get_first_table()
def writeto(table, file, tabledata_format=None):
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or writable file-like object
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`votable-serialization`.
"""
from ...table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance")
table.to_xml(file, tabledata_format=tabledata_format,
_debug_python_based_parser=True)
def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : str or readable file-like object
        Path to a VOTABLE_ xml file, or a pathlib.Path object
        pointing to one.
output : writable file-like object, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
"""
from ...utils.console import print_code_line, color_print
    return_as_str = False
    if output is None:
        output = io.StringIO()
        return_as_str = True
lines = []
votable = None
reset_vo_warnings()
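    # read the whole input once so it can be rewound for line-context reporting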
with data.get_readable_fileobj(source, encoding='binary') as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, six.string_types):
filename = source
elif hasattr(source, 'name'):
filename = source.name
elif hasattr(source, 'url'):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, pedantic=False, filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [str(x.message) for x in warning_lines if
issubclass(x.category, exceptions.VOWarning)] + lines
content_buffer.seek(0)
output.write("Validation report for {0}\n\n".format(filename))
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w['is_something']:
output.write(w['message'])
output.write('\n\n')
else:
line = xml_lines[w['nline'] - 1]
warning = w['warning']
if w['is_warning']:
color = 'yellow'
else:
color = 'red'
color_print(
'{0:d}: '.format(w['nline']), '',
warning or 'EXC', color,
': ', '',
textwrap.fill(
w['message'],
initial_indent=' ',
subsequent_indent=' ').lstrip(),
file=output)
print_code_line(line, w['nchar'], file=output)
output.write('\n')
else:
output.write('astropy.io.votable found no violations.\n\n')
success = 0
if xmllint and os.path.exists(filename):
from ...utils.xml import validate
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = validate.validate_schema(
filename, version)
if success != 0:
output.write(
'xmllint schema violations:\n\n')
output.write(stderr)
else:
output.write('xmllint passed\n')
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0
def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.Table` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
"""
return tree.VOTableFile.from_table(table, table_id=table_id)
def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : str or readable file-like object
Path or file object containing a VOTABLE_ xml file.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
try:
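        # the first event must be the XML declaration, the next the VOTABLE root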
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, data, pos in iterator:
if tag != 'xml':
return False
break
for start, tag, data, pos in iterator:
if tag != 'VOTABLE':
return False
break
return True
except ValueError:
return False
def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
#-----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
#-----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
if hasattr(module, '__warningregistry__'):
del module.__warningregistry__
|
from __future__ import unicode_literals
from django.db import migrations
def make_recipients(apps):
Recipient=apps.get_model('mailtrigger','Recipient')
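    # shorthand: each rc(...) call below creates one Recipient row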
rc = Recipient.objects.create
rc(slug='iesg',
desc='The IESG',
template='The IESG <iesg@ietf.org>')
rc(slug='iab',
desc='The IAB',
template='The IAB <iab@iab.org>')
rc(slug='ietf_announce',
desc='The IETF Announce list',
template='IETF-Announce <ietf-announce@ietf.org>')
rc(slug='rfc_editor',
desc='The RFC Editor',
template='<rfc-editor@rfc-editor.org>')
rc(slug='iesg_secretary',
desc='The Secretariat',
template='<iesg-secretary@ietf.org>')
rc(slug='ietf_secretariat',
desc='The Secretariat',
template='<ietf-secretariat-reply@ietf.org>')
rc(slug='doc_authors',
desc="The document's authors",
template='{% if doc.type_id == "draft" %}<{{doc.name}}@ietf.org>{% endif %}')
rc(slug='doc_notify',
desc="The addresses in the document's notify field",
template='{{doc.notify}}')
rc(slug='doc_group_chairs',
desc="The document's group chairs (if the document is assigned to a working or research group)",
template=None)
rc(slug='doc_group_delegates',
desc="The document's group delegates (if the document is assigned to a working or research group)",
template=None)
rc(slug='doc_affecteddoc_authors',
desc="The authors of the subject documents of a conflict-review or status-change",
template=None)
rc(slug='doc_affecteddoc_group_chairs',
desc="The chairs of groups of the subject documents of a conflict-review or status-change",
template=None)
rc(slug='doc_affecteddoc_notify',
desc="The notify field of the subject documents of a conflict-review or status-change",
template=None)
rc(slug='doc_shepherd',
desc="The document's shepherd",
template='{% if doc.shepherd %}<{{doc.shepherd.address}}>{% endif %}' )
rc(slug='doc_ad',
desc="The document's responsible Area Director",
template='{% if doc.ad %}<{{doc.ad.email_address}}>{% endif %}' )
rc(slug='doc_group_mail_list',
desc="The list address of the document's group",
template=None )
rc(slug='doc_stream_manager',
desc="The manager of the document's stream",
template=None )
rc(slug='stream_managers',
desc="The managers of any related streams",
template=None )
rc(slug='conflict_review_stream_manager',
desc="The stream manager of a document being reviewed for IETF stream conflicts",
template=None )
rc(slug='conflict_review_steering_group',
desc="The steering group (e.g. IRSG) of a document being reviewed for IETF stream conflicts",
template = None)
rc(slug='iana_approve',
desc="IANA's draft approval address",
template='IANA <drafts-approval@icann.org>')
rc(slug='iana_last_call',
desc="IANA's draft last call address",
template='IANA <drafts-lastcall@icann.org>')
rc(slug='iana_eval',
desc="IANA's draft evaluation address",
template='IANA <drafts-eval@icann.org>')
rc(slug='iana',
desc="IANA",
template='<iana@iana.org>')
rc(slug='group_mail_list',
desc="The group's mailing list",
template='{% if group.list_email %}<{{ group.list_email }}>{% endif %}')
rc(slug='group_steering_group',
desc="The group's steering group (IESG or IRSG)",
template=None)
rc(slug='group_chairs',
desc="The group's chairs",
template="{% if group and group.acronym %}<{{group.acronym}}-chairs@ietf.org>{% endif %}")
rc(slug='group_responsible_directors',
desc="The group's responsible AD(s) or IRTF chair",
template=None)
rc(slug='doc_group_responsible_directors',
desc="The document's group's responsible AD(s) or IRTF chair",
template=None)
rc(slug='internet_draft_requests',
desc="The internet drafts ticketing system",
template='<internet-drafts@ietf.org>')
rc(slug='submission_submitter',
desc="The person that submitted a draft",
template='{{submission.submitter}}')
rc(slug='submission_authors',
desc="The authors of a submitted draft",
template=None)
rc(slug='submission_group_chairs',
desc="The chairs of a submitted draft belonging to a group",
template=None)
rc(slug='submission_confirmers',
desc="The people who can confirm a draft submission",
template=None)
rc(slug='submission_group_mail_list',
desc="The people who can confirm a draft submission",
template=None)
rc(slug='doc_non_ietf_stream_manager',
desc="The document's stream manager if the document is not in the IETF stream",
template=None)
rc(slug='rfc_editor_if_doc_in_queue',
desc="The RFC Editor if a document is in the RFC Editor queue",
template=None)
rc(slug='doc_discussing_ads',
desc="Any ADs holding an active DISCUSS position on a given document",
template=None)
rc(slug='group_changed_personnel',
desc="Any personnel who were added or deleted when a group's personnel changes",
template='{{ changed_personnel | join:", " }}')
rc(slug='session_requests',
desc="The session request ticketing system",
template='<session-request@ietf.org>')
rc(slug='session_requester',
desc="The person that requested a meeting slot for a given group",
template=None)
rc(slug='logged_in_person',
desc="The person currently logged into the datatracker who initiated a given action",
template='{% if person and person.email_address %}<{{ person.email_address }}>{% endif %}')
rc(slug='ipr_requests',
desc="The ipr disclosure handling system",
template='<ietf-ipr@ietf.org>')
rc(slug='ipr_submitter',
desc="The submitter of an IPR disclosure",
template='{% if ipr.submitter_email %}{{ ipr.submitter_email }}{% endif %}')
rc(slug='ipr_updatedipr_contacts',
desc="The submitter (or ietf participant if the submitter is not available) "
"of all IPR disclosures updated directly by this disclosure, without recursing "
"to what the updated disclosures might have updated.",
template=None)
rc(slug='ipr_updatedipr_holders',
desc="The holders of all IPR disclosures updated by disclosure and disclosures updated by those and so on.",
template=None)
rc(slug='ipr_announce',
desc="The IETF IPR announce list",
template='ipr-announce@ietf.org')
rc(slug='doc_ipr_group_or_ad',
desc="Leadership for a document that has a new IPR disclosure",
template=None)
rc(slug='liaison_to_contacts',
desc="The addresses captured in the To field of the liaison statement form",
template='{{liaison.to_contacts}}')
rc(slug='liaison_cc',
desc="The addresses captured in the Cc field of the liaison statement form",
template='{{liaison.cc_contacts}}')
rc(slug='liaison_technical_contacts',
desc="The addresses captured in the technical contact field of the liaison statement form",
template='{{liaison.technical_contacts}}')
rc(slug='liaison_response_contacts',
desc="The addresses captured in the response contact field of the liaison statement form",
template='{{liaison.response_contacts}}')
rc(slug='liaison_approvers',
desc="The set of people who can approve this liasion statemetns",
template='{{liaison.approver_emails|join:", "}}')
rc(slug='liaison_manager',
desc="The assigned liaison manager for an external group ",
template=None)
rc(slug='nominator',
desc="The person that submitted a nomination to nomcom",
template='{{nominator}}')
rc(slug='nominee',
desc="The person nominated for a position",
template='{{nominee}}')
rc(slug='nomcom_chair',
desc="The chair of a given nomcom",
template='{{nomcom.group.get_chair.email.address}}')
rc(slug='commenter',
desc="The person providing a comment to nomcom",
template='{{commenter}}')
rc(slug='new_work',
desc="The IETF New Work list",
template='<new-work@ietf.org>')
def make_mailtriggers(apps):
Recipient=apps.get_model('mailtrigger','Recipient')
MailTrigger=apps.get_model('mailtrigger','MailTrigger')
def mt_factory(slug,desc,to_slugs,cc_slugs=[]):
# Try to protect ourselves from typos
all_slugs = to_slugs[:]
all_slugs.extend(cc_slugs)
for recipient_slug in all_slugs:
try:
Recipient.objects.get(slug=recipient_slug)
except Recipient.DoesNotExist:
print "****Some rule tried to use",recipient_slug
raise
m = MailTrigger.objects.create(slug=slug, desc=desc)
m.to = Recipient.objects.filter(slug__in=to_slugs)
m.cc = Recipient.objects.filter(slug__in=cc_slugs)
mt_factory(slug='ballot_saved',
desc="Recipients when a new ballot position "
"(with discusses, other blocking positions, "
"or comments) is saved",
to_slugs=['iesg'],
cc_slugs=['doc_notify',
'doc_group_mail_list',
'doc_authors',
'doc_group_chairs',
'doc_shepherd',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
'conflict_review_stream_manager',
]
)
mt_factory(slug='ballot_deferred',
desc="Recipients when a ballot is deferred to "
"or undeferred from a future telechat",
to_slugs=['iesg',
'iesg_secretary',
'doc_group_chairs',
'doc_notify',
'doc_authors',
'doc_shepherd',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
'conflict_review_stream_manager',
],
)
mt_factory(slug='ballot_approved_ietf_stream',
desc="Recipients when an IETF stream document ballot is approved",
to_slugs=['ietf_announce'],
cc_slugs=['iesg',
'doc_notify',
'doc_ad',
'doc_authors',
'doc_shepherd',
'doc_group_mail_list',
'doc_group_chairs',
'rfc_editor',
],
)
mt_factory(slug='ballot_approved_ietf_stream_iana',
desc="Recipients for IANA message when an IETF stream document ballot is approved",
to_slugs=['iana_approve'])
mt_factory(slug='ballot_approved_conflrev',
desc="Recipients when a conflict review ballot is approved",
to_slugs=['conflict_review_stream_manager',
'conflict_review_steering_group',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
'doc_notify',
],
cc_slugs=['iesg',
'ietf_announce',
'iana',
],
)
mt_factory(slug='ballot_approved_charter',
desc="Recipients when a charter is approved",
to_slugs=['ietf_announce',],
cc_slugs=['group_mail_list',
'group_steering_group',
'group_chairs',
'doc_notify',
],
)
mt_factory(slug='ballot_approved_status_change',
desc="Recipients when a status change is approved",
to_slugs=['ietf_announce',],
cc_slugs=['iesg',
'rfc_editor',
'doc_notify',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
],
)
mt_factory(slug='ballot_issued',
desc="Recipients when a ballot is issued",
to_slugs=['iesg',])
mt_factory(slug='ballot_issued_iana',
desc="Recipients for IANA message when a ballot is issued",
to_slugs=['iana_eval',])
mt_factory(slug='last_call_requested',
desc="Recipients when AD requests a last call",
to_slugs=['iesg_secretary',],
cc_slugs=['doc_ad',
'doc_shepherd',
'doc_notify',
],
)
mt_factory(slug='last_call_issued',
desc="Recipients when a last call is issued",
to_slugs=['ietf_announce',],
cc_slugs=['doc_ad',
'doc_shepherd',
'doc_authors',
'doc_notify',
'doc_group_mail_list',
'doc_group_chairs',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
]
)
mt_factory(slug='last_call_issued_iana',
desc="Recipients for IANA message when a last call is issued",
to_slugs=['iana_last_call'])
mt_factory(slug='last_call_expired',
desc="Recipients when a last call has expired",
to_slugs=['doc_ad',
'doc_notify',
'doc_authors',
'doc_shepherd',
],
cc_slugs=['iesg_secretary',],
)
mt_factory(slug='pubreq_iesg',
desc="Recipients when a draft is submitted to the IESG",
to_slugs=['doc_ad',],
cc_slugs=['iesg_secretary',
'doc_notify',
'doc_shepherd',
'doc_group_chairs',
],
)
mt_factory(slug='pubreq_rfced',
desc="Recipients when a non-IETF stream manager requests publication",
to_slugs=['rfc_editor',])
mt_factory(slug='pubreq_rfced_iana',
desc="Recipients for IANA message when a non-IETF stream manager "
"requests publication",
to_slugs=['iana_approve',])
mt_factory(slug='charter_internal_review',
desc="Recipients for message noting that internal review has "
"started on a charter",
to_slugs=['iesg',
'iab',
])
mt_factory(slug='charter_external_review',
desc="Recipients for a charter external review",
to_slugs=['ietf_announce',],
cc_slugs=['group_mail_list',],
)
mt_factory(slug='charter_external_review_new_work',
desc="Recipients for a message to new-work about a charter review",
to_slugs=['new_work',])
mt_factory(slug='conflrev_requested',
desc="Recipients for a stream manager's request for an IETF conflict review",
to_slugs=['iesg_secretary'],
cc_slugs=['iesg',
'doc_notify',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
],
)
mt_factory(slug='conflrev_requested_iana',
desc="Recipients for IANA message when a stream manager requests "
"an IETF conflict review",
to_slugs=['iana_eval',])
mt_factory(slug='doc_stream_changed',
desc="Recipients for notification when a document's stream changes",
to_slugs=['doc_authors',
'stream_managers',
'doc_notify',
])
mt_factory(slug='doc_stream_state_edited',
desc="Recipients when the stream state of a document is manually edited",
to_slugs=['doc_group_chairs',
'doc_group_delegates',
'doc_shepherd',
'doc_authors',
])
mt_factory(slug='group_milestones_edited',
desc="Recipients when any of a group's milestones are edited",
to_slugs=['group_responsible_directors',
'group_chairs',
])
mt_factory(slug='group_approved_milestones_edited',
desc="Recipients when the set of approved milestones for a group are edited",
to_slugs=['group_mail_list',
])
mt_factory(slug='doc_state_edited',
desc="Recipients when a document's state is manually edited",
to_slugs=['doc_notify',
'doc_ad',
'doc_authors',
'doc_shepherd',
'doc_group_chairs',
'doc_affecteddoc_authors',
'doc_group_responsible_directors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
])
mt_factory(slug='doc_iana_state_changed',
desc="Recipients when IANA state information for a document changes ",
to_slugs=['doc_notify',
'doc_ad',
'doc_authors',
'doc_shepherd',
'doc_group_chairs',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
])
mt_factory(slug='doc_telechat_details_changed',
desc="Recipients when a document's telechat date or other "
"telechat specific details are changed",
to_slugs=['iesg',
'iesg_secretary',
'doc_notify',
'doc_authors',
'doc_shepherd',
'doc_group_chairs',
'doc_affecteddoc_authors',
'doc_affecteddoc_group_chairs',
'doc_affecteddoc_notify',
])
mt_factory(slug='doc_pulled_from_rfc_queue',
desc="Recipients when a document is taken out of the RFC's editor queue "
"before publication",
to_slugs=['iana',
'rfc_editor',
],
cc_slugs=['iesg_secretary',
'iesg',
'doc_notify',
'doc_authors',
'doc_shepherd',
'doc_group_chairs',
],
)
mt_factory(slug='doc_replacement_changed',
desc="Recipients when what a document replaces or is replaced by changes",
to_slugs=['doc_authors',
'doc_notify',
'doc_shepherd',
'doc_group_chairs',
'doc_group_responsible_directors',
])
mt_factory(slug='charter_state_edit_admin_needed',
desc="Recipients for message to adminstrators when a charter state edit "
"needs followon administrative action",
to_slugs=['iesg_secretary'])
mt_factory(slug='group_closure_requested',
desc="Recipients for message requesting closure of a group",
to_slugs=['iesg_secretary'])
mt_factory(slug='doc_expires_soon',
desc="Recipients for notification of impending expiration of a document",
to_slugs=['doc_authors'],
cc_slugs=['doc_notify',
'doc_shepherd',
'doc_group_chairs',
'doc_group_responsible_directors',
],
)
mt_factory(slug='doc_expired',
desc="Recipients for notification of a document's expiration",
to_slugs=['doc_authors'],
cc_slugs=['doc_notify',
'doc_shepherd',
'doc_group_chairs',
'doc_group_responsible_directors',
],
)
mt_factory(slug='resurrection_requested',
desc="Recipients of a request to change the state of a draft away from 'Dead'",
to_slugs=['internet_draft_requests',])
mt_factory(slug='resurrection_completed',
desc="Recipients when a draft resurrection request has been completed",
to_slugs=['iesg_secretary',
'doc_ad',
])
mt_factory(slug='sub_manual_post_requested',
desc="Recipients for a manual post request for a draft submission",
to_slugs=['internet_draft_requests',],
cc_slugs=['submission_submitter',
'submission_authors',
'submission_group_chairs',
],
)
mt_factory(slug='sub_chair_approval_requested',
desc="Recipients for a message requesting group chair approval of "
"a draft submission",
to_slugs=['submission_group_chairs',])
mt_factory(slug='sub_confirmation_requested',
desc="Recipients for a message requesting confirmation of a draft submission",
to_slugs=['submission_confirmers',])
mt_factory(slug='sub_management_url_requested',
desc="Recipients for a message with the full URL for managing a draft submission",
to_slugs=['submission_confirmers',])
mt_factory(slug='sub_announced',
desc="Recipients for the announcement of a successfully submitted draft",
to_slugs=['ietf_announce',
],
cc_slugs=['submission_group_mail_list',
],
)
mt_factory(slug='sub_announced_to_authors',
desc="Recipients for the announcement to the authors of a successfully "
"submitted draft",
to_slugs=['submission_authors',
'submission_confirmers',
])
mt_factory(slug='sub_new_version',
desc="Recipients for notification of a new version of an existing document",
to_slugs=['doc_notify',
'doc_ad',
'doc_non_ietf_stream_manager',
'rfc_editor_if_doc_in_queue',
'doc_discussing_ads',
])
mt_factory(slug='group_personnel_change',
desc="Recipients for a message noting changes in a group's personnel",
to_slugs=['iesg_secretary',
'group_responsible_directors',
'group_chairs',
'group_changed_personnel',
])
mt_factory(slug='session_requested',
desc="Recipients for a normal meeting session request",
to_slugs=['session_requests', ],
cc_slugs=['group_mail_list',
'group_chairs',
'group_responsible_directors',
'logged_in_person',
],
)
mt_factory(slug='session_requested_long',
desc="Recipients for a meeting session request for more than 2 sessions",
to_slugs=['group_responsible_directors', ],
cc_slugs=['session_requests',
'group_chairs',
'logged_in_person',
],
)
mt_factory(slug='session_request_cancelled',
desc="Recipients for a message cancelling a session request",
to_slugs=['session_requests', ],
cc_slugs=['group_mail_list',
'group_chairs',
'group_responsible_directors',
'logged_in_person',
],
)
mt_factory(slug='session_request_not_meeting',
desc="Recipients for a message noting a group plans to not meet",
to_slugs=['session_requests', ],
cc_slugs=['group_mail_list',
'group_chairs',
'group_responsible_directors',
'logged_in_person',
],
)
mt_factory(slug='session_scheduled',
desc="Recipients for details when a session has been scheduled",
to_slugs=['session_requester',
'group_chairs',
],
cc_slugs=['group_mail_list',
'group_responsible_directors',
],
)
mt_factory(slug='ipr_disclosure_submitted',
desc="Recipients when an IPR disclosure is submitted",
to_slugs=['ipr_requests', ])
mt_factory(slug='ipr_disclosure_followup',
desc="Recipients when the secretary follows up on an IPR disclosure submission",
to_slugs=['ipr_submitter', ],)
mt_factory(slug='ipr_posting_confirmation',
desc="Recipients for a message confirming that a disclosure has been posted",
to_slugs=['ipr_submitter', ],
cc_slugs=['ipr_updatedipr_contacts',
'ipr_updatedipr_holders',
],
)
mt_factory(slug='ipr_posted_on_doc',
desc="Recipients when an IPR disclosure calls out a given document",
to_slugs=['doc_authors', ],
cc_slugs=['doc_ipr_group_or_ad',
'ipr_announce',
],
)
mt_factory(slug='liaison_statement_posted',
desc="Recipient for a message when a new liaison statement is posted",
to_slugs=['liaison_to_contacts', ],
cc_slugs=['liaison_cc',
'liaison_technical_contacts',
'liaison_response_contacts',
],
)
mt_factory(slug='liaison_approval_requested',
desc="Recipients for a message that a pending liaison statement needs approval",
to_slugs=['liaison_approvers',
])
mt_factory(slug='liaison_deadline_soon',
desc="Recipients for a message about a liaison statement deadline that is "
"approaching.",
to_slugs=['liaison_to_contacts',
],
cc_slugs=['liaison_cc',
'liaison_technical_contacts',
'liaison_response_contacts',
],
)
mt_factory(slug='liaison_manager_update_request',
desc="Recipients for a message requesting an updated list of authorized individuals",
to_slugs=['liaison_manager', ])
mt_factory(slug='nomination_received',
desc="Recipients for a message noting a new nomination has been received",
to_slugs=['nomcom_chair', ])
mt_factory(slug='nomination_receipt_requested',
desc="Recipients for a message confirming a nomination was made",
to_slugs=['nominator', ])
mt_factory(slug='nomcom_comment_receipt_requested',
desc="Recipients for a message confirming a comment was made",
to_slugs=['commenter', ])
mt_factory(slug='nomination_created_person',
desc="Recipients for a message noting that a nomination caused a "
"new Person record to be created in the datatracker",
to_slugs=['ietf_secretariat',
'nomcom_chair',
],
)
mt_factory(slug='nomination_new_nominee',
desc="Recipients the first time a person is nominated for a position, "
"asking them to accept or decline the nomination",
to_slugs=['nominee', ])
mt_factory(slug='nomination_accept_reminder',
desc="Recipeints of message reminding a nominee to accept or decline a nomination",
to_slugs=['nominee', ])
mt_factory(slug='nomcom_questionnaire',
desc="Recipients for the questionairre that nominees should complete",
to_slugs=['nominee', ])
mt_factory(slug='nomcom_questionnaire_reminder',
desc="Recipients for a message reminding a nominee to return a "
"completed questionairre response",
to_slugs=['nominee', ])
mt_factory(slug='doc_replacement_suggested',
desc="Recipients for suggestions that this doc replaces or is replace by "
"some other document",
to_slugs=['doc_group_chairs',
'doc_group_responsible_directors',
'doc_non_ietf_stream_manager',
'iesg_secretary',
])
mt_factory(slug='doc_adopted_by_group',
desc="Recipients for notification that a document has been adopted by a group",
to_slugs=['doc_authors',
'doc_group_chairs',
'doc_group_mail_list',
],
cc_slugs=['doc_ad',
'doc_shepherd',
'doc_notify',
],
)
mt_factory(slug='doc_added_comment',
desc="Recipients for a message when a new comment is manually entered into the document's history",
to_slugs=['doc_authors',
'doc_group_chairs',
'doc_shepherd',
'doc_group_responsible_directors',
'doc_non_ietf_stream_manager',
])
mt_factory(slug='doc_intended_status_changed',
desc="Recipients for a message when a document's intended "
"publication status changes",
to_slugs=['doc_authors',
'doc_group_chairs',
'doc_shepherd',
'doc_group_responsible_directors',
'doc_non_ietf_stream_manager',
])
mt_factory(slug='doc_iesg_processing_started',
desc="Recipients for a message when the IESG begins processing a document ",
to_slugs=['doc_authors',
'doc_ad',
'doc_shepherd',
'doc_group_chairs',
])
def forward(apps, schema_editor):
make_recipients(apps)
make_mailtriggers(apps)
def reverse(apps, schema_editor):
Recipient=apps.get_model('mailtrigger','Recipient')
MailTrigger=apps.get_model('mailtrigger','MailTrigger')
Recipient.objects.all().delete()
MailTrigger.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('mailtrigger', '0001_initial'),
]
operations = [
migrations.RunPython(forward, reverse)
]
|
"""Scrapy settings"""
from os.path import join, dirname
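# Assigning None to an entry disables that built-in extension; this is the
# standard Scrapy convention for the EXTENSIONS setting.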
EXTENSIONS = {
'scrapy.contrib.logstats.LogStats': None,
'scrapy.webservice.WebService': None,
'scrapy.telnet.TelnetConsole': None,
'scrapy.contrib.throttle.AutoThrottle': None
}
LOG_LEVEL = 'DEBUG'
DATA_DIR = join(dirname(dirname(__file__)), 'data')
SPEC_DATA_DIR = join(DATA_DIR, 'projects')
HTTPCACHE_ENABLED = True
HTTPCACHE_DIR = join(DATA_DIR, 'cache')
|
import logging
from django.conf import settings
from django.db import models
from django.utils.translation import gettext
import amo.models
import amo.utils
from addons.models import Addon
from users.models import UserProfile
log = logging.getLogger('z.abuse')
class AbuseReport(amo.models.ModelBase):
# NULL if the reporter is anonymous.
reporter = models.ForeignKey(UserProfile, null=True,
blank=True, related_name='abuse_reported')
ip_address = models.CharField(max_length=255, default='0.0.0.0')
# An abuse report can be for an addon or a user. Only one of these should
# be null.
addon = models.ForeignKey(Addon, null=True, related_name='abuse_reports')
user = models.ForeignKey(UserProfile, null=True,
related_name='abuse_reports')
message = models.TextField()
class Meta:
db_table = 'abuse_reports'
def send(self):
obj = self.addon or self.user
if self.reporter:
user_name = '%s (%s)' % (self.reporter.name, self.reporter.email)
else:
user_name = 'An anonymous coward'
with amo.utils.no_translation():
type_ = (gettext(amo.ADDON_TYPE[self.addon.type])
if self.addon else 'User')
subject = u'[%s] Abuse Report for %s' % (type_, obj.name)
msg = u'%s reported abuse for %s (%s%s).\n\n%s' % (
user_name, obj.name, settings.SITE_URL, obj.get_url_path(),
self.message)
amo.utils.send_mail(subject, msg,
recipient_list=(settings.ABUSE_EMAIL,))
@classmethod
def recent_high_abuse_reports(cls, threshold, period, addon_id=None,
addon_type=None):
"""
Returns AbuseReport objects for the given threshold over the given time
period (in days). Filters by addon_id or addon_type if provided.
E.g. Greater than 5 abuse reports for all webapps in the past 7 days.
"""
abuse_sql = ['''
SELECT `abuse_reports`.*,
COUNT(`abuse_reports`.`addon_id`) AS `num_reports`
FROM `abuse_reports`
INNER JOIN `addons` ON (`abuse_reports`.`addon_id` = `addons`.`id`)
WHERE `abuse_reports`.`created` >= %s ''']
params = [period]
if addon_id:
abuse_sql.append('AND `addons`.`id` = %s ')
params.append(addon_id)
elif addon_type and addon_type in amo.ADDON_TYPES:
abuse_sql.append('AND `addons`.`addontype_id` = %s ')
params.append(addon_type)
abuse_sql.append('GROUP BY addon_id HAVING num_reports > %s')
params.append(threshold)
return list(cls.objects.raw(''.join(abuse_sql), params))
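# A hedged usage sketch (editorial, not part of this module): `period` feeds
# straight into the SQL `created >= %s` comparison above, so a datetime
# cutoff is the natural argument.
def _demo_recent_high_abuse_reports():
    from datetime import datetime, timedelta
    cutoff = datetime.now() - timedelta(days=7)
    # Add-ons that drew more than 5 reports in the last week.
    return AbuseReport.recent_high_abuse_reports(threshold=5, period=cutoff)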
def send_abuse_report(request, obj, message):
report = AbuseReport(ip_address=request.META.get('REMOTE_ADDR'),
message=message)
if request.user.is_authenticated():
report.reporter = request.amo_user
if isinstance(obj, Addon):
report.addon = obj
elif isinstance(obj, UserProfile):
report.user = obj
report.save()
report.send()
|
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.contrib.gis.gdal import OGRGeomType
class GeoIntrospectionError(Exception):
pass
class PostGISIntrospection(DatabaseIntrospection):
# Reverse dictionary for PostGIS geometry types not populated until
# introspection is actually performed.
postgis_types_reverse = {}
def get_postgis_types(self):
"""
Returns a dictionary with keys that are the PostgreSQL object
identification integers for the PostGIS geometry and/or
geography types (if supported).
"""
cursor = self.connection.cursor()
# The OID integers associated with the geometry type may
# be different across versions; hence, this is why we have
# to query the PostgreSQL pg_type table corresponding to the
# PostGIS custom data types.
oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
try:
cursor.execute(oid_sql, ('geometry',))
GEOM_TYPE = cursor.fetchone()[0]
postgis_types = { GEOM_TYPE : 'GeometryField' }
if self.connection.ops.geography:
cursor.execute(oid_sql, ('geography',))
GEOG_TYPE = cursor.fetchone()[0]
# The value for the geography type is actually a tuple
# to pass in the `geography=True` keyword to the field
# definition.
postgis_types[GEOG_TYPE] = ('GeometryField', {'geography' : True})
finally:
cursor.close()
return postgis_types
def get_field_type(self, data_type, description):
if not self.postgis_types_reverse:
# If the PostGIS types reverse dictionary is not populated, do so
# now. In order to prevent unnecessary requests upon connection
            # initialization, the `data_types_reverse` dictionary is not updated
# with the PostGIS custom types until introspection is actually
# performed -- in other words, when this function is called.
self.postgis_types_reverse = self.get_postgis_types()
self.data_types_reverse.update(self.postgis_types_reverse)
return super(PostGISIntrospection, self).get_field_type(data_type, description)
def get_geometry_type(self, table_name, geo_col):
"""
The geometry type OID used by PostGIS does not indicate the particular
type of field that a geometry column is (e.g., whether it's a
PointField or a PolygonField). Thus, this routine queries the PostGIS
        metadata tables to determine the geometry type.
"""
cursor = self.connection.cursor()
try:
try:
                # First, see if this geometry column is listed in `geometry_columns`.
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row: raise GeoIntrospectionError
except GeoIntrospectionError:
if self.connection.ops.geography:
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geography_columns" '
'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
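# A hedged usage sketch (editorial; the table and column names are
# hypothetical): given a live GeoDjango PostGIS connection, the class maps a
# geometry column back to a Django field type plus its non-default kwargs.
def _demo_get_geometry_type(connection):
    introspection = PostGISIntrospection(connection)
    field_type, field_params = introspection.get_geometry_type('city', 'location')
    # e.g. ('PointField', {'srid': 3857}) for a projected point column
    return field_type, field_params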
|
import os
import base64
import shutil
import gettext
import unittest
from test import support
GNU_MO_DATA = b'''\
3hIElQAAAAAGAAAAHAAAAEwAAAALAAAAfAAAAAAAAACoAAAAFQAAAKkAAAAjAAAAvwAAAKEAAADj
AAAABwAAAIUBAAALAAAAjQEAAEUBAACZAQAAFgAAAN8CAAAeAAAA9gIAAKEAAAAVAwAABQAAALcD
AAAJAAAAvQMAAAEAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABQAAAAYAAAACAAAAAFJh
eW1vbmQgTHV4dXJ5IFlhY2gtdABUaGVyZSBpcyAlcyBmaWxlAFRoZXJlIGFyZSAlcyBmaWxlcwBU
aGlzIG1vZHVsZSBwcm92aWRlcyBpbnRlcm5hdGlvbmFsaXphdGlvbiBhbmQgbG9jYWxpemF0aW9u
CnN1cHBvcnQgZm9yIHlvdXIgUHl0aG9uIHByb2dyYW1zIGJ5IHByb3ZpZGluZyBhbiBpbnRlcmZh
Y2UgdG8gdGhlIEdOVQpnZXR0ZXh0IG1lc3NhZ2UgY2F0YWxvZyBsaWJyYXJ5LgBtdWxsdXNrAG51
ZGdlIG51ZGdlAFByb2plY3QtSWQtVmVyc2lvbjogMi4wClBPLVJldmlzaW9uLURhdGU6IDIwMDAt
MDgtMjkgMTI6MTktMDQ6MDAKTGFzdC1UcmFuc2xhdG9yOiBKLiBEYXZpZCBJYsOhw7FleiA8ai1k
YXZpZEBub29zLmZyPgpMYW5ndWFnZS1UZWFtOiBYWCA8cHl0aG9uLWRldkBweXRob24ub3JnPgpN
SU1FLVZlcnNpb246IDEuMApDb250ZW50LVR5cGU6IHRleHQvcGxhaW47IGNoYXJzZXQ9aXNvLTg4
NTktMQpDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiBub25lCkdlbmVyYXRlZC1CeTogcHlnZXR0
ZXh0LnB5IDEuMQpQbHVyYWwtRm9ybXM6IG5wbHVyYWxzPTI7IHBsdXJhbD1uIT0xOwoAVGhyb2F0
d29iYmxlciBNYW5ncm92ZQBIYXkgJXMgZmljaGVybwBIYXkgJXMgZmljaGVyb3MAR3V2ZiB6YnFo
eXIgY2ViaXZxcmYgdmFncmVhbmd2YmFueXZtbmd2YmEgbmFxIHlicG55dm1uZ3ZiYQpmaGNjYmVn
IHNiZSBsYmhlIENsZ3ViYSBjZWJ0ZW56ZiBvbCBjZWJpdnF2YXQgbmEgdmFncmVzbnByIGdiIGd1
ciBUQUgKdHJnZ3JrZyB6cmZmbnRyIHBuZ255YnQgeXZvZW5lbC4AYmFjb24Ad2luayB3aW5rAA==
'''
UMO_DATA = b'''\
3hIElQAAAAACAAAAHAAAACwAAAAFAAAAPAAAAAAAAABQAAAABAAAAFEAAAAPAQAAVgAAAAQAAABm
AQAAAQAAAAIAAAAAAAAAAAAAAAAAAAAAYWLDngBQcm9qZWN0LUlkLVZlcnNpb246IDIuMApQTy1S
ZXZpc2lvbi1EYXRlOiAyMDAzLTA0LTExIDEyOjQyLTA0MDAKTGFzdC1UcmFuc2xhdG9yOiBCYXJy
eSBBLiBXQXJzYXcgPGJhcnJ5QHB5dGhvbi5vcmc+Ckxhbmd1YWdlLVRlYW06IFhYIDxweXRob24t
ZGV2QHB5dGhvbi5vcmc+Ck1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHlwZTogdGV4dC9wbGFp
bjsgY2hhcnNldD11dGYtOApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkdlbmVyYXRl
ZC1CeTogbWFudWFsbHkKAMKkeXoA
'''
MMO_DATA = b'''\
3hIElQAAAAABAAAAHAAAACQAAAADAAAALAAAAAAAAAA4AAAAeAEAADkAAAABAAAAAAAAAAAAAAAA
UHJvamVjdC1JZC1WZXJzaW9uOiBObyBQcm9qZWN0IDAuMApQT1QtQ3JlYXRpb24tRGF0ZTogV2Vk
IERlYyAxMSAwNzo0NDoxNSAyMDAyClBPLVJldmlzaW9uLURhdGU6IDIwMDItMDgtMTQgMDE6MTg6
NTgrMDA6MDAKTGFzdC1UcmFuc2xhdG9yOiBKb2huIERvZSA8amRvZUBleGFtcGxlLmNvbT4KSmFu
ZSBGb29iYXIgPGpmb29iYXJAZXhhbXBsZS5jb20+Ckxhbmd1YWdlLVRlYW06IHh4IDx4eEBleGFt
cGxlLmNvbT4KTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UeXBlOiB0ZXh0L3BsYWluOyBjaGFy
c2V0PWlzby04ODU5LTE1CkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IHF1b3RlZC1wcmludGFi
bGUKR2VuZXJhdGVkLUJ5OiBweWdldHRleHQucHkgMS4zCgA=
'''
LOCALEDIR = os.path.join('xx', 'LC_MESSAGES')
MOFILE = os.path.join(LOCALEDIR, 'gettext.mo')
UMOFILE = os.path.join(LOCALEDIR, 'ugettext.mo')
MMOFILE = os.path.join(LOCALEDIR, 'metadata.mo')
LANG = os.environ.get('LANGUAGE', 'en')
class GettextBaseTest(unittest.TestCase):
def setUp(self):
if not os.path.isdir(LOCALEDIR):
os.makedirs(LOCALEDIR)
        with open(MOFILE, 'wb') as fp:
            fp.write(base64.decodebytes(GNU_MO_DATA))
        with open(UMOFILE, 'wb') as fp:
            fp.write(base64.decodebytes(UMO_DATA))
        with open(MMOFILE, 'wb') as fp:
            fp.write(base64.decodebytes(MMO_DATA))
os.environ['LANGUAGE'] = 'xx'
def tearDown(self):
os.environ['LANGUAGE'] = LANG
shutil.rmtree(os.path.split(LOCALEDIR)[0])
class GettextTestCase1(GettextBaseTest):
def setUp(self):
GettextBaseTest.setUp(self)
self.localedir = os.curdir
self.mofile = MOFILE
gettext.install('gettext', self.localedir)
def test_some_translations(self):
eq = self.assertEqual
# test some translations
eq(_('albatross'), 'albatross')
eq(_('mullusk'), 'bacon')
eq(_(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
eq(_(r'nudge nudge'), 'wink wink')
def test_double_quotes(self):
eq = self.assertEqual
# double quotes
eq(_("albatross"), 'albatross')
eq(_("mullusk"), 'bacon')
eq(_(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
eq(_(r"nudge nudge"), 'wink wink')
def test_triple_single_quotes(self):
eq = self.assertEqual
# triple single quotes
eq(_('''albatross'''), 'albatross')
eq(_('''mullusk'''), 'bacon')
eq(_(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
eq(_(r'''nudge nudge'''), 'wink wink')
def test_triple_double_quotes(self):
eq = self.assertEqual
# triple double quotes
eq(_("""albatross"""), 'albatross')
eq(_("""mullusk"""), 'bacon')
eq(_(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
eq(_(r"""nudge nudge"""), 'wink wink')
def test_multiline_strings(self):
eq = self.assertEqual
# multiline strings
eq(_('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.'''),
'''Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
trggrkg zrffntr pngnybt yvoenel.''')
def test_the_alternative_interface(self):
eq = self.assertEqual
# test the alternative interface
fp = open(self.mofile, 'rb')
t = gettext.GNUTranslations(fp)
fp.close()
# Install the translation object
t.install()
eq(_('nudge nudge'), 'wink wink')
# Try unicode return type
t.install()
eq(_('mullusk'), 'bacon')
# Test installation of other methods
import builtins
t.install(names=["gettext", "lgettext"])
eq(_, t.gettext)
eq(builtins.gettext, t.gettext)
eq(lgettext, t.lgettext)
del builtins.gettext
del builtins.lgettext
class GettextTestCase2(GettextBaseTest):
def setUp(self):
GettextBaseTest.setUp(self)
self.localedir = os.curdir
# Set up the bindings
gettext.bindtextdomain('gettext', self.localedir)
gettext.textdomain('gettext')
# For convenience
self._ = gettext.gettext
def test_bindtextdomain(self):
self.assertEqual(gettext.bindtextdomain('gettext'), self.localedir)
def test_textdomain(self):
self.assertEqual(gettext.textdomain(), 'gettext')
def test_some_translations(self):
eq = self.assertEqual
# test some translations
eq(self._('albatross'), 'albatross')
eq(self._('mullusk'), 'bacon')
eq(self._(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
eq(self._(r'nudge nudge'), 'wink wink')
def test_double_quotes(self):
eq = self.assertEqual
# double quotes
eq(self._("albatross"), 'albatross')
eq(self._("mullusk"), 'bacon')
eq(self._(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
eq(self._(r"nudge nudge"), 'wink wink')
def test_triple_single_quotes(self):
eq = self.assertEqual
# triple single quotes
eq(self._('''albatross'''), 'albatross')
eq(self._('''mullusk'''), 'bacon')
eq(self._(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
eq(self._(r'''nudge nudge'''), 'wink wink')
def test_triple_double_quotes(self):
eq = self.assertEqual
# triple double quotes
eq(self._("""albatross"""), 'albatross')
eq(self._("""mullusk"""), 'bacon')
eq(self._(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
eq(self._(r"""nudge nudge"""), 'wink wink')
def test_multiline_strings(self):
eq = self.assertEqual
# multiline strings
eq(self._('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.'''),
'''Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
trggrkg zrffntr pngnybt yvoenel.''')
class PluralFormsTestCase(GettextBaseTest):
def setUp(self):
GettextBaseTest.setUp(self)
self.mofile = MOFILE
def test_plural_forms1(self):
eq = self.assertEqual
x = gettext.ngettext('There is %s file', 'There are %s files', 1)
eq(x, 'Hay %s fichero')
x = gettext.ngettext('There is %s file', 'There are %s files', 2)
eq(x, 'Hay %s ficheros')
def test_plural_forms2(self):
eq = self.assertEqual
fp = open(self.mofile, 'rb')
t = gettext.GNUTranslations(fp)
fp.close()
x = t.ngettext('There is %s file', 'There are %s files', 1)
eq(x, 'Hay %s fichero')
x = t.ngettext('There is %s file', 'There are %s files', 2)
eq(x, 'Hay %s ficheros')
def test_hu(self):
eq = self.assertEqual
f = gettext.c2py('0')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
def test_de(self):
eq = self.assertEqual
f = gettext.c2py('n != 1')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "10111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
def test_fr(self):
eq = self.assertEqual
f = gettext.c2py('n>1')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "00111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
def test_gd(self):
eq = self.assertEqual
f = gettext.c2py('n==1 ? 0 : n==2 ? 1 : 2')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "20122222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222")
def test_gd2(self):
eq = self.assertEqual
# Tests the combination of parentheses and "?:"
f = gettext.c2py('n==1 ? 0 : (n==2 ? 1 : 2)')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "20122222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222")
def test_lt(self):
eq = self.assertEqual
f = gettext.c2py('n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "20111111112222222222201111111120111111112011111111201111111120111111112011111111201111111120111111112011111111222222222220111111112011111111201111111120111111112011111111201111111120111111112011111111")
def test_ru(self):
eq = self.assertEqual
f = gettext.c2py('n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "20111222222222222222201112222220111222222011122222201112222220111222222011122222201112222220111222222011122222222222222220111222222011122222201112222220111222222011122222201112222220111222222011122222")
def test_pl(self):
eq = self.assertEqual
f = gettext.c2py('n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "20111222222222222222221112222222111222222211122222221112222222111222222211122222221112222222111222222211122222222222222222111222222211122222221112222222111222222211122222221112222222111222222211122222")
def test_sl(self):
eq = self.assertEqual
f = gettext.c2py('n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3')
s = ''.join([ str(f(x)) for x in range(200) ])
eq(s, "30122333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333012233333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333")
def test_security(self):
raises = self.assertRaises
# Test for a dangerous expression
raises(ValueError, gettext.c2py, "os.chmod('/etc/passwd',0777)")
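# A short illustrative sketch (editorial, not part of the original suite):
# gettext.c2py compiles a C-style Plural-Forms expression into a callable
# mapping a count to a plural-form index, as the tests above exercise.
def _demo_c2py():
    f = gettext.c2py('n != 1')  # the common Germanic two-form rule
    assert [f(n) for n in (0, 1, 2)] == [1, 0, 1]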
class UnicodeTranslationsTest(GettextBaseTest):
def setUp(self):
GettextBaseTest.setUp(self)
fp = open(UMOFILE, 'rb')
try:
self.t = gettext.GNUTranslations(fp)
finally:
fp.close()
self._ = self.t.gettext
    def test_unicode_msgid(self):
        self.assertTrue(isinstance(self._(''), str))
def test_unicode_msgstr(self):
eq = self.assertEqual
eq(self._('ab\xde'), '\xa4yz')
class WeirdMetadataTest(GettextBaseTest):
def setUp(self):
GettextBaseTest.setUp(self)
fp = open(MMOFILE, 'rb')
try:
try:
self.t = gettext.GNUTranslations(fp)
except:
self.tearDown()
raise
finally:
fp.close()
def test_weird_metadata(self):
info = self.t.info()
self.assertEqual(len(info), 9)
self.assertEqual(info['last-translator'],
'John Doe <jdoe@example.com>\nJane Foobar <jfoobar@example.com>')
def test_main():
support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
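# For reference: the .po sources below are the uncompiled counterparts of
# GNU_MO_DATA, UMO_DATA and MMO_DATA defined above; they are kept as unused
# module-level strings.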
'''
msgid ""
msgstr ""
"Project-Id-Version: 2.0\n"
"PO-Revision-Date: 2003-04-11 14:32-0400\n"
"Last-Translator: J. David Ibanez <j-david@noos.fr>\n"
"Language-Team: XX <python-dev@python.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-1\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.1\n"
"Plural-Forms: nplurals=2; plural=n!=1;\n"
msgid "nudge nudge"
msgstr "wink wink"
msgid "albatross"
msgstr ""
msgid "Raymond Luxury Yach-t"
msgstr "Throatwobbler Mangrove"
msgid "mullusk"
msgstr "bacon"
msgid ""
"This module provides internationalization and localization\n"
"support for your Python programs by providing an interface to the GNU\n"
"gettext message catalog library."
msgstr ""
"Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba\n"
"fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH\n"
"trggrkg zrffntr pngnybt yvoenel."
msgid "There is %s file"
msgid_plural "There are %s files"
msgstr[0] "Hay %s fichero"
msgstr[1] "Hay %s ficheros"
'''
'''
msgid ""
msgstr ""
"Project-Id-Version: 2.0\n"
"PO-Revision-Date: 2003-04-11 12:42-0400\n"
"Last-Translator: Barry A. WArsaw <barry@python.org>\n"
"Language-Team: XX <python-dev@python.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 7bit\n"
"Generated-By: manually\n"
msgid "ab\xc3\x9e"
msgstr "\xc2\xa4yz"
'''
'''
msgid ""
msgstr ""
"Project-Id-Version: No Project 0.0\n"
"POT-Creation-Date: Wed Dec 11 07:44:15 2002\n"
"PO-Revision-Date: 2002-08-14 01:18:58+00:00\n"
"Last-Translator: John Doe <jdoe@example.com>\n"
"Jane Foobar <jfoobar@example.com>\n"
"Language-Team: xx <xx@example.com>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-15\n"
"Content-Transfer-Encoding: quoted-printable\n"
"Generated-By: pygettext.py 1.3\n"
'''
|
"""warcextract - dump warc record context to standard out"""
import os
import os.path
import sys
from optparse import OptionParser
from contextlib import closing
from .warctools import WarcRecord
parser = OptionParser(usage="%prog [options] warc offset")
parser.add_option("-I", "--input", dest="input_format")
parser.add_option("-L", "--log-level", dest="log_level")
parser.set_defaults(output_directory=None, limit=None, log_level="info")
def main(argv):
(options, args) = parser.parse_args(args=argv[1:])
out = sys.stdout
if len(args) < 1:
# dump the first record on stdin
with closing(WarcRecord.open_archive(file_handle=sys.stdin, gzip=None)) as fh:
            dump_record(fh, '<stdin>')
else:
# dump a record from the filename, with optional offset
filename = args[0]
if len(args) > 1:
offset = int(args[1])
else:
offset = 0
with closing(WarcRecord.open_archive(filename=filename, gzip="auto")) as fh:
fh.seek(offset)
            dump_record(fh, filename)
return 0
def dump_record(fh, name='-'):
    for (offset, record, errors) in fh.read_records(limit=1, offsets=False):
        if record:
            sys.stdout.write(record.content[1])
        elif errors:
            sys.stderr.write("warc errors at %s:%d\n" % (name, offset if offset else 0))
            for e in errors:
                sys.stderr.write('\t%s\n' % (e,))
        break  # only dump the first record
def run():
sys.exit(main(sys.argv))
if __name__ == '__main__':
run()
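# Example invocation (hedged; argument order per the usage string above):
#   warcextract archive.warc.gz 312
# writes the content of the record at byte offset 312 to stdout; with no
# arguments, the first record on stdin is dumped instead.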
|
from __future__ import print_function
DEBUG = 0
import copy, re
from struct import unpack
from .timemachine import *
from .biffh import BaseObject, unpack_unicode, unpack_string, \
upkbits, upkbitsL, fprintf, \
FUN, FDT, FNU, FGE, FTX, XL_CELL_NUMBER, XL_CELL_DATE, \
XL_FORMAT, XL_FORMAT2, \
XLRDError
excel_default_palette_b5 = (
( 0, 0, 0), (255, 255, 255), (255, 0, 0), ( 0, 255, 0),
( 0, 0, 255), (255, 255, 0), (255, 0, 255), ( 0, 255, 255),
(128, 0, 0), ( 0, 128, 0), ( 0, 0, 128), (128, 128, 0),
(128, 0, 128), ( 0, 128, 128), (192, 192, 192), (128, 128, 128),
(153, 153, 255), (153, 51, 102), (255, 255, 204), (204, 255, 255),
(102, 0, 102), (255, 128, 128), ( 0, 102, 204), (204, 204, 255),
( 0, 0, 128), (255, 0, 255), (255, 255, 0), ( 0, 255, 255),
(128, 0, 128), (128, 0, 0), ( 0, 128, 128), ( 0, 0, 255),
( 0, 204, 255), (204, 255, 255), (204, 255, 204), (255, 255, 153),
(153, 204, 255), (255, 153, 204), (204, 153, 255), (227, 227, 227),
( 51, 102, 255), ( 51, 204, 204), (153, 204, 0), (255, 204, 0),
(255, 153, 0), (255, 102, 0), (102, 102, 153), (150, 150, 150),
( 0, 51, 102), ( 51, 153, 102), ( 0, 51, 0), ( 51, 51, 0),
(153, 51, 0), (153, 51, 102), ( 51, 51, 153), ( 51, 51, 51),
)
excel_default_palette_b2 = excel_default_palette_b5[:16]
excel_default_palette_b8 = ( # (red, green, blue)
( 0, 0, 0), (255,255,255), (255, 0, 0), ( 0,255, 0), # 0
( 0, 0,255), (255,255, 0), (255, 0,255), ( 0,255,255), # 4
(128, 0, 0), ( 0,128, 0), ( 0, 0,128), (128,128, 0), # 8
(128, 0,128), ( 0,128,128), (192,192,192), (128,128,128), # 12
(153,153,255), (153, 51,102), (255,255,204), (204,255,255), # 16
(102, 0,102), (255,128,128), ( 0,102,204), (204,204,255), # 20
( 0, 0,128), (255, 0,255), (255,255, 0), ( 0,255,255), # 24
(128, 0,128), (128, 0, 0), ( 0,128,128), ( 0, 0,255), # 28
( 0,204,255), (204,255,255), (204,255,204), (255,255,153), # 32
(153,204,255), (255,153,204), (204,153,255), (255,204,153), # 36
( 51,102,255), ( 51,204,204), (153,204, 0), (255,204, 0), # 40
(255,153, 0), (255,102, 0), (102,102,153), (150,150,150), # 44
( 0, 51,102), ( 51,153,102), ( 0, 51, 0), ( 51, 51, 0), # 48
(153, 51, 0), (153, 51,102), ( 51, 51,153), ( 51, 51, 51), # 52
)
default_palette = {
80: excel_default_palette_b8,
70: excel_default_palette_b5,
50: excel_default_palette_b5,
45: excel_default_palette_b2,
40: excel_default_palette_b2,
30: excel_default_palette_b2,
21: excel_default_palette_b2,
20: excel_default_palette_b2,
}
"""
00H = Normal
01H = RowLevel_lv (see next field)
02H = ColLevel_lv (see next field)
03H = Comma
04H = Currency
05H = Percent
06H = Comma [0] (BIFF4-BIFF8)
07H = Currency [0] (BIFF4-BIFF8)
08H = Hyperlink (BIFF8)
09H = Followed Hyperlink (BIFF8)
"""
built_in_style_names = [
"Normal",
"RowLevel_",
"ColLevel_",
"Comma",
"Currency",
"Percent",
"Comma [0]",
"Currency [0]",
"Hyperlink",
"Followed Hyperlink",
]
def initialise_colour_map(book):
book.colour_map = {}
book.colour_indexes_used = {}
if not book.formatting_info:
return
# Add the 8 invariant colours
for i in xrange(8):
book.colour_map[i] = excel_default_palette_b8[i]
# Add the default palette depending on the version
dpal = default_palette[book.biff_version]
ndpal = len(dpal)
for i in xrange(ndpal):
book.colour_map[i+8] = dpal[i]
# Add the specials -- None means the RGB value is not known
# System window text colour for border lines
book.colour_map[ndpal+8] = None
# System window background colour for pattern background
book.colour_map[ndpal+8+1] = None #
for ci in (
0x51, # System ToolTip text colour (used in note objects)
0x7FFF, # 32767, system window text colour for fonts
):
book.colour_map[ci] = None
def nearest_colour_index(colour_map, rgb, debug=0):
# General purpose function. Uses Euclidean distance.
# So far used only for pre-BIFF8 WINDOW2 record.
# Doesn't have to be fast.
# Doesn't have to be fancy.
best_metric = 3 * 256 * 256
best_colourx = 0
for colourx, cand_rgb in colour_map.items():
if cand_rgb is None:
continue
metric = 0
for v1, v2 in zip(rgb, cand_rgb):
metric += (v1 - v2) * (v1 - v2)
if metric < best_metric:
best_metric = metric
best_colourx = colourx
if metric == 0:
break
if 0 and debug:
print("nearest_colour_index for %r is %r -> %r; best_metric is %d" \
% (rgb, best_colourx, colour_map[best_colourx], best_metric))
return best_colourx
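# A tiny self-check (editorial addition): squared Euclidean distance picks
# the closest entry of a toy colour map; no workbook is needed.
def _demo_nearest_colour_index():
    toy_map = {0: (0, 0, 0), 1: (255, 255, 255), 2: (255, 0, 0)}
    assert nearest_colour_index(toy_map, (250, 10, 10)) == 2  # nearest to red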
class EqNeAttrs(object):
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return self.__dict__ != other.__dict__
class Font(BaseObject, EqNeAttrs):
##
# 1 = Characters are bold. Redundant; see "weight" attribute.
bold = 0
##
# Values: 0 = ANSI Latin, 1 = System default, 2 = Symbol,
# 77 = Apple Roman,
# 128 = ANSI Japanese Shift-JIS,
# 129 = ANSI Korean (Hangul),
# 130 = ANSI Korean (Johab),
# 134 = ANSI Chinese Simplified GBK,
# 136 = ANSI Chinese Traditional BIG5,
# 161 = ANSI Greek,
# 162 = ANSI Turkish,
# 163 = ANSI Vietnamese,
# 177 = ANSI Hebrew,
# 178 = ANSI Arabic,
# 186 = ANSI Baltic,
# 204 = ANSI Cyrillic,
# 222 = ANSI Thai,
# 238 = ANSI Latin II (Central European),
# 255 = OEM Latin I
character_set = 0
##
# An explanation of "colour index" is given in the Formatting
# section at the start of this document.
colour_index = 0
##
# 1 = Superscript, 2 = Subscript.
escapement = 0
##
# 0 = None (unknown or don't care)<br />
# 1 = Roman (variable width, serifed)<br />
# 2 = Swiss (variable width, sans-serifed)<br />
# 3 = Modern (fixed width, serifed or sans-serifed)<br />
# 4 = Script (cursive)<br />
# 5 = Decorative (specialised, for example Old English, Fraktur)
family = 0
##
# The 0-based index used to refer to this Font() instance.
# Note that index 4 is never used; xlrd supplies a dummy place-holder.
font_index = 0
##
# Height of the font (in twips). A twip = 1/20 of a point.
height = 0
##
# 1 = Characters are italic.
italic = 0
##
# The name of the font. Example: u"Arial"
name = UNICODE_LITERAL("")
##
# 1 = Characters are struck out.
struck_out = 0
##
# 0 = None<br />
# 1 = Single; 0x21 (33) = Single accounting<br />
# 2 = Double; 0x22 (34) = Double accounting
underline_type = 0
##
# 1 = Characters are underlined. Redundant; see "underline_type" attribute.
underlined = 0
##
# Font weight (100-1000). Standard values are 400 for normal text
# and 700 for bold text.
weight = 400
##
# 1 = Font is outline style (Macintosh only)
outline = 0
##
# 1 = Font is shadow style (Macintosh only)
shadow = 0
# No methods ...
def handle_efont(book, data): # BIFF2 only
if not book.formatting_info:
return
book.font_list[-1].colour_index = unpack('<H', data)[0]
def handle_font(book, data):
if not book.formatting_info:
return
if not book.encoding:
book.derive_encoding()
blah = DEBUG or book.verbosity >= 2
bv = book.biff_version
k = len(book.font_list)
if k == 4:
f = Font()
f.name = UNICODE_LITERAL('Dummy Font')
f.font_index = k
book.font_list.append(f)
k += 1
f = Font()
f.font_index = k
book.font_list.append(f)
if bv >= 50:
(
f.height, option_flags, f.colour_index, f.weight,
f.escapement_type, f.underline_type, f.family,
f.character_set,
) = unpack('<HHHHHBBB', data[0:13])
f.bold = option_flags & 1
f.italic = (option_flags & 2) >> 1
f.underlined = (option_flags & 4) >> 2
f.struck_out = (option_flags & 8) >> 3
f.outline = (option_flags & 16) >> 4
f.shadow = (option_flags & 32) >> 5
if bv >= 80:
f.name = unpack_unicode(data, 14, lenlen=1)
else:
f.name = unpack_string(data, 14, book.encoding, lenlen=1)
elif bv >= 30:
f.height, option_flags, f.colour_index = unpack('<HHH', data[0:6])
f.bold = option_flags & 1
f.italic = (option_flags & 2) >> 1
f.underlined = (option_flags & 4) >> 2
f.struck_out = (option_flags & 8) >> 3
f.outline = (option_flags & 16) >> 4
f.shadow = (option_flags & 32) >> 5
f.name = unpack_string(data, 6, book.encoding, lenlen=1)
# Now cook up the remaining attributes ...
f.weight = [400, 700][f.bold]
f.escapement_type = 0 # None
f.underline_type = f.underlined # None or Single
f.family = 0 # Unknown / don't care
f.character_set = 1 # System default (0 means "ANSI Latin")
else: # BIFF2
f.height, option_flags = unpack('<HH', data[0:4])
f.colour_index = 0x7FFF # "system window text colour"
f.bold = option_flags & 1
f.italic = (option_flags & 2) >> 1
f.underlined = (option_flags & 4) >> 2
f.struck_out = (option_flags & 8) >> 3
f.outline = 0
f.shadow = 0
f.name = unpack_string(data, 4, book.encoding, lenlen=1)
# Now cook up the remaining attributes ...
f.weight = [400, 700][f.bold]
f.escapement_type = 0 # None
f.underline_type = f.underlined # None or Single
f.family = 0 # Unknown / don't care
f.character_set = 1 # System default (0 means "ANSI Latin")
if blah:
f.dump(
book.logfile,
header="--- handle_font: font[%d] ---" % f.font_index,
footer="-------------------",
)
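# A minimal sketch (editorial, mirroring the decoding above): the FONT
# record's option_flags word packs one boolean per bit.
def _demo_font_option_flags(option_flags=0x0B):  # bold | italic | struck_out
    bold = option_flags & 1
    italic = (option_flags & 2) >> 1
    underlined = (option_flags & 4) >> 2
    struck_out = (option_flags & 8) >> 3
    return (bold, italic, underlined, struck_out)  # -> (1, 1, 0, 1)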
class Format(BaseObject, EqNeAttrs):
##
# The key into Book.format_map
format_key = 0
##
# A classification that has been inferred from the format string.
# Currently, this is used only to distinguish between numbers and dates.
# <br />Values:
# <br />FUN = 0 # unknown
# <br />FDT = 1 # date
# <br />FNU = 2 # number
# <br />FGE = 3 # general
# <br />FTX = 4 # text
type = FUN
##
# The format string
format_str = UNICODE_LITERAL('')
def __init__(self, format_key, ty, format_str):
self.format_key = format_key
self.type = ty
self.format_str = format_str
std_format_strings = {
# "std" == "standard for US English locale"
# #### TODO ... a lot of work to tailor these to the user's locale.
# See e.g. gnumeric-1.x.y/src/formats.c
0x00: "General",
0x01: "0",
0x02: "0.00",
0x03: "#,##0",
0x04: "#,##0.00",
0x05: "$#,##0_);($#,##0)",
0x06: "$#,##0_);[Red]($#,##0)",
0x07: "$#,##0.00_);($#,##0.00)",
0x08: "$#,##0.00_);[Red]($#,##0.00)",
0x09: "0%",
0x0a: "0.00%",
0x0b: "0.00E+00",
0x0c: "# ?/?",
0x0d: "# ??/??",
0x0e: "m/d/yy",
0x0f: "d-mmm-yy",
0x10: "d-mmm",
0x11: "mmm-yy",
0x12: "h:mm AM/PM",
0x13: "h:mm:ss AM/PM",
0x14: "h:mm",
0x15: "h:mm:ss",
0x16: "m/d/yy h:mm",
0x25: "#,##0_);(#,##0)",
0x26: "#,##0_);[Red](#,##0)",
0x27: "#,##0.00_);(#,##0.00)",
0x28: "#,##0.00_);[Red](#,##0.00)",
0x29: "_(* #,##0_);_(* (#,##0);_(* \"-\"_);_(@_)",
0x2a: "_($* #,##0_);_($* (#,##0);_($* \"-\"_);_(@_)",
0x2b: "_(* #,##0.00_);_(* (#,##0.00);_(* \"-\"??_);_(@_)",
0x2c: "_($* #,##0.00_);_($* (#,##0.00);_($* \"-\"??_);_(@_)",
0x2d: "mm:ss",
0x2e: "[h]:mm:ss",
0x2f: "mm:ss.0",
0x30: "##0.0E+0",
0x31: "@",
}
fmt_code_ranges = [ # both-inclusive ranges of "standard" format codes
    # Source: the openoffice.org documentation
# and the OOXML spec Part 4, section 3.8.30
( 0, 0, FGE),
( 1, 13, FNU),
(14, 22, FDT),
(27, 36, FDT), # CJK date formats
(37, 44, FNU),
(45, 47, FDT),
(48, 48, FNU),
(49, 49, FTX),
# Gnumeric assumes (or assumed) that built-in formats finish at 49, not at 163
(50, 58, FDT), # CJK date formats
(59, 62, FNU), # Thai number (currency?) formats
(67, 70, FNU), # Thai number (currency?) formats
(71, 81, FDT), # Thai date formats
]
std_format_code_types = {}
for lo, hi, ty in fmt_code_ranges:
for x in xrange(lo, hi+1):
std_format_code_types[x] = ty
del lo, hi, ty, x
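# A quick self-check (editorial sketch): the ranges above classify built-in
# format keys, e.g. key 0x0E ("m/d/yy") as a date and key 0x02 ("0.00") as
# numeric.
def _demo_std_format_code_types():
    assert std_format_code_types[0x0E] == FDT
    assert std_format_code_types[0x02] == FNU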
date_chars = UNICODE_LITERAL('ymdhs') # year, month/minute, day, hour, second
date_char_dict = {}
for _c in date_chars + date_chars.upper():
date_char_dict[_c] = 5
del _c, date_chars
skip_char_dict = {}
for _c in UNICODE_LITERAL('$-+/(): '):
skip_char_dict[_c] = 1
num_char_dict = {
UNICODE_LITERAL('0'): 5,
UNICODE_LITERAL('#'): 5,
UNICODE_LITERAL('?'): 5,
}
non_date_formats = {
UNICODE_LITERAL('0.00E+00'):1,
UNICODE_LITERAL('##0.0E+0'):1,
UNICODE_LITERAL('General') :1,
UNICODE_LITERAL('GENERAL') :1, # OOo Calc 1.1.4 does this.
UNICODE_LITERAL('general') :1, # pyExcelerator 0.6.3 does this.
UNICODE_LITERAL('@') :1,
}
fmt_bracketed_sub = re.compile(r'\[[^]]*\]').sub
def is_date_format_string(book, fmt):
# Heuristics:
# Ignore "text" and [stuff in square brackets (aarrgghh -- see below)].
# Handle backslashed-escaped chars properly.
# E.g. hh\hmm\mss\s should produce a display like 23h59m59s
# Date formats have one or more of ymdhs (caseless) in them.
# Numeric formats have # and 0.
# N.B. u'General"."' hence get rid of "text" first.
# TODO: Find where formats are interpreted in Gnumeric
# TODO: u'[h]\\ \\h\\o\\u\\r\\s' ([h] means don't care about hours > 23)
state = 0
s = ''
for c in fmt:
if state == 0:
if c == UNICODE_LITERAL('"'):
state = 1
elif c in UNICODE_LITERAL(r"\_*"):
state = 2
elif c in skip_char_dict:
pass
else:
s += c
elif state == 1:
if c == UNICODE_LITERAL('"'):
state = 0
elif state == 2:
# Ignore char after backslash, underscore or asterisk
state = 0
assert 0 <= state <= 2
if book.verbosity >= 4:
print("is_date_format_string: reduced format is %r" % s, file=book.logfile)
s = fmt_bracketed_sub('', s)
if s in non_date_formats:
return False
state = 0
separator = ";"
got_sep = 0
date_count = num_count = 0
for c in s:
if c in date_char_dict:
date_count += date_char_dict[c]
elif c in num_char_dict:
num_count += num_char_dict[c]
elif c == separator:
got_sep = 1
# print num_count, date_count, repr(fmt)
if date_count and not num_count:
return True
if num_count and not date_count:
return False
if date_count:
if book.verbosity:
fprintf(book.logfile,
'WARNING *** is_date_format: ambiguous d=%d n=%d fmt=%r\n',
date_count, num_count, fmt)
elif not got_sep:
if book.verbosity:
fprintf(book.logfile,
"WARNING *** format %r produces constant result\n",
fmt)
return date_count > num_count
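# A minimal self-check (editorial addition): a stub "book" supplying only
# the attributes the heuristic reads is enough to exercise it; at verbosity
# 0 the logfile is never touched for these inputs.
def _demo_is_date_format_string():
    class _StubBook(object):
        verbosity = 0
        logfile = None
    book = _StubBook()
    assert is_date_format_string(book, UNICODE_LITERAL('d-mmm-yy'))
    assert not is_date_format_string(book, UNICODE_LITERAL('#,##0.00'))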
def handle_format(self, data, rectype=XL_FORMAT):
DEBUG = 0
bv = self.biff_version
if rectype == XL_FORMAT2:
bv = min(bv, 30)
if not self.encoding:
self.derive_encoding()
strpos = 2
if bv >= 50:
fmtkey = unpack('<H', data[0:2])[0]
else:
fmtkey = self.actualfmtcount
if bv <= 30:
strpos = 0
self.actualfmtcount += 1
if bv >= 80:
unistrg = unpack_unicode(data, 2)
else:
unistrg = unpack_string(data, strpos, self.encoding, lenlen=1)
blah = DEBUG or self.verbosity >= 3
if blah:
fprintf(self.logfile,
"FORMAT: count=%d fmtkey=0x%04x (%d) s=%r\n",
self.actualfmtcount, fmtkey, fmtkey, unistrg)
is_date_s = self.is_date_format_string(unistrg)
ty = [FGE, FDT][is_date_s]
if not(fmtkey > 163 or bv < 50):
# user_defined if fmtkey > 163
# N.B. Gnumeric incorrectly starts these at 50 instead of 164 :-(
# if earlier than BIFF 5, standard info is useless
std_ty = std_format_code_types.get(fmtkey, FUN)
# print "std ty", std_ty
is_date_c = std_ty == FDT
if self.verbosity and 0 < fmtkey < 50 and (is_date_c ^ is_date_s):
DEBUG = 2
fprintf(self.logfile,
"WARNING *** Conflict between "
"std format key %d and its format string %r\n",
fmtkey, unistrg)
if DEBUG == 2:
fprintf(self.logfile,
"ty: %d; is_date_c: %r; is_date_s: %r; fmt_strg: %r",
ty, is_date_c, is_date_s, unistrg)
fmtobj = Format(fmtkey, ty, unistrg)
if blah:
fmtobj.dump(self.logfile,
header="--- handle_format [%d] ---" % (self.actualfmtcount-1, ))
self.format_map[fmtkey] = fmtobj
self.format_list.append(fmtobj)
def handle_palette(book, data):
if not book.formatting_info:
return
blah = DEBUG or book.verbosity >= 2
n_colours, = unpack('<H', data[:2])
expected_n_colours = (16, 56)[book.biff_version >= 50]
if ((DEBUG or book.verbosity >= 1)
and n_colours != expected_n_colours):
fprintf(book.logfile,
"NOTE *** Expected %d colours in PALETTE record, found %d\n",
expected_n_colours, n_colours)
elif blah:
fprintf(book.logfile,
"PALETTE record with %d colours\n", n_colours)
fmt = '<xx%di' % n_colours # use i to avoid long integers
expected_size = 4 * n_colours + 2
actual_size = len(data)
tolerance = 4
if not expected_size <= actual_size <= expected_size + tolerance:
raise XLRDError('PALETTE record: expected size %d, actual size %d' % (expected_size, actual_size))
colours = unpack(fmt, data[:expected_size])
assert book.palette_record == [] # There should be only 1 PALETTE record
# a colour will be 0xbbggrr
# IOW, red is at the little end
for i in xrange(n_colours):
c = colours[i]
red = c & 0xff
green = (c >> 8) & 0xff
blue = (c >> 16) & 0xff
old_rgb = book.colour_map[8+i]
new_rgb = (red, green, blue)
book.palette_record.append(new_rgb)
book.colour_map[8+i] = new_rgb
if blah:
if new_rgb != old_rgb:
print("%2d: %r -> %r" % (i, old_rgb, new_rgb), file=book.logfile)
def palette_epilogue(book):
# Check colour indexes in fonts etc.
# This must be done here as FONT records
# come *before* the PALETTE record :-(
for font in book.font_list:
if font.font_index == 4: # the missing font record
continue
cx = font.colour_index
if cx == 0x7fff: # system window text colour
continue
if cx in book.colour_map:
book.colour_indexes_used[cx] = 1
elif book.verbosity:
print("Size of colour table:", len(book.colour_map), file=book.logfile)
print("*** Font #%d (%r): colour index 0x%04x is unknown" \
% (font.font_index, font.name, cx), file=book.logfile)
if book.verbosity >= 1:
used = sorted(book.colour_indexes_used.keys())
print("\nColour indexes used:\n%r\n" % used, file=book.logfile)
def handle_style(book, data):
if not book.formatting_info:
return
blah = DEBUG or book.verbosity >= 2
bv = book.biff_version
flag_and_xfx, built_in_id, level = unpack('<HBB', data[:4])
xf_index = flag_and_xfx & 0x0fff
if (data == b"\0\0\0\0"
and "Normal" not in book.style_name_map):
# Erroneous record (doesn't have built-in bit set).
# Example file supplied by Jeff Bell.
built_in = 1
built_in_id = 0
xf_index = 0
name = "Normal"
level = 255
elif flag_and_xfx & 0x8000:
# built-in style
built_in = 1
name = built_in_style_names[built_in_id]
if 1 <= built_in_id <= 2:
name += str(level + 1)
else:
# user-defined style
built_in = 0
built_in_id = 0
level = 0
if bv >= 80:
try:
name = unpack_unicode(data, 2, lenlen=2)
except UnicodeDecodeError:
print("STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d" \
% (built_in, xf_index, built_in_id, level), file=book.logfile)
print("raw bytes:", repr(data[2:]), file=book.logfile)
raise
else:
name = unpack_string(data, 2, book.encoding, lenlen=1)
if blah and not name:
print("WARNING *** A user-defined style has a zero-length name", file=book.logfile)
book.style_name_map[name] = (built_in, xf_index)
if blah:
print("STYLE: built_in=%d xf_index=%d built_in_id=%d level=%d name=%r" \
% (built_in, xf_index, built_in_id, level, name), file=book.logfile)
def check_colour_indexes_in_obj(book, obj, orig_index):
alist = sorted(obj.__dict__.items())
for attr, nobj in alist:
if hasattr(nobj, 'dump'):
check_colour_indexes_in_obj(book, nobj, orig_index)
elif attr.find('colour_index') >= 0:
if nobj in book.colour_map:
book.colour_indexes_used[nobj] = 1
continue
oname = obj.__class__.__name__
print("*** xf #%d : %s.%s = 0x%04x (unknown)" \
% (orig_index, oname, attr, nobj), file=book.logfile)
def fill_in_standard_formats(book):
for x in std_format_code_types.keys():
if x not in book.format_map:
ty = std_format_code_types[x]
# Note: many standard format codes (mostly CJK date formats) have
# format strings that vary by locale; xlrd does not (yet)
# handle those; the type (date or numeric) is recorded but the fmt_str will be None.
fmt_str = std_format_strings.get(x)
fmtobj = Format(x, ty, fmt_str)
book.format_map[x] = fmtobj
def handle_xf(self, data):
### self is a Book instance
# DEBUG = 0
blah = DEBUG or self.verbosity >= 3
bv = self.biff_version
xf = XF()
xf.alignment = XFAlignment()
xf.alignment.indent_level = 0
xf.alignment.shrink_to_fit = 0
xf.alignment.text_direction = 0
xf.border = XFBorder()
xf.border.diag_up = 0
xf.border.diag_down = 0
xf.border.diag_colour_index = 0
xf.border.diag_line_style = 0 # no line
xf.background = XFBackground()
xf.protection = XFProtection()
# fill in the known standard formats
if bv >= 50 and not self.xfcount:
# i.e. do this once before we process the first XF record
fill_in_standard_formats(self)
if bv >= 80:
unpack_fmt = '<HHHBBBBIiH'
(xf.font_index, xf.format_key, pkd_type_par,
pkd_align1, xf.alignment.rotation, pkd_align2,
pkd_used, pkd_brdbkg1, pkd_brdbkg2, pkd_brdbkg3,
) = unpack(unpack_fmt, data[0:20])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
# Following is not in OOo docs, but is mentioned
# in Gnumeric source and also in (deep breath)
# org.apache.poi.hssf.record.ExtendedFormatRecord.java
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align1, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x70, 'vert_align'),
))
upkbits(xf.alignment, pkd_align2, (
(0, 0x0f, 'indent_level'),
(4, 0x10, 'shrink_to_fit'),
(6, 0xC0, 'text_direction'),
))
reg = pkd_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbitsL(xf.border, pkd_brdbkg1, (
(0, 0x0000000f, 'left_line_style'),
(4, 0x000000f0, 'right_line_style'),
(8, 0x00000f00, 'top_line_style'),
(12, 0x0000f000, 'bottom_line_style'),
(16, 0x007f0000, 'left_colour_index'),
(23, 0x3f800000, 'right_colour_index'),
(30, 0x40000000, 'diag_down'),
(31, 0x80000000, 'diag_up'),
))
upkbits(xf.border, pkd_brdbkg2, (
(0, 0x0000007F, 'top_colour_index'),
(7, 0x00003F80, 'bottom_colour_index'),
(14, 0x001FC000, 'diag_colour_index'),
(21, 0x01E00000, 'diag_line_style'),
))
upkbitsL(xf.background, pkd_brdbkg2, (
(26, 0xFC000000, 'fill_pattern'),
))
upkbits(xf.background, pkd_brdbkg3, (
(0, 0x007F, 'pattern_colour_index'),
(7, 0x3F80, 'background_colour_index'),
))
elif bv >= 50:
unpack_fmt = '<HHHBBIi'
(xf.font_index, xf.format_key, pkd_type_par,
pkd_align1, pkd_orient_used,
pkd_brdbkg1, pkd_brdbkg2,
) = unpack(unpack_fmt, data[0:16])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align1, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x70, 'vert_align'),
))
orientation = pkd_orient_used & 0x03
xf.alignment.rotation = [0, 255, 90, 180][orientation]
reg = pkd_orient_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbitsL(xf.background, pkd_brdbkg1, (
( 0, 0x0000007F, 'pattern_colour_index'),
( 7, 0x00003F80, 'background_colour_index'),
(16, 0x003F0000, 'fill_pattern'),
))
upkbitsL(xf.border, pkd_brdbkg1, (
(22, 0x01C00000, 'bottom_line_style'),
(25, 0xFE000000, 'bottom_colour_index'),
))
upkbits(xf.border, pkd_brdbkg2, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x00000038, 'left_line_style'),
( 6, 0x000001C0, 'right_line_style'),
( 9, 0x0000FE00, 'top_colour_index'),
(16, 0x007F0000, 'left_colour_index'),
(23, 0x3F800000, 'right_colour_index'),
))
elif bv >= 40:
unpack_fmt = '<BBHBBHI'
(xf.font_index, xf.format_key, pkd_type_par,
pkd_align_orient, pkd_used,
pkd_bkg_34, pkd_brd_34,
) = unpack(unpack_fmt, data[0:12])
upkbits(xf.protection, pkd_type_par, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_par, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
(4, 0xFFF0, 'parent_style_index'),
))
upkbits(xf.alignment, pkd_align_orient, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
(4, 0x30, 'vert_align'),
))
orientation = (pkd_align_orient & 0xC0) >> 6
xf.alignment.rotation = [0, 255, 90, 180][orientation]
reg = pkd_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbits(xf.background, pkd_bkg_34, (
( 0, 0x003F, 'fill_pattern'),
( 6, 0x07C0, 'pattern_colour_index'),
(11, 0xF800, 'background_colour_index'),
))
upkbitsL(xf.border, pkd_brd_34, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x000000F8, 'top_colour_index'),
( 8, 0x00000700, 'left_line_style'),
(11, 0x0000F800, 'left_colour_index'),
(16, 0x00070000, 'bottom_line_style'),
(19, 0x00F80000, 'bottom_colour_index'),
(24, 0x07000000, 'right_line_style'),
(27, 0xF8000000, 'right_colour_index'),
))
elif bv == 30:
unpack_fmt = '<BBBBHHI'
(xf.font_index, xf.format_key, pkd_type_prot,
pkd_used, pkd_align_par,
pkd_bkg_34, pkd_brd_34,
) = unpack(unpack_fmt, data[0:12])
upkbits(xf.protection, pkd_type_prot, (
(0, 0x01, 'cell_locked'),
(1, 0x02, 'formula_hidden'),
))
upkbits(xf, pkd_type_prot, (
(2, 0x0004, 'is_style'),
(3, 0x0008, 'lotus_123_prefix'), # Meaning is not known.
))
upkbits(xf.alignment, pkd_align_par, (
(0, 0x07, 'hor_align'),
(3, 0x08, 'text_wrapped'),
))
upkbits(xf, pkd_align_par, (
(4, 0xFFF0, 'parent_style_index'),
))
reg = pkd_used >> 2
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, reg & 1)
reg >>= 1
upkbits(xf.background, pkd_bkg_34, (
( 0, 0x003F, 'fill_pattern'),
( 6, 0x07C0, 'pattern_colour_index'),
(11, 0xF800, 'background_colour_index'),
))
upkbitsL(xf.border, pkd_brd_34, (
( 0, 0x00000007, 'top_line_style'),
( 3, 0x000000F8, 'top_colour_index'),
( 8, 0x00000700, 'left_line_style'),
(11, 0x0000F800, 'left_colour_index'),
(16, 0x00070000, 'bottom_line_style'),
(19, 0x00F80000, 'bottom_colour_index'),
(24, 0x07000000, 'right_line_style'),
(27, 0xF8000000, 'right_colour_index'),
))
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
elif bv == 21:
#### Warning: incomplete treatment; formatting_info not fully supported.
#### Probably need to offset incoming BIFF2 XF[n] to BIFF8-like XF[n+16],
#### and create XF[0:16] like the standard ones in BIFF8
#### *AND* add 16 to all XF references in cell records :-(
(xf.font_index, format_etc, halign_etc) = unpack('<BxBB', data)
xf.format_key = format_etc & 0x3F
upkbits(xf.protection, format_etc, (
(6, 0x40, 'cell_locked'),
(7, 0x80, 'formula_hidden'),
))
upkbits(xf.alignment, halign_etc, (
(0, 0x07, 'hor_align'),
))
for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
if halign_etc & mask:
colour_index, line_style = 8, 1 # black, thin
else:
colour_index, line_style = 0, 0 # none, none
setattr(xf.border, side + '_colour_index', colour_index)
setattr(xf.border, side + '_line_style', line_style)
bg = xf.background
if halign_etc & 0x80:
bg.fill_pattern = 17
else:
bg.fill_pattern = 0
bg.background_colour_index = 9 # white
bg.pattern_colour_index = 8 # black
xf.parent_style_index = 0 # ???????????
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
for attr_stem in \
"format font alignment border background protection".split():
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, 1)
else:
raise XLRDError('programmer stuff-up: bv=%d' % bv)
xf.xf_index = len(self.xf_list)
self.xf_list.append(xf)
self.xfcount += 1
if blah:
xf.dump(
self.logfile,
header="--- handle_xf: xf[%d] ---" % xf.xf_index,
footer=" ",
)
# Now for some assertions ...
if self.formatting_info:
if self.verbosity and xf.is_style and xf.parent_style_index != 0x0FFF:
msg = "WARNING *** XF[%d] is a style XF but parent_style_index is 0x%04x, not 0x0fff\n"
fprintf(self.logfile, msg, xf.xf_index, xf.parent_style_index)
check_colour_indexes_in_obj(self, xf, xf.xf_index)
if xf.format_key not in self.format_map:
msg = "WARNING *** XF[%d] unknown (raw) format key (%d, 0x%04x)\n"
if self.verbosity:
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
xf.format_key = 0
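# A minimal sketch of the upkbits contract used throughout handle_xf
# (editorial; inferred from its use above): each (shift, mask, name) triple
# stores (packed & mask) >> shift on the target object.
def _demo_upkbits():
    class _Target(object):
        pass
    t = _Target()
    upkbits(t, 0x0006, ((1, 0x0006, 'two_bits'),))
    assert t.two_bits == 3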
def xf_epilogue(self):
# self is a Book instance.
self._xf_epilogue_done = 1
num_xfs = len(self.xf_list)
blah = DEBUG or self.verbosity >= 3
blah1 = DEBUG or self.verbosity >= 1
if blah:
fprintf(self.logfile, "xf_epilogue called ...\n")
def check_same(book_arg, xf_arg, parent_arg, attr):
# the _arg caper is to avoid a Warning msg from Python 2.1 :-(
if getattr(xf_arg, attr) != getattr(parent_arg, attr):
fprintf(book_arg.logfile,
"NOTE !!! XF[%d] parent[%d] %s different\n",
xf_arg.xf_index, parent_arg.xf_index, attr)
for xfx in xrange(num_xfs):
xf = self.xf_list[xfx]
if xf.format_key not in self.format_map:
msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
xf.format_key = 0
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
fmt = self.format_map[xf.format_key]
cellty = cellty_from_fmtty[fmt.type]
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
# Now for some assertions etc
if not self.formatting_info:
continue
if xf.is_style:
continue
if not(0 <= xf.parent_style_index < num_xfs):
if blah1:
fprintf(self.logfile,
"WARNING *** XF[%d]: is_style=%d but parent_style_index=%d\n",
xf.xf_index, xf.is_style, xf.parent_style_index)
# make it conform
xf.parent_style_index = 0
if self.biff_version >= 30:
if blah1:
if xf.parent_style_index == xf.xf_index:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is also %d\n",
xf.xf_index, xf.parent_style_index)
elif not self.xf_list[xf.parent_style_index].is_style:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is %d; style flag not set\n",
xf.xf_index, xf.parent_style_index)
if blah1 and xf.parent_style_index > xf.xf_index:
fprintf(self.logfile,
"NOTE !!! XF[%d]: parent_style_index is %d; out of order?\n",
xf.xf_index, xf.parent_style_index)
parent = self.xf_list[xf.parent_style_index]
if not xf._alignment_flag and not parent._alignment_flag:
if blah1: check_same(self, xf, parent, 'alignment')
if not xf._background_flag and not parent._background_flag:
if blah1: check_same(self, xf, parent, 'background')
if not xf._border_flag and not parent._border_flag:
if blah1: check_same(self, xf, parent, 'border')
if not xf._protection_flag and not parent._protection_flag:
if blah1: check_same(self, xf, parent, 'protection')
if not xf._format_flag and not parent._format_flag:
if blah1 and xf.format_key != parent.format_key:
fprintf(self.logfile,
"NOTE !!! XF[%d] fmtk=%d, parent[%d] fmtk=%r\n%r / %r\n",
xf.xf_index, xf.format_key, parent.xf_index, parent.format_key,
self.format_map[xf.format_key].format_str,
self.format_map[parent.format_key].format_str)
if not xf._font_flag and not parent._font_flag:
if blah1 and xf.font_index != parent.font_index:
fprintf(self.logfile,
"NOTE !!! XF[%d] fontx=%d, parent[%d] fontx=%r\n",
xf.xf_index, xf.font_index, parent.xf_index, parent.font_index)
def initialise_book(book):
initialise_colour_map(book)
book._xf_epilogue_done = 0
methods = (
handle_font,
handle_efont,
handle_format,
is_date_format_string,
handle_palette,
palette_epilogue,
handle_style,
handle_xf,
xf_epilogue,
)
for method in methods:
setattr(book.__class__, method.__name__, method)
class XFBorder(BaseObject, EqNeAttrs):
##
# The colour index for the cell's top line
top_colour_index = 0
##
# The colour index for the cell's bottom line
bottom_colour_index = 0
##
# The colour index for the cell's left line
left_colour_index = 0
##
# The colour index for the cell's right line
right_colour_index = 0
##
# The colour index for the cell's diagonal lines, if any
diag_colour_index = 0
##
# The line style for the cell's top line
top_line_style = 0
##
# The line style for the cell's bottom line
bottom_line_style = 0
##
# The line style for the cell's left line
left_line_style = 0
##
# The line style for the cell's right line
right_line_style = 0
##
# The line style for the cell's diagonal lines, if any
diag_line_style = 0
##
# 1 = draw a diagonal from top left to bottom right
diag_down = 0
##
# 1 = draw a diagonal from bottom left to top right
diag_up = 0
class XFBackground(BaseObject, EqNeAttrs):
##
# See section 3.11 of the OOo docs.
fill_pattern = 0
##
# See section 3.11 of the OOo docs.
background_colour_index = 0
##
# See section 3.11 of the OOo docs.
pattern_colour_index = 0
class XFAlignment(BaseObject, EqNeAttrs):
##
# Values: section 6.115 (p 214) of OOo docs
hor_align = 0
##
# Values: section 6.115 (p 215) of OOo docs
vert_align = 0
##
# Values: section 6.115 (p 215) of OOo docs.<br />
# Note: file versions BIFF7 and earlier use the documented
# "orientation" attribute; this will be mapped (without loss)
# into "rotation".
rotation = 0
##
# 1 = text is wrapped at right margin
text_wrapped = 0
##
# A number in range(15).
indent_level = 0
##
# 1 = shrink font size to fit text into cell.
shrink_to_fit = 0
##
# 0 = according to context; 1 = left-to-right; 2 = right-to-left
text_direction = 0
class XFProtection(BaseObject, EqNeAttrs):
##
# 1 = Cell is prevented from being changed, moved, resized, or deleted
# (only if the sheet is protected).
cell_locked = 0
##
# 1 = Hide formula so that it doesn't appear in the formula bar when
# the cell is selected (only if the sheet is protected).
formula_hidden = 0
class XF(BaseObject):
##
# 0 = cell XF, 1 = style XF
is_style = 0
##
# cell XF: Index into Book.xf_list
# of this XF's style XF<br />
# style XF: 0xFFF
parent_style_index = 0
##
#
_format_flag = 0
##
#
_font_flag = 0
##
#
_alignment_flag = 0
##
#
_border_flag = 0
##
#
_background_flag = 0
##
#
_protection_flag = 0
##
# Index into Book.xf_list
xf_index = 0
##
# Index into Book.font_list
font_index = 0
##
# Key into Book.format_map
# <p>
# Warning: OOo docs on the XF record call this "Index to FORMAT record".
# It is not an index in the Python sense. It is a key to a map.
# It is true <i>only</i> for Excel 4.0 and earlier files
# that the key into format_map from an XF instance
# is the same as the index into format_list, and <i>only</i>
# if the index is less than 164.
# </p>
format_key = 0
##
# An instance of an XFProtection object.
protection = None
##
# An instance of an XFBackground object.
background = None
##
# An instance of an XFAlignment object.
alignment = None
##
# An instance of an XFBorder object.
border = None
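##
# Usage sketch (not part of the original module): a minimal, hedged example of
# how the objects above are typically consulted once a workbook has been loaded
# with formatting_info enabled. It assumes the Book attributes used throughout
# this file (xf_list, format_map) and reuses this module's fprintf helper.
def _demo_describe_xf(book, xf_index):
    xf = book.xf_list[xf_index]
    fmt = book.format_map[xf.format_key]
    fprintf(book.logfile, "XF[%d] format: %r\n", xf.xf_index, fmt.format_str)
    fprintf(book.logfile, "hor_align=%d vert_align=%d rotation=%d\n",
        xf.alignment.hor_align, xf.alignment.vert_align, xf.alignment.rotation)
    fprintf(book.logfile, "top border: style=%d colour=%d\n",
        xf.border.top_line_style, xf.border.top_colour_index)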
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect, Http404, HttpResponse, JsonResponse
from django.core import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, generics, viewsets
from rest_framework import authentication, permissions, viewsets, filters
from .forms import *
from .models import *
from .serializers import *
import requests, json
from django_filters import rest_framework as filters
import django_filters
from django import forms
from django.views.generic.edit import FormView
from django.views import View, generic
from bs4 import BeautifulSoup
class carFilter(filters.FilterSet):
car_year = filters.CharFilter(name="car_year")
car_make = filters.CharFilter(name="car_make",lookup_expr='icontains')
car_model = filters.CharFilter(name="car_model",lookup_expr='icontains')
fuel = filters.CharFilter(name="fuel")
car_cylinder = filters.CharFilter(name="car_cylinder")
class Meta:
model = Car
fields = ['car_year', 'car_make', 'car_model']
class carsList(generics.ListAPIView):
queryset = Car.objects.all()
serializer_class = carSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = carFilter
def get(self, request,format=None):
cars = Car.objects.all()
        car_filter = carFilter(request.GET, queryset=cars)
        # serialize the filtered queryset (.qs), not the FilterSet itself
        serializer = carSerializer(car_filter.qs, many=True)
return Response(serializer.data)
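# Hedged illustration (hypothetical URL and data): the filter fields declared
# in carFilter above map directly onto query-string parameters, e.g.
#   GET /cars/?car_make=ford&car_model=mustang&car_year=1968
# returns only the matching Car rows, serialized by carSerializer.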
class gasList(generics.ListAPIView):
def get(self, request,format=None):
r = requests.get("https://www.gasbuddy.com/USA")
soup = BeautifulSoup(r.content, "html.parser")
links = soup.find_all("div", "col-sm-6 col-xs-6 siteName")
tests = soup.find_all("div", "col-sm-2 col-xs-3 text-right")
newDict = {}
for i in range(len(links)):
link = links[i]
test = tests[i]
newDict[str(link.contents[0].strip())] = str(test.contents[0].strip())
return JsonResponse(newDict)
# def get_queryset(self):
# project = Car.objects.all()
# print(self)
# if project is None:
# return self.queryset.none()
# return self.queryset \
# .filter(car_make="Mercury",car_model="Capri") \
# .filter(author=self.request.user)
# def post(self, request, format=None):
# serializer = carSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
""" Now the user can read in a file I want to find what make a model which uses the price, class and gender
Author : AstroDave
Date : 18th September, 2012
"""
import csv as csv
import numpy as np
csv_file_object = csv.reader(open('train.csv', 'rb')) #Load in the csv file
header = csv_file_object.next()                 #Skip the first line as it is a header
data = []                                       #Create a variable called 'data'
for row in csv_file_object: #Skip through each row in the csv file
data.append(row[1:]) #adding each row to the data variable
data = np.array(data) #Then convert from a list to an array
fare_ceiling = 40
data[data[0::,8].astype(np.float) >= fare_ceiling, 8] = fare_ceiling-1.0
fare_bracket_size = 10
number_of_price_brackets = fare_ceiling / fare_bracket_size
number_of_classes = 3 #There were 1st, 2nd and 3rd classes on board
survival_table = np.zeros([2,number_of_classes,number_of_price_brackets],float)
for i in xrange(number_of_classes):
for j in xrange(number_of_price_brackets):
women_only_stats = data[ (data[0::,3] == "female") \
& (data[0::,1].astype(np.float) == i+1) \
& (data[0:,8].astype(np.float) >= j*fare_bracket_size) \
& (data[0:,8].astype(np.float) < (j+1)*fare_bracket_size), 0]
men_only_stats = data[ (data[0::,3] != "female") \
& (data[0::,1].astype(np.float) == i+1) \
& (data[0:,8].astype(np.float) >= j*fare_bracket_size) \
& (data[0:,8].astype(np.float) < (j+1)*fare_bracket_size), 0]
#if i == 0 and j == 3:
survival_table[0,i,j] = np.mean(women_only_stats.astype(np.float)) #Women stats
survival_table[1,i,j] = np.mean(men_only_stats.astype(np.float)) #Men stats
survival_table[ survival_table != survival_table ] = 0.
survival_table[ survival_table < 0.5 ] = 0
survival_table[ survival_table >= 0.5 ] = 1
test_file_object = csv.reader(open('test.csv', 'rb'))
open_file_object = csv.writer(open("genderclassmodel.csv", "wb"))
open_file_object.writerow(["PassengerId", "Survived"])
header = test_file_object.next()
for rown in test_file_object:
row = rown[1:]
for j in xrange(number_of_price_brackets):
#If there is no fare then place the price of the ticket
#according to class
try:
row[7] = float(row[7]) #No fare recorded will come up as a string so
#try to make it a float
        except ValueError:                     #If it fails then just bin the fare according to the class
bin_fare = 3-float(row[0])
break #Break from the loop and move to the next row
if row[7] > fare_ceiling: #Otherwise now test to see if it is higher
#than the fare ceiling we set earlier
bin_fare = number_of_price_brackets-1
break #And then break to the next row
if row[7] >= j*fare_bracket_size\
and row[7] < (j+1)*fare_bracket_size:#If passed these tests then loop through
#each bin until you find the right one
#append it to the binned_price
#and move to the next loop
bin_fare = j
break
    #Now I have the bin fare, the class and whether female or male, so we can
    #just cross-reference their details with our survival table
if row[2] == 'female':
open_file_object.writerow([rown[0], "%d" % int(survival_table[0,float(row[0])-1,bin_fare])])
else:
open_file_object.writerow([rown[0], "%d" % int(survival_table[1,float(row[0])-1,bin_fare])])
|
"""Code for sentences tokenization: Old Norse.
Sentence tokenization for Old Norse is available using a regular-expression based tokenizer.
>>> from cltk.sentence.non import OldNorseRegexSentenceTokenizer
>>> from cltk.languages.example_texts import get_example_text
>>> splitter = OldNorseRegexSentenceTokenizer()
>>> sentences = splitter.tokenize(get_example_text("non"))
>>> sentences[:2]
['Gylfi konungr réð þar löndum er nú heitir Svíþjóð.', 'Frá honum er þat sagt at hann gaf einni farandi konu at launum skemmtunar sinnar eitt plógsland í ríki sínu þat er fjórir öxn drægi upp dag ok nótt.']
>>> len(sentences)
7
"""
__author__ = ["Clément Besnier <clem@clementbesnier.fr>"]
from cltk.sentence.sentence import RegexSentenceTokenizer
sent_end_chars = [".", "!", "?"]
class OldNorseRegexSentenceTokenizer(RegexSentenceTokenizer):
"""``RegexSentenceTokenizer`` for Old Norse."""
def __init__(self: object):
super().__init__(language="non", sent_end_chars=sent_end_chars)
|
import sublime
import sublime_plugin
import os
from .gotools_util import Buffers
from .gotools_util import GoBuffers
from .gotools_util import Logger
from .gotools_util import ToolRunner
from .gotools_settings import GoToolsSettings
class GotoolsOracleCommand(sublime_plugin.TextCommand):
def is_enabled(self):
return GoBuffers.is_go_source(self.view)
def run(self, edit, command=None):
if not command:
Logger.log("command is required")
return
filename, row, col, offset, offset_end = Buffers.location_at_cursor(self.view)
pos = filename+":#"+str(offset)
# Build up a package scope contaning all packages the user might have
# configured.
# TODO: put into a utility
package_scope = []
for p in GoToolsSettings.get().build_packages:
package_scope.append(os.path.join(GoToolsSettings.get().project_package, p))
for p in GoToolsSettings.get().test_packages:
package_scope.append(os.path.join(GoToolsSettings.get().project_package, p))
for p in GoToolsSettings.get().tagged_test_packages:
package_scope.append(os.path.join(GoToolsSettings.get().project_package, p))
sublime.active_window().run_command("hide_panel", {"panel": "output.gotools_oracle"})
if command == "callees":
sublime.set_timeout_async(lambda: self.do_plain_oracle("callees", pos, package_scope), 0)
if command == "callers":
sublime.set_timeout_async(lambda: self.do_plain_oracle("callers", pos, package_scope), 0)
if command == "callstack":
sublime.set_timeout_async(lambda: self.do_plain_oracle("callstack", pos, package_scope), 0)
if command == "describe":
sublime.set_timeout_async(lambda: self.do_plain_oracle("describe", pos, package_scope), 0)
if command == "freevars":
pos = filename+":#"+str(offset)+","+"#"+str(offset_end)
sublime.set_timeout_async(lambda: self.do_plain_oracle("freevars", pos, package_scope), 0)
if command == "implements":
sublime.set_timeout_async(lambda: self.do_plain_oracle("implements", pos, package_scope), 0)
if command == "peers":
sublime.set_timeout_async(lambda: self.do_plain_oracle("peers", pos, package_scope), 0)
if command == "referrers":
sublime.set_timeout_async(lambda: self.do_plain_oracle("referrers", pos, package_scope), 0)
  def do_plain_oracle(self, mode, pos, package_scope=[], regex=r"^(.*):(\d+):(\d+):(.*)$"):
Logger.status("running oracle "+mode+"...")
args = ["-pos="+pos, "-format=plain", mode]
if len(package_scope) > 0:
args = args + package_scope
output, err, rc = ToolRunner.run("oracle", args, timeout=60)
Logger.log("oracle "+mode+" output: " + output.rstrip())
if rc != 0:
Logger.status("oracle call failed (" + str(rc) +")")
return
Logger.status("oracle "+mode+" finished")
panel = self.view.window().create_output_panel('gotools_oracle')
panel.set_scratch(True)
panel.settings().set("result_file_regex", regex)
panel.run_command("select_all")
panel.run_command("right_delete")
panel.run_command('append', {'characters': output})
self.view.window().run_command("show_panel", {"panel": "output.gotools_oracle"})
|
import collections

class Solution(object):
def findItinerary(self, tickets):
"""
:type tickets: List[List[str]]
:rtype: List[str]
"""
graph = collections.defaultdict(list)
for p, q in sorted(tickets)[::-1]:
graph[p].append(q)
route = []
def dfs(airport):
while graph[airport]:
dfs(graph[airport].pop())
route.append(airport)
dfs('JFK')
return route[::-1]
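# Note (added): the DFS above is Hierholzer's algorithm for an Eulerian path.
# Airports are appended post-order, so reversing the route yields an itinerary
# that uses every ticket exactly once; sorting the tickets in reverse means the
# lexically smallest destination is popped (and therefore tried) first.
# Hypothetical usage:
#   Solution().findItinerary(
#       [["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]])
#   # -> ['JFK', 'MUC', 'LHR', 'SFO', 'SJC']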
|
""""
This file is part of python-tdbus. Python-tdbus is free software
available under the terms of the MIT license. See the file "LICENSE" that
was provided together with this source file for the licensing terms.
Copyright (c) 2012 the python-tdbus authors. See the file "AUTHORS" for a
complete list.
"""
import fnmatch
import logging
from tdbus import _tdbus, DBusError
def method(path=None, member=None, interface=None):
def _decorate(func):
func.method = True
func.member = member or func.__name__
func.path = path
func.interface = interface
return func
return _decorate
def signal_handler(path=None, member=None, interface=None):
def _decorate(func):
func.signal_handler = True
func.member = member or func.__name__
func.path = path
func.interface = interface
return func
return _decorate
def dbus_object(cls):
_init_handlers(cls)
return cls
def _connection(self):
return self.local.connection
def _message(self):
return self.local.message
def _set_response(self, format, args):
"""Used by method call handlers to set the response arguments."""
self.logger.debug("Returning: (%s, %s)", format, args)
self.local.response = (format, args)
def _init_handlers(cls):
methods = {}
signal_handlers = {}
for name in dir(cls):
handler = getattr(cls, name)
if getattr(handler, 'method', False):
methods[handler.member] = handler
elif getattr(handler, 'signal_handler', False):
signal_handlers[handler.member] = handler
def dispatch(self, connection, message, ignore_path=False):
"""Dispatch a message. Returns True if the message was dispatched."""
if not hasattr(self, 'local'):
self.local = connection.Local()
self.local.connection = connection
self.local.message = message
self.local.response = (None, None)
mtype = message.get_type()
member = message.get_member()
if mtype == _tdbus.DBUS_MESSAGE_TYPE_METHOD_CALL:
if member not in methods:
return False
handler = methods[member]
if handler.interface and handler.interface != message.get_interface():
return False
if (not ignore_path and
handler.path and not fnmatch.fnmatch(message.get_path(), handler.path)):
return False
try:
self.logger.info("calling method for '%s'", member)
handler(self, message)
except Exception as e:
self.logger.error('Uncaught exception in method call: %s', e)
self.logger.exception(e)
connection.send_error(message, 'net.tdbus.UncaughtException.' + e.__class__.__name__,
format="s", args=[str(e)])
else:
fmt, args = self.local.response
connection.send_method_return(message, fmt, args)
elif mtype == _tdbus.DBUS_MESSAGE_TYPE_SIGNAL:
if member not in signal_handlers:
return False
handler = signal_handlers[member]
if handler.interface and handler.interface != message.get_interface():
return False
if handler.path and not fnmatch.fnmatch(message.get_path(), handler.path):
return False
try:
self.logger.info("calling signal handler for '%s'", member)
handler(self, message)
except Exception as e:
self.logger.error('Uncaught exception in signal handler:')
self.logger.exception(e)
else:
return False
cls.signal_handlers = signal_handlers
cls.set_response = _set_response
cls.dispatch = dispatch
cls.connection = property(_connection)
cls.message = property(_message)
cls.logger = logging.getLogger('tdbus')
class DBusHandler(object):
"""Handler for method calls and signals."""
def __init__(self):
_init_handlers(type(self))
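# Usage sketch (not part of the original module): a hedged example of how the
# decorators above combine with DBusHandler. The interface and member names
# here are hypothetical.
class _ExampleHandler(DBusHandler):

    @method(interface='com.example.Demo')
    def Ping(self, message):
        # reply with a single string argument
        self.set_response('s', ['pong'])

    @signal_handler(interface='com.example.Demo')
    def Changed(self, message):
        self.logger.info("got 'Changed' signal")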
|
import numpy as np

def coordinates( fn, latlong=True ):
'''
take a raster file as input and return the centroid coords for each
of the grid cells as a pair of numpy 2d arrays (longitude, latitude)
'''
import rasterio
import numpy as np
from affine import Affine
from pyproj import Proj, transform
# Read raster
with rasterio.open(fn) as r:
T0 = r.affine # upper-left pixel corner affine transform
p1 = Proj(r.crs)
A = r.read( 1 ) # pixel values
# All rows and columns
cols, rows = np.meshgrid( np.arange( A.shape[1] ), np.arange( A.shape[0] ) )
# Get affine transform for pixel centres
T1 = T0 * Affine.translation( 0.5, 0.5 )
# Function to convert pixel row/column index (from 0) to easting/northing at centre
rc2en = lambda r, c: (c, r) * T1
# All eastings and northings (there is probably a faster way to do this)
eastings, northings = np.vectorize(rc2en, otypes=[np.float, np.float])(rows, cols)
if latlong != True:
return eastings, northings
    else:
# Project all longitudes, latitudes to latlong
longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
return longs, lats
def calcRa( lats, jd ):
'''
################################################################################
This function calculates extraterrestrial solar radiation (radiation at the top
of the earth's atmosphere, aka insolation) from day of the year and latitude.
It is calculated separately for each day of the year.
    NB: Latitude is accepted in decimal degrees; it is converted to radians below.
NB: Non-real values are produced for w (sunset hour angle) at times of the year
and at latitudes where the sun never rises or sets. However, it makes sense to
use just the real portion of the value, as it sets w to 0 in the winter and pi
(~3.14) in the summer.
Source: Allen et al. (1998)
>>> Translated to Python from Stephanie McAfee's R script by Michael Lindgren <<<
#################################################################################
Arguments:
lats = {numpy.ndarray} latitudes of raster pixel centroids
jd = {int} julian day (1 - 365)
'''
# convert lats to radians:
lats = (lats*np.pi)/180
# Calculate the earth-sun distance, which is a function solely of Julian day. It is a single value for each day
D = 1 + ( 0.033 * np.cos( 2 * np.pi * jd / 365 ) )
# Calculate declination, a function of Julian day. It is a single value for each day.
DC = 0.409 * np.sin( ( ( 2 * np.pi / 365) * jd ) - 1.39 )
# Calculate the sunset hour angle, a function of latitude and declination. Note that at v. high latitudes, this function can produce non-real values.
w = np.arccos( ( -1 * np.tan( DC ) * np.tan( lats ) ).astype( 'complex64' ) ).real
# Calculate Ra using the above variables
# S is the solar constant and is =0.082 MJ/m2min
ra = (24 * 60 / np.pi) * D * 0.082 *( w * np.sin(lats) * np.sin(DC) + np.cos(lats) * np.cos(DC) * np.sin(w) )
return ra
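# Hedged example (values assumed for illustration): extraterrestrial radiation
# for a single cell at 65 degrees North on the summer solstice (julian day 172):
#   lat_deg = np.array([[65.0]])
#   ra = calcRa(lat_deg, 172)   # roughly 41 MJ/m2/day at this latitude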
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
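# Hedged illustration: rolling_window produces overlapping views without
# copying, e.g. rolling_window(np.array([1, 2, 3, 4]), 2) gives
# [[1, 2], [2, 3], [3, 4]], so a moving mean is rolling_window(a, w).mean(axis=-1).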
if __name__ == '__main__':
from collections import OrderedDict
import rasterio, os
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
output_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/girr_radiation'
template_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
# some setup of the grouping of the Julian Days to Months
month_days_dict = OrderedDict( [('01',31),('02',28),('03',31),('04',30),('05',31),('06',30),('07',31),('08',31),('09',30),('10',31),('11',30),('12',31)] )
julian_month_splitter = np.hstack(np.array([ np.repeat( count+1, i ).tolist() for count, i in enumerate( month_days_dict.values() ) ]))
julian_days = pd.Series( range( 1, 366 ) )
julian_months_grouped = julian_days.groupby( julian_month_splitter )
template_rst = rasterio.open( template_fn )
meta = template_rst.meta
meta.update( compress='lzw' )
lons, lats = coordinates( template_fn, False ) # test change
lats = np.ma.masked_where( template_rst.read( 1 ) < -3.39999995e+34, lats )
for month, days in julian_months_grouped.indices.iteritems():
month = str( month )
if len( month ) == 1:
month = '0' + month
month_mean = np.dstack([ calcRa( lats, day ) for day in days+1 ]).mean( axis=2 )
# [TEST]: it may be necessary to take this array and its coords in latlong and reproject it to 3338
		lons = np.ma.masked_where( template_rst.read( 1 ) < -3.39999995e+34, lons )
pts = [ Point( lalo ) for lalo in zip(lons.ravel().tolist(), lats.ravel().tolist()) ]
mm = month_mean.ravel().tolist()
df = pd.DataFrame( {'Ra':mm, 'geometry':pts} ) # remove masking
gdf = gpd.GeoDataFrame( df )
break
# then rasterize this to the extent of the template_rst
# [END TEST]
output_filename = os.path.join( output_path, 'ra_mean_allen1998_'+month+'netest_akcan.tif' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( month_mean.astype( template_rst.dtypes[ 0 ] ), 1 )
|
import subprocess
import os
import sys
import glob
def merge(folder):
flv_list = sorted(glob.glob('{folder}/*.flv'.format(folder = folder)))
fid = open('%s/filelist.txt' % (folder), 'w')
    for flv_path in flv_list:
        fid.write("file '%s'\n" % flv_path)
fid.close()
cmd = ['ffmpeg', '-f', 'concat', '-i', folder+'/'+'filelist.txt','-codec', 'copy', folder+'/'+'output.mp4']
subprocess.Popen(cmd).wait()
if __name__ == "__main__":
    merge(sys.argv[1])
|
"""
Development Settings Module
"""
from .dev import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
|
""" Set of Tokens to be used when parsing.
@label is a list describing the depth of a paragraph/context. It follows:
[ Part, Subpart/Appendix/Interpretations, Section, p-level-1, p-level-2,
p-level-3, p-level4, p-level5 ]
"""
import attr
import six
def uncertain_label(label_parts):
"""Convert a list of strings/Nones to a '-'-separated string with question
markers to replace the Nones. We use this format to indicate
uncertainty"""
return '-'.join(p or '?' for p in label_parts)
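# Hedged illustration: uncertain_label(['1111', None, '22']) == '1111-?-22'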
def _none_str(value):
"""Shorthand for displaying a variable as a string or the text None"""
if value is None:
return 'None'
else:
return "'{0}'".format(value)
@attr.attrs(frozen=True)
class Token(object):
"""Base class for all tokens. Provides methods for pattern matching and
copying this token"""
def match(self, *types, **fields):
"""Pattern match. self must be one of the types provided (if they
were provided) and all of the fields must match (if fields were
provided). If a successful match, returns self"""
type_match = not types or any(isinstance(self, typ) for typ in types)
has_fields = not fields or all(hasattr(self, f) for f in fields)
fields_match = not has_fields or all(
getattr(self, f) == v for f, v in fields.items())
return type_match and has_fields and fields_match and self
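    # Hedged illustration using classes defined below:
    #   Verb(Verb.PUT, active=True).match(Verb, verb=Verb.PUT) returns the
    #   token itself (truthy), while the same token's .match(Context) is False.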
@attr.attrs(slots=True, frozen=True)
class Verb(Token):
"""Represents what action is taking place to the paragraphs"""
verb = attr.attrib()
active = attr.attrib()
and_prefix = attr.attrib(default=False)
PUT = 'PUT'
POST = 'POST'
MOVE = 'MOVE'
DELETE = 'DELETE'
DESIGNATE = 'DESIGNATE'
RESERVE = 'RESERVE'
KEEP = 'KEEP'
INSERT = 'INSERT'
@attr.attrs(slots=True, frozen=True)
class Context(Token):
"""Represents a bit of context for the paragraphs. This gets compressed
with the paragraph tokens to define the full scope of a paragraph. To
complicate matters, sometimes what looks like a Context is actually the
entity which is being modified (i.e. a paragraph). If we are certain
that this is only context, (e.g. "In Subpart A"), use 'certain'"""
# replace with Nones
label = attr.attrib(convert=lambda label: [p or None for p in label])
certain = attr.attrib(default=False)
@attr.attrs(slots=True, frozen=True)
class Paragraph(Token):
"""Represents an entity which is being modified by the amendment. Label
is a way to locate this paragraph (though see the above note). We might
be modifying a field of a paragraph (e.g. intro text only, or title
only;) if so, set the `field` parameter."""
label = attr.attrib(default=attr.Factory(list))
field = attr.attrib(default=None)
TEXT_FIELD = 'text'
HEADING_FIELD = 'title'
KEYTERM_FIELD = 'heading'
@classmethod
def make(cls, label=None, field=None, part=None, sub=None, section=None,
paragraphs=None, paragraph=None, subpart=None, is_interp=None,
appendix=None):
"""label and field are the only "materialized" fields. Everything
other field becomes part of the label, offering a more legible API.
Particularly useful for writing tests"""
if sub is None and subpart:
if isinstance(subpart, six.string_types):
sub = 'Subpart:{0}'.format(subpart)
else:
sub = 'Subpart'
if sub is None and is_interp:
sub = 'Interpretations'
if sub is None and appendix:
sub = 'Appendix:' + appendix
if paragraph:
paragraphs = [paragraph]
if label is None:
label = [part, sub, section] + (paragraphs or [])
# replace with Nones
label = [p or None for p in label]
# Trim the right side of the list
while label and not label[-1]:
label.pop()
return cls(label, field)
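    # Hedged illustration: Paragraph.make(part='1111', section='22',
    # paragraphs=['a', '1']) builds label ['1111', None, '22', 'a', '1'],
    # which label_text() renders as '1111-?-22-a-1'.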
def label_text(self):
"""Converts self.label into a string"""
label = uncertain_label(self.label)
if self.field:
label += '[{0}]'.format(self.field)
return label
@attr.attrs(slots=True, frozen=True)
class TokenList(Token):
"""Represents a sequence of other tokens, e.g. comma separated of
created via "through" """
tokens = attr.attrib()
def __iter__(self):
return iter(self.tokens)
@attr.attrs(slots=True, frozen=True)
class AndToken(Token):
"""The word 'and' can help us determine if a Context token should be a
Paragraph token. Note that 'and' might also trigger the creation of a
    TokenList, which takes precedence"""
|
"""
***************************************************************************
ParameterTableField.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
__revision__ = '$Format:%H$'
from sextante.parameters.Parameter import Parameter
class ParameterTableField(Parameter):
DATA_TYPE_NUMBER = 0
DATA_TYPE_STRING = 1
DATA_TYPE_ANY = -1
def __init__(self, name="", description="", parent=None, datatype=-1, optional=False):
Parameter.__init__(self, name, description)
self.parent = parent
self.value = None
self.datatype = datatype
        self.optional = optional
def getValueAsCommandLineParameter(self):
return "\"" + str(self.value) + "\""
def getAsScriptCode(self):
return "##" + self.name + "=field " + str(self.parent)
def setValue(self, field):
if field is None:
return self.optional
elif len(field) > 0:
self.value = str(field)
else:
return self.optional
return True
def serialize(self):
return self.__module__.split(".")[-1] + "|" + self.name + "|" + self.description +\
"|" + str(self.parent) + "|" + str(self.datatype) + "|" + str(self.optional)
def deserialize(self, s):
tokens = s.split("|")
return ParameterTableField(tokens[1], tokens[2], tokens[3], int(tokens[4]), tokens[5] == str(True))
def __str__(self):
return self.name + " <" + self.__module__.split(".")[-1] +" from " + self.parent + ">"
|
from dataclasses import dataclass
from typing import (
Mapping,
Optional,
Sequence,
)
from pcs.common.interface.dto import DataTransferObject
from pcs.common.types import (
CibRuleExpressionType,
CibRuleInEffectStatus,
)
@dataclass(frozen=True)
class CibRuleDateCommonDto(DataTransferObject):
id: str # pylint: disable=invalid-name
options: Mapping[str, str]
@dataclass(frozen=True)
class CibRuleExpressionDto(DataTransferObject):
# pylint: disable=too-many-instance-attributes
id: str # pylint: disable=invalid-name
type: CibRuleExpressionType
in_effect: CibRuleInEffectStatus # only valid for type==rule
options: Mapping[str, str]
date_spec: Optional[CibRuleDateCommonDto]
duration: Optional[CibRuleDateCommonDto]
expressions: Sequence["CibRuleExpressionDto"]
as_string: str
|
from xva import Xva
from threading import Thread
import sys
import gtk
class ProgressBarOXC:
widget = None
widget2 = None
def __init__(self, widget, widget2):
self.widget = widget
self.widget2 = widget2
self.widget.show()
def update_amount(self, new_amount = None):
value = "%.2f" % new_amount
if float(value) > 1:
value=1
self.widget.set_fraction(float(value))
def update_text(self, text= None):
self.widget.set_text(text)
def finish(self):
image = gtk.Image()
image.set_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
self.widget2.set_image(image)
self.widget2.set_label("Close")
class oxcWindowTools:
"""
Class to manage OXC Tools
"""
def on_cancelmigratetool_clicked(self, widget, data=None):
"""
Cancel button pressed on migrate tool
"""
self.builder.get_object("migratetool").hide()
def on_acceptmigratetool_clicked(self, widget, data=None):
"""
Accept button pressed on migrate tool
"""
machine = Xva(classProgressBar=ProgressBarOXC(self.builder.get_object("progressmigrate"),
self.builder.get_object("cancelmigratetool")))
if not self.builder.get_object("fileossxenconfig").get_filename() and \
not self.builder.get_object("fileadddisk").get_filename():
return
if self.builder.get_object("fileossxenconfig").get_filename():
print self.builder.get_object("fileossxenconfig").get_filename()
            params = {}
            execfile(self.builder.get_object("fileossxenconfig").get_filename(), params)
if "name" in params: machine.set_name( params['name'] )
if "vpus" in params: machine.set_vcpus( params['vcpus'] )
if "kernel" in params:
if params['kernel'].endswith("hvmloader"):
machine.is_hvm()
else:
print "Kernels that are loaded from the Dom0 aren't supported. Use pygrub"
sys.exit(255)
else:
machine.is_pv()
if "disk" in params and len(params['disk']) != 0:
for disk in params['disk']:
(path, device, mode) = disk.split(",")
path_split = path.split(":")
path_split.reverse()
machine.add_disk(path_split[0])
else:
print "You need at least 1 Disk, Exiting"
sys.exit(254)
if "memory" in params:
try:
memory = int(params['memory'] )
machine.set_memory( memory * 1024 * 1024)
except:
print "Could parse memory, setting to 256M"
machine.set_memory(268435456)
if "apic" in params and params['apic'] == 0:
machine.set_apic(False)
if "acpi" in params and params['acpi'] == 0:
machine.set_acpi(False)
if "nx" in params and params['nx'] == 1:
machine.set_nx(options.nx)
if "pae" in params and params['pae'] == 0:
machine.set_pae(False)
else:
# Set VM name
machine.set_name(self.builder.get_object("txtmigratename").get_text())
# Set VM vcpus
machine.set_vcpus(self.builder.get_object("spinmigratevcpus").get_text())
# Set VM ACPI
machine.set_acpi(self.builder.get_object("checkmigrateacpi").get_active())
            # Set VM APIC
machine.set_apic(self.builder.get_object("checkmigrateapic").get_active())
# Set VM Viridian
machine.set_viridian(self.builder.get_object("checkmigrateviridian").get_active())
# Set VM PAE
machine.set_pae(self.builder.get_object("checkmigratepae").get_active())
# Set VM NX
machine.set_nx(self.builder.get_object("checkmigratenx").get_active())
# Set VM Memory
memory = int(self.builder.get_object("spinmigratemem").get_text())*1024*1024
machine.set_memory(memory)
# Add disk
machine.add_disk(self.builder.get_object("fileadddisk").get_filename())
if self.builder.get_object("radiomigratehvm").get_active():
machine.is_hvm()
else:
machine.is_pv()
sparse = self.builder.get_object("checkmigratesparse").get_active()
# Save
import sys
#sys.stdout = labelStream(self.builder.get_object("lblmigrateprogress"))
if self.builder.get_object("checkmigrateoutputxva").get_active():
# If save to xva file..
filename = self.builder.get_object("txtoutputxva").get_text()
Thread(target=machine.save_as, kwargs={"filename":filename, "sparse":sparse}).start()
else:
# Else export to server..
server = self.xc_servers[self.selected_host].host
username = self.xc_servers[self.selected_host].user
password = self.xc_servers[self.selected_host].password
ssl = self.xc_servers[self.selected_host].ssl
Thread(target=machine.save_as, kwargs={"server":server, "username":username,
"password":password, "ssl":ssl, "sparse":sparse}).start()
widget.set_sensitive(False)
#self.builder.get_object("migratetool").hide()
def on_helpmigratetool_clicked(self, widget, data=None):
"""
Help button pressed on migrate tool
"""
self.builder.get_object("migratetoolhelp").show()
def on_closemigratetoolhelp_clicked(self, widget, data=None):
"""
Closebutton pressed on migrate tool help
"""
self.builder.get_object("migratetoolhelp").hide()
def on_btoutputxva_clicked(self, widget, data=None):
"""
Function called when you press "choose xva file"
"""
# Show file chooser
self.builder.get_object("fileoutputxva").show()
def on_acceptfileoutputxva_clicked(self, widget, data=None):
"""
Function called when you accept output xva file chooser
"""
filename = self.builder.get_object("fileoutputxva").get_filename()
self.builder.get_object("txtoutputxva").set_text(filename)
self.builder.get_object("fileoutputxva").hide()
def on_cancelfileoutputxva_clicked(self, widget, data=None):
"""
Function called when you accept output xva file chooser
"""
self.builder.get_object("fileoutputxva").hide()
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
config = context.config
fileConfig(config.config_file_name)
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
from app import db
target_metadata = db.metadata
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Pixmap import Pixmap
from enigma import ePicLoad
from Components.config import config, getConfigListEntry, ConfigInteger
from Components.ConfigList import ConfigListScreen
from Components.AVSwitch import AVSwitch
import DVDTitle
class TitleProperties(Screen,ConfigListScreen):
skin = """
<screen name="TitleProperties" position="center,center" size="560,445" title="Properties of current title" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="serviceinfo" render="Label" position="10,46" size="350,144" font="Regular;18" />
<widget name="thumbnail" position="370,46" size="180,144" alphatest="on" />
<widget name="config" position="10,206" size="540,228" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, parent, project, title_idx):
Screen.__init__(self, session)
self.parent = parent
self.project = project
self.title_idx = title_idx
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText(_("Edit title"))
self["key_blue"] = StaticText()
self["serviceinfo"] = StaticText()
self["thumbnail"] = Pixmap()
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.paintThumbPixmapCB)
self.properties = project.titles[title_idx].properties
ConfigListScreen.__init__(self, [])
self.properties.crop = DVDTitle.ConfigFixedText("crop")
self.properties.autochapter.addNotifier(self.initConfigList)
self.properties.aspect.addNotifier(self.initConfigList)
for audiotrack in self.properties.audiotracks:
audiotrack.active.addNotifier(self.initConfigList)
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.exit,
"red": self.cancel,
"yellow": self.editTitle,
"cancel": self.cancel,
"ok": self.ok,
}, -2)
self.onShown.append(self.update)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Properties of current title"))
def initConfigList(self, element=None):
try:
self.properties.position = ConfigInteger(default = self.title_idx+1, limits = (1, len(self.project.titles)))
title = self.project.titles[self.title_idx]
self.list = []
self.list.append(getConfigListEntry("DVD " + _("Track"), self.properties.position))
self.list.append(getConfigListEntry("DVD " + _("Title"), self.properties.menutitle))
self.list.append(getConfigListEntry("DVD " + _("Description"), self.properties.menusubtitle))
if config.usage.setup_level.index >= 2: # expert+
for audiotrack in self.properties.audiotracks:
DVB_aud = audiotrack.DVB_lang.getValue() or audiotrack.pid.getValue()
self.list.append(getConfigListEntry(_("Burn audio track (%s)") % DVB_aud, audiotrack.active))
if audiotrack.active.getValue():
self.list.append(getConfigListEntry(_("Audio track (%s) format") % DVB_aud, audiotrack.format))
self.list.append(getConfigListEntry(_("Audio track (%s) language") % DVB_aud, audiotrack.language))
self.list.append(getConfigListEntry("DVD " + _("Aspect ratio"), self.properties.aspect))
if self.properties.aspect.getValue() == "16:9":
self.list.append(getConfigListEntry("DVD " + "widescreen", self.properties.widescreen))
else:
self.list.append(getConfigListEntry("DVD " + "widescreen", self.properties.crop))
if len(title.chaptermarks) == 0:
self.list.append(getConfigListEntry(_("Auto chapter split every ? minutes (0=never)"), self.properties.autochapter))
infotext = "DVB " + _("Title") + ': ' + title.DVBname + "\n" + _("Description") + ': ' + title.DVBdescr + "\n" + _("Channel") + ': ' + title.DVBchannel + '\n' + _("Start time") + title.formatDVDmenuText(": $D.$M.$Y, $T\n", self.title_idx+1)
chaptermarks = title.getChapterMarks(template="$h:$m:$s")
chapters_count = len(chaptermarks)
if chapters_count >= 1:
infotext += str(chapters_count+1) + ' ' + _("chapters") + ': '
infotext += ' / '.join(chaptermarks)
self["serviceinfo"].setText(infotext)
self["config"].setList(self.list)
except AttributeError:
pass
def editTitle(self):
self.parent.editTitle()
def update(self):
print "[onShown]"
self.initConfigList()
self.loadThumb()
def loadThumb(self):
thumbfile = self.project.titles[self.title_idx].inputfile.rsplit('.',1)[0] + ".png"
sc = AVSwitch().getFramebufferScale()
self.picload.setPara((self["thumbnail"].instance.size().width(), self["thumbnail"].instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
self.picload.startDecode(thumbfile)
def paintThumbPixmapCB(self, picInfo=None):
ptr = self.picload.getData()
if ptr is not None:
self["thumbnail"].instance.setPixmap(ptr.__deref__())
def changedConfigList(self):
self.initConfigList()
def exit(self):
self.applySettings()
self.close()
def applySettings(self):
for x in self["config"].list:
x[1].save()
current_pos = self.title_idx+1
new_pos = self.properties.position.getValue()
if new_pos != current_pos:
print "title got repositioned from ", current_pos, "to", new_pos
swaptitle = self.project.titles.pop(current_pos-1)
self.project.titles.insert(new_pos-1, swaptitle)
def ok(self):
#key = self.keydict[self["config"].getCurrent()[1]]
#if key in self.project.filekeys:
#self.session.openWithCallback(self.FileBrowserClosed, FileBrowser, key, self.settings)
pass
def cancel(self):
self.close()
from Tools.ISO639 import LanguageCodes
class LanguageChoices():
def __init__(self):
from Components.Language import language as syslanguage
syslang = syslanguage.getLanguage()[:2]
self.langdict = { }
self.choices = []
for key, val in LanguageCodes.iteritems():
if len(key) == 2:
self.langdict[key] = val[0]
for key, val in self.langdict.iteritems():
if key not in (syslang, 'en'):
self.langdict[key] = val
self.choices.append((key, val))
self.choices.sort()
self.choices.insert(0,("nolang", ("unspecified")))
self.choices.insert(1,(syslang, self.langdict[syslang]))
if syslang != "en":
self.choices.insert(2,("en", self.langdict["en"]))
def getLanguage(self, DVB_lang):
DVB_lang = DVB_lang.lower()
for word in ("stereo", "audio", "description", "2ch", "dolby digital"):
DVB_lang = DVB_lang.replace(word,"").strip()
for key, val in LanguageCodes.iteritems():
if DVB_lang.find(key.lower()) == 0:
if len(key) == 2:
return key
else:
DVB_lang = (LanguageCodes[key])[0]
elif DVB_lang.find(val[0].lower()) > -1:
if len(key) == 2:
return key
else:
DVB_lang = (LanguageCodes[key])[0]
for key, val in self.langdict.iteritems():
if val == DVB_lang:
return key
return "nolang"
languageChoices = LanguageChoices()
|
__author__ = 'Koichi Takahashi <shafi@e-cell.org>'
__license__ = 'GPL'
__copyright__ = 'Copyright The Molecular Sciences Institute 2006-2007'
import unittest
import _greens_functions as mod
import numpy
class GreensFunction3DRadAbsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_instantiation(self):
D = 1e-12
kf = 1e8
sigma = 1e-8
a = 1e-7
r0 = 5e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
self.failIf(gf == None)
def test_draw_time(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = 5e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
self.failIf(t <= 0.0 or t >= numpy.inf)
t = gf.drawTime(0.0)
self.failIf(t < 0.0 or t >= numpy.inf)
t = gf.drawTime(1 - 1e-16)
self.failIf(t <= 0.0 or t >= numpy.inf)
def test_draw_time_a_equal_sigma(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = sigma
r0 = a
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
self.assertEqual(0.0, t)
def test_draw_time_a_near_sigma(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = sigma + sigma * 1e-6
r0 = (a + sigma) * .5
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
self.failIf(t <= 0.0 or t >= numpy.inf)
def test_draw_time_r0_equal_a(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = a
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
self.assertEqual(0.0, t)
def test_draw_time_r0_equal_sigma_kf_zero(self):
D = 1e-12
kf = 0.0 # note this
sigma = 1e-8
a = 1e-7
r0 = sigma
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
self.failIf(t < 0.0 or t >= numpy.inf)
def no_test_draw_time_r0_equal_sigma_kf_large(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 10e-7
r0 = sigma + 1e-12
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
self.failIf(t < 0.0 or t >= numpy.inf)
def test_draw_event_type(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = 5e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
event_type = gf.drawEventType(0.5, t)
self.failIf(event_type != mod.PairEventKind.IV_REACTION and
event_type != mod.PairEventKind.IV_ESCAPE)
event_type = gf.drawEventType(0.0, t)
self.assertEqual(event_type, mod.PairEventKind.IV_REACTION)
event_type = gf.drawEventType(0.999999, t)
self.assertEqual(event_type, mod.PairEventKind.IV_ESCAPE)
def no_test_draw_event_type_smallt(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-6 #sigma + sigma * 0.001
r0 = 1.1e-8 #sigma+(a-sigma)/2
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.999)
event_type = gf.drawEventType(0.5, t)
self.failIf(event_type != mod.PairEventKind.IV_REACTION and
event_type != mod.PairEventKind.IV_ESCAPE)
event_type = gf.drawEventType(0.0, t)
self.assertEqual(event_type, mod.PairEventKind.IV_REACTION)
event_type = gf.drawEventType(0.9999, t)
#self.assertEqual(event_type, mod.PairEventKind.IV_ESCAPE)
'''
def test_draw_time2(self):
D = 1e-12
kf = 1e-18
#kf = 0
sigma = 1e-8
a = 1e-7
r0 = 9e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
print '==============================================='
t, et = gf.drawTime2(0.5, 0.5)
t2 = gf.drawTime(0.5)
print t, et, t2
self.failIf(t <= 0.0 or t >= numpy.inf)
print '==============================================='
t, et = gf.drawTime2(0.0, 0.0)
self.failIf(t < 0.0 or t >= numpy.inf)
print t, et
print '==============================================='
t, et = gf.drawTime2(1 - 1e-8, 1 - 1e-8)
self.failIf(t <= 0.0 or t >= numpy.inf)
print t, et
print '==============================================='
def test_draw_time2_a_equal_sigma(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = sigma
r0 = a
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t, et = gf.drawTime2(0.5, 0.5)
self.assertEqual(0.0, t)
self.assertEqual(et, mod.PairEventKind.IV_ESCAPE)
def test_draw_time2_squeezed(self):
D = 1e-12
kf = 1e-10
sigma = 1e-8
a = sigma + sigma * 1e-6
r0 = (a + sigma) * .5
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t, et = gf.drawTime2(0.5, 0.5)
self.failIf(t <= 0.0 or t >= numpy.inf)
def test_draw_time2_r0_equal_a(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = a
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t, et = gf.drawTime2(0.5, 0.5)
self.assertEqual(0.0, t)
self.assertEqual(et, mod.PairEventKind.IV_ESCAPE)
def test_draw_time2_r0_equal_sigma_kf_zero(self):
D = 1e-12
kf = 0.0 # note this
sigma = 1e-8
a = 1e-7
r0 = sigma
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t, et = gf.drawTime2(0.5, 0.5)
self.failIf(t < 0.0 or t >= numpy.inf)
self.assertEqual(et, mod.PairEventKind.IV_ESCAPE)
# when kf == 0, pleavea == psurvival
t2 = gf.drawTime(0.5)
self.assertAlmostEqual(t, t2)
def test_draw_time2_r0_near_sigma(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = sigma*1.1
print '**************'
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t, et = gf.drawTime2(0.3, 0.3, r0)
t2 = gf.drawTime(0.3, r0)
et2 = gf.drawEventType(0.3, t2)
print '**************'
print 't',t, 't2', t2, 'et', et, 'et2', et2
self.failIf(t < 0.0 or t >= numpy.inf)
self.assertEqual(et, mod.PairEventKind.IV_REACTION)
self.assertAlmostEqual(t, t2)
def no_test_draw_time2_r0_equal_sigma_kf_large(self):
D = 1e-12
kf = 1e-5
sigma = 1e-8
a = 10e-7
r0 = sigma + 1e-12
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t, et = gf.drawTime2(0.5, 0.5, r0)
self.failIf(t < 0.0 or t >= numpy.inf)
'''
def test_drawR(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = 2e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = 1e-3
r = gf.drawR(0.5, t)
self.failIf(r < sigma or r > a)
r1 = gf.drawR(0.0, t)
r2 = gf.drawR(0.999999999999, t)
self.failIf(r1 < sigma or r1 > a)
self.failIf(r2 < sigma or r2 > a)
self.assertAlmostEqual(r1, sigma)
self.assertAlmostEqual(r2, a)
def test_drawR_zerot(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = 2e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = 0.0
r = gf.drawR(0.5, t)
self.assertEqual(r0, r)
def test_drawR_r0_equal_sigma(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = sigma
t = 1e-3
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
r = gf.drawR(0.5, t)
self.failIf(r < sigma or r > a)
def test_drawR_squeezed(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1.01e-8
t = 1e-6
r0 = 1.005e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
r = gf.drawR(0.5, t)
self.failIf(r < sigma or r > a)
# near s
r = 1.0001e-8
r0 = 1.0001e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
r = gf.drawR(0.5, t)
self.failIf(r < sigma or r > a)
# near a
r = 1.0099e-8
r0 = 1.0099e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
r = gf.drawR(0.5, t)
self.failIf(r < sigma or r > a)
def test_draw_theta(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = 5e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = gf.drawTime(0.5)
event_type = gf.drawEventType(0.5, t)
r = gf.drawR(0.5, t)
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
theta = gf.drawTheta(0.0, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
theta = gf.drawTheta(0.999999, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
def test_draw_theta_zerot(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r = 5e-8
r0 = 5e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = 0.0
theta = gf.drawTheta(0.5, r0, t)
self.assertEqual(0.0, theta)
def test_draw_theta_smallt(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r = 2e-8
r0 = 2e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
t = 1e-7 # well this is not *very* small..
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
def test_draw_theta_squeezed(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1.001e-8
t = 1e-8
r = 1.0001e-8
r0 = 1.0001e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
# near s
r = 1.00001e-8
r0 = 1.00001e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
# near a
r = 1.00099e-8
r0 = 1.00099e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
def test_ip_theta_squeezed(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1.001e-8
t = 1e-10
r = 1.00099e-8
r0 = 1.00099e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
ip = gf.ip_theta(1, r, t)
r = 1.0000001e-8
r0 = 1.0000001e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
ip = gf.ip_theta(1, r, t)
def test_draw_theta_r0_equal_sigma(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = sigma
t = 1e-3
r = r0
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
def test_draw_theta_r_equal_a(self):
D = 1e-12
kf = 1e-8
sigma = 1e-8
a = 1e-7
r0 = 9e-8
t = 1e-3
r = a
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
def test_draw_theta_1(self):
r0 = 1.0206416181e-07
t = 4.41358538629e-08
D = 4e-11
sigma = 1e-07
a = 1.05134e-07
kf = 0 # h = 0
r = 1.03421535312e-07
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
theta = gf.drawTheta(0.5, r, t)
self.failIf(theta < 0.0 or theta > numpy.pi)
def test_alpha0(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
a = 1e-7
r0 = 5e-8
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
maxerror = 0.0
for i in range(100):
alpha = gf.alpha0_i(i)
error = abs(gf.f_alpha0(alpha) / alpha)
#print error/alpha, gf.f_alpha0(alpha*1.1)/alpha
maxerror = max(error, maxerror)
self.failIf(abs(maxerror) > 1e-10)
def test_psurvival_is_pleaves_plus_pleavea(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
t = 1e-5
r0 = 5e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
surv = gf.p_survival(t)
pleaves = gf.p_leaves(t)
pleavea = gf.p_leavea(t)
#print 'pll', surv, pleaves, pleavea
self.failIf(surv <= 0.0)
self.failIf(pleavea <= 0.0 or pleaves <= 0.0)
self.assertAlmostEqual(surv, pleaves + pleavea)
def test_dpsurvival_is_leaves_plus_leavea(self):
D = 1e-12
sigma = 1e-8
kf = 1e-13
t = 1e-3
r0 = 2e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
dsurv = gf.dp_survival(t)
leaves = gf.leaves(t) * 4.0 * numpy.pi * sigma * sigma
leavea = gf.leavea(t) * 4.0 * numpy.pi * a * a
#print 'll', leavea, leaves, dsurv
self.assertNotEqual(0.0, dsurv)
self.assertAlmostEqual(dsurv, leaves + leavea)
def test_psurvival_smallt(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
t = 1e-4
r0 = 2e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
for i in range(5):
psurv = gf.p_survival(t)
pleaves = gf.p_leaves(t)
pleavea = gf.p_leavea(t)
self.assertNotEqual(0.0, psurv)
self.assertAlmostEqual(pleaves + pleavea, psurv)
t *= .1
def test_p_int_r(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
t = 1e-3
r0 = 5e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
r = r0
pintr = gf.p_int_r(r, t)
self.failIf(pintr < 0.0 or pintr > 1.0, 'pintr %f' % pintr)
def test_p_int_r_at_a_is_p_survival(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
t = 1e-3
r0 = 5e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
r = r0
pintr = gf.p_int_r(a, t)
psurv = gf.p_survival(t)
self.assertAlmostEqual(pintr, psurv)
def test_p_int_r_at_s_is_zero(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
t = 1e-3
r0 = 5e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
pintr = gf.p_int_r(gf.getSigma(), t)
self.assertEqual(0.0, pintr)
def test_p_int_r_never_decrease(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
# smaller t causes problem
t = 1e-3
r0 = sigma
a = 3e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
psurv = gf.p_survival(t)
pintr_prev = 0.0
resolution = 500
for i in range(resolution):
r = i * (a-sigma) / resolution + sigma
pintr = gf.p_int_r(r, t)
#print r, pintr, psurv
self.failIf(pintr > psurv)
self.failIf(pintr < pintr_prev)
pintr_prev = pintr
def test_ip_theta_is_int_p_theta(self):
import scipy.integrate
D = 1e-12
sigma = 1e-8
kf = 1e-10
t = 1e-2 #FIXME: smaller t should be fine
r0 = 5e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
r = r0
ip = gf.ip_theta(0.0, r, t)
self.assertEqual(0.0, ip)
resolution = 10
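# Compare the analytic cumulative ip_theta against numerical quadrature of p_theta.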
for i in range(1, resolution):
theta = i * numpy.pi / resolution
ip = gf.ip_theta(theta, r, t)
result = scipy.integrate.quad(gf.p_theta, 0.0, theta,
args=(r, t))
p_numeric = result[0]
self.assertAlmostEqual(0.0, (p_numeric-ip)/ip, 5)
def test_ip_theta_pi_is_p_0(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
t = 1e-5
r0 = 5e-8
r = r0
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
ip = gf.ip_theta(numpy.pi, r, t)
p0 = gf.p_0(t, r) * 2
self.assertNotEqual(0.0, ip)
self.assertAlmostEqual(1.0, ip/p0, 5)
def test_p_theta_never_negative(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
# smaller t causes problem
t = 1e-3
r0 = 5e-8
r = r0
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
pint = gf.ip_theta(numpy.pi, r, t)
pmin = 0.0
resolution = 50
for i in range(resolution):
theta = i * numpy.pi / resolution
p = gf.p_theta(theta, r, t) / pint / resolution
pmin = min(pmin, p)
#print 'theta: ', theta, '\tp: ', p
self.failIf(pmin < 0.0, 'Negative p_theta; t= %g, %s'
% (t, gf.dump()))
def test_ip_theta_never_decrease(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
# smaller t causes problem
t = 1e-3
r0 = 5e-8
r = r0
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
pint_prev = 0.0
resolution = 50
for i in range(resolution):
theta = i * numpy.pi / resolution
pint = gf.ip_theta(theta, r, t)
self.failIf(pint < pint_prev)
pint_prev = pint
def test_int_dp_theta_at_a_is_leavea(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
# smaller t causes problem
t = 1e-4
r0 = 9e-8
a = 1e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
leavea = gf.leavea(t) * numpy.pi * a * a * 2
iptheta = gf.idp_theta(numpy.pi, a, t) * numpy.pi * a * a
self.assertAlmostEqual(leavea / iptheta, 1.0, 5) # SBG's accuracy
'''
def test_p_theta_free_is_p_theta_smallt(self):
D = 1e-12
sigma = 1e-8
kf = 1e-8
t = 1e-7
r0 = 5e-7
r = 5e-7
a = 1e-6
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
resolution = 20
for i in range(1, resolution):
theta = i * numpy.pi / resolution
pfree = mod.p_theta_free(theta, r, r0, t, D)
p = gf.p_theta(theta, r, t)* 4 * numpy.pi * r * r
print pfree, p
self.assertAlmostEqual(0.0, (pfree - p)/pfree)
'''
'''
def test_alphan(self):
D = 1e-12
sigma = 1e-8
kf = 1e-18
a = 2e-7
gf = mod.GreensFunction3DRadAbs(D, kf, r0, sigma, a)
maxerror = 0
for n in range(100):
for i in range(1000):
alpha = gf.alpha_i(n, i)
error = abs(gf.f_alpha0(alpha))
maxerror = max(error, maxerror)
self.failIf(abs(maxerror) > 1e-8)
'''
if __name__ == "__main__":
unittest.main()
|
"""
An Object Editor widget.
"""
import sys
from PyQt5 import QtCore
from PyQt5.QtCore import QSettings
from PyQt5.QtGui import QTextCursor
from PyQt5.QtWidgets import (
QDoubleSpinBox, QLabel, QPushButton, QVBoxLayout, QWidget)
import app
import objecteditor
from . import defineoffset
class Widget(QWidget):
def __init__(self, tool):
super(Widget, self).__init__(tool)
self.mainwindow = tool.mainwindow()
self.define = None
import panelmanager
self.svgview = panelmanager.manager(tool.mainwindow()).svgview.widget().view
layout = QVBoxLayout(spacing=1)
self.setLayout(layout)
self.elemLabel = QLabel()
self.XOffsetBox = QDoubleSpinBox()
self.XOffsetBox.setRange(-99,99)
self.XOffsetBox.setSingleStep(0.1)
self.XOffsetLabel = l = QLabel()
l.setBuddy(self.XOffsetBox)
self.YOffsetBox = QDoubleSpinBox()
self.YOffsetBox.setRange(-99,99)
self.YOffsetBox.setSingleStep(0.1)
self.YOffsetLabel = l = QLabel()
l.setBuddy(self.YOffsetBox)
self.insertButton = QPushButton("insert offset in source", self)
self.insertButton.clicked.connect(self.callInsert)
layout.addWidget(self.elemLabel)
layout.addWidget(self.XOffsetLabel)
layout.addWidget(self.XOffsetBox)
layout.addWidget(self.YOffsetLabel)
layout.addWidget(self.YOffsetBox)
layout.addWidget(self.insertButton)
layout.addStretch(1)
app.translateUI(self)
self.loadSettings()
self.connectSlots()
def connectSlots(self):
# On creation we connect to all available signals
self.connectToSvgView()
def connectToSvgView(self):
"""Register with signals emitted by the
SVG viewer for processing graphical editing.
"""
self.svgview.objectStartDragging.connect(self.startDragging)
self.svgview.objectDragging.connect(self.Dragging)
self.svgview.objectDragged.connect(self.Dragged)
self.svgview.cursor.connect(self.setObjectFromCursor)
def disconnectFromSvgView(self):
"""Do not process graphical edits when the
Object Editor isn't visible."""
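# Note: calling disconnect() with no arguments removes *all* slots
# connected to these signals, not just the ones added above.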
self.svgview.objectStartDragging.disconnect()
self.svgview.objectDragging.disconnect()
self.svgview.objectDragged.disconnect()
self.svgview.cursor.disconnect()
def translateUI(self):
self.XOffsetLabel.setText(_("X Offset"))
self.XOffsetBox.setToolTip(_("Display the X Offset"))
self.YOffsetLabel.setText(_("Y Offset"))
self.YOffsetBox.setToolTip(_("Display the Y Offset"))
self.insertButton.setEnabled(False)
def hideEvent(self, event):
"""Disconnect from all graphical editing signals
when the panel isn't visible
"""
self.disconnectFromSvgView()
event.accept()
def showEvent(self, event):
"""Connect to the graphical editing signals
when the panel becomes visible
"""
self.connectToSvgView()
event.accept()
def callInsert(self):
""" Insert the override command in the source."""
if self.define:
self.define.insertOverride(self.XOffsetBox.value(), self.YOffsetBox.value())
@QtCore.pyqtSlot(float, float)
def setOffset(self, x, y):
"""Display the updated offset."""
self.XOffsetBox.setValue(x)
self.YOffsetBox.setValue(y)
@QtCore.pyqtSlot(float, float)
def startDragging(self, x, y):
"""Set the value of the offset externally."""
# temporary debug output
#print("Start dragging with offset", x, y)
self.setOffset(x, y)
@QtCore.pyqtSlot(float, float)
def Dragging(self, x, y):
"""Set the value of the offset externally."""
# temporary debug output
# print("Dragging with offset", x, y)
self.setOffset(x, y)
@QtCore.pyqtSlot(float, float)
def Dragged(self, x, y):
"""Set the value of the offset externally."""
# temporary debug output
#print("Dragged to", x, y)
self.setOffset(x, y)
@QtCore.pyqtSlot(QTextCursor)
def setObjectFromCursor(self, cursor):
"""Set selected element."""
self.define = defineoffset.DefineOffset(self.mainwindow.currentDocument())
self.elemLabel.setText(self.define.getCurrentLilyObject(cursor))
self.insertButton.setEnabled(True)
def loadSettings(self):
"""Called on construction. Load settings and set checkboxes state."""
s = QSettings()
s.beginGroup('object_editor')
# No settings are stored yet; the group is opened so future
# options can be read here.
s.endGroup()
def saveSettings(self):
"""Called on close. Save settings and checkboxes state."""
s = QSettings()
s.beginGroup('object_editor')
# No settings are stored yet; close the group for symmetry.
s.endGroup()
|
import bpy
from bpy.props import StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (updateNode)
class SvSortObjsNode(bpy.types.Node, SverchCustomTreeNode):
''' Sort Objects '''
bl_idname = 'SvSortObjsNode'
bl_label = 'Object ID Sort'
bl_icon = 'OUTLINER_OB_EMPTY'
Modes = StringProperty(name='formula', default='location.x', update=updateNode)
def sv_init(self, context):
self.inputs.new('StringsSocket', 'Objects')
self.inputs.new('StringsSocket', 'CustomValue')
self.outputs.new('StringsSocket', 'Objects')
def draw_buttons(self, context, layout):
if not self.inputs['CustomValue'].is_linked:
layout.prop(self, "Modes", text="")
def process(self):
Io, Cv = self.inputs
Oo = self.outputs[0]
if Oo.is_linked:
X = Io.sv_get()
if isinstance(X[0], list):
X = X[0]
if Cv.is_linked:
CV = Cv.sv_get()
Y = CV[0] if isinstance(CV[0],list) else CV
else:
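# Evaluate the user-entered attribute path (e.g. 'location.x') on each object.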
Y = eval("[i."+self.Modes+" for i in X]")
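# Sort the objects by their paired key values; the dict maps each object back to its key.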
X.sort(key=dict(zip(X, Y)).get)
Oo.sv_set(X)
def update_socket(self, context):
self.update()
def register():
bpy.utils.register_class(SvSortObjsNode)
def unregister():
bpy.utils.unregister_class(SvSortObjsNode)
|
""" Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.utf_16_le_encode
decode = codecs.utf_16_le_decode
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
|
"""
MakePro.py
By: Denver Coneybeare <denver@sleepydragon.org>
Jan 03, 2012
Makes changes to the source code to convert the app to the "Pro" version
"""
from __future__ import print_function
from __future__ import unicode_literals
import io
import re
import sys
import tempfile
class FileFilter(object):
def __init__(self, regex_pattern, group1_replacement):
self.expr = re.compile(regex_pattern)
self.group1_replacement = group1_replacement
self.match_found = False
def filter(self, line):
match = self.expr.match(line)
if match is not None:
self.match_found = True
(start, end) = match.span(1)
prefix = line[:start]
suffix = line[end:]
line = "{}{}{}".format(prefix, self.group1_replacement, suffix)
yield line
def error_message(self):
return "regular expression match not found in file: {}".format(self.expr.pattern)
class AndroidManifestFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self, r"\s*package=\"org.sleepydragon.capbutnbrightness()\"", ".pro")
class PrefsXmlFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self, r"\s*android:targetPackage=\"org.sleepydragon.capbutnbrightness()\"", ".pro")
class FixRImportFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self, r"\s*import org.sleepydragon.capbutnbrightness().R;", ".pro")
class VersionStringFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self, r"\s*<string name=\"app_version_display\">[^<]*()</string>", " Pro")
class AppNameStringFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self, r"\s*<string name=\"app_name\">[^<]*()</string>", " Pro")
class AppTitleStringFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self, r"\s*<string name=\"title_activity_main\">[^<]*()</string>", " Pro")
class ImportNewRFilter(FileFilter):
LINE_PREFIX = "package org.sleepydragon.capbutnbrightness;"
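# Emits an extra import of the Pro build's R class right after the package declaration.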
def __init__(self):
self.match_found = False
def filter(self, line):
yield line
if line.strip() == self.LINE_PREFIX:
self.match_found = True
linesep = line[len(line.rstrip()):]
import_line = "import org.sleepydragon.capbutnbrightness.pro.R;{}".format(linesep)
yield import_line
def error_message(self):
return "line not found: {}".format(self.LINE_PREFIX)
class UpgradeBannerLayoutXmlFilter(FileFilter):
BEGIN_LINE = "<!-- Begin upgrade banner -->"
END_LINE = "<!-- End upgrade banner -->"
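# Drops the begin marker and every line up to the end marker, removing the
# upgrade banner from the layout.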
def __init__(self):
self.begin_found = False
self.end_found = False
self.match_found = False
def filter(self, line):
if line.strip() == self.BEGIN_LINE:
self.begin_found = True
elif line.strip() == self.END_LINE:
self.end_found = True
self.match_found = self.begin_found and self.end_found
if not self.begin_found and not self.end_found:
yield line
elif self.begin_found and self.end_found:
yield line
def error_message(self):
if not self.begin_found:
return "line not found: {}".format(self.BEGIN_LINE)
elif not self.end_found:
return "line not found: {}".format(self.END_LINE)
class UpgradeBannerJavaFilter(FileFilter):
LINE1 = "final View btnUpgrade = this.findViewById(R.id.btnUpgrade);"
LINE2 = "btnUpgrade.setOnClickListener(this);"
LINE3 = "if (view.getId() == R.id.btnUpgrade) {"
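# Removes the btnUpgrade wiring lines and replaces the click-handler guard
# with a constant-false condition so the upgrade branch is never taken.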
def __init__(self):
self.line1_found = False
self.line2_found = False
self.line3_found = False
self.match_found = False
def filter(self, line):
if line.strip() == self.LINE1:
self.line1_found = True
elif line.strip() == self.LINE2:
self.line2_found = True
elif line.strip() == self.LINE3:
self.line3_found = True
i = line.find(self.LINE3)
newline = line[len(line.rstrip()):]
yield line[:i] + "if (1 == 2) {" + newline
else:
yield line
self.match_found = (self.line1_found and self.line2_found
and self.line3_found)
def error_message(self):
missing_lines = []
if not self.line1_found:
missing_lines.append(self.LINE1)
if not self.line2_found:
missing_lines.append(self.LINE2)
if not self.line3_found:
missing_lines.append(self.LINE3)
return "{} lines not found: {}".format(
len(missing_lines),
", ".join('"{}"'.format(x) for x in missing_lines))
filters = [
("AndroidManifest.xml", AndroidManifestFilter()),
("res/layout/activity_main.xml", UpgradeBannerLayoutXmlFilter()),
("res/values/strings.xml", VersionStringFilter()),
("res/values/strings.xml", AppNameStringFilter()),
("res/values/strings.xml", AppTitleStringFilter()),
("res/xml/preferences.xml", PrefsXmlFilter()),
("src/org/sleepydragon/capbutnbrightness/AboutActivity.java", ImportNewRFilter()),
("src/org/sleepydragon/capbutnbrightness/ButtonBrightnessAppWidgetProvider.java", ImportNewRFilter()),
("src/org/sleepydragon/capbutnbrightness/CreditsActivity.java", ImportNewRFilter()),
("src/org/sleepydragon/capbutnbrightness/MainActivity.java", ImportNewRFilter()),
("src/org/sleepydragon/capbutnbrightness/MainActivity.java", UpgradeBannerJavaFilter()),
("src/org/sleepydragon/capbutnbrightness/SetBrightnessService.java", ImportNewRFilter()),
("src/org/sleepydragon/capbutnbrightness/SettingsActivity.java", ImportNewRFilter()),
("src/org/sleepydragon/capbutnbrightness/debug/DebugActivity.java", FixRImportFilter()),
("src/org/sleepydragon/capbutnbrightness/debug/DebugLinesGenerator.java", FixRImportFilter()),
]
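# Stream each file through its filter into a temporary file, then copy the
# filtered content back over the original. Abort if a filter never matched.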
for (path, filter) in filters:
print("Making Pro: {}".format(path))
with tempfile.TemporaryFile() as tf:
with io.open(path, "rt", encoding="UTF-8", newline="") as f:
for (linenum, line) in enumerate(f):
lines_filtered = filter.filter(line)
for line_filtered in lines_filtered:
line_filtered_encoded = line_filtered.encode("UTF-8")
tf.write(line_filtered_encoded)
if not filter.match_found:
msg = filter.error_message()
print("ERROR: {}: {}".format(path, msg), file=sys.stderr)
sys.exit(1)
tf.seek(0)
with open(path, "wb") as f:
while True:
buf = tf.read(1024)
if not buf:
break
f.write(buf)
|
from __future__ import print_function
import datetime
import dateutil.tz
import os
import shutil
import zipfile
from opinel.utils.console import printInfo
from AWSScout2 import AWSCONFIG, EXCEPTIONS, HTMLREPORT, AWSRULESET, AWSCONFIG_FILE, EXCEPTIONS_FILE, HTMLREPORT_FILE, GENERATOR_FILE, REPORT_TITLE
from AWSScout2.output.utils import get_filename, prompt_4_overwrite
from AWSScout2.output.js import JavaScriptReaderWriter
class HTMLReport(object):
"""
Base HTML report
"""
def __init__(self, profile, report_dir, timestamp = False, exceptions = {}):
self.report_dir = report_dir
self.profile = profile.replace('/', '_').replace('\\', '_') # Issue 111
self.current_time = datetime.datetime.now(dateutil.tz.tzlocal())
if timestamp != False:
self.timestamp = self.current_time.strftime("%Y-%m-%d_%Hh%M%z") if not timestamp else timestamp
self.profile = '%s-%s' % (self.profile, self.timestamp)
self.exceptions = exceptions
self.scout2_report_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
self.html_data_path = os.path.join(self.scout2_report_data_path, 'html')
self.jsrw = JavaScriptReaderWriter(self.profile, report_dir, timestamp)
def get_content_from(self, templates_type):
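# Concatenate the contents of every file under html/<templates_type>/.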
contents = ''
template_dir = os.path.join(self.html_data_path, templates_type)
template_files = [os.path.join(template_dir, f) for f in os.listdir(template_dir) if os.path.isfile(os.path.join(template_dir, f))]
for filename in template_files:
with open(filename, 'rt') as f:
contents = contents + f.read()
return contents
def prepare_html_report_dir(self):
if not os.path.isdir(self.report_dir):
os.makedirs(self.report_dir)
aws_config_dir = os.path.join(self.report_dir, 'inc-awsconfig')
if not os.path.isdir(aws_config_dir):
os.makedirs(aws_config_dir)
# Copy static 3rd-party files
archive = os.path.join(self.scout2_report_data_path, 'includes.zip')
zip_ref = zipfile.ZipFile(archive)
zip_ref.extractall(self.report_dir)
zip_ref.close()
# Copy static files
inc_scout2_dir = os.path.join(self.report_dir, 'inc-scout2')
src_inc_scout2_dir = os.path.join(self.scout2_report_data_path, 'inc-scout2')
if os.path.isdir(inc_scout2_dir):
shutil.rmtree(inc_scout2_dir)
shutil.copytree(src_inc_scout2_dir, inc_scout2_dir)
class Scout2Report(HTMLReport):
"""
Scout2 HTML report
"""
def __init__(self, profile, report_dir = None, timestamp = False, exceptions = {}):
self.html_root = HTMLREPORT_FILE
super(Scout2Report, self).__init__(profile, report_dir, timestamp, exceptions)
def save(self, config, exceptions, force_write = False, debug = False):
self.prepare_html_report_dir()
self.jsrw.save_to_file(config, AWSCONFIG, force_write, debug)
self.jsrw.save_to_file(exceptions, EXCEPTIONS, force_write, debug)
return self.create_html_report(force_write)
def create_html_report(self, force_write):
contents = ''
# Use all scripts under html/partials/
contents += self.get_content_from('partials')
# Use all scripts under html/summaries/
contents += self.get_content_from('summaries')
new_file, first_line = get_filename(HTMLREPORT, self.profile, self.report_dir)
printInfo('Creating %s ...' % new_file)
if prompt_4_overwrite(new_file, force_write):
if os.path.exists(new_file):
os.remove(new_file)
with open(os.path.join(self.html_data_path, self.html_root)) as f:
with open(new_file, 'wt') as nf:
for line in f:
newline = line.replace(REPORT_TITLE, REPORT_TITLE + ' [' + self.profile + ']')
if self.profile != 'default':
new_config_filename = AWSCONFIG_FILE.replace('.js', '-%s.js' % self.profile)
new_exceptions_filename = EXCEPTIONS_FILE.replace('.js', '-%s.js' % self.profile)
newline = newline.replace(AWSCONFIG_FILE, new_config_filename)
newline = newline.replace(EXCEPTIONS_FILE, new_exceptions_filename)
newline = newline.replace('<!-- PLACEHOLDER -->', contents)
nf.write(newline)
return new_file
class RulesetGenerator(HTMLReport):
"""
HTML ruleset generator for Scout2
"""
def __init__(self, ruleset_name, report_dir = None, timestamp = False, exceptions = {}):
self.html_root = GENERATOR_FILE
self.ruleset_name = ruleset_name
super(RulesetGenerator, self).__init__(ruleset_name, report_dir, timestamp, exceptions)
def create_html_report(self, force_write):
src_rule_generator = os.path.join(self.html_data_path, GENERATOR_FILE)
rule_generator = os.path.join(self.report_dir, GENERATOR_FILE)
shutil.copyfile(src_rule_generator, rule_generator)
return rule_generator
def save(self, config, force_write = False, debug = False):
self.prepare_html_report_dir()
self.jsrw.save_to_file(config, AWSRULESET, force_write, debug)
return self.create_html_report(force_write)
|
import os
import threading
import sys
import sickbeard
from sickbeard.webserve import LoginHandler, LogoutHandler, KeyHandler, CalendarHandler
from sickbeard.webapi import ApiHandler
from sickbeard import logger
from sickbeard.helpers import create_https_certificates, generateApiKey
from tornado.web import Application, StaticFileHandler, RedirectHandler
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.routes import route
class SRWebServer(threading.Thread):
def __init__(self, options=None, io_loop=None):
threading.Thread.__init__(self)
self.daemon = True
self.alive = True
self.name = "TORNADO"
self.io_loop = io_loop or IOLoop.current()
# Avoid a shared mutable default argument; setdefault below mutates this dict.
self.options = options or {}
self.options.setdefault('port', 8081)
self.options.setdefault('host', '0.0.0.0')
self.options.setdefault('log_dir', None)
self.options.setdefault('username', '')
self.options.setdefault('password', '')
self.options.setdefault('web_root', '/')
assert isinstance(self.options['port'], int)
assert 'data_root' in self.options
# video root
if sickbeard.ROOT_DIRS:
root_dirs = sickbeard.ROOT_DIRS.split('|')
self.video_root = root_dirs[int(root_dirs[0]) + 1]
else:
self.video_root = None
# web root
if self.options['web_root']:
sickbeard.WEB_ROOT = self.options['web_root'] = ('/' + self.options['web_root'].lstrip('/').strip('/'))
# api root
if not sickbeard.API_KEY:
sickbeard.API_KEY = generateApiKey()
self.options['api_root'] = r'%s/api/%s' % (sickbeard.WEB_ROOT, sickbeard.API_KEY)
# tornado setup
self.enable_https = self.options['enable_https']
self.https_cert = self.options['https_cert']
self.https_key = self.options['https_key']
if self.enable_https:
# If either the HTTPS certificate or key do not exist, make some self-signed ones.
if not (self.https_cert and os.path.exists(self.https_cert)) or not (
self.https_key and os.path.exists(self.https_key)):
if not create_https_certificates(self.https_cert, self.https_key):
logger.log(u"Unable to create CERT/KEY files, disabling HTTPS")
sickbeard.ENABLE_HTTPS = False
self.enable_https = False
if not (os.path.exists(self.https_cert) and os.path.exists(self.https_key)):
logger.log(u"Disabled HTTPS because of missing CERT and KEY files", logger.WARNING)
sickbeard.ENABLE_HTTPS = False
self.enable_https = False
# Load the app
self.app = Application([],
debug=True,
autoreload=False,
gzip=True,
xheaders=sickbeard.HANDLE_REVERSE_PROXY,
cookie_secret=sickbeard.WEB_COOKIE_SECRET,
login_url='%s/login/' % self.options['web_root'],
)
# Main Handlers
self.app.add_handlers('.*$', [
# webapi handler
(r'%s(/?.*)' % self.options['api_root'], ApiHandler),
# webapi key retrieval
(r'%s/getkey(/?.*)' % self.options['web_root'], KeyHandler),
# webapi builder redirect
(r'%s/api/builder' % self.options['web_root'], RedirectHandler, {"url": self.options['web_root'] + '/apibuilder/'}),
# webui login/logout handlers
(r'%s/login(/?)' % self.options['web_root'], LoginHandler),
(r'%s/logout(/?)' % self.options['web_root'], LogoutHandler),
# webui handlers
] + route.get_routes(self.options['web_root']))
# Web calendar handler (Needed because option Unprotected calendar)
self.app.add_handlers('.*$', [
(r'%s/calendar' % self.options['web_root'], CalendarHandler),
])
# Static File Handlers
self.app.add_handlers(".*$", [
# favicon
(r'%s/(favicon\.ico)' % self.options['web_root'], StaticFileHandler,
{"path": os.path.join(self.options['data_root'], 'images/ico/favicon.ico')}),
# images
(r'%s/images/(.*)' % self.options['web_root'], StaticFileHandler,
{"path": os.path.join(self.options['data_root'], 'images')}),
# cached images
(r'%s/cache/images/(.*)' % self.options['web_root'], StaticFileHandler,
{"path": os.path.join(sickbeard.CACHE_DIR, 'images')}),
# css
(r'%s/css/(.*)' % self.options['web_root'], StaticFileHandler,
{"path": os.path.join(self.options['data_root'], 'css')}),
# javascript
(r'%s/js/(.*)' % self.options['web_root'], StaticFileHandler,
{"path": os.path.join(self.options['data_root'], 'js')}),
# videos
] + [(r'%s/videos/(.*)' % self.options['web_root'], StaticFileHandler,
{"path": self.video_root})])
def run(self):
if self.enable_https:
protocol = "https"
self.server = HTTPServer(self.app, ssl_options={"certfile": self.https_cert, "keyfile": self.https_key})
else:
protocol = "http"
self.server = HTTPServer(self.app)
logger.log(u"Starting SickRage on " + protocol + "://" + str(self.options['host']) + ":" + str(
self.options['port']) + "/")
try:
self.server.listen(self.options['port'], self.options['host'])
except Exception:
etype, evalue, etb = sys.exc_info()
logger.log(
"Could not start webserver on %s. Exception: %s, Error: %s" % (self.options['port'], etype, evalue),
logger.ERROR)
return
try:
self.io_loop.start()
self.io_loop.close(True)
except (IOError, ValueError):
# Ignore errors like "ValueError: I/O operation on closed kqueue fd". These might be thrown during a reload.
pass
def shutDown(self):
self.alive = False
self.io_loop.stop()
|