gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
#-*- coding=utf-8 -*-
#author: zhihua.ye@spreadtrum.com
"""
1. assemble sipp cmd
sipp -sf reg.xml -p 5060 -t u1 -m 1 -trace_err
2. assemble tmtc cmd
echo -n "c-reg" | busybox nc 127.0.0.1 21904
"""
import sys
import json
import os
from logConf import *
from utjsonparser import *
from time import gmtime, strftime
import re
class CmdException(Exception):
    """Raised when command assembly fails (e.g. unreadable config.json)."""

    def __init__(self, message):
        # Keep the text on the instance as well as in Exception.args.
        self.message = message
        super(CmdException, self).__init__(message)
class cmdObj(dict):
    """Dict-backed command record with 'cmd' and 'timeout' defaults."""

    def __init__(self, *arg, **kw):
        # Seed the defaults first so any explicit data passed by the
        # caller overrides them.
        self['cmd'] = ''
        self['timeout'] = 1
        super(cmdObj, self).__init__(*arg, **kw)
class cmdhelper:
    """Assemble sipp and tmtc (netcat) command lines from a case config.

    Reads ``<confdir>/config.json`` and produces, per test case:
      * a sipp command, e.g.
        ``sipp -sf reg.xml -p 5060 -t u1 -m 1 -trace_err ...``
      * a tmtc command, e.g.
        ``echo -n "c-reg" | busybox nc 127.0.0.1 21904``
    """

    def __init__(self, confdir=''):
        """Load ``config.json`` from *confdir*.

        :param confdir: directory containing config.json
        :raises CmdException: when the config cannot be opened or parsed
        """
        self.confdir = confdir
        self.config = dict()
        self.logger = logConf()
        conffile = os.path.realpath(confdir) + '/config.json'
        self.timestamp = strftime("%Y_%m_%d_%H_%M_%S", gmtime())
        try:
            with open(conffile, 'r') as conf:
                self.config = json.load(conf)
        except Exception as exc:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # propagate; the logged message format is unchanged.
            estr = str(type(exc)) + ' ' + str(exc)
            self.logger.logger.error("Unexpected error:" + estr)
            raise CmdException(estr)
        self.execdir = ''
        # extracted info
        self.xmls = list()
        self.timeouts = list()
        # real cmd lists; kept index-aligned with each other (see buildCmd)
        self.sippcmds = list()
        self.nccmds = list()
        # lazily built terminate command (see gettermcmd)
        self.termcmd = None

    def gettimestamp(self):
        """Return the per-run timestamp captured at construction time."""
        return self.timestamp

    def getDesc(self):
        """Log scenario, bug id and commit id from the config description."""
        desc = self.config['description']
        self.logger.logger.info('scenario is ' + desc['scenario'])
        self.logger.logger.info('bug id is ' + str(desc['bugid']) + ', commit id is ' + str(desc['commitid']))

    def getCasename(self):
        """Return the case name from the config description."""
        return self.config['description']['casename']

    def getCategory(self):
        """Return the case category from the config description."""
        return self.config['description']['category']

    def getConfDelta(self):
        """
        provision.ini delta value
        :return:
        """
        # Not implemented yet.
        pass

    def getUeConfig(self):
        """Build the UE runtime config: defaults overridden by config['ue'].

        Also records the chosen exec directory on ``self.execdir``.
        :return: dict with tmtcport/execdir/config/binary/lib/... entries
        """
        ueconfig = dict()
        ueconfig['tmtcport'] = 21904
        # Collapse whitespace runs in the casename to single underscores.
        # BUGFIX: was r'[\s+]' -- a character class that also replaced
        # literal '+' characters and emitted one '_' per whitespace char.
        newcasename = re.sub(r'\s+', '_', self.config['description']['casename'])
        postfix = newcasename + '_' + self.timestamp
        ueconfig['execdir'] = "/data/data/ut/" + postfix
        ueconfig['config'] = "provision.ini"
        ueconfig['binary'] = 'tmtclient'
        ueconfig['startuptime'] = 3
        ueconfig['lib'] = [
            "libavatar_ut.so",
            "liblemon_ut.so"
        ]
        ueconfig['preference'] = dict()
        ue = self.config['ue']
        # Straight per-key overrides from the "ue" section.
        for key in ('preference', 'tmtcport', 'config', 'binary', 'lib', 'startuptime'):
            if key in ue:
                ueconfig[key] = ue[key]
        # execdir override keeps the per-run postfix appended.
        if 'execdir' in ue:
            ueconfig['execdir'] = ue['execdir'] + postfix
        self.execdir = ueconfig['execdir']
        return ueconfig

    def buildCmd(self):
        """Build sipp/nc command pairs for every entry in config['cases'].

        Keeps ``self.sippcmds`` and ``self.nccmds`` index-aligned by
        inserting a dummy nc command when a case has no tmtc command.
        :raises CmdException: on malformed case entries (e.g. missing keys)
        """
        cases = self.config['cases']
        for index, case in enumerate(cases):
            try:
                sippcmd = cmdObj()
                nccmd = cmdObj()
                xml = case['xml']
                timeout = case['timeout']
                tmtccmd = case['tmtccmd']
                desc = case['desc']
                opts = case.get('opts', '')
                self.xmls.append(xml)
                self.timeouts.append(timeout)
                if validCmd(tmtccmd) is False:
                    tmtccmd = None
                if xml:
                    sippcmd = self.buildsipp(xml, timeout, desc, opts=opts)
                    self.sippcmds.append(sippcmd)
                if tmtccmd:
                    nccmd = self.buildnc(tmtccmd)
                    self.nccmds.append(nccmd)
                else:
                    # just keep nccmds the same length as sippcmds
                    dummynccmd = cmdObj()
                    dummynccmd['cmd'] = DUMMY_CMD
                    self.nccmds.append(dummynccmd)
            except Exception as exc:
                # Most likely KeyError from a malformed case entry; narrowed
                # from a bare "except:".
                estr = str(type(exc)) + ' ' + str(exc)
                self.logger.logger.error("Unexpected error:" + estr)
                raise CmdException(estr)

    def buildsipp(self, xml='', timeout=None, desc=None, opts=''):
        """Return a cmdObj that runs sipp for *xml* inside self.execdir.

        Example:
        sipp -sf reg.xml -p 5060 -t u1 -m 1 -trace_err -trace_msg
             -message_file reg.msg -trace_shortmsg -shortmessage_file regshort.msg
        """
        sippcmd = cmdObj()
        prefix = xml.split('.')[0]
        msgopt = " -trace_msg -message_file " + str(prefix) + ".msg "
        shortmsgopt = " -trace_shortmsg -shortmessage_file " + str(prefix) + "short.msg "
        cdcmd = "cd " + self.execdir
        sippcmd['cmd'] = cdcmd + "&& sipp -sf " + xml + ' ' + opts + ' -p 5060 -t u1 -m 1 -trace_err ' + msgopt + shortmsgopt
        sippcmd['timeout'] = timeout
        sippcmd['desc'] = desc
        return sippcmd

    def buildnc(self, cmd=''):
        """Return a cmdObj piping *cmd* into the tmtc port via busybox nc.

        Example: echo -n "c-reg" | busybox nc 127.0.0.1 21904
        """
        tmtcport = 21904
        if 'tmtcport' in self.config['ue']:
            tmtcport = self.config['ue']['tmtcport']
        nccmd = cmdObj()
        nccmd['cmd'] = "echo -n " + cmd + ' | busybox nc 127.0.0.1 ' + str(tmtcport)
        # FIXME: nc should be responsed quickly, hardcoded here.
        nccmd['timeout'] = 1
        return nccmd

    def getxmls(self):
        """Return the list of scenario xml file names."""
        return self.xmls

    def gettimeouts(self):
        """Return the per-case timeout list."""
        return self.timeouts

    def getsippcmds(self):
        """Return the built sipp cmdObj list."""
        return self.sippcmds

    def getnccmds(self):
        """Return the built netcat cmdObj list (index-aligned with sipp)."""
        return self.nccmds

    def gettermcmd(self):
        """Build (and cache on self.termcmd) the 'exit' command for tmtc."""
        self.termcmd = cmdObj()
        tmtcport = 21904
        if 'tmtcport' in self.config['ue']:
            tmtcport = self.config['ue']['tmtcport']
        self.termcmd['cmd'] = "echo -n exit" + ' | busybox nc 127.0.0.1 ' + str(tmtcport)
        self.termcmd['timeout'] = 1
        return self.termcmd

    def printCmds(self):
        """Log every sipp/nc command pair together with its timeout."""
        for index, sipp in enumerate(self.sippcmds):
            self.logger.logger.info("< Flow No." + str(index + 1) + ' >')
            self.logger.logger.info('sippcmd is ' + sipp['cmd'] + ', timeout is ' + str(sipp['timeout']))
            self.logger.logger.info('netcat cmd is ' + self.nccmds[index]['cmd'] +
                                    ', timeout is ' + str(self.nccmds[index]['timeout']))
if __name__ == '__main__':
    # Smoke test: build and print all commands for the bundled "mt" case.
    cmd = cmdhelper(confdir="../cases/mt/")
    cmd.getDesc()
    cmd.buildCmd()
    ueconfig = cmd.getUeConfig()
    cmd.printCmds()
|
|
"""
Paramiko run support
"""
from cStringIO import StringIO
from paramiko import ChannelFile
import gevent
import gevent.event
import socket
import pipes
import logging
import shutil
from ..contextutil import safe_while
from ..exceptions import (CommandCrashedError, CommandFailedError,
ConnectionLostError)
log = logging.getLogger(__name__)
class RemoteProcess(object):
    """
    An object to begin and monitor execution of a process on a remote host
    """
    __slots__ = [
        'client', 'args', 'check_status', 'command', 'hostname',
        'stdin', 'stdout', 'stderr',
        '_stdin_buf', '_stdout_buf', '_stderr_buf',
        'returncode', 'exitstatus',
        'greenlets',
        # for orchestra.remote.Remote to place a backreference
        'remote',
        'label',
    ]

    def __init__(self, client, args, check_status=True, hostname=None, label=None):
        """
        Create the object. Does not initiate command execution.

        :param client: paramiko.SSHConnection to run the command with
        :param args: Command to run.
        :type args: String or list of strings
        :param check_status: Whether to raise CommandFailedError on non-zero
                             exit status. Defaults to True. All signals
                             and connection loss are made to look like SIGHUP.
        :param hostname: Name of remote host (optional)
        :param label: Can be used to label or describe what the
                      command is doing.
        """
        self.client = client
        self.args = args
        # A plain string is used verbatim; a list is shell-quoted (see quote()).
        if isinstance(args, basestring):
            self.command = args
        else:
            self.command = quote(args)
        self.check_status = check_status
        self.label = label
        if hostname:
            self.hostname = hostname
        else:
            # Fall back to the peer address of the already-open transport.
            (self.hostname, port) = client.get_transport().getpeername()
        self.greenlets = []
        self.stdin, self.stdout, self.stderr = (None, None, None)
        self.returncode = self.exitstatus = None

    def execute(self):
        """
        Execute remote command
        """
        prefix = "Running:"
        if self.label:
            prefix = "Running ({label}):".format(label=self.label)
        log.getChild(self.hostname).info(u"{prefix} {cmd!r}".format(
            cmd=self.command, prefix=prefix))
        # exec_command returns the (stdin, stdout, stderr) channel files;
        # keep private buffers so _get_exitstatus works even if the public
        # attributes are replaced by run().
        (self._stdin_buf, self._stdout_buf, self._stderr_buf) = \
            self.client.exec_command(self.command)
        (self.stdin, self.stdout, self.stderr) = \
            (self._stdin_buf, self._stdout_buf, self._stderr_buf)

    def add_greenlet(self, greenlet):
        # Greenlets registered here are joined by wait() before the exit
        # status is collected.
        self.greenlets.append(greenlet)

    def wait(self):
        """
        Block until remote process finishes.

        :returns: self.returncode
        :raises ConnectionLostError: status unavailable and transport is dead
        :raises CommandCrashedError: status unavailable but transport alive
        :raises CommandFailedError: non-zero status and check_status is True
        """
        # Join the I/O copier greenlets first so output is fully drained.
        for greenlet in self.greenlets:
            greenlet.get()
        status = self._get_exitstatus()
        self.exitstatus = self.returncode = status
        if self.check_status:
            if status is None:
                # command either died due to a signal, or the connection
                # was lost
                transport = self.client.get_transport()
                if transport is None or not transport.is_active():
                    # look like we lost the connection
                    raise ConnectionLostError(command=self.command,
                                              node=self.hostname)
                # connection seems healthy still, assuming it was a
                # signal; sadly SSH does not tell us which signal
                raise CommandCrashedError(command=self.command)
            if status != 0:
                raise CommandFailedError(command=self.command,
                                         exitstatus=status, node=self.hostname,
                                         label=self.label)
        return status

    def _get_exitstatus(self):
        """
        :returns: the remote command's exit status (return code). Note that
                  if the connection is lost, or if the process was killed by a
                  signal, this returns None instead of paramiko's -1.
        """
        status = self._stdout_buf.channel.recv_exit_status()
        if status == -1:
            status = None
        return status

    @property
    def finished(self):
        # Non-blocking: True once the remote command's exit status is ready.
        return self._stdout_buf.channel.exit_status_ready()

    def poll(self):
        """
        :returns: self.returncode if the process is finished; else None
        """
        if self.finished:
            return self.returncode
        return None

    def __repr__(self):
        return '{classname}(client={client!r}, args={args!r}, check_status={check}, hostname={name!r})'.format(  # noqa
            classname=self.__class__.__name__,
            client=self.client,
            args=self.args,
            check=self.check_status,
            name=self.hostname,
        )
class Raw(object):
    """
    Marker wrapper: the held value is passed to the remote shell unquoted
    (quote() yields it verbatim instead of escaping it).
    """

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.value)
def quote(args):
    """
    Shell-quote *args* into a single command string.

    A plain string is returned untouched; a sequence is joined with
    spaces, quoting each element via pipes.quote() unless it is a Raw
    marker, whose value is emitted verbatim.
    """
    if isinstance(args, basestring):
        return args
    pieces = (
        part.value if isinstance(part, Raw) else pipes.quote(part)
        for part in args
    )
    return ' '.join(pieces)
def copy_to_log(f, logger, loglevel=logging.INFO):
    """
    Copy each line of the file-like *f* to *logger* at *loglevel*.

    :param f: file-like object (possibly a paramiko ChannelFile)
    :param logger: logging.Logger receiving one record per stripped line
    :param loglevel: level for the emitted records (default INFO)
    """
    # Work-around for http://tracker.ceph.com/issues/8313 — switch the
    # paramiko channel file to binary mode (presumably to bypass its text
    # handling; see the tracker issue for details).
    if isinstance(f, ChannelFile):
        f._flags += ChannelFile.FLAG_BINARY
    # i can't seem to get fudge to fake an iterable, so using this old
    # api for now
    for line in f.xreadlines():
        line = line.rstrip()
        # Second part of work-around for http://tracker.ceph.com/issues/8313:
        # decode with 'replace' so undecodable bytes do not raise mid-stream.
        try:
            line = unicode(line, 'utf-8', 'replace').encode('utf-8')
            logger.log(loglevel, line)
        except (UnicodeDecodeError, UnicodeEncodeError):
            logger.exception("Encountered unprintable line in command output")
def copy_and_close(src, fdst):
    """
    Copy *src* (a string, a file-like object, or None) into *fdst*,
    then close *fdst* unconditionally.
    """
    if src is not None:
        source = StringIO(src) if isinstance(src, basestring) else src
        shutil.copyfileobj(source, fdst)
    fdst.close()
def copy_file_to(f, dst):
    """
    Stream *f* into *dst*: line-by-line logging when *dst* looks like a
    Logger, raw byte copy otherwise.

    :param f: file to be copied.
    :param dst: destination (Logger-like or file-like)
    """
    # Duck-typed check ('log' attribute) instead of isinstance, so unit
    # tests can pass fake loggers.
    if hasattr(dst, 'log'):
        return copy_to_log(f, dst)
    return shutil.copyfileobj(f, dst)
def spawn_asyncresult(fn, *args, **kwargs):
    """
    Run *fn* in a Greenlet and funnel its outcome into an AsyncResult.

    Greenlets that raise print tracebacks on stderr even when
    ``.link_exception`` has been called; routing the result through an
    AsyncResult avoids that noise.

    :returns: gevent.event.AsyncResult holding fn's value or exception
    """
    result = gevent.event.AsyncResult()

    def _bridge():
        """Forward fn's return value or exception into the AsyncResult."""
        try:
            value = fn(*args, **kwargs)
        except Exception as e:
            result.set_exception(e)
        else:
            result.set(value)

    gevent.spawn(_bridge)
    return result
class Sentinel(object):
    """
    Named singleton marker; used below to define the PIPE file-like flag.
    """

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name


# Marker meaning "caller manages this stream" (cf. subprocess.PIPE).
PIPE = Sentinel('PIPE')
class KludgeFile(object):
    """
    Wrap Paramiko's ChannelFile so that ``f.close()`` really produces an
    EOF for the remote command (by shutting down the write side).
    """

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattr__(self, name):
        # Everything not defined here is delegated to the wrapped file.
        return getattr(self._wrapped, name)

    def close(self):
        """
        Close the wrapped file, then shut down the write half of its channel.
        """
        self._wrapped.close()
        self._wrapped.channel.shutdown_write()
def run(
    client, args,
    stdin=None, stdout=None, stderr=None,
    logger=None,
    check_status=True,
    wait=True,
    name=None,
    label=None
):
    """
    Run a command remotely. If any of 'args' contains shell metacharacters
    that you want to pass unquoted, pass it as an instance of Raw(); otherwise
    it will be quoted with pipes.quote() (single quote, and single quotes
    enclosed in double quotes).

    :param client: SSHConnection to run the command with
    :param args: command to run
    :type args: list of string
    :param stdin: Standard input to send; either a string, a file-like object,
                  None, or `PIPE`. `PIPE` means caller is responsible for
                  closing stdin, or command may never exit.
    :param stdout: What to do with standard output. Either a file-like object,
                   a `logging.Logger`, `PIPE`, or `None` for copying to default
                   log. `PIPE` means caller is responsible for reading, or
                   command may never exit.
    :param stderr: What to do with standard error. See `stdout`.
    :param logger: If logging, write stdout/stderr to "out" and "err" children
                   of this logger. Defaults to logger named after this module.
    :param check_status: Whether to raise CommandFailedError on non-zero exit
                         status. Defaults to True. All signals and
                         connection loss are made to look like SIGHUP.
    :param wait: Whether to wait for process to exit. If False, returned
                 ``r.exitstatus`` is a `gevent.event.AsyncResult`, and the
                 actual status is available via ``.get()``.
    :param name: Human readable name (probably hostname) of the destination
                 host
    :param label: Can be used to label or describe what the command is doing.
    """
    try:
        (host, port) = client.get_transport().getpeername()
    except socket.error:
        # Transport already gone; surface it as a lost connection.
        raise ConnectionLostError(command=quote(args), node=name)
    if name is None:
        name = host
    r = RemoteProcess(client, args, check_status=check_status, hostname=name, label=label)
    r.execute()
    # Wrap stdin so closing it actually EOFs the remote command.
    r.stdin = KludgeFile(wrapped=r.stdin)
    g_in = None
    if stdin is not PIPE:
        # Feed the provided stdin (possibly None) and close when done.
        g_in = gevent.spawn(copy_and_close, stdin, r.stdin)
        r.add_greenlet(g_in)
        r.stdin = None
    else:
        assert not wait, \
            "Using PIPE for stdin without wait=False would deadlock."
    if logger is None:
        logger = log
    g_err = None
    if stderr is not PIPE:
        if stderr is None:
            stderr = logger.getChild(name).getChild('stderr')
        # Drain remote stderr into the chosen sink in the background.
        g_err = gevent.spawn(copy_file_to, r.stderr, stderr)
        r.add_greenlet(g_err)
        r.stderr = stderr
    else:
        assert not wait, \
            "Using PIPE for stderr without wait=False would deadlock."
    g_out = None
    if stdout is not PIPE:
        if stdout is None:
            stdout = logger.getChild(name).getChild('stdout')
        # Drain remote stdout into the chosen sink in the background.
        g_out = gevent.spawn(copy_file_to, r.stdout, stdout)
        r.add_greenlet(g_out)
        r.stdout = stdout
    else:
        assert not wait, \
            "Using PIPE for stdout without wait=False would deadlock."
    if wait:
        r.wait()
    return r
def wait(processes, timeout=None):
    """
    Block until every process in *processes* has exited.

    With a positive *timeout*, first poll the processes (via safe_while)
    until all report finished; then wait() on each one so any failure
    raises.
    """
    if timeout and timeout > 0:
        with safe_while(tries=(timeout / 6)) as check_time:
            pending = list(processes)
            while pending:
                check_time()
                # Drop every process that has finished since the last poll.
                pending = [proc for proc in pending if not proc.finished]
    for proc in processes:
        proc.wait()
|
|
# -*- coding: utf-8 -
import operator
from .local_time import get_now, TZ
from copy import deepcopy
from datetime import timedelta
from dateutil.parser import parse
from dpath.util import delete as xpathdelete, get as xpathget, new as xpathnew
from haversine import haversine
from json import load, loads
from jsonpath_rw import parse as parse_path
from munch import Munch, munchify
from robot.errors import ExecutionFailed
from robot.libraries.BuiltIn import BuiltIn
from robot.output import LOGGER
from robot.output.loggerhelper import Message
# These imports are not pointless. Robot's resource and testsuite files
# can access them by simply importing library "service_keywords".
# Please ignore the warning given by Flake8 or other linter.
from .initial_data import (
create_fake_doc,
create_fake_sentence,
create_fake_amount,
create_fake_number,
create_fake_date,
create_fake_funder,
create_fake_period,
get_fake_funder_scheme,
fake,
subtraction,
field_with_id,
test_bid_data,
test_bid_value,
test_bid_value_esco,
test_bid_data_selection,
test_change_data,
test_claim_answer_data,
test_claim_data,
test_complaint_data,
test_complaint_reply_data,
test_confirm_data,
test_feature_data,
test_invalid_features_data,
test_item_data,
test_lot_data,
test_lot_document_data,
test_related_question,
test_question_answer_data,
test_question_data,
test_supplier_data,
test_tender_data,
test_tender_data_competitive_dialogue,
test_tender_data_limited,
test_tender_data_openeu,
test_tender_data_openua,
test_tender_data_planning,
test_tender_data_openua_defense,
test_tender_data_framework_agreement,
test_tender_data_selection,
test_bid_competitive_data,
tets_monitoring_data,
test_party,
test_dialogue,
test_conclusion,
test_status_data,
test_elimination_report,
test_tender_data_esco,
test_modification_data,
test_agreement_change_data,
create_fake_title,
create_fake_value_amount,
test_change_document_data,
convert_amount,
get_number_of_minutes,
get_hash,
test_criteria_data,
create_fake_eng_sentence,
create_unit_en
)
from barbecue import chef
from restkit import request
# End of non-pointless import
import os
import re
# Python 2 type groups used by isinstance() checks throughout this module.
NUM_TYPES = (int, long, float)
STR_TYPES = (str, unicode)
def get_current_tzdate():
    """Return the localized current time as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
    now = get_now()
    return now.strftime('%Y-%m-%d %H:%M:%S.%f')
def add_minutes_to_date(date, minutes):
    """Parse *date*, add *minutes* (numeric or string), return ISO-8601 text."""
    shifted = parse(date) + timedelta(minutes=float(minutes))
    return shifted.isoformat()
def compare_date(left, right, accuracy="minute", absolute_delta=True):
    '''Compare two date strings with the given accuracy.

    Both dates are parsed into datetime objects and localized to TZ when
    they carry no timezone of their own.

    :param left: First date
    :param right: Second date
    :param accuracy: Max difference between dates to consider them equal.
                     Default value - "minute".
                     Possible values - "day", "hour", "minute" or a float
                     number of seconds.
    :param absolute_delta: If truthy (default) the order of the dates is
                           irrelevant; otherwise *right* may not exceed
                           *left* by more than the accuracy.
    :returns: Boolean value
    :error: ValueError while converting accuracy to float is caught; a
            warning is logged and accuracy falls back to 60 seconds.
    '''
    named_accuracy = {
        "day": 24 * 60 * 60 - 1,
        "hour": 60 * 60 - 1,
        "minute": 60 - 1,
    }
    left = parse(left)
    right = parse(right)
    if left.tzinfo is None:
        left = TZ.localize(left)
    if right.tzinfo is None:
        right = TZ.localize(right)
    delta = (left - right).total_seconds()
    if accuracy in named_accuracy:
        accuracy = named_accuracy[accuracy]
    else:
        try:
            accuracy = float(accuracy)
        except ValueError:
            LOGGER.log_message(
                Message("Could not convert from {} to float. Accuracy is set to 60 seconds.".format(accuracy), "WARN"))
            accuracy = 60
    if absolute_delta:
        delta = abs(delta)
    return delta <= accuracy
def compare_coordinates(left_lat, left_lon, right_lat, right_lon, accuracy=0.1):
    '''Compare two geographic points with the given accuracy.

    :param left_lat: First coordinate latitude
    :param left_lon: First coordinate longitude
    :param right_lat: Second coordinate latitude
    :param right_lon: Second coordinate longitude
    :param accuracy: Max haversine distance (kilometers) for the points to
                     be considered equal. Default value - 0.1.
    :returns: Boolean value
    :raises TypeError: when any coordinate is not numeric
    '''
    coordinates = {'left_lat': left_lat, 'left_lon': left_lon,
                   'right_lat': right_lat, 'right_lon': right_lon}
    for key, value in coordinates.iteritems():
        if not isinstance(value, NUM_TYPES):
            raise TypeError("Invalid type for coordinate '{0}'. "
                            "Expected one of {1}, got {2}".format(
                                key, str(NUM_TYPES), str(type(value))))
    distance = haversine((left_lat, left_lon), (right_lat, right_lon))
    return distance <= accuracy
def log_object_data(data, file_name=None, format="yaml", update=False, artifact=False):
    """Log object data in pretty format (JSON or YAML)

    Two output formats are supported: "yaml" and "json".
    If a file name is specified, the output is written into that file.

    :param data: object to log (munchified if it is not a Munch already)
    :param file_name: base name (without extension) of the output file
    :param format: "yaml" (default) or "json"
    :param update: merge *data* over the file's current contents first
    :param artifact: write into this package's 'data' dir instead of
                     Robot's ${OUTPUT_DIR}

    If you would like to get similar output everywhere,
    use the following snippet somewhere in your code
    before actually using Munch. For instance,
    put it into your __init__.py, or, if you use zc.buildout,
    specify it in "initialization" setting of zc.recipe.egg.

    from munch import Munch
    Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
                                              default_flow_style=False)
    Munch.__repr__ = Munch.__str__
    """
    if not isinstance(data, Munch):
        data = munchify(data)
    if file_name:
        if artifact:
            file_path = os.path.join(os.path.dirname(__file__), 'data', file_name + '.' + format)
        else:
            output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
            file_path = os.path.join(output_dir, file_name + '.' + format)
        if update:
            # Read-modify-write: new data wins over what is on disk.
            try:
                with open(file_path, "r+") as file_obj:
                    new_data = data.copy()
                    data = munch_from_object(file_obj.read(), format)
                    data.update(new_data)
                    file_obj.seek(0)
                    file_obj.truncate()
            except IOError as e:
                # No existing file to merge with — fall through and create it.
                LOGGER.log_message(Message(e, "INFO"))
                LOGGER.log_message(Message("Nothing to update, "
                                           "creating new file.", "INFO"))
        data_obj = munch_to_object(data, format)
        with open(file_path, "w") as file_obj:
            file_obj.write(data_obj)
    # Always log the (possibly merged) data, file or no file.
    data_obj = munch_to_object(data, format)
    LOGGER.log_message(Message(data_obj.decode('utf-8'), "INFO"))
def munch_from_object(data, format="yaml"):
    """Deserialize *data* into a Munch: JSON when requested, YAML otherwise."""
    loader = Munch.fromJSON if format.lower() == 'json' else Munch.fromYAML
    return loader(data)
def munch_to_object(data, format="yaml"):
    """Serialize Munch *data* to pretty JSON or (default) YAML text."""
    if format.lower() == 'json':
        return data.toJSON(indent=2)
    return data.toYAML(allow_unicode=True, default_flow_style=False)
def load_data_from(file_name, mode=None, external_params_name=None):
    """Load a .json/.yaml data file into a Munch, merging optional overrides.

    We assume that 'external_params' is a valid json if passed.

    :param file_name: path, absolute or relative to this package's 'data' dir
    :param mode: 'brokers' merges each broker entry over the 'Default' entry
    :param external_params_name: name of a Robot variable holding a JSON
                                 string of overrides
    :raises ValueError: unsupported file extension, or invalid override JSON
    """
    external_params = BuiltIn(). \
        get_variable_value('${{{name}}}'.format(name=external_params_name))
    if not os.path.exists(file_name):
        file_name = os.path.join(os.path.dirname(__file__), 'data', file_name)
    with open(file_name) as file_obj:
        if file_name.endswith('.json'):
            file_data = Munch.fromDict(load(file_obj))
        elif file_name.endswith('.yaml'):
            file_data = Munch.fromYAML(file_obj)
        else:
            # BUGFIX: previously fell through with 'file_data' unbound,
            # crashing later with a confusing NameError.
            raise ValueError(
                'Unsupported file extension for {0}; '
                'expected .json or .yaml'.format(file_name))
    if mode == 'brokers':
        # Every broker inherits from the 'Default' entry.
        default = file_data.pop('Default')
        brokers = {}
        for k, v in file_data.iteritems():
            brokers[k] = merge_dicts(default, v)
        file_data = brokers
    try:
        ext_params_munch \
            = Munch.fromDict(loads(external_params)) \
            if external_params else Munch()
    except ValueError:
        raise ValueError(
            'Value {param} of command line parameter {name} is invalid'.
            format(name=external_params_name, param=str(external_params))
        )
    return merge_dicts(file_data, ext_params_munch)
def compute_intrs(brokers_data, used_brokers):
    """Compute optimal values for period intervals.

    Folds the 'intervals' entries of every used broker together with
    ``recur``, generally preferring the greater number (the lesser one
    for keys listed in ``keys_to_prefer_lesser``).

    Notice: This function is maximally effective if ``brokers_data``
    does not contain ``Default`` entry.
    Using `load_data_from` with ``mode='brokers'`` is recommended.

    :raises TypeError: when two corresponding values have incompatible
                       types (including lists of unequal length)
    """
    keys_to_prefer_lesser = ('accelerator',)

    def recur(l, r, prefer_greater_numbers=True):
        # Element-wise merge of two interval trees; works on copies so the
        # caller's data is never mutated.
        l, r = deepcopy(l), deepcopy(r)
        if isinstance(l, list) and isinstance(r, list) and len(l) == len(r):
            # Merge equal-length lists position by position.
            lst = []
            for ll, rr in zip(l, r):
                lst.append(recur(ll, rr))
            return lst
        elif isinstance(l, NUM_TYPES) and isinstance(r, NUM_TYPES):
            if l == r:
                return l
            if l > r:
                return l if prefer_greater_numbers else r
            if l < r:
                return r if prefer_greater_numbers else l
        elif isinstance(l, dict) and isinstance(r, dict):
            for k, v in r.iteritems():
                if k not in l.keys():
                    l[k] = v
                elif k in keys_to_prefer_lesser:
                    # e.g. 'accelerator': the smaller value wins.
                    l[k] = recur(l[k], v, prefer_greater_numbers=False)
                else:
                    l[k] = recur(l[k], v)
            return l
        else:
            raise TypeError("Couldn't recur({0}, {1})".format(
                str(type(l)), str(type(r))))

    intrs = []
    for i in used_brokers:
        intrs.append(brokers_data[i]['intervals'])
    # Fold all interval sets pairwise into a single result.
    result = intrs.pop(0)
    for i in intrs:
        result = recur(result, i)
    return result
def prepare_test_tender_data(procedure_intervals,
                             tender_parameters,
                             submissionMethodDetails,
                             accelerator,
                             funders):
    """Build the ``{'data': ...}`` payload for creating a test tender.

    Dispatches on ``tender_parameters['mode']`` to the matching
    test_tender_data_* builder and stores the chosen procedure intervals
    on *tender_parameters* beforehand.

    :param procedure_intervals: mapping of mode name -> intervals
                                ('default' used as fallback)
    :param tender_parameters: tender settings; must contain 'mode';
                              mutated to carry 'intervals'
    :param submissionMethodDetails: forwarded to builders that accept it
    :param accelerator: forwarded only to the belowThreshold builder
    :param funders: forwarded only to the belowThreshold builder
    :raises ValueError: when the mode matches no known procedure
    """
    # Get actual intervals by mode name
    mode = tender_parameters['mode']
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals
    # Set acceleration value for certain modes
    assert isinstance(intervals['accelerator'], int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(intervals['accelerator']).__name__)
    assert intervals['accelerator'] >= 0, \
        "Accelerator should not be less than 0"
    if mode == 'negotiation':
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    elif mode == 'negotiation.quick':
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    elif mode == 'openeu':
        return munchify({'data': test_tender_data_openeu(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'openua':
        return munchify({'data': test_tender_data_openua(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'openua_defense':
        return munchify({'data': test_tender_data_openua_defense(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'open_competitive_dialogue':
        return munchify({'data': test_tender_data_competitive_dialogue(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'reporting':
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    elif mode == 'open_framework':
        return munchify({'data': test_tender_data_framework_agreement(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'belowThreshold':
        # The only builder that also receives funders/accelerator; keyword
        # arguments keep the call unambiguous.
        return munchify({'data': test_tender_data(
            tender_parameters,
            submissionMethodDetails=submissionMethodDetails,
            funders=funders,
            accelerator=accelerator)})
    elif mode == 'open_esco':
        return munchify({'data': test_tender_data_esco(
            tender_parameters, submissionMethodDetails)})
    # No branch matched: fail loudly instead of silently returning None.
    raise ValueError("Invalid mode for prepare_test_tender_data")
def run_keyword_and_ignore_keyword_definitions(name, *args, **kwargs):
    """Like `Run Keyword And Ignore Error`, but survives undefined keywords.

    The builtin variant still aborts its parent keyword / test case when
    the keyword *name* does not exist. This wrapper catches that
    ExecutionFailed and turns it into a ("FAIL", message) result, giving a
    meaningful error instead of an aborted run.
    """
    try:
        status, _ = BuiltIn().run_keyword_and_ignore_error(name, *args, **kwargs)
    except ExecutionFailed as e:
        return "FAIL", e.message
    return status, _
def set_access_key(tender, access_token):
    """Attach ``{'token': access_token}`` as tender.access; return the tender."""
    token_munch = munchify({"token": access_token})
    tender.access = token_munch
    return tender
def get_from_object(obj, path):
    """Return the first value at dotted JSONPath *path* inside *obj*.

    :raises AttributeError: when the path matches nothing
    """
    matches = parse_path(path).find(obj)
    if not matches:
        raise AttributeError('Attribute not found: {0}'.format(path))
    return matches[0].value
def set_to_object(obj, path, value):
    """Set *value* at dotted *path* (with optional [index] parts) in dict *obj*.

    Path components look like ``key``, ``key[3]`` or ``key[-1]`` and are
    separated by dots, e.g. ``data.items[0].id``. Missing dicts/lists are
    created on the way down; lists are padded with None up to the index.
    Returns the munchified result.

    :raises TypeError: when *path* is not a string or an intermediate node
                       has the wrong type
    :raises RuntimeError: when a path component cannot be parsed
    """
    def recur(obj, path, value):
        # Handle the first path component, then recurse into the remainder
        # ('suffix').
        if not isinstance(obj, dict):
            raise TypeError('expected %s, got %s' %
                            (dict.__name__, type(obj)))
        # Search the list index in path to value
        groups = re.search(r'^(?P<key>[0-9a-zA-Z_]+)(?:\[(?P<index>-?\d+)\])?'
                           '(?:\.(?P<suffix>.+))?$', path)
        err = RuntimeError('could not parse the path: ' + path)
        if not groups:
            raise err
        # Keep only the named groups that actually matched.
        gd = {k: v for k, v in groups.groupdict().items() if v is not None}
        is_list = False
        suffix = None
        if 'key' not in gd:
            raise err
        key = gd['key']
        if 'index' in gd:
            is_list = True
            index = int(gd['index'])
        if 'suffix' in gd:
            suffix = gd['suffix']
        if is_list:
            if key not in obj:
                obj[key] = []
            elif not isinstance(obj[key], list):
                raise TypeError('expected %s, got %s' %
                                (list.__name__, type(obj[key])))
            # Pad the list with None entries so obj[key][index] exists;
            # negative indexes grow the list from the front.
            plusone = 1 if index >= 0 else 0
            if len(obj[key]) < abs(index) + plusone:
                while not len(obj[key]) == abs(index) + plusone:
                    extension = [None] * (abs(index) + plusone - len(obj[key]))
                    if index < 0:
                        obj[key] = extension + obj[key]
                    else:
                        obj[key].extend(extension)
                if suffix:
                    # A freshly created slot must be a dict to recurse into.
                    obj[key][index] = {}
            if suffix:
                obj[key][index] = recur(obj[key][index], suffix, value)
            else:
                obj[key][index] = value
        else:
            if key not in obj:
                obj[key] = {}
            if suffix:
                obj[key] = recur(obj[key], suffix, value)
            else:
                obj[key] = value
        return obj
    if not isinstance(path, STR_TYPES):
        raise TypeError('Path must be one of ' + str(STR_TYPES))
    return munchify(recur(obj, path, value))
def wait_to_date(date_stamp):
    """Return seconds to sleep (plus a 2s grace) until *date_stamp*, min 0."""
    target = parse(date_stamp)
    LOGGER.log_message(Message("date: {}".format(target.isoformat()), "INFO"))
    now = get_now()
    LOGGER.log_message(Message("now: {}".format(now.isoformat()), "INFO"))
    remaining = (target - now).total_seconds() + 2
    if remaining < 0:
        return 0
    return remaining
def merge_dicts(a, b):
    """Recursively merge *b* over a deep copy of *a*; munchify the result.

    A non-dict *b* simply replaces whatever was in *a*.
    Origin: https://www.xormedia.com/recursively-merge-dictionaries-in-python/
    """
    if not isinstance(b, dict):
        return b
    merged = deepcopy(a)
    for key, value in b.iteritems():
        if key in merged and isinstance(merged[key], dict):
            merged[key] = merge_dicts(merged[key], value)
        else:
            merged[key] = deepcopy(value)
    return munchify(merged)
def create_data_dict(path_to_value=None, value=None):
    """Create a dictionary with the single key 'data'.

    Without *path_to_value* the key maps to an empty dict. With it, *value*
    (default None) is written at that path via `set_to_object()`; note the
    path is relative to the returned dict, so it usually starts with 'data.'.
    """
    result = {'data': {}}
    if not path_to_value:
        return result
    return set_to_object(result, path_to_value, value)
def munch_dict(arg=None, data=False):
    """Munchify *arg* (default {}), seeding an empty 'data' key when asked."""
    mapping = {} if arg is None else arg
    if data:
        mapping['data'] = {}
    return munchify(mapping)
def get_id_from_object(obj):
    """Extract an '[filq]-hhhhhhhh' id from the title or description prefix.

    :raises TypeError: when the examined field is not a string
    :raises ValueError: when neither field carries a matching id prefix
    """
    regex = r'(^[filq]-[0-9a-fA-F]{8}): '
    for field in ('title', 'description'):
        text = obj.get(field, '')
        if not text:
            continue
        if not isinstance(text, STR_TYPES):
            raise TypeError('%s must be one of %s' % (field, str(STR_TYPES)))
        matched = re.match(regex, text)
        if matched and len(matched.groups()) >= 1:
            return matched.group(1)
    raise ValueError('could not find object ID in "title": "%s", '
                     '"description": "%s"' % (obj.get('title', ''),
                                              obj.get('description', '')))
def get_id_from_string(string):
    """Return the leading 'd-hhhhhhhh' / 'c-hhhhhhhh' id found in *string*."""
    matched = re.match(r'[dc]\-[0-9a-fA-F]{8}', string)
    return matched.group(0)
def get_object_type_by_id(object_id):
    """Map an id's prefix letter to its collection name (None if unknown)."""
    letter_to_type = {'q': 'questions', 'f': 'features', 'i': 'items', 'l': 'lots'}
    return letter_to_type.get(object_id[0])
def get_complaint_index_by_complaintID(data, complaintID):
    """Index of the complaint with *complaintID*; len(data) when absent, 0 for empty data."""
    if not data:
        return 0
    for position, complaint in enumerate(data):
        if complaint['complaintID'] == complaintID:
            return position
    # Not found: mirror the original for/else behaviour (last index + 1).
    return len(data)
def get_object_index_by_id(data, object_id):
    """Index of the element whose embedded id equals *object_id*; len(data) when absent."""
    if not data:
        return 0
    for position, element in enumerate(data):
        if get_id_from_object(element) == object_id:
            return position
    # Not found: mirror the original for/else behaviour (last index + 1).
    return len(data)
def get_object_by_id(data, given_object_id, slice_element, object_id):
    """Select one element of ``data[slice_element]`` by its ID.

    data: object to slice
    given_object_id: the ID to look for
    slice_element: key under which the candidate list lives (e.g. 'lotValues')
    object_id: property holding each candidate's ID (e.g. 'id')

    Falls back to `data` itself when `slice_element` is missing, and to the
    first candidate when the list has one element or no candidate matches.
    """
    try:
        candidates = data[slice_element]
    except KeyError:
        return data
    # A single candidate is returned unconditionally.
    if len(candidates) == 1:
        return candidates[0]
    for candidate in candidates:
        if candidate[object_id] == given_object_id:
            return candidate
    return candidates[0]
def generate_test_bid_data(tender_data):
    """Build bid test data matching the given tender.

    Competitive procedures get self-eligibility/qualification flags; lot
    values (or a single value) are generated per procurement method, and a
    random parameter is added for every tender feature.
    """
    # Procedures whose bids must confirm self-eligibility/qualification.
    competitive_types = (
        'aboveThresholdUA',
        'aboveThresholdUA.defense',
        'aboveThresholdEU',
        'competitiveDialogueUA',
        'competitiveDialogueEU',
        'closeFrameworkAgreementUA',
        'esco',
    )
    method_type = tender_data.get('procurementMethodType', '')
    if method_type in competitive_types:
        bid = test_bid_competitive_data()
        bid.data.selfEligible = True
        bid.data.selfQualified = True
    else:
        bid = test_bid_data()
    if 'lots' in tender_data:
        bid.data.lotValues = []
        for lot in tender_data['lots']:
            if method_type == 'esco':
                value = test_bid_value_esco(tender_data)
            else:
                value = test_bid_value(lot['value']['amount'])
            value['relatedLot'] = lot.get('id', '')
            bid.data.lotValues.append(value)
    else:
        if method_type == 'esco':
            # BUGFIX: previously called test_bid_value(tender_data); esco
            # tenders need the esco-specific generator, as in the lots branch.
            bid.data.update(test_bid_value_esco(tender_data))
        else:
            bid.data.update(test_bid_value(tender_data['value']['amount']))
    if 'features' in tender_data:
        bid.data.parameters = []
        for feature in tender_data['features']:
            parameter = {"value": fake.random_element(elements=(0.05, 0.01, 0)), "code": feature.get('code', '')}
            bid.data.parameters.append(parameter)
    return bid
def mult_and_round(*args, **kwargs):
    """Multiply all positional args and round to `precision` decimals (default 2)."""
    precision = kwargs.get('precision', 2)
    product = reduce(operator.mul, args)
    return round(product, precision)
def convert_amount_string_to_float(amount_string):
    """Parse a human-formatted amount like ``'1 234,56'`` into a float."""
    normalized = amount_string.replace(' ', '').replace(',', '.')
    return float(normalized)
def compare_rationale_types(type1, type2):
    """True when both collections contain exactly the same set of rationale types."""
    return not set(type1).symmetric_difference(set(type2))
def delete_from_dictionary(variable, path):
    """Delete the value addressed by dotted `path` inside `variable`.

    Raises TypeError when `path` is not a string type.
    """
    if type(path) not in STR_TYPES:
        raise TypeError('path must be one of: ' +
                        str(STR_TYPES))
    return xpathdelete(variable, path, separator='.')
def dictionary_should_not_contain_path(dictionary, path):
    """Assert (keyword-style) that dotted `path` does NOT resolve in `dictionary`."""
    path_exists = True
    try:
        xpathget(dictionary, path, separator='.')
    except KeyError:
        path_exists = False
    if path_exists:
        raise RuntimeError("Dictionary contains path '%s'." % path)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import io
import os
import re
from distutils.version import LooseVersion
import subprocess
import pdfkit
import six
from bs4 import BeautifulSoup
from PyPDF2 import PdfFileReader, PdfFileWriter
import frappe
from frappe import _
from frappe.utils import scrub_urls
from frappe.utils.jinja_globals import bundled_asset
PDF_CONTENT_ERRORS = ["ContentNotFoundError", "ContentOperationNotPermittedError",
"UnknownContentError", "RemoteHostClosedError"]
def get_pdf(html, options=None, output=None):
	"""Render `html` to PDF bytes via wkhtmltopdf (pdfkit).

	options: extra wkhtmltopdf options, merged with defaults by prepare_options.
	output: optional PdfFileWriter to append the rendered pages to; when given,
	the writer is returned instead of raw bytes.
	Returns PDF bytes (or `output`). A 'password' option encrypts the result.
	"""
	html = scrub_urls(html)
	html, options = prepare_options(html, options)
	# Hard-disable JS and local file access regardless of caller options.
	options.update({
		"disable-javascript": "",
		"disable-local-file-access": ""
	})
	filedata = ''
	# disable-smart-shrinking only exists in wkhtmltopdf > 0.12.3.
	if LooseVersion(get_wkhtmltopdf_version()) > LooseVersion('0.12.3'):
		options.update({"disable-smart-shrinking": ""})
	try:
		# Set filename property to false, so no file is actually created
		filedata = pdfkit.from_string(html, False, options=options or {})
		# https://pythonhosted.org/PyPDF2/PdfFileReader.html
		# create in-memory binary streams from filedata and create a PdfFileReader object
		reader = PdfFileReader(io.BytesIO(filedata))
	except OSError as e:
		if any([error in str(e) for error in PDF_CONTENT_ERRORS]):
			if not filedata:
				frappe.throw(_("PDF generation failed because of broken image links"))
			# allow pdfs with missing images if file got created
			# NOTE(review): if PdfFileReader itself raised, `reader` is unbound
			# here and below — confirm this path can only be reached when
			# pdfkit.from_string succeeded.
			if output:  # output is a PdfFileWriter object
				output.appendPagesFromReader(reader)
		else:
			raise
	finally:
		# Always remove temp header/footer/cookie-jar files.
		cleanup(options)
	if "password" in options:
		password = options["password"]
		if six.PY2:
			password = frappe.safe_encode(password)
	if output:
		output.appendPagesFromReader(reader)
		return output
	writer = PdfFileWriter()
	writer.appendPagesFromReader(reader)
	if "password" in options:
		writer.encrypt(password)
	filedata = get_file_data_from_writer(writer)
	return filedata
def get_file_data_from_writer(writer_obj):
	"""Serialize a PdfFileWriter into raw bytes via an in-memory stream."""
	buffer = io.BytesIO()
	writer_obj.write(buffer)
	# Rewind before reading back everything that was written.
	buffer.seek(0)
	return buffer.read()
def prepare_options(html, options):
	"""Merge default wkhtmltopdf options, HTML-embedded options and cookies.

	Returns (html, options): the HTML may be rewritten by
	read_options_from_html (header/footer extraction, visibility toggles).
	"""
	options = options or {}
	options.update({
		'print-media-type': None,
		'background': None,
		'images': None,
		'quiet': None,
		# 'no-outline': None,
		'encoding': "UTF-8",
		#'load-error-handling': 'ignore'
	})
	# Default both horizontal margins unless the caller set them.
	for margin in ('margin-right', 'margin-left'):
		if not options.get(margin):
			options[margin] = '15mm'
	html, html_options = read_options_from_html(html)
	options.update(html_options or {})
	# cookies
	options.update(get_cookie_options())
	# page size: fall back to the Print Settings doctype, then A4
	if not options.get("page-size"):
		options['page-size'] = frappe.db.get_single_value("Print Settings", "pdf_page_size") or "A4"
	return html, options
def get_cookie_options():
	"""Build wkhtmltopdf cookie-jar options carrying the current session id.

	Returns an empty dict when there is no session/request context.
	"""
	options = {}
	if frappe.session and frappe.session.sid and hasattr(frappe.local, "request"):
		# Use wkhtmltopdf's cookie-jar feature to set cookies and restrict them to host domain
		jar_path = "/tmp/{}.jar".format(frappe.generate_hash())
		# Remove port from request.host
		# https://werkzeug.palletsprojects.com/en/0.16.x/wrappers/#werkzeug.wrappers.BaseRequest.host
		domain = frappe.utils.get_host_name().split(":", 1)[0]
		with open(jar_path, "w") as jar:
			jar.write("sid={}; Domain={};\n".format(frappe.session.sid, domain))
		options['cookie-jar'] = jar_path
	return options
def read_options_from_html(html):
	"""Extract wkhtmltopdf page options embedded in print-format HTML.

	Splits out header/footer fragments, applies visible-pdf/hidden-pdf
	toggles, and scrapes margin/page options from inline `.print-format`
	CSS using a regex. Returns (cleaned_html, options).
	"""
	options = {}
	soup = BeautifulSoup(html, "html5lib")
	options.update(prepare_header_footer(soup))
	toggle_visible_pdf(soup)
	# use regex instead of soup-parser; CSS scraping is best-effort only
	for attr in ("margin-top", "margin-bottom", "margin-left", "margin-right", "page-size", "header-spacing", "orientation"):
		try:
			pattern = re.compile(r"(\.print-format)([\S|\s][^}]*?)(" + str(attr) + r":)(.+)(mm;)")
			match = pattern.findall(html)
			if match:
				options[attr] = str(match[-1][3]).strip()
		except Exception:
			# BUGFIX: was a bare `except:` which would also swallow
			# SystemExit/KeyboardInterrupt.
			pass
	return str(soup), options
def prepare_header_footer(soup):
	"""Extract #header-html / #footer-html fragments into temp HTML files.

	Each fragment is rendered through the pdf_header_footer template (with the
	page's <head>, <style> tags and print CSS inlined) and written to /tmp;
	the resulting wkhtmltopdf options map the fragment id to that file path.
	When a fragment is absent, a default 15mm top/bottom margin is set instead.
	"""
	options = {}
	head = soup.find("head").contents
	styles = soup.find_all("style")
	print_css = bundled_asset('print.bundle.css').lstrip('/')
	css = frappe.read_file(os.path.join(frappe.local.sites_path, print_css))
	# extract header and footer
	for html_id in ("header-html", "footer-html"):
		content = soup.find(id=html_id)
		if content:
			# there could be multiple instances of header-html/footer-html;
			# remove them all from the main document body
			for tag in soup.find_all(id=html_id):
				tag.extract()
			toggle_visible_pdf(content)
			html = frappe.render_template("templates/print_formats/pdf_header_footer.html", {
				"head": head,
				"content": content,
				"styles": styles,
				"html_id": html_id,
				"css": css
			})
			# create temp file (removed later by cleanup())
			fname = os.path.join("/tmp", "frappe-pdf-{0}.html".format(frappe.generate_hash()))
			with open(fname, "wb") as f:
				f.write(html.encode("utf-8"))
			# {"header-html": "/tmp/frappe-pdf-random.html"}
			options[html_id] = fname
		else:
			if html_id == "header-html":
				options["margin-top"] = "15mm"
			elif html_id == "footer-html":
				options["margin-bottom"] = "15mm"
	return options
def cleanup(options):
	"""Remove temp files (header/footer HTML, cookie jar) referenced by options."""
	for key in ("header-html", "footer-html", "cookie-jar"):
		path = options.get(key)
		if path and os.path.exists(path):
			os.remove(path)
def toggle_visible_pdf(soup):
	"""Apply PDF visibility classes in place: unhide .visible-pdf, drop .hidden-pdf."""
	for node in soup.find_all(attrs={"class": "visible-pdf"}):
		# unhide for PDF output by stripping the marker class
		node.attrs['class'].remove('visible-pdf')
	for node in soup.find_all(attrs={"class": "hidden-pdf"}):
		# these elements must not appear in the PDF at all
		node.extract()
def get_wkhtmltopdf_version():
	"""Return the installed wkhtmltopdf version string (cached), or '0'."""
	version = frappe.cache().hget("wkhtmltopdf_version", None)
	if not version:
		try:
			output = subprocess.check_output(["wkhtmltopdf", "--version"])
			# output looks like "wkhtmltopdf 0.12.x ..."
			version = output.decode('utf-8').split(" ")[1]
			frappe.cache().hset("wkhtmltopdf_version", None, version)
		except Exception:
			# binary missing or unexpected output -- fall through to '0'
			pass
	return (version or '0')
|
|
import logging
import collections
import pickle
from copy import deepcopy
import pymongo
import bson
from . import utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
_CONVERSION_MAP = dict()
ID_KEY = '_id'
CONVERSION_KEY = '_conversion_key'
TIMESTAMP_CREATION_KEY = 'timestamp_created'
TIMESTAMP_MODIFICATION_KEY = 'timestamp_modified'
class MangoException(Exception):
"""Base exception for all Mango-specific exceptions."""
pass
def assign(database, collection=None):
    """
    A class decorator that binds a :class:`Document` subclass to a database/collection.

    When `collection` is ``None``, a :class:`Collection` named after the class
    (``coll_<classname>``) is created automatically; otherwise the collection
    must have been generated from `database`.

    Parameters
    ----------
    database : a :class:`Database`
    collection : a :class:`Collection`, optional
    """
    def assigner(cls):
        target = collection
        if target is None:
            target = Collection(database, f'coll_{cls.__name__.lower()}')
        if target.database != database:
            raise MangoException(f'Could not assign document type {cls} to {target}: the collection must belong to {database}, not {target.database}')
        cls.collection = target
        cls.database = target.database
        logger.debug(f'Assigned document type {cls} to collection {target}')
        return cls
    return assigner
def register(cls):
    """
    A class decorator that "manually" registers a class for conversion when coming back from the database.
    Can be used as a plain function as well.

    Parameters
    ----------
    cls
        The class to register.
    """
    _CONVERSION_MAP[cls.__name__] = cls
    # BUGFIX: message was missing the f-prefix, so "{cls}" was logged literally.
    logger.debug(f'Registered objects of type {cls} for conversion')
    return cls
def serialize(cls):
    """
    A class decorator that marks a class to be serialized (pickled) when stored in a :class:`Document`.

    Parameters
    ----------
    cls
        The class to mark for serialization.
    """
    cls._serialize = True
    # BUGFIX: message was missing the f-prefix, so "{cls}" was logged literally.
    logger.debug(f'Marked objects of type {cls} for serialization')
    return cls
class Serializable(type):
    """
    Metaclass alternative to the :func:`serialize` class decorator: any class
    created with this metaclass is automatically marked for serialization.
    """
    def __new__(mcs, *args, **kwargs):
        """Create the class, then flag it for serialization."""
        new_cls = super().__new__(mcs, *args, **kwargs)
        return serialize(new_cls)
class DocumentMetaClass(type):
    """
    Metaclass for :class:`Document`: every subclass is automatically
    registered for conversion via :func:`register`.
    """
    def __new__(mcs, *args, **kwargs):
        """Create the Document class and register it in the conversion map."""
        new_cls = super().__new__(mcs, *args, **kwargs)
        return register(new_cls)
class Document(dict, metaclass = DocumentMetaClass):
    """
    The base class for any user-defined documents.
    Document is a subclass of :class:`dict`, with dot access to its keys.
    There are two forbidden keys: ``'_id'`` and ``'_docname'``.
    These are used internally to uniquely identify objects and to reconstruct raw data from the database, respectively.
    Mango automatically generates a UUID for the Document to use as its ``_id``, as well as a creation and modification timestamp.

    NOTE(review): the code actually reserves ``'_conversion_key'`` (CONVERSION_KEY),
    not ``'_docname'`` as stated above — confirm which name is current.

    Attributes
    ----------
    id : :class:`str`
        A UUID, unique to the document, generated by Mango.
    conversion_key : :class:`str`
        The name of the Document (i.e., the name of the class).
    id_filter : :class:`dict`
        A dictionary: ``{'_id': self.id}``.
    collection : :class:`Collection`
        The collection where Documents of this type are stored.
    database : :class:`Database`
        The database where this class's Collection is stored.
    """
    # Keys managed by Mango; __setattr__ refuses to assign any of these
    # (including '_reserved_keys' itself, to protect the guard set).
    _reserved_keys = {
        '_reserved_keys',
        ID_KEY, 'id',
        CONVERSION_KEY, 'conversion_key',
        TIMESTAMP_CREATION_KEY,
        TIMESTAMP_MODIFICATION_KEY,
    }
    def __init__(self, **kwargs):
        """Initialize from keyword data, filling in id/conversion key/timestamps if absent."""
        super().__init__(**kwargs) # off to the dict constructor
        if ID_KEY not in self:
            self[ID_KEY] = utils.get_uid()
        if CONVERSION_KEY not in self:
            self[CONVERSION_KEY] = self.__class__.__name__
        now = utils.get_now()
        # Both timestamps start equal; modification is bumped by __setattr__.
        for key in (TIMESTAMP_CREATION_KEY, TIMESTAMP_MODIFICATION_KEY):
            if key not in self:
                self[key] = now
    @property
    def id(self):
        """The document's unique id (the '_id' entry)."""
        return self[ID_KEY]
    @property
    def conversion_key(self):
        """The class name used to convert raw database data back to this type."""
        return self[CONVERSION_KEY]
    def __eq__(self, other):
        # Equal only when the other object is the same Document type AND
        # has equal dict contents.
        return isinstance(other, self.__class__) and super().__eq__(other)
    def __hash__(self):
        # Hash on the unique id only; dicts are otherwise unhashable.
        return hash(self[ID_KEY])
    def copy(self):
        """
        Return a deep copy of the Document.

        Returns
        -------
        :class:`Document`
        """
        return deepcopy(self)
    def __getattr__(self, item):
        """Override attribute access to also check the dict, allowing dot access for the dict members."""
        # __getattr__ is only invoked when normal attribute lookup fails,
        # so real attributes/properties win over dict keys.
        try:
            return self[item]
        except KeyError:
            return super().__getattribute__(item)
    def __setattr__(self, key, value):
        """Override attribute access to actually go the dict."""
        if key not in self._reserved_keys:
            # Every attribute-style write bumps the modification timestamp.
            self['timestamp_modified'] = utils.get_now()
            self[key] = value
        else:
            raise MangoException(f"{self.__class__.__name__} key '{key}' is managed by Mango and should not be modified.")
    def __str__(self):
        # Multi-line listing of non-reserved keys, sorted for stable output.
        return ''.join((repr(self),
                        ' {',
                        *(f'\n   {k}: {v}' for k, v in sorted(self.items()) if k not in self._reserved_keys),
                        '\n}'))
    def __repr__(self):
        return f'{self.__class__.__name__} [id = {self.id}]'
    def equal_on_keys(self, other, *keys, match_document_type = True):
        """
        Test whether this Document and another Document are equal, considering only the `keys`.

        Parameters
        ----------
        other : :class:`Document`
            The Document to be checked against this one for equality.
        keys : any number of :class:`str`
            The keys to check for equality on.
        match_document_type : :class:`bool`
            If ``True``, the Documents must match type as well as key values.

        Returns
        -------
        :class:`bool`
            ``True`` if the Documents agree on all `keys`, ``False`` otherwise.
        """
        keys = set(keys)
        if match_document_type:
            # Comparing the conversion key is equivalent to comparing types.
            keys.add(CONVERSION_KEY)
        return all(self[key] == other[key] for key in keys)
    def agree_on_keys(self, other):
        """
        Return the keys that this Document and `other` agree on.

        Parameters
        ----------
        other : :class:`Document`
            The Document to compare to.

        Returns
        -------
        an iterator of :class:`str`
            The keys that the two Documents agree on.
        """
        yield from (key for key, value in self.items() if other[key] == value)
    def disagree_on_keys(self, other):
        """
        Return the keys that this Document and `other` disagree on.

        Parameters
        ----------
        other : :class:`Document`
            The Document to compare to.

        Returns
        -------
        an iterator of :class:`str`
            The keys that the two Documents disagree on.
        """
        yield from (key for key, value in self.items() if other[key] != value)
    def _to_db(self):
        """
        Return a dictionary representing this object, but with serializable contents (instances of classes tagged with :func:`serialize`) replaced with their serializations.
        Called internally to get a representation more suitable for the database.

        Returns
        -------
        :class:`dict`
            A dictionary that's ready to be inserted into the database.
        """
        # Values whose class was tagged with serialize() are pickled and
        # wrapped in bson.Binary; everything else is stored as-is.
        copy = {**self, **{k: bson.Binary(pickle.dumps(v)) for k, v in self.items() if getattr(v, '_serialize', False)}}
        return copy
    def save(self):
        """
        Save the Document to its assigned collection.

        Returns
        -------
        :class:`pymongo.results._WriteResult`
            A pymongo database write result object (see `<https://api.mongodb.com/python/current/api/pymongo/results.html>`_)
        """
        try:
            return self.collection.find_one_and_replace(self.id_filter, self._to_db(), upsert = True)
        except AttributeError: # self.collection doesn't exist, most likely because the document hasn't been assigned to a collection using mango.assign()
            raise MangoException(f'Document {repr(self)} cannot be saved (probably because it has not been assigned to a collection).')
    @property
    def id_filter(self):
        """A pymongo filter that matches exactly this document: {'_id': self.id}."""
        return {ID_KEY: self.id}
    def find_matching(self, *keys, match_document_type = True):
        """
        Find all documents in the collection that match this document on the values of the given keys.

        Parameters
        ----------
        keys : any number of :class:`str`
            Found documents will match this document on these keys.
        match_document_type : optional, default `True`
            If `True`, found documents must match this document's type as well. Equivalent to adding ``'_docname'`` to `keys`.

        Returns
        -------
        :class:`Cursor`
            A :class:`Cursor` representing the results of the query.
        """
        keys = set(keys)
        if match_document_type:
            keys.add(CONVERSION_KEY)
        return self.collection.find(utils.filter_dict(self, keys))
    def find_self(self):
        """Find this Document from the database. Shortcut for ``doc.find_one('_id')``."""
        return self.collection.find_one(self.id_filter)
    def reload(self):
        """Perform an in-place update of this Document from the database."""
        # NOTE(review): this updates the instance __dict__, not the mapping
        # itself, so self[...] keeps stale values while attribute lookups get
        # shadowed by instance attributes. Probably should be
        # self.update(self.find_self()) — confirm intent.
        self.__dict__.update(self.find_self())
    @classmethod
    def find(cls, **filters):
        """Find all documents in the collection of this type, with additional filters given by the kwargs as if they were arguments to a dictionary used as Collection.find(filters)."""
        filters[CONVERSION_KEY] = cls.__name__
        return cls.collection.find({**filters})
    @classmethod
    def find_one(cls, **filters):
        """Find all documents in the collection of this type, with additional filters given by the kwargs as if they were arguments to a dictionary used as Collection.find(filters)."""
        filters[CONVERSION_KEY] = cls.__name__
        return cls.collection.find_one({**filters})
def save_many(*documents):
    """
    Save many :class:`Documents <Document>` to their associated collections.
    One bulk write operation is issued per distinct collection among the
    `documents`.

    Parameters
    ----------
    documents : any number of :class:`Documents <Document>`
        Documents to be saved.

    Returns
    -------
    :class:`dict`
        A dictionary: ``{collection: write_result}`` (:class:`pymongo.results.BulkWriteResult`).
    """
    grouped = collections.defaultdict(list)
    for document in documents:
        replace_op = pymongo.ReplaceOne(document.id_filter, document._to_db(), upsert = True)
        grouped[document.collection].append(replace_op)
    return {coll: coll.bulk_write(ops) for coll, ops in grouped.items()}
class Client(pymongo.MongoClient):
    """
    Drop-in replacement for :class:`pymongo.mongo_client.MongoClient` whose
    databases (and, transitively, collections) are Mango's own classes.
    """
    def __getitem__(self, name):
        """Return a Mango :class:`Database` rather than pymongo's Database."""
        return Database(self, name)
class Database(pymongo.database.Database):
    """
    Drop-in replacement for :class:`pymongo.database.Database` whose
    collections are Mango :class:`Collection` objects.
    """
    def __getitem__(self, name):
        """Return a Mango :class:`Collection` rather than pymongo's Collection."""
        return Collection(self, name)
    def __hash__(self):
        # Hash on repr so databases can be dict keys / set members.
        return hash(repr(self))
class Collection(pymongo.collection.Collection):
    """
    Drop-in replacement for :class:`pymongo.collection.Collection` whose
    queries yield Mango :class:`Cursor` objects.
    """
    def find(self, *args, **kwargs):
        """Run a query through Mango's :class:`Cursor` so results convert back to Documents."""
        return Cursor(self, *args, **kwargs)
    def __hash__(self):
        # Hash on repr so collections can be dict keys (see save_many).
        return hash(repr(self))
def _convert(doc, depth = 0):
    """Rebuild rich objects from raw database values.

    Dicts carrying CONVERSION_KEY become their registered Document subclass
    (via the global ``_CONVERSION_MAP``), bytes are un-pickled when possible,
    and a plain top-level value becomes a generic Document. Mapping values
    are then converted recursively in place.
    """
    if isinstance(doc, dict) and CONVERSION_KEY in doc:
        doc = _CONVERSION_MAP[doc[CONVERSION_KEY]](**doc)
    elif isinstance(doc, bytes):
        # could be a pickled python object, or just raw bytes
        try:
            doc = pickle.loads(doc)
        except pickle.PickleError:
            pass
    elif depth == 0:
        doc = Document(**doc)
    try:
        for key, value in doc.items():
            doc[key] = _convert(value, depth = depth + 1)
    except AttributeError:
        # not a mapping -- nothing to recurse into
        pass
    return doc
class Cursor(pymongo.cursor.Cursor):
    """
    Drop-in replacement for :class:`pymongo.cursor.Cursor` that converts raw
    results back into Mango :class:`Documents <Document>` on the way out.
    """
    def _clone_base(self):
        """Override clone method in Cursor to build based on ``self.__class__`` instead of a hard-coded name."""
        # NOTE(review): `self.__collection` name-mangles to `_Cursor__collection`,
        # which matches pymongo's private attribute only because this subclass
        # is also named `Cursor`. Fragile — confirm against the pinned pymongo
        # version.
        return self.__class__(self.__collection)
    def __next__(self):
        """Wrap calls to __next__ by the document_type."""
        # Each raw dict yielded by pymongo is converted to a Document.
        return _convert(super().__next__())
    def __getitem__(self, item):
        """
        Wrap calls to ``__getitem__`` so that they return Mango :class:`Documents <Document>'.
        Some care must be taken because the ``super()`` call might return a new :class:`Cursor` over a subset of the originally returned documents instead of a single document (i.e., indexing).
        In this case, we should simply return the cursor
        """
        from_super = super().__getitem__(item)
        if isinstance(from_super, self.__class__):
            return from_super
        else:
            return _convert(from_super, depth = 0)
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
  def __init__(self, spec):
    """Capture the gyp target `spec` and index its per-config xcode_settings.

    spec must contain a 'configurations' mapping; each config's
    'xcode_settings' dict (possibly empty) is stored per config name.
    """
    self.spec = spec
    # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
    # This means self.xcode_settings[config] always contains all settings
    # for that config -- the per-target settings as well. Settings that are
    # the same for all configs are implicitly per-target settings.
    self.xcode_settings = {}
    configs = spec['configurations']
    for configname, config in configs.iteritems():
      self.xcode_settings[configname] = config.get('xcode_settings', {})
    # This is only non-None temporarily during the execution of some methods.
    self.configname = None
    # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
    self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
  def _WarnUnimplemented(self, test_key):
    """Warn (stdout) when an xcode_settings key we don't emulate is present."""
    if test_key in self._Settings():
      print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.app'
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library'):
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
  def _GetSdkVersionInfoItem(self, sdk, infoitem):
    """Run `xcodebuild -version -sdk <sdk> <infoitem>` and return its output.

    Raises GypError when xcodebuild exits non-zero; stderr is folded into
    stdout so the error output is captured too.
    """
    job = subprocess.Popen(['xcodebuild', '-version', '-sdk', sdk, infoitem],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    out = job.communicate()[0]
    if job.returncode != 0:
      sys.stderr.write(out + '\n')
      raise GypError('Error %d running xcodebuild' % job.returncode)
    return out.rstrip('\n')
def _SdkPath(self):
sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx')
if sdk_root not in XcodeSettings._sdk_path_cache:
XcodeSettings._sdk_path_cache[sdk_root] = self._GetSdkVersionInfoItem(
sdk_root, 'Path')
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
  def GetCflags(self, configname):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations for config `configname`.

    Temporarily selects `configname` (self.configname) while reading settings
    and resets it before returning.
    """
    # These functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.
    self.configname = configname
    cflags = []
    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings():
      cflags.append('-isysroot %s' % sdk_root)
    if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
      cflags.append('-Wconstant-conversion')
    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
      cflags.append('-funsigned-char')
    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
      cflags.append('-fasm-blocks')
    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
    else:
      pass
      # TODO: In this case, it depends on the target. xcode passes
      # mdynamic-no-pic by default for executable and possibly static lib
      # according to mento
    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
      cflags.append('-mpascal-strings')
    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
      dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
      if dbg_format == 'dwarf':
        cflags.append('-gdwarf-2')
      elif dbg_format == 'stabs':
        raise NotImplementedError('stabs debug format is not supported yet.')
      elif dbg_format == 'dwarf-with-dsym':
        # dSYM generation itself is handled elsewhere; the compile flag is
        # the same as plain dwarf.
        cflags.append('-gdwarf-2')
      else:
        raise NotImplementedError('Unknown debug format %s' % dbg_format)
    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
      cflags.append('-fvisibility=hidden')
    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
      cflags.append('-Werror')
    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
      cflags.append('-Wnewline-eof')
    self._AppendPlatformVersionMinFlags(cflags)
    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
      self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')
    archs = self._Settings().get('ARCHS', ['i386'])
    if len(archs) != 1:
      # TODO: Supporting fat binaries will be annoying.
      self._WarnUnimplemented('ARCHS')
      archs = ['i386']
    cflags.append('-arch ' + archs[0])
    # SSE flags only make sense on Intel archs.
    if archs[0] in ('i386', 'x86_64'):
      if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse3')
      if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
                    default='NO'):
        cflags.append('-mssse3')  # Note 3rd 's'.
      if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.1')
      if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
        cflags.append('-msse4.2')
    cflags += self._Settings().get('WARNING_CFLAGS', [])
    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
      cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
    self.configname = None
    return cflags
def GetCflagsC(self, configname):
  """Returns the extra cflags for .c (and .m) compilations."""
  self.configname = configname
  flags = []
  # GCC_C_LANGUAGE_STANDARD (e.g. 'c99') maps directly onto -std.
  self._Appendf(flags, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
  flags.extend(self._Settings().get('OTHER_CFLAGS', []))
  self.configname = None
  return flags
def GetCflagsCC(self, configname):
  """Returns flags that need to be added to .cc, and .mm compilations."""
  self.configname = configname
  cflags_cc = []

  clang_cxx_language_standard = self._Settings().get(
      'CLANG_CXX_LANGUAGE_STANDARD')
  # Note: Don't make c++0x to c++11 so that c++0x can be used with older
  # clangs that don't understand c++11 yet (like Xcode 4.2's).
  if clang_cxx_language_standard:
    cflags_cc.append('-std=%s' % clang_cxx_language_standard)

  self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')

  # The following Xcode settings default to ON; a flag is only emitted for
  # the non-default (opt-out) value.
  if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
    cflags_cc.append('-fno-rtti')
  if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
    cflags_cc.append('-fno-exceptions')
  if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
    cflags_cc.append('-fvisibility-inlines-hidden')
  if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
    cflags_cc.append('-fno-threadsafe-statics')
  # Note: This flag is a no-op for clang, it only has an effect for gcc.
  if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
    cflags_cc.append('-Wno-invalid-offsetof')

  other_ccflags = []
  for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
    # TODO: More general variable expansion. Missing in many other places too.
    # Any spelling of '$(inherited)' means "inherit OTHER_CFLAGS".
    if flag in ('$inherited', '$(inherited)', '${inherited}'):
      flag = '$OTHER_CFLAGS'
    if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
      other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
    else:
      other_ccflags.append(flag)
  cflags_cc += other_ccflags

  self.configname = None
  return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def GetCflagsObjC(self, configname):
  """Returns the extra cflags for .m (Objective-C) compilations."""
  self.configname = configname
  flags = []
  self._AddObjectiveCGarbageCollectionFlags(flags)
  self.configname = None
  return flags
def GetCflagsObjCC(self, configname):
  """Returns the extra cflags for .mm (Objective-C++) compilations."""
  self.configname = configname
  flags = []
  self._AddObjectiveCGarbageCollectionFlags(flags)
  # Off by default; only the explicit opt-in adds the flag.
  if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
    flags.append('-fobjc-call-cxx-cdtors')
  self.configname = None
  return flags
def GetInstallNameBase(self):
  """Return DYLIB_INSTALL_NAME_BASE for this target, or None when it does
  not apply. Xcode sets this for shared_libraries, and for nonbundled
  loadable_modules."""
  target_type = self.spec['type']
  applies = (target_type == 'shared_library' or
             (target_type == 'loadable_module' and not self._IsBundle()))
  if not applies:
    return None
  if self._IsBundle():
    fallback = '/Library/Frameworks'
  else:
    fallback = '/usr/local/lib'
  return self.GetPerTargetSetting('DYLIB_INSTALL_NAME_BASE', default=fallback)
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
  """Return LD_DYLIB_INSTALL_NAME for this target."""
  # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
  if (self.spec['type'] != 'shared_library' and
      (self.spec['type'] != 'loadable_module' or self._IsBundle())):
    return None

  default_install_name = \
      '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
  install_name = self.GetPerTargetSetting(
      'LD_DYLIB_INSTALL_NAME', default=default_install_name)

  # Hardcode support for the variables used in chromium for now, to
  # unblock people using the make build.
  if '$' in install_name:
    # Only the two known spellings are supported; anything else is an error.
    assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
        '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
        'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
        'yet in target \'%s\' (got \'%s\')' %
            (self.spec['target_name'], install_name))

    install_name = install_name.replace(
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
        self._StandardizePath(self.GetInstallNameBase()))
    if self._IsBundle():
      # These are only valid for bundles, hence the |if|.
      install_name = install_name.replace(
          '$(WRAPPER_NAME)', self.GetWrapperName())
      install_name = install_name.replace(
          '$(PRODUCT_NAME)', self.GetProductName())
    else:
      assert '$(WRAPPER_NAME)' not in install_name
      assert '$(PRODUCT_NAME)' not in install_name

    install_name = install_name.replace(
        '$(EXECUTABLE_PATH)', self.GetExecutablePath())
  return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
  """Returns flags that need to be passed to the linker.

  Args:
      configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such as static and dynamic
          libraries are placed. This is added to the library search path.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
  """
  self.configname = configname
  ldflags = []

  # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
  # can contain entries that depend on this. Explicitly absolutify these.
  for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
    ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))

  if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
    ldflags.append('-Wl,-dead_strip')
  if self._Test('PREBINDING', 'YES', default='NO'):
    ldflags.append('-Wl,-prebind')

  self._Appendf(
      ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
  self._Appendf(
      ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
  self._AppendPlatformVersionMinFlags(ldflags)

  if 'SDKROOT' in self._Settings():
    ldflags.append('-isysroot ' + self._SdkPath())

  for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
    ldflags.append('-L' + gyp_to_build_path(library_path))

  if 'ORDER_FILE' in self._Settings():
    ldflags.append('-Wl,-order_file ' +
                   '-Wl,' + gyp_to_build_path(
                       self._Settings()['ORDER_FILE']))

  # Fat binaries are not supported; fall back to a single (default) arch.
  archs = self._Settings().get('ARCHS', ['i386'])
  if len(archs) != 1:
    # TODO: Supporting fat binaries will be annoying.
    self._WarnUnimplemented('ARCHS')
    archs = ['i386']
  ldflags.append('-arch ' + archs[0])

  # Xcode adds the product directory by default.
  ldflags.append('-L' + product_dir)

  install_name = self.GetInstallName()
  if install_name:
    # Escape spaces so the flag survives later shell word-splitting.
    ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))

  for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
    ldflags.append('-Wl,-rpath,' + rpath)

  config = self.spec['configurations'][self.configname]
  framework_dirs = config.get('mac_framework_dirs', [])
  for directory in framework_dirs:
    ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))

  self.configname = None
  return ldflags
def GetLibtoolflags(self, configname):
  """Returns flags that need to be passed to the static linker.

  Args:
      configname: The name of the configuration to get flags for.
  """
  self.configname = configname
  # libtool consumes the same OTHER_LDFLAGS entries as the linker does.
  flags = list(self._Settings().get('OTHER_LDFLAGS', []))
  # TODO(thakis): ARCHS?
  self.configname = None
  return flags
def GetPerTargetSettings(self):
  """Gets a list of all the per-target settings. This will only fetch keys
  whose values are the same across all configurations."""
  first_pass = True
  result = {}
  for configname in sorted(self.xcode_settings.keys()):
    if first_pass:
      result = dict(self.xcode_settings[configname])
      first_pass = False
    else:
      # Drop keys whose value differs between configurations. Keys present
      # in |result| but missing from a later configuration are (as before)
      # left untouched.
      # Bug fix: dict.iteritems() is Python-2-only and raises AttributeError
      # on Python 3; items() behaves identically on both.
      for key, value in self.xcode_settings[configname].items():
        if key not in result:
          continue
        elif result[key] != value:
          del result[key]
  return result
def GetPerTargetSetting(self, setting, default=None):
  """Tries to get xcode_settings.setting from spec. Assumes that the setting
  has the same value in all configurations and throws otherwise.

  Args:
      setting: The xcode_settings key to look up.
      default: Returned when the setting is absent from every configuration.
  """
  first_pass = True
  result = None
  for configname in sorted(self.xcode_settings.keys()):
    if first_pass:
      result = self.xcode_settings[configname].get(setting, None)
      first_pass = False
    else:
      assert result == self.xcode_settings[configname].get(setting, None), (
          "Expected per-target setting for '%s', got per-config setting "
          # Bug fix: this referenced the undefined global |spec|, which made
          # a failed assertion raise NameError instead of the message.
          "(target %s)" % (setting, self.spec['target_name']))
  if result is None:
    return default
  return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
  """Returns a list of shell commands that contain the shell commands
  necessary to strip this target's binary. These should be run as postbuilds
  before the actual postbuilds run."""
  self.configname = configname

  result = []
  # Stripping only happens when both deployment postprocessing and
  # strip-installed-product are enabled.
  if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
      self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):

    # The default strip style depends on the product kind: bundles keep
    # global symbols, plain executables strip everything.
    default_strip_style = 'debugging'
    if self._IsBundle():
      default_strip_style = 'non-global'
    elif self.spec['type'] == 'executable':
      default_strip_style = 'all'

    strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
    # Map the Xcode strip style onto strip(1) flags.
    strip_flags = {
        'all': '',
        'non-global': '-x',
        'debugging': '-S',
    }[strip_style]

    explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
    if explicit_strip_flags:
      strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)

    if not quiet:
      result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
    result.append('strip %s %s' % (strip_flags, output_binary))

  self.configname = None
  return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
  """Returns the shell commands to run as postbuilds for this target,
  ahead of the user-specified postbuilds."""
  # dSYMs need to build before stripping happens.
  debug_cmds = self._GetDebugInfoPostbuilds(
      configname, output, output_binary, quiet)
  strip_cmds = self._GetStripPostbuilds(configname, output_binary, quiet)
  return debug_cmds + strip_cmds
def _AdjustLibrary(self, library):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
  """Transforms entries like 'Cocoa.framework' in libraries into entries like
  '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
  """
  return [self._AdjustLibrary(lib) for lib in libraries]
class MacPrefixHeader(object):
  """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.

  This feature consists of several pieces:
  * If GCC_PREFIX_HEADER is present, all compilations in that project get an
    additional |-include path_to_prefix_header| cflag.
  * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
    instead compiled, and all other compilations in the project get an
    additional |-include path_to_compiled_header| instead.
    + Compiled prefix headers have the extension gch. There is one gch file
      for every language used in the project (c, cc, m, mm), since gch files
      for different languages aren't compatible.
    + gch files themselves are built with the target's normal cflags, but
      they obviously don't get the |-include| flag. Instead, they need a -x
      flag that describes their language.
    + All o files in the target need to depend on the gch file, to make sure
      it's built before any o file is built.

  This class helps with some of these tasks, but it needs help from the build
  system for writing dependencies to the gch files, for writing build
  commands for the gch files, and for figuring out the location of the gch
  files.
  """
  def __init__(self, xcode_settings,
               gyp_path_to_build_path, gyp_path_to_build_output):
    """If xcode_settings is None, all methods on this class are no-ops.

    Args:
        gyp_path_to_build_path: A function that takes a gyp-relative path,
            and returns a path relative to the build directory.
        gyp_path_to_build_output: A function that takes a gyp-relative path
            and a language code ('c', 'cc', 'm', or 'mm'), and that returns
            a path to where the output of precompiling that path for that
            language should be placed (without the trailing '.gch').
    """
    # This doesn't support per-configuration prefix headers. Good enough
    # for now.
    self.header = None            # Build-relative path of the prefix header.
    self.compile_headers = False  # Whether the header gets precompiled.
    if xcode_settings:
      self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
      self.compile_headers = xcode_settings.GetPerTargetSetting(
          'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
    # Maps language code -> output path (without '.gch') per docstring above.
    self.compiled_headers = {}
    if self.header:
      if self.compile_headers:
        for lang in ['c', 'cc', 'm', 'mm']:
          self.compiled_headers[lang] = gyp_path_to_build_output(
              self.header, lang)
      self.header = gyp_path_to_build_path(self.header)

  def GetInclude(self, lang):
    """Gets the cflags to include the prefix header for language |lang|."""
    if self.compile_headers and lang in self.compiled_headers:
      # Point at the (to-be-)compiled header; the compiler appends '.gch'
      # to the -include argument itself when looking for a precompiled form.
      return '-include %s' % self.compiled_headers[lang]
    elif self.header:
      return '-include %s' % self.header
    else:
      return ''

  def _Gch(self, lang):
    """Returns the actual file name of the prefix header for language
    |lang|."""
    assert self.compile_headers
    return self.compiled_headers[lang] + '.gch'

  def GetObjDependencies(self, sources, objs):
    """Given a list of source files and the corresponding object files,
    returns a list of (source, object, gch) tuples, where |gch| is the
    build-directory relative path to the gch file each object file depends
    on. |compilable[i]| has to be the source file belonging to |objs[i]|."""
    if not self.header or not self.compile_headers:
      return []

    result = []
    for source, obj in zip(sources, objs):
      ext = os.path.splitext(source)[1]
      # Map the source extension to a language code; unknown extensions
      # produce no dependency.
      lang = {
        '.c': 'c',
        '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
        '.m': 'm',
        '.mm': 'mm',
      }.get(ext, None)
      if lang:
        result.append((source, obj, self._Gch(lang)))
    return result

  def GetPchBuildCommands(self):
    """Returns [(path_to_gch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory.
    """
    if not self.header or not self.compile_headers:
      return []
    return [
      (self._Gch('c'), '-x c-header', 'c', self.header),
      (self._Gch('cc'), '-x c++-header', 'cc', self.header),
      (self._Gch('m'), '-x objective-c-header', 'm', self.header),
      (self._Gch('mm'), '-x objective-c++-header', 'mm', self.header),
    ]
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
  """Merges the global xcode_settings dictionary into each configuration of
  the target represented by spec. For keys that are both in the global and
  the local xcode_settings dict, the local value wins.
  """
  # The xcode generator special-cases global xcode_settings and does
  # something that amounts to merging them into each local xcode_settings
  # dict.
  defaults = global_dict.get('xcode_settings', {})
  for config in spec['configurations'].values():
    if 'xcode_settings' not in config:
      continue
    merged = defaults.copy()
    merged.update(config['xcode_settings'])
    config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
  """Returns whether |spec| should be treated as a bundle.

  Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not produce a binary but also package
  resources into that directory."""
  if flavor != 'mac':
    return False
  is_bundle = int(spec.get('mac_bundle', 0)) != 0
  if is_bundle:
    assert spec['type'] != 'none', (
        'mac_bundle targets cannot have type none (target "%s")' %
        spec['target_name'])
  return is_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
  """Yields (output, resource) pairs for every resource in |resources|.
  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      resources: A list of bundle resources, relative to the build directory.
  """
  dest = os.path.join(product_dir, xcode_settings.GetBundleResourceFolder())
  for res in resources:
    # The make generator doesn't support spaces in filenames, so forbid
    # them everywhere to keep the generators more interchangeable.
    assert ' ' not in res, (
        "Spaces in resource filenames not supported (%s)" % res)

    dirname, basename = os.path.split(res)
    output = dest
    # Resources living in a .lproj bundle keep that folder in the
    # destination path.
    maybe_lproj = os.path.split(dirname)[1]
    if maybe_lproj.endswith('.lproj'):
      output = os.path.join(output, maybe_lproj)
    output = os.path.join(output, basename)

    # Compiled XIB files are referred to by .nib.
    if output.endswith('.xib'):
      output = output[:-3] + 'nib'

    yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
  """Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
    build directory,
  * |dest_plist| is the destination plist path, relative to the
    build directory,
  * |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
  * |extra_env| is a dict of env variables that should be exported when
    invoking |mac_tool copy-info-plist|.

  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      gyp_path_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
  """
  info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
  if not info_plist:
    return None, None, [], {}

  # The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
  assert ' ' not in info_plist, (
      "Spaces in Info.plist filenames not supported (%s)" % info_plist)

  info_plist = gyp_path_to_build_path(info_plist)

  # If explicitly set to preprocess the plist, invoke the C preprocessor and
  # specify any defines as -D flags.
  if xcode_settings.GetPerTargetSetting(
      'INFOPLIST_PREPROCESS', default='NO') == 'YES':
    # Create an intermediate file based on the path.
    defines = shlex.split(xcode_settings.GetPerTargetSetting(
        'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
  else:
    defines = []

  dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
  # All consistent per-target settings are exported to the copy tool.
  extra_env = xcode_settings.GetPerTargetSettings()

  return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
  """Return the environment variables that Xcode would set. See
  http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
  for a full list.

  Args:
      xcode_settings: An XcodeSettings object. If this is None, this function
          returns an empty dict.
      built_products_dir: Absolute path to the built products dir.
      srcroot: Absolute path to the source root.
      configuration: The build configuration name.
      additional_settings: An optional dict with more values to add to the
          result. NOTE: this dict is mutated and returned when provided.
  """
  if not xcode_settings: return {}

  # This function is considered a friend of XcodeSettings, so let it reach
  # into its implementation details.
  spec = xcode_settings.spec

  # These are filled in on a as-needed basis.
  env = {
    'BUILT_PRODUCTS_DIR' : built_products_dir,
    'CONFIGURATION' : configuration,
    'PRODUCT_NAME' : xcode_settings.GetProductName(),
    # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
    'SRCROOT' : srcroot,
    'SOURCE_ROOT': '${SRCROOT}',
    # This is not true for static libraries, but currently the env is only
    # written for bundles:
    'TARGET_BUILD_DIR' : built_products_dir,
    'TEMP_DIR' : '${TMPDIR}',
  }
  if xcode_settings.GetPerTargetSetting('SDKROOT'):
    env['SDKROOT'] = xcode_settings._SdkPath()
  else:
    env['SDKROOT'] = ''

  # Linked targets additionally expose executable/product information.
  if spec['type'] in (
      'executable', 'static_library', 'shared_library', 'loadable_module'):
    env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
    env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
    env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
    mach_o_type = xcode_settings.GetMachOType()
    if mach_o_type:
      env['MACH_O_TYPE'] = mach_o_type
    env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
  # Bundle targets expose their folder layout.
  if xcode_settings._IsBundle():
    env['CONTENTS_FOLDER_PATH'] = \
        xcode_settings.GetBundleContentsFolderPath()
    env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
        xcode_settings.GetBundleResourceFolder()
    env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
    env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()

  install_name = xcode_settings.GetInstallName()
  if install_name:
    env['LD_DYLIB_INSTALL_NAME'] = install_name
  install_name_base = xcode_settings.GetInstallNameBase()
  if install_name_base:
    env['DYLIB_INSTALL_NAME_BASE'] = install_name_base

  if not additional_settings:
    additional_settings = {}
  else:
    # Flatten lists to strings.
    for k in additional_settings:
      if not isinstance(additional_settings[k], str):
        additional_settings[k] = ' '.join(additional_settings[k])
  # |env| takes precedence over caller-provided values.
  additional_settings.update(env)

  # Canonicalize all variable references to the ${FOO} form.
  for k in additional_settings:
    additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

  return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
def _TopologicallySortedEnvVarKeys(env):
  """Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys
  of env such that key2 is after key1 in L if env[key2] refers to env[key1].

  Throws an Exception in case of dependency cycles.
  """
  # Since environment variables can refer to other variables, the evaluation
  # order is important. Below is the logic to compute the dependency graph
  # and sort it.
  regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')

  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    # We can then reverse the result of the topological sort at the end.
    # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
    matches = set([v for v in regex.findall(env[node]) if v in env])
    for dependee in matches:
      assert '${' not in dependee, 'Nested variables not supported: ' + dependee
    return matches

  try:
    # Topologically sort, and then reverse, because we used an edge
    # definition that's inverted from the expected result of this function
    # (see comment above).
    order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
    order.reverse()
    return order
  # NOTE(review): Python-2-only "except X, e" syntax, kept as-is since the
  # surrounding file targets Python 2.
  except gyp.common.CycleError, e:
    raise GypError(
        'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
  """Returns the Xcode environment as (key, value) pairs, ordered so that
  every variable appears after any variable it references."""
  env = _GetXcodeEnv(
      xcode_settings, built_products_dir, srcroot, configuration,
      additional_settings)
  order = _TopologicallySortedEnvVarKeys(env)
  return [(key, env[key]) for key in order]
def GetSpecPostbuildCommands(spec, quiet=False):
  """Returns the list of postbuilds explicitly defined on |spec|, in a form
  executable by a shell."""
  commands = []
  for postbuild in spec.get('postbuilds', []):
    if not quiet:
      # Announce each postbuild by name before running it.
      commands.append('echo POSTBUILD\\(%s\\) %s' % (
          spec['target_name'], postbuild['postbuild_name']))
    commands.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
  return commands
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import os
import json
from frappe import _
from frappe.modules import scrub, get_module_path
from frappe.utils import (
flt,
cint,
get_html_format,
get_url_to_form,
gzip_decompress,
format_duration,
)
from frappe.model.utils import render_include
from frappe.translate import send_translations
import frappe.desk.reportview
from frappe.permissions import get_role_permissions
from six import string_types, iteritems
from datetime import timedelta
from frappe.core.utils import ljust_list
def get_report_doc(report_name):
    """Load and return the Report doc for |report_name|.

    A "Custom Report" stores only column customizations (JSON); in that case
    the referenced base report is loaded instead and annotated with the
    custom columns. Throws frappe.PermissionError when the user lacks access
    to the report or to its ref doctype, and throws when the report is
    disabled.
    """
    doc = frappe.get_doc("Report", report_name)
    doc.custom_columns = []

    if doc.report_type == "Custom Report":
        # Resolve to the base report that actually implements the logic.
        custom_report_doc = doc
        reference_report = custom_report_doc.reference_report
        doc = frappe.get_doc("Report", reference_report)
        doc.custom_report = report_name
        doc.custom_columns = custom_report_doc.json
        doc.is_custom_report = True

    if not doc.is_permitted():
        frappe.throw(
            _("You don't have access to Report: {0}").format(report_name),
            frappe.PermissionError,
        )

    if not frappe.has_permission(doc.ref_doctype, "report"):
        frappe.throw(
            _("You don't have permission to get a report on: {0}").format(
                doc.ref_doctype
            ),
            frappe.PermissionError,
        )

    if doc.disabled:
        frappe.throw(_("Report {0} is disabled").format(report_name))

    return doc
def generate_report_result(report, filters=None, user=None, custom_columns=None):
    """Execute |report| (Query or Script report) and return a dict with the
    result rows, columns, optional message/chart/summary, and metadata.

    Args:
        report: Report doc (as returned by get_report_doc).
        filters: dict or JSON string of filter values.
        user: user to filter permitted rows for; defaults to session user.
        custom_columns: extra (ad-hoc) columns to splice into the result.
    """
    user = user or frappe.session.user
    filters = filters or []

    if filters and isinstance(filters, string_types):
        filters = json.loads(filters)

    res = []

    if report.report_type == "Query Report":
        res = report.execute_query_report(filters)
    elif report.report_type == "Script Report":
        res = report.execute_script_report(filters)

    # execute_* may return fewer than 6 values; pad the rest with None.
    columns, result, message, chart, report_summary, skip_total_row = ljust_list(res, 6)

    if report.custom_columns:
        # Saved customizations from a Custom Report.
        # Original query columns, needed to reorder data as per custom columns
        query_columns = columns
        # Reordered columns
        columns = json.loads(report.custom_columns)
        result = reorder_data_for_custom_columns(columns, query_columns, result)
        result = add_data_to_custom_columns(columns, result)

    if custom_columns:
        result = add_data_to_custom_columns(custom_columns, result)
        for custom_column in custom_columns:
            columns.insert(custom_column["insert_after_index"] + 1, custom_column)

    if result:
        # Strip out rows the user is not permitted to see.
        result = get_filtered_data(report.ref_doctype, columns, result, user)

    if cint(report.add_total_row) and result and not skip_total_row:
        result = add_total_row(result, columns)

    return {
        "result": result,
        "columns": columns,
        "message": message,
        "chart": chart,
        "report_summary": report_summary,
        "skip_total_row": skip_total_row or 0,
        "status": None,
        # Cached duration of the last background execution, if any.
        "execution_time": frappe.cache().hget("report_execution_time", report.name)
        or 0,
    }
@frappe.whitelist()
def background_enqueue_run(report_name, filters=None, user=None):
    """run reports in background

    Creates a "Prepared Report" tracking doc for |report_name|, enqueues the
    actual execution as a background job, and returns the tracking doc's
    name along with a URL to its form.
    """
    if not user:
        user = frappe.session.user
    report = get_report_doc(report_name)

    track_instance = frappe.get_doc(
        {
            "doctype": "Prepared Report",
            "report_name": report_name,
            # This looks like an insanity but, without this it'd be very hard
            # to find Prepared Reports matching given condition.
            # We're ensuring that spacing is consistent. e.g. JS seems to put
            # no spaces after ":", Python on the other hand does.
            "filters": json.dumps(json.loads(filters)),
            "ref_report_doctype": report_name,
            "report_type": report.report_type,
            "query": report.query,
            "module": report.module,
        }
    )
    track_instance.insert(ignore_permissions=True)
    # Commit so the background worker can see the freshly inserted doc.
    frappe.db.commit()
    track_instance.enqueue_report()
    return {
        "name": track_instance.name,
        "redirect_url": get_url_to_form("Prepared Report", track_instance.name),
    }
@frappe.whitelist()
def get_script(report_name):
    """Return the client-side script and print format for |report_name|.

    Looks for <module>/report/<scrubbed name>/<scrubbed name>.js and .html
    on disk, falls back to the script stored on the Report doc, and finally
    to an empty stub. Also pushes report translations for non-English users.
    """
    report = get_report_doc(report_name)
    module = report.module or frappe.db.get_value(
        "DocType", report.ref_doctype, "module"
    )
    module_path = get_module_path(module)
    report_folder = os.path.join(module_path, "report", scrub(report.name))
    script_path = os.path.join(report_folder, scrub(report.name) + ".js")
    print_path = os.path.join(report_folder, scrub(report.name) + ".html")

    script = None
    if os.path.exists(script_path):
        with open(script_path, "r") as f:
            script = f.read()

    html_format = get_html_format(print_path)

    if not script and report.javascript:
        # Fall back to the script stored on the Report doc itself.
        script = report.javascript

    if not script:
        # Empty stub so the client always has a query_reports entry.
        script = "frappe.query_reports['%s']={}" % report_name

    # load translations
    if frappe.lang != "en":
        send_translations(frappe.get_lang_dict("report", report_name))

    return {
        "script": render_include(script),
        "html_format": html_format,
        # Cached duration of the last background execution, if any.
        "execution_time": frappe.cache().hget("report_execution_time", report_name)
        or 0,
    }
@frappe.whitelist()
@frappe.read_only()
def run(
    report_name,
    filters=None,
    user=None,
    ignore_prepared_report=False,
    custom_columns=None,
):
    """Run a report and return its result dict.

    When the report is configured for prepared (background) execution, the
    latest stored Prepared Report result is served instead of executing the
    report inline — unless the caller opts out or requests custom columns.
    """
    report = get_report_doc(report_name)
    if not user:
        user = frappe.session.user
    if not frappe.has_permission(report.ref_doctype, "report"):
        frappe.msgprint(
            _("Must have report permission to access this report."),
            raise_exception=True,
        )

    result = None

    if (
        report.prepared_report
        and not report.disable_prepared_report
        and not ignore_prepared_report
        and not custom_columns
    ):
        if filters:
            if isinstance(filters, string_types):
                filters = json.loads(filters)
            # The caller may request one specific Prepared Report doc; the
            # marker key is removed before matching stored filters.
            dn = filters.get("prepared_report_name")
            filters.pop("prepared_report_name", None)
        else:
            dn = ""
        result = get_prepared_report_result(report, filters, dn, user)
    else:
        result = generate_report_result(report, filters, user, custom_columns)

    result["add_total_row"] = report.add_total_row and not result.get(
        "skip_total_row", False
    )

    return result
def add_data_to_custom_columns(columns, result):
    """Normalize report rows to dicts and fill values for linked custom columns.

    List/tuple rows are converted to ``{fieldname: value}`` dicts, with a
    ``None`` placeholder inserted at each custom (link_field) column; the
    placeholders are then filled from lookup maps fetched per source doctype.
    """
    custom_fields_data = get_data_for_custom_report(columns)

    rows = []
    for row in result:
        if isinstance(row, tuple):
            row = list(row)
        if not isinstance(row, list):
            # already a dict-shaped row; keep as-is
            rows.append(row)
            continue
        row_obj = {}
        for idx, column in enumerate(columns):
            if column.get("link_field"):
                # placeholder, resolved from custom_fields_data below
                row_obj[column["fieldname"]] = None
                row.insert(idx, None)
            else:
                row_obj[column["fieldname"]] = row[idx]
        rows.append(row_obj)

    for row_obj in rows:
        for column in columns:
            if column.get("link_field"):
                fieldname = column["fieldname"]
                key = (column["doctype"], fieldname)
                row_obj[fieldname] = custom_fields_data.get(key, {}).get(
                    row_obj.get(column["link_field"])
                )
    return rows
def reorder_data_for_custom_columns(custom_columns, columns, result):
    """Reorder list-shaped rows to match the custom column order.

    Dict-shaped rows are keyed by fieldname and need no reordering.
    """
    if not result:
        return []

    columns = [get_column_as_dict(col) for col in columns]

    if not isinstance(result[0], (list, tuple)):
        # columns do not need to be reordered if result is a list of dicts
        return result

    wanted_labels = [col["label"] for col in custom_columns]
    source_labels = [col["label"] for col in columns]
    return get_columns_from_list(wanted_labels, source_labels, result)
def get_columns_from_list(columns, target_columns, result):
    """Project each row of ``result`` onto the labels in ``columns``.

    Values are taken from the positions the labels occupy in
    ``target_columns``; labels not present there are silently skipped.
    """
    reordered = []
    for row in result:
        picked = []
        for label in columns:
            try:
                picked.append(row[target_columns.index(label)])
            except ValueError:
                # label missing from the source columns: drop it
                pass
        reordered.append(picked)
    return reordered
def get_prepared_report_result(report, filters, dn="", user=None):
    """Load the stored result of a previously generated Prepared Report.

    Args:
        report: the Report doc whose prepared data should be fetched.
        filters: filter dict; must match the filters the prepared report was
            generated with (compared via JSON serialization).
        dn: optional name of a specific Prepared Report document. When empty,
            the latest completed one owned by ``user`` is used.
        user: owner to look up prepared reports for.

    Returns:
        dict with ``columns``/``result`` when data could be loaded, plus
        ``prepared_report`` (always True) and ``doc`` (the Prepared Report
        doc, or None when none was found or its data was unreadable).
    """
    latest_report_data = {}
    doc = None
    if dn:
        # Get specified dn
        doc = frappe.get_doc("Prepared Report", dn)
    else:
        # Only look for completed prepared reports with given filters.
        doc_list = frappe.get_all(
            "Prepared Report",
            filters={
                "status": "Completed",
                "filters": json.dumps(filters),
                "owner": user,
                "report_name": report.get("custom_report") or report.get("report_name"),
            },
            order_by="creation desc",
        )
        if doc_list:
            # Get latest
            doc = frappe.get_doc("Prepared Report", doc_list[0])

    if doc:
        try:
            # Prepared Report data is stored in a GZip compressed JSON file
            attached_file_name = frappe.db.get_value(
                "File",
                {"attached_to_doctype": doc.doctype, "attached_to_name": doc.name},
                "name",
            )
            attached_file = frappe.get_doc("File", attached_file_name)
            compressed_content = attached_file.get_content()
            uncompressed_content = gzip_decompress(compressed_content)
            data = json.loads(uncompressed_content.decode("utf-8"))
            if data:
                # when no column layout was stored on the doc, the first data
                # row doubles as the column list
                columns = json.loads(doc.columns) if doc.columns else data[0]

                # translate labels in-place for display
                for column in columns:
                    if isinstance(column, dict) and column.get("label"):
                        column["label"] = _(column["label"])

                latest_report_data = {"columns": columns, "result": data}
        except Exception:
            # unreadable/missing attachment: log it, delete the broken
            # Prepared Report so it gets regenerated, and return no data
            frappe.log_error(frappe.get_traceback())
            frappe.delete_doc("Prepared Report", doc.name)
            frappe.db.commit()
            doc = None

    latest_report_data.update({"prepared_report": True, "doc": doc})
    return latest_report_data
@frappe.whitelist()
def export_query():
    """export from query reports

    Reads ``report_name``, ``filters``, ``file_format_type``,
    ``custom_columns``, ``include_indentation`` and ``visible_idx`` from the
    request form dict, runs the report and places the generated workbook in
    ``frappe.response`` (Excel is the only handled format here).
    """
    data = frappe._dict(frappe.local.form_dict)
    del data["cmd"]
    if "csrf_token" in data:
        del data["csrf_token"]

    # NOTE(review): filters / report_name / file_format_type are only bound
    # when the corresponding form values are strings; a request missing any
    # of them would raise NameError below — presumably the client always
    # sends all three. Confirm against the desk client.
    if isinstance(data.get("filters"), string_types):
        filters = json.loads(data["filters"])
    if isinstance(data.get("report_name"), string_types):
        report_name = data["report_name"]
        # exporting requires export permission on the report's ref doctype
        frappe.permissions.can_export(
            frappe.get_cached_value("Report", report_name, "ref_doctype"),
            raise_exception=True,
        )
    if isinstance(data.get("file_format_type"), string_types):
        file_format_type = data["file_format_type"]
    custom_columns = frappe.parse_json(data["custom_columns"])
    include_indentation = data["include_indentation"]
    if isinstance(data.get("visible_idx"), string_types):
        visible_idx = json.loads(data.get("visible_idx"))
    else:
        visible_idx = None

    if file_format_type == "Excel":
        data = run(report_name, filters, custom_columns=custom_columns)
        data = frappe._dict(data)
        if not data.columns:
            frappe.respond_as_web_page(
                _("No data to export"),
                _("You can try changing the filters of your report."),
            )
            return

        columns = get_columns_dict(data.columns)

        from frappe.utils.xlsxutils import make_xlsx

        # Duration values are stored as raw numbers; format them for display
        data["result"] = handle_duration_fieldtype_values(
            data.get("result"), data.get("columns")
        )

        xlsx_data = build_xlsx_data(columns, data, visible_idx, include_indentation)
        xlsx_file = make_xlsx(xlsx_data, "Query Report")

        frappe.response["filename"] = report_name + ".xlsx"
        frappe.response["filecontent"] = xlsx_file.getvalue()
        frappe.response["type"] = "binary"
def handle_duration_fieldtype_values(result, columns):
    """Format raw values in Duration columns using ``format_duration``.

    Columns may be "Label:Fieldtype/Options" strings or docfield dicts;
    rows are mutated in place and the same ``result`` object is returned.
    """
    for i, col in enumerate(columns):
        fieldtype = None
        if isinstance(col, string_types):
            parts = col.split(":")
            if len(parts) > 1:
                if parts[1]:
                    fieldtype = parts[1]
                    if "/" in fieldtype:
                        fieldtype, options = fieldtype.split("/")
                else:
                    # "Label:" with an empty type spec defaults to Data
                    fieldtype = "Data"
        else:
            fieldtype = col.get("fieldtype")

        if fieldtype != "Duration":
            continue

        for row in result:
            seconds = row[i]
            if seconds:
                row[i] = format_duration(seconds)
    return result
def build_xlsx_data(columns, data, visible_idx, include_indentation):
    """Build a 2-D list (header row + visible rows) for XLSX export.

    Hidden columns are skipped; rows whose index is not in ``visible_idx``
    are dropped; tree reports may indent the first column's cell text.
    """
    shown = [
        idx for idx in range(len(data.columns)) if not columns[idx].get("hidden")
    ]

    # header row first
    result = [[columns[idx]["label"] for idx in shown]]

    # build table from result
    for i, row in enumerate(data.result):
        # only pick up rows that are visible in the report
        if i not in visible_idx:
            continue
        if isinstance(row, dict) and row:
            row_data = []
            for idx in shown:
                label = columns[idx]["label"]
                fieldname = columns[idx]["fieldname"]
                cell_value = row.get(fieldname, row.get(label, ""))
                # indent the first column for tree reports
                if cint(include_indentation) and "indent" in row and idx == 0:
                    cell_value = ("    " * cint(row["indent"])) + cell_value
                row_data.append(cell_value)
        else:
            row_data = row
        result.append(row_data)

    return result
def add_total_row(result, columns, meta=None):
    """Append a totals row to ``result`` based on each column's fieldtype.

    Numeric columns (Currency/Int/Float/Percent/Duration) are summed, Percent
    columns are then averaged, Time columns are accumulated as timedeltas and
    Link-to-Currency columns copy the first row's currency. The first column
    gets the label "Total" unless it is itself numeric or a date.

    Args:
        result: list of rows (dicts or sequences); mutated in place.
        columns: column specs, either "Label:Fieldtype/Options" strings or
            docfield dicts.
        meta: optional doctype meta used to resolve string columns.

    Returns:
        ``result`` with the totals row appended.
    """
    total_row = [""] * len(columns)
    has_percent = []

    for i, col in enumerate(columns):
        fieldtype, options, fieldname = None, None, None
        if isinstance(col, string_types):
            if meta:
                # get fieldtype from the meta
                field = meta.get_field(col)
                if field:
                    fieldtype = meta.get_field(col).fieldtype
                    fieldname = meta.get_field(col).fieldname
            else:
                col = col.split(":")
                if len(col) > 1:
                    if col[1]:
                        fieldtype = col[1]
                        if "/" in fieldtype:
                            fieldtype, options = fieldtype.split("/")
                    else:
                        # empty type spec defaults to Data
                        fieldtype = "Data"
        else:
            fieldtype = col.get("fieldtype")
            fieldname = col.get("fieldname")
            options = col.get("options")

        for row in result:
            # skip short rows that do not reach this column
            if i >= len(row):
                continue
            cell = row.get(fieldname) if isinstance(row, dict) else row[i]
            if fieldtype in ["Currency", "Int", "Float", "Percent", "Duration"] and flt(
                cell
            ):
                total_row[i] = flt(total_row[i]) + flt(cell)

            if fieldtype == "Percent" and i not in has_percent:
                has_percent.append(i)

            if fieldtype == "Time" and cell:
                if not total_row[i]:
                    total_row[i] = timedelta(hours=0, minutes=0, seconds=0)
                total_row[i] = total_row[i] + cell

        if fieldtype == "Link" and options == "Currency":
            # a currency column shows one currency for the whole report;
            # reuse the first row's value
            total_row[i] = (
                result[0].get(fieldname)
                if isinstance(result[0], dict)
                else result[0][i]
            )

    # percent columns: convert the sum into an average
    for i in has_percent:
        total_row[i] = flt(total_row[i]) / len(result)

    first_col_fieldtype = None
    if isinstance(columns[0], string_types):
        first_col = columns[0].split(":")
        if len(first_col) > 1:
            first_col_fieldtype = first_col[1].split("/")[0]
    else:
        first_col_fieldtype = columns[0].get("fieldtype")

    # only label the totals row when the first column is textual
    if first_col_fieldtype not in ["Currency", "Int", "Float", "Percent", "Date"]:
        total_row[0] = _("Total")

    result.append(total_row)
    return result
@frappe.whitelist()
def get_data_for_custom_field(doctype, field):
    """Return a ``{name: field_value}`` map for all records of ``doctype``.

    Raises ``frappe.PermissionError`` when the user lacks read permission.
    """
    if not frappe.has_permission(doctype, "read"):
        frappe.throw(_("Not Permitted"), frappe.PermissionError)

    records = frappe.get_all(doctype, fields=["name", field], as_list=1)
    return frappe._dict(records)
def get_data_for_custom_report(columns):
    """Fetch lookup maps for every linked custom column.

    Returns a dict keyed by ``(doctype, fieldname)`` whose values map record
    names to the linked field's value.
    """
    lookup = {}
    for column in columns:
        if not column.get("link_field"):
            continue
        doctype = column.get("doctype")
        fieldname = column.get("fieldname")
        lookup[(doctype, fieldname)] = get_data_for_custom_field(doctype, fieldname)
    return lookup
@frappe.whitelist()
def save_report(reference_report, report_name, columns):
    """Create or update a Custom Report storing a column layout on top of
    an existing (reference) report.

    Returns the name of the saved Report document.
    """
    reference_doc = get_report_doc(reference_report)

    existing = frappe.db.exists(
        "Report",
        {
            "report_name": report_name,
            "is_standard": "No",
            "report_type": "Custom Report",
        },
    )

    if existing:
        report = frappe.get_doc("Report", existing)
        report.update({"json": columns})
        report.save()
        frappe.msgprint(_("Report updated successfully"))
        return existing

    new_report = frappe.get_doc(
        {
            "doctype": "Report",
            "report_name": report_name,
            "json": columns,
            "ref_doctype": reference_doc.ref_doctype,
            "is_standard": "No",
            "report_type": "Custom Report",
            "reference_report": reference_report,
        }
    ).insert(ignore_permissions=True)
    frappe.msgprint(_("{0} saved successfully").format(new_report.name))
    return new_report.name
def get_filtered_data(ref_doctype, columns, data, user):
    """Drop rows the user may not see.

    A row survives when its ref_doctype document is shared with the user, or
    when it passes the user-permission match filters of every linked doctype.
    When no match filters apply, all rows pass through.
    """
    linked_doctypes = get_linked_doctypes(columns, data)
    match_filters_per_doctype = get_user_match_filters(linked_doctypes, user=user)
    shared = frappe.share.get_shared(ref_doctype, user)
    columns_dict = get_columns_dict(columns)

    role_permissions = get_role_permissions(frappe.get_meta(ref_doctype), user)
    if_owner = role_permissions.get("if_owner", {}).get("report")

    if not match_filters_per_doctype:
        return list(data)

    filtered = []
    for row in data:
        # linked_doctypes.get(ref_doctype) because the entry is removed when
        # the ref_doctype column is empty in every row (truthiness check kept:
        # column index 0 intentionally falls through to has_match)
        ref_key = linked_doctypes.get(ref_doctype)
        if ref_key and shared and row[ref_key] in shared:
            filtered.append(row)
        elif has_match(
            row,
            linked_doctypes,
            match_filters_per_doctype,
            ref_doctype,
            if_owner,
            columns_dict,
            user,
        ):
            filtered.append(row)
    return filtered
def has_match(
    row,
    linked_doctypes,
    doctype_match_filters,
    ref_doctype,
    if_owner,
    columns_dict,
    user,
):
    """Returns True if after evaluating permissions for each linked doctype

    - There is an owner match for the ref_doctype
    - `and` There is a user permission match for all linked doctypes

    Returns True if the row is empty

    Note:
    Each doctype could have multiple conflicting user permission doctypes.
    Hence even if one of the sets allows a match, it is true.
    This behavior is equivalent to the trickling of user permissions of linked doctypes to the ref doctype.
    """
    resultant_match = True

    if not row:
        # allow empty rows :)
        return resultant_match

    for doctype, filter_list in doctype_match_filters.items():
        matched_for_doctype = False

        # owner match: the row's User column equals the current user
        if doctype == ref_doctype and if_owner:
            idx = linked_doctypes.get("User")
            if (
                idx is not None
                and row[idx] == user
                and columns_dict[idx] == columns_dict.get("owner")
            ):
                # owner match is true
                matched_for_doctype = True

        if not matched_for_doctype:
            for match_filters in filter_list:
                match = True
                for dt, idx in linked_doctypes.items():
                    # case handled above
                    if dt == "User" and columns_dict[idx] == columns_dict.get("owner"):
                        continue

                    # idx is a column index for list rows, a fieldname for dict rows
                    cell_value = None
                    if isinstance(row, dict):
                        cell_value = row.get(idx)
                    elif isinstance(row, (list, tuple)):
                        cell_value = row[idx]

                    # a value outside the allowed set only fails the match
                    # when it refers to a real document
                    if (
                        dt in match_filters
                        and cell_value not in match_filters.get(dt)
                        and frappe.db.exists(dt, cell_value)
                    ):
                        match = False
                        break

                # each doctype could have multiple conflicting user permission doctypes, hence using OR
                # so that even if one of the sets allows a match, it is true
                matched_for_doctype = matched_for_doctype or match

                if matched_for_doctype:
                    break

        # each doctype's user permissions should match the row! hence using AND
        resultant_match = resultant_match and matched_for_doctype
        if not resultant_match:
            break

    return resultant_match
def get_linked_doctypes(columns, data):
    """Map each Link column's target doctype to where its value lives in a row.

    Returns:
        dict of doctype -> column index (for list/tuple rows) or fieldname
        (for dict rows). Doctypes whose column is empty in every row are
        removed from the result.
    """
    linked_doctypes = {}

    columns_dict = get_columns_dict(columns)

    for idx, col in enumerate(columns):
        df = columns_dict[idx]
        if df.get("fieldtype") == "Link":
            if data and isinstance(data[0], (list, tuple)):
                linked_doctypes[df["options"]] = idx
            else:
                # dict
                linked_doctypes[df["options"]] = df["fieldname"]

    # remove doctype if column is empty
    columns_with_value = []
    for row in data:
        if row:
            # shortcut: presumably skips scanning once as many columns have
            # been recorded as this row has cells — TODO confirm intent
            if len(row) != len(columns_with_value):
                if isinstance(row, (list, tuple)):
                    row = enumerate(row)
                elif isinstance(row, dict):
                    row = row.items()

                for col, val in row:
                    if val and col not in columns_with_value:
                        columns_with_value.append(col)

    # iterate a snapshot so we can delete from linked_doctypes while looping
    items = list(iteritems(linked_doctypes))

    for doctype, key in items:
        if key not in columns_with_value:
            del linked_doctypes[doctype]

    return linked_doctypes
def get_columns_dict(columns):
    """Returns a dict with column docfield values as dict

    The keys for the dict are both idx and fieldname,
    so either index or fieldname can be used to search for a column's docfield properties
    """
    out = frappe._dict()
    for idx, col in enumerate(columns):
        parsed = get_column_as_dict(col)
        out[idx] = parsed
        out[parsed["fieldname"]] = parsed
    return out
def get_column_as_dict(col):
    """Normalize a column spec into a ``frappe._dict``.

    Accepts either a "Label:Fieldtype/Options" string or a docfield dict;
    guarantees a ``fieldname`` key in the result.
    """
    col_dict = frappe._dict()

    if isinstance(col, string_types):
        # string form: "Label[:Fieldtype[/Options]]"
        parts = col.split(":")
        if len(parts) > 1:
            type_spec = parts[1]
            if "/" in type_spec:
                col_dict["fieldtype"], col_dict["options"] = type_spec.split("/")
            else:
                col_dict["fieldtype"] = type_spec
        col_dict["label"] = parts[0]
        col_dict["fieldname"] = frappe.scrub(parts[0])
    else:
        # dict form: derive fieldname from label when absent
        col_dict.update(col)
        if "fieldname" not in col_dict:
            col_dict["fieldname"] = frappe.scrub(col_dict["label"])

    return col_dict
def get_user_match_filters(doctypes, user):
    """Build user-permission match conditions for each doctype that has any."""
    match_filters = {}
    for dt in doctypes:
        conditions = frappe.desk.reportview.build_match_conditions(dt, user, False)
        if conditions:
            match_filters[dt] = conditions
    return match_filters
|
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-task task sampling schedules."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import early_stop
import numpy as np
class TaskScheduler(base_layer.BaseLayer):
  """Generic multi-task scheduler.

  Subclasses should override the `Sample` method to return a task string given
  a step. All of the task strings as well as additional hyperparameters needed
  by `Sample` should be exposed and stored in the params. `Sample` should also
  update `cur_probs`.
  """

  @classmethod
  def Params(cls):
    """Parameters for this task scheduler."""
    p = super().Params()
    p.name = 'task_scheduler'
    return p

  def __init__(self, params):
    super().__init__(params)
    # Sampling probabilities used by the most recent `Sample` call; set by
    # subclasses (e.g. for summaries). None until the first sample is drawn.
    self.cur_probs = None

  def Sample(self, current_step):
    """Returns the name of the task to train at `current_step`."""
    raise NotImplementedError('Abstract method')
class AdaptiveScheduler(TaskScheduler):
  """Tasks with low scores will be sampled more often.

  Scores are expected to be non-negative. Larger scores are better.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('tasks', [], 'List of tasks')
    p.Define('expected', [], 'List of final expected scores')
    p.Define('mh_a', early_stop.MetricHistory.Params(), '')
    p.Define('mh_b', early_stop.MetricHistory.Params(), '')
    p.Define(
        'epsilon', 0.05, 'Regularizarion term. A large epsilon will lead'
        'to a more uniform task distribution.')
    p.Define('alpha', 1.0, 'Normalized task scores are raised to this power.')
    return p

  def __init__(self, params):
    super().__init__(params)
    if len(self.params.tasks) != 2 or len(self.params.expected) != 2:
      raise ValueError('Only two tasks are supported by this scheduler.')

    if self.params.epsilon < 0:
      raise ValueError('Epsilon should be positive.')

    self.tasks = self.params.tasks

    # Most recent per-task scores, parsed from the metric history files.
    self.last_scores = [0.0] * 2

    self._metric_histories = [
        early_stop.MetricHistory(self.params.mh_a),
        early_stop.MetricHistory(self.params.mh_b)
    ]

  def getMetricHistories(self):
    """Refreshes `self.last_scores` from the on-disk metric history files."""
    # If too slow, consider another implementation.
    # TODO(sebjean) Time file reading and change behaviour if too long.
    for index, mh in enumerate(self._metric_histories):
      # Bug fix: default `score` (and `lines`) up front. Previously `score`
      # was only assigned inside the except branch or when `lines` was
      # non-empty, so an existing-but-empty history file left `score` unbound
      # (NameError) or leaking the previous task's value.
      score, lines = 0.0, []
      try:
        with tf.io.gfile.GFile(mh.hist_file) as f:
          lines = f.readlines()
      except tf.errors.NotFoundError:
        tf.logging.warning('File not found. '
                           'Expected at start of training only.')
      if lines:
        try:
          score = lines[-1].split()[-1]
        except IndexError:
          tf.logging.warning(
              'IndexError. Your history file may be corrupted.')
          score = 0.0
      self.last_scores[index] = float(score)
class SimpleAdaptiveScheduler(AdaptiveScheduler):
  """Simple adaptive scheduler.

  A task with a normalized score of `s` is approximately weighted as `1 - s`.
  """

  def Sample(self, current_step):
    """Sample a task.

    The unnormalized probability of a task is given by
    1 + epsilon - min(1, score / expected)**alpha.

    Args:
      current_step: Unused.

    Returns:
      str, the name of the sampled task.
    """
    del current_step  # Unused
    self.getMetricHistories()

    alpha, eps = self.params.alpha, self.params.epsilon
    weights = []
    for index, score in enumerate(self.last_scores):
      normalized = min(1, score / self.params.expected[index])
      weights.append(1 + eps - normalized**alpha)
    probs = tuple(weights / np.sum(weights))
    sampled_task = np.random.choice(self.params.tasks, p=probs)
    self.cur_probs = probs
    return sampled_task
class InverseRatioAdaptiveScheduler(AdaptiveScheduler):
  """Inverse ratio adaptive scheduler.

  Tasks are approximately weighed as the inverse of their normalized scores.
  """

  def Sample(self, current_step):
    """Sample a task.

    The unnormalized probability of a task is given by
    1 / (min(1, score / expected)**alpha + epsilon)

    Args:
      current_step: Unused.

    Returns:
      str, the name of the sampled task.
    """
    del current_step  # Unused
    self.getMetricHistories()

    alpha, eps = self.params.alpha, self.params.epsilon
    weights = []
    for index, score in enumerate(self.last_scores):
      normalized = min(1, score / self.params.expected[index])
      weights.append(1.0 / (normalized**alpha + eps))
    probs = tuple(weights / np.sum(weights))
    sampled_task = np.random.choice(self.params.tasks, p=probs)
    self.cur_probs = probs
    return sampled_task
class ShiftedExponentialScheduler(TaskScheduler):
  """The unnormalized score of each task follows a shifted exponential function.

  Generalizes the constant, exponential and sigmoid
  schedules described in "Scheduled Multi-Task Learning: From Syntax to
  Translation" (Kiperwasser and Ballesteros).
  https://arxiv.org/pdf/1804.08915.pdf
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'alpha', 0, 'Controls the rate at which the schedule changes. '
        'A large alpha will lead to fast convergence toward final values.')
    p.Define(
        'task_probs', [], 'List of 2-tuples (task, prob). For non-constant'
        'schedulers, prob is a tuple of the form (init_prob, final_prob).')
    return p

  def __init__(self, params):
    super().__init__(params)
    assert isinstance(self.params.task_probs, list)
    self.tasks = []
    # Per-task (a, b) pairs: the unnormalized score at step t is
    # a + b * exp(-alpha * t). Populated by subclasses.
    self._descriptors = []

  def Sample(self, current_step):
    """Sample a task.

    Given an input [a, b] and a rate `alpha`, the unnormalized
    score of each task is a + b * exp(-alpha * t).

    Args:
      current_step: int. Current time step.

    Returns:
      str, the name of the sampled task.
    """
    decay = np.exp(-self.params.alpha * current_step)
    scores = [a + b * decay for a, b in self._descriptors]
    probs = tuple(scores / np.sum(scores))
    sampled_task = np.random.choice(self.tasks, p=probs)
    self.cur_probs = probs
    return sampled_task
class ConstantScheduler(ShiftedExponentialScheduler):
  """Constant schedule. Tasks are sampled from a fixed probability distribution.
  """

  def __init__(self, params):
    super().__init__(params)
    for task_name, prob in self.params.task_probs:
      self.tasks.append(task_name)
      # b = 0 makes the score time-invariant.
      self._descriptors.append((prob, 0))
class ExponentialScheduler(ShiftedExponentialScheduler):
  """Exponential schedule.

  For a task with initial and final probabilities p_0 and p_1 respectively,
  its unnormalized score is given by
  `p_1 + (p_0 - p_1) * exp(-alpha * current_step)`.
  """

  def __init__(self, params):
    super().__init__(params)
    for task_name, prob in self.params.task_probs:
      self.tasks.append(task_name)
      initial, final = prob[0], prob[1]
      # (a, b) with a = p_1, b = p_0 - p_1.
      self._descriptors.append((final, initial - final))
class SigmoidScheduler(ShiftedExponentialScheduler):
  """Sigmoid schedule.

  For a task with initial and final probabilities p_0 and p_1 respectively,
  its unnormalized score is given by
  `p_1 + (2 * p_0 - p_1) * exp(-alpha * current_step)`.
  """

  def __init__(self, params):
    super().__init__(params)
    for task_name, prob in self.params.task_probs:
      self.tasks.append(task_name)
      initial, final = prob[0], prob[1]
      # (a, b) with a = p_1, b = 2 * p_0 - p_1.
      self._descriptors.append((final, 2 * initial - final))
class RoundRobinScheduler(TaskScheduler):
  """Deterministic sequential schedule."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('tasks', [], 'List of task names. No repetitions allowed.')
    return p

  def __init__(self, params):
    super().__init__(params)
    assert isinstance(self.params.tasks, list)
    self.tasks = self.params.tasks
    self.n_tasks = len(self.tasks)
    # Uniform by construction since every task is visited equally often.
    self.cur_probs = [1. / self.n_tasks] * self.n_tasks  # For summary
    self.next_task_idx = 0

  def Sample(self, current_step):
    """Sample a task."""
    task = self.tasks[self.next_task_idx]
    # Advance the cursor, wrapping around to the first task.
    self.next_task_idx = (self.next_task_idx + 1) % self.n_tasks
    return task
class SequentialScheduler(TaskScheduler):
  """Deterministic schedule that stays a fixed number of steps on each task."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'task_steps', [], 'List of tuples of (task_name, steps_for_task). Goes '
        'through list sequentially in the specified order, staying '
        'steps_for_task steps on task_name. On completing the schedule, '
        'remains on the final task for the rest of the time. Assumes '
        'p.task_global_step is False.')
    return p

  def __init__(self, params):
    super().__init__(params)
    assert isinstance(self.params.task_steps, list)
    assert self.params.task_steps
    # Convert per-task durations into (name, cumulative step boundary) pairs.
    self.task_steps = []
    boundary = 0
    for name, steps in self.params.task_steps:
      assert steps > 0
      boundary += steps
      self.task_steps.append((name, boundary))
    self.n_tasks = len(self.task_steps)
    self.task_idx = 0
    self.cur_probs = [1] + [0] * (self.n_tasks - 1)  # For summary

  def Sample(self, current_step):
    """Sample a task."""
    sampled_task, to_step = self.task_steps[self.task_idx]
    # Move to the next task when this one's step budget is exhausted,
    # sticking with the final task forever.
    if current_step >= to_step and self.task_idx < self.n_tasks - 1:
      self.task_idx += 1
      sampled_task = self.task_steps[self.task_idx][0]
      self.cur_probs[self.task_idx - 1] = 0
      self.cur_probs[self.task_idx] = 1
    return sampled_task
class PieceWiseScheduler(TaskScheduler):
  """Piecewise scheduler using different scheduling strategies."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'schedule_steps', [], 'List of tuples of (schedule_class_params, '
        'number of steps to use this schedule class)')
    return p

  def __init__(self, params):
    super().__init__(params)
    assert isinstance(self.params.schedule_steps, list)
    # Cumulative step boundaries and the child scheduler params per piece.
    self.schedule_steps = []
    self.schedule_params = []
    boundary = 0
    for cls_params, steps in self.params.schedule_steps:
      boundary += steps
      self.schedule_steps.append(boundary)
      self.schedule_params.append(cls_params)

    self.CreateChildren('schedules', self.schedule_params)

    self.n_schedules = len(self.schedule_steps)
    self.schedule_idx = 0
    # Offset subtracted from the global step so each child scheduler sees
    # steps local to its own piece.
    self.task_step_offset = 0
    self.cur_probs = self.schedules[0].cur_probs

  def Sample(self, current_step):
    """Sample a task."""
    to_step = self.schedule_steps[self.schedule_idx]
    if current_step >= to_step and self.schedule_idx < self.n_schedules - 1:
      self.task_step_offset = to_step
      self.schedule_idx += 1
    cur_schedule = self.schedules[self.schedule_idx]
    sampled_task = cur_schedule.Sample(current_step - self.task_step_offset)
    self.cur_probs = cur_schedule.cur_probs
    return sampled_task
|
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
    """Check compute_class_weight for both 'auto' and 'balanced' modes."""
    labels = np.asarray([2, 2, 2, 3, 3, 4])
    classes = np.unique(labels)

    # "auto" is deprecated and should warn; weights sum to n_classes
    weights = assert_warns(DeprecationWarning,
                           compute_class_weight, "auto", classes, labels)
    assert_almost_equal(weights.sum(), classes.shape)
    # rarer classes get larger weights
    assert_true(weights[0] < weights[1] < weights[2])

    weights = compute_class_weight("balanced", classes, labels)
    # total effect of samples is preserved
    counts = np.bincount(labels)[2:]
    assert_almost_equal(np.dot(weights, counts), labels.shape[0])
    assert_true(weights[0] < weights[1] < weights[2])
def test_compute_class_weight_not_present():
    """Both modes must reject a y that lacks some of the given classes."""
    classes = np.arange(4)
    labels = np.asarray([0, 0, 0, 1, 1, 2])  # class 3 never occurs
    assert_raises(ValueError, compute_class_weight, "auto", classes, labels)
    assert_raises(ValueError, compute_class_weight, "balanced", classes, labels)
def test_compute_class_weight_dict():
    """User-supplied dict weights come back verbatim; unknown labels raise."""
    classes = np.arange(3)
    labels = np.asarray([0, 0, 1, 2])

    # When the user specifies class weights, compute_class_weight should
    # just return them.
    weights = compute_class_weight({0: 1.0, 1: 2.0, 2: 3.0}, classes, labels)
    assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), weights)

    # A weight keyed on a label that is not in `classes` must raise a
    # ValueError, whether that extra label is too large or negative.
    assert_raise_message(ValueError, 'Class label 4 not present.',
                         compute_class_weight,
                         {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}, classes, labels)
    assert_raise_message(ValueError, 'Class label -1 not present.',
                         compute_class_weight,
                         {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}, classes, labels)
def test_compute_class_weight_invariance():
    """class_weight='balanced' models are invariant to class duplication.

    Start from a balanced two-class dataset with 100 datapoints and build
    three variants of identical size ratio tricks: class 1 tripled (150/50),
    class 0 tripled (50/150), and everything doubled (100/100). With
    balancing class weights, all three should give the same model.
    """
    X, y = make_blobs(centers=2, random_state=0)
    # dataset where class 1 is duplicated twice
    X_dup1 = np.vstack([X] + [X[y == 1]] * 2)
    y_dup1 = np.hstack([y] + [y[y == 1]] * 2)
    # dataset where class 0 is duplicated twice
    X_dup0 = np.vstack([X] + [X[y == 0]] * 2)
    y_dup0 = np.hstack([y] + [y[y == 0]] * 2)
    # everything duplicated
    X_all = np.vstack([X] * 2)
    y_all = np.hstack([y] * 2)

    # resulting coefficients should be identical
    coef_dup1 = LogisticRegression(class_weight="balanced").fit(X_dup1, y_dup1).coef_
    coef_dup0 = LogisticRegression(class_weight="balanced").fit(X_dup0, y_dup0).coef_
    coef_all = LogisticRegression(class_weight="balanced").fit(X_all, y_all).coef_
    assert_array_almost_equal(coef_dup1, coef_dup0)
    assert_array_almost_equal(coef_all, coef_dup0)
def test_compute_class_weight_auto_negative():
    """compute_class_weight works with negative class labels."""
    classes = np.array([-2, -1, 0])

    # balanced label distribution -> uniform weights
    labels = np.asarray([-1, -1, 0, 0, -2, -2])
    weights = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                           classes, labels)
    assert_almost_equal(weights.sum(), classes.shape)
    assert_equal(len(weights), len(classes))
    assert_array_almost_equal(weights, np.array([1., 1., 1.]))

    weights = compute_class_weight("balanced", classes, labels)
    assert_equal(len(weights), len(classes))
    assert_array_almost_equal(weights, np.array([1., 1., 1.]))

    # unbalanced label distribution
    labels = np.asarray([-1, 0, 0, -2, -2, -2])
    weights = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                           classes, labels)
    assert_almost_equal(weights.sum(), classes.shape)
    assert_equal(len(weights), len(classes))
    assert_array_almost_equal(weights, np.array([0.545, 1.636, 0.818]),
                              decimal=3)

    weights = compute_class_weight("balanced", classes, labels)
    assert_equal(len(weights), len(classes))
    # shift labels to be non-negative before counting
    counts = np.bincount(labels + 2)
    assert_almost_equal(np.dot(weights, counts), labels.shape[0])
    assert_array_almost_equal(weights, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
    """compute_class_weight handles an unordered `classes` array."""
    classes = np.array([1, 0, 3])
    labels = np.asarray([1, 0, 0, 3, 3, 3])

    weights = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                           classes, labels)
    assert_almost_equal(weights.sum(), classes.shape)
    assert_equal(len(weights), len(classes))
    assert_array_almost_equal(weights, np.array([1.636, 0.818, 0.545]),
                              decimal=3)

    weights = compute_class_weight("balanced", classes, labels)
    # index the counts in the (unordered) class order before checking
    counts = np.bincount(labels)[classes]
    assert_almost_equal(np.dot(weights, counts), labels.shape[0])
    assert_array_almost_equal(weights, [2., 1., 2. / 3])
def test_compute_sample_weight():
    """Test (and demo) compute_sample_weight for 1-D, column-vector and
    multi-output y, under 'auto', 'balanced', dict and None weightings."""
    # Test with balanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with user-defined weights
    sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])

    # Test with column vector of balanced classes
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with unbalanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y)
    expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
    assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)

    # Test with `None` weights
    sample_weight = compute_sample_weight(None, y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])

    # Test with multi-output of balanced classes
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])

    # Test with multi-output with user-defined weights
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])

    # Test with multi-output of unbalanced classes
    # (per-output weights multiply, hence the squared expectations)
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
    """Check compute_sample_weight when an explicit subsample is given."""
    # Balanced classes with every sample present in the subsample.
    labels = np.array([1, 1, 1, 2, 2, 2])
    weights = assert_warns(DeprecationWarning,
                           compute_sample_weight, "auto", labels)
    assert_array_almost_equal(weights, [1.] * 6)
    weights = compute_sample_weight("balanced", labels, range(6))
    assert_array_almost_equal(weights, [1.] * 6)
    # Column-vector y with balanced classes, all samples present.
    labels = np.array([[1], [1], [1], [2], [2], [2]])
    weights = assert_warns(DeprecationWarning,
                           compute_sample_weight, "auto", labels)
    assert_array_almost_equal(weights, [1.] * 6)
    weights = compute_sample_weight("balanced", labels, range(6))
    assert_array_almost_equal(weights, [1.] * 6)
    # Proper subsample: only the first four samples are counted.
    labels = np.array([1, 1, 1, 2, 2, 2])
    weights = assert_warns(DeprecationWarning,
                           compute_sample_weight, "auto", labels, range(4))
    assert_array_almost_equal(weights, [.5, .5, .5, 1.5, 1.5, 1.5])
    weights = compute_sample_weight("balanced", labels, range(4))
    assert_array_almost_equal(weights, [2. / 3, 2. / 3, 2. / 3, 2., 2., 2.])
    # Bootstrap-style subsample (indices with repetitions).
    labels = np.array([1, 1, 1, 2, 2, 2])
    weights = assert_warns(DeprecationWarning, compute_sample_weight,
                           "auto", labels, [0, 1, 1, 2, 2, 3])
    expected_auto = np.array([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
    assert_array_almost_equal(weights, expected_auto)
    weights = compute_sample_weight("balanced", labels, [0, 1, 1, 2, 2, 3])
    expected_balanced = np.array([0.6, 0.6, 0.6, 3., 3., 3.])
    assert_array_almost_equal(weights, expected_balanced)
    # Bootstrap subsample with multi-output y: per-output weights multiply.
    labels = np.array([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    weights = assert_warns(DeprecationWarning, compute_sample_weight,
                           "auto", labels, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(weights, expected_auto ** 2)
    weights = compute_sample_weight("balanced", labels, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(weights, expected_balanced ** 2)
    # A class absent from the subsample gets weight zero.
    labels = np.array([1, 1, 1, 2, 2, 2, 3])
    weights = assert_warns(DeprecationWarning, compute_sample_weight,
                           "auto", labels, range(6))
    assert_array_almost_equal(weights, [1.] * 6 + [0.])
    weights = compute_sample_weight("balanced", labels, range(6))
    assert_array_almost_equal(weights, [1.] * 6 + [0.])
    # Same, for multi-output y.
    labels = np.array([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
    weights = assert_warns(DeprecationWarning, compute_sample_weight,
                           "auto", labels, range(6))
    assert_array_almost_equal(weights, [1.] * 6 + [0.])
    weights = compute_sample_weight("balanced", labels, range(6))
    assert_array_almost_equal(weights, [1.] * 6 + [0.])
def test_compute_sample_weight_errors():
    """compute_sample_weight must reject invalid presets and weight specs."""
    single = np.array([1, 1, 1, 2, 2, 2])
    multi = np.array([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    # Unknown preset strings are rejected, with and without a subsample.
    assert_raises(ValueError, compute_sample_weight, "ni", single)
    assert_raises(ValueError, compute_sample_weight, "ni", single, range(4))
    assert_raises(ValueError, compute_sample_weight, "ni", multi)
    assert_raises(ValueError, compute_sample_weight, "ni", multi, range(4))
    # Explicit class weights cannot be combined with a subsample.
    assert_raises(ValueError,
                  compute_sample_weight, {1: 2, 2: 1}, single, range(4))
    # A plain dict is not valid for multi-output y (needs a list or preset).
    assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, multi)
    # The list of dicts must match the number of outputs.
    assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], multi)
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This sphinx extension adds two directives for summarizing the public
members of a module or package.
These directives are primarily for use with the `automodapi`_ extension,
but can be used independently.
.. _automodsumm:
=======================
automodsumm directive
=======================
This directive will produce an "autosummary"-style table for public
attributes of a specified module. See the `sphinx.ext.autosummary`_ extension
for details on this process. The main difference from the `autosummary`_
directive is that `autosummary`_ requires manually inputting all attributes
that appear in the table, while this captures the entries automatically.
This directive requires a single argument that must be a module or
package.
It also accepts any options supported by the `autosummary`_ directive -
see `sphinx.ext.autosummary`_ for details. It also accepts the following
additional options:
* ``:classes-only:``
If present, the autosummary table will only contain entries for
classes. This cannot be used at the same time with
``:functions-only:`` .
* ``:functions-only:``
If present, the autosummary table will only contain entries for
functions. This cannot be used at the same time with
``:classes-only:`` .
* ``:skip: obj1, [obj2, obj3, ...]``
If present, specifies that the listed objects should be skipped
and not have their documentation generated, nor be included in
the summary table.
* ``:allowed-package-names: pkgormod1, [pkgormod2, pkgormod3, ...]``
Specifies the packages that functions/classes documented here are
allowed to be from, as comma-separated list of package names. If not
given, only objects that are actually in a subpackage of the package
currently being documented are included.
This extension also adds one sphinx configuration option:
* ``automodsumm_writereprocessed``
Should be a bool, and if True, will cause `automodsumm`_ to write files
with any ``automodsumm`` sections replaced with the content Sphinx
processes after ``automodsumm`` has run. The output files are not
actually used by sphinx, so this option is only for figuring out the
cause of sphinx warnings or other debugging. Defaults to `False`.
.. _sphinx.ext.autosummary: http://sphinx-doc.org/latest/ext/autosummary.html
.. _autosummary: http://sphinx-doc.org/latest/ext/autosummary.html#directive-autosummary
.. _automod-diagram:
===========================
automod-diagram directive
===========================
This directive will produce an inheritance diagram like that of the
`sphinx.ext.inheritance_diagram`_ extension.
This directive requires a single argument that must be a module or
package. It accepts no options.
.. note::
Like 'inheritance-diagram', 'automod-diagram' requires
`graphviz <http://www.graphviz.org/>`_ to generate the inheritance diagram.
.. _sphinx.ext.inheritance_diagram: http://sphinx-doc.org/latest/ext/inheritance.html
"""
import inspect
import os
import re
from distutils.version import LooseVersion
import sphinx
from sphinx.ext.autosummary import Autosummary
from sphinx.ext.inheritance_diagram import InheritanceDiagram
from docutils.parsers.rst.directives import flag
from .utils import find_mod_objs
from .astropyautosummary import AstropyAutosummary
# AstropyAutosummary carries a workaround that is only required for very old
# Sphinx releases; from 1.2.0 on, the stock Autosummary behaves correctly.
# See https://github.com/astropy/astropy-helpers/pull/129
BaseAutosummary = (AstropyAutosummary
                   if LooseVersion(sphinx.__version__) < LooseVersion('1.2.0')
                   else Autosummary)
def _str_list_converter(argument):
"""
A directive option conversion function that converts the option into a list
of strings. Used for 'skip' option.
"""
if argument is None:
return []
else:
return [s.strip() for s in argument.split(',')]
class Automodsumm(BaseAutosummary):
    """
    The ``automodsumm`` directive: an autosummary-style table whose entries
    are discovered automatically from the public objects of the module given
    as the single argument.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    has_content = False
    option_spec = dict(Autosummary.option_spec)
    option_spec['functions-only'] = flag
    option_spec['classes-only'] = flag
    option_spec['skip'] = _str_list_converter
    option_spec['allowed-package-names'] = _str_list_converter

    def run(self):
        env = self.state.document.settings.env
        modname = self.arguments[0]
        self.warnings = []
        nodelist = []

        try:
            localnames, fqns, objs = find_mod_objs(modname)
        except ImportError:
            self.warnings = []
            self.warn("Couldn't import module " + modname)
            return self.warnings

        try:
            # set self.content to trick the Autosummary internals.
            # Be sure to respect functions-only and classes-only.
            funconly = 'functions-only' in self.options
            clsonly = 'classes-only' in self.options

            # BUGFIX: this conflict check previously lived inside the final
            # ``else`` branch, where it could never be reached (when both
            # flags are set the ``elif clsonly`` branch fired first), and it
            # called the nonexistent ``self.warning`` method. Check up front,
            # warn via ``self.warn``, and fall back to listing everything, as
            # the module docstring says the two options are mutually
            # exclusive.
            if funconly and clsonly:
                self.warn('functions-only and classes-only both '
                          'defined. Skipping.')
                funconly = clsonly = False

            skipnames = []
            if 'skip' in self.options:
                option_skipnames = set(self.options['skip'])
                for lnm in localnames:
                    if lnm in option_skipnames:
                        option_skipnames.remove(lnm)
                        skipnames.append(lnm)
                if len(option_skipnames) > 0:
                    self.warn('Tried to skip objects {objs} in module {mod}, '
                              'but they were not present. Ignoring.'.format(
                              objs=option_skipnames, mod=modname))

            if funconly:
                cont = [nm for nm, obj in zip(localnames, objs)
                        if nm not in skipnames and inspect.isroutine(obj)]
            elif clsonly:
                cont = [nm for nm, obj in zip(localnames, objs)
                        if nm not in skipnames and inspect.isclass(obj)]
            else:
                cont = [nm for nm in localnames if nm not in skipnames]

            self.content = cont

            # for some reason, even though ``currentmodule`` is substituted in,
            # sphinx doesn't necessarily recognize this fact. So we just force
            # it internally, and that seems to fix things
            env.temp_data['py:module'] = modname

            # can't use super because Sphinx/docutils has trouble with
            # super(Autosummary, self).run()
            nodelist.extend(Autosummary.run(self))
            return self.warnings + nodelist
        finally:  # has_content = False for the Automodsumm
            self.content = []

    def get_items(self, names):
        # Force documentation of imported members so that objects re-exported
        # by the module still appear in the table.
        self.genopt['imported-members'] = True
        return Autosummary.get_items(self, names)
#<-------------------automod-diagram stuff------------------------------------>
class Automoddiagram(InheritanceDiagram):
    """
    The ``automod-diagram`` directive: an inheritance diagram covering every
    class found in the module given as the single argument.
    """
    option_spec = dict(InheritanceDiagram.option_spec)
    option_spec['allowed-package-names'] = _str_list_converter

    def run(self):
        try:
            allowed = self.options.get('allowed-package-names', [])
            # An empty list means "restrict to objects local to the module".
            onlylocals = True if len(allowed) == 0 else allowed
            names, objects = find_mod_objs(self.arguments[0],
                                           onlylocals=onlylocals)[1:]
        except ImportError:
            self.warnings = []
            self.warn("Couldn't import module " + self.arguments[0])
            return self.warnings

        class_names = [name for name, obj in zip(names, objects)
                       if inspect.isclass(obj)]

        # Temporarily hand the discovered classes to InheritanceDiagram as a
        # single space-separated argument, restoring the original afterwards.
        saved_arguments = self.arguments
        try:
            if len(class_names) > 0:
                self.arguments = [' '.join(class_names)]
            return InheritanceDiagram.run(self)
        finally:
            self.arguments = saved_arguments
#<---------------------automodsumm generation stuff--------------------------->
def process_automodsumm_generation(app):
    """
    'builder-inited' hook: scan every source document for ``automodsumm``
    directives and generate the stub pages their entries reference.
    """
    env = app.builder.env

    # Collect the source files (docname plus real on-disk suffix) to scan.
    filestosearch = []
    for docname in env.found_docs:
        filename = env.doc2path(docname)
        if os.path.isfile(filename):
            filestosearch.append(docname + os.path.splitext(filename)[1])

    # Convert each file's automodsumm sections into autosummary lines.
    liness = []
    for sfn in filestosearch:
        lines = automodsumm_to_autosummary_lines(sfn, app)
        liness.append(lines)
        # Optionally dump the reprocessed content next to the source for
        # debugging (an empty list means no automodsumm entry in the file).
        if app.config.automodsumm_writereprocessed and lines:
            outfn = os.path.join(app.srcdir, sfn) + '.automodsumm'
            with open(outfn, 'w') as f:
                f.write('\n'.join(lines))
                f.write('\n')

    for sfn, lines in zip(filestosearch, liness):
        suffix = os.path.splitext(sfn)[1]
        if lines:
            generate_automodsumm_docs(lines, sfn, builder=app.builder,
                                      warn=app.warn, info=app.info,
                                      suffix=suffix,
                                      base_path=app.srcdir)
# Old single-pattern version, kept for reference:
#_automodsummrex = re.compile(r'^(\s*)\.\. automodsumm::\s*([A-Za-z0-9_.]+)\s*'
#                             r'\n\1(\s*)(\S|$)', re.MULTILINE)
# End of a line, or end of the whole string (so a directive at the very end
# of a file without a trailing newline still matches).
_lineendrex = r'(?:\n|$)'
# Directive header: group 1 = leading indent, group 2 = module name.
_hdrex = r'^\n?(\s*)\.\. automodsumm::\s*(\S+)\s*' + _lineendrex
# First option line: the (\s+) here becomes group 4 of the assembled pattern
# and captures the extra indent of the option block relative to the header.
_oprex1 = r'(?:\1(\s+)\S.*' + _lineendrex + ')'
# Subsequent option lines must repeat the same total indent (\1\4).
_oprex2 = r'(?:\1\4\S.*' + _lineendrex + ')'
# Assembled pattern: header followed by the whole option block as group 3.
# Used with ``re.split``, which yields [pre, g1, g2, g3, g4, post, ...] with
# a stride of 5 per match (see automodsumm_to_autosummary_lines).
_automodsummrex = re.compile(_hdrex + '(' + _oprex1 + '?' + _oprex2 + '*)',
                             re.MULTILINE)
def automodsumm_to_autosummary_lines(fn, app):
    """
    Generates lines from a file with an "automodsumm" entry suitable for
    feeding into "autosummary".

    Searches the provided file for `automodsumm` directives and returns
    a list of lines specifying the `autosummary` commands for the modules
    requested. This does *not* return the whole file contents - just an
    autosummary section in place of any :automodsumm: entries. Note that
    any options given for `automodsumm` are also included in the
    generated `autosummary` section.

    Parameters
    ----------
    fn : str
        The name of the file to search for `automodsumm` entries.
    app : sphinx.application.Application
        The sphinx Application object

    Returns
    -------
    lines : list of str
        Lines for all `automodsumm` entries with the entries replaced by
        `autosummary` and the module's members added.
    """
    fullfn = os.path.join(app.builder.env.srcdir, fn)

    with open(fullfn) as fr:
        if 'astropy_helpers.sphinx.ext.automodapi' in app._extensions:
            from astropy_helpers.sphinx.ext.automodapi import automodapi_replace
            # Must do the automodapi on the source to get the automodsumm
            # that might be in there
            docname = os.path.splitext(fn)[0]
            filestr = automodapi_replace(fr.read(), app, True, docname, False)
        else:
            filestr = fr.read()

    spl = _automodsummrex.split(filestr)
    # 0th entry is the stuff before the first automodsumm line; after that the
    # pattern's four groups plus the following text repeat with a stride of 5.
    indent1s = spl[1::5]
    mods = spl[2::5]
    opssecs = spl[3::5]
    indent2s = spl[4::5]
    remainders = spl[5::5]

    # only grab automodsumm sections and convert them to autosummary with the
    # entries for all the public objects
    newlines = []

    # loop over all automodsumms in this document
    for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods,
                                                      opssecs, remainders)):
        allindent = i1 + ('' if i2 is None else i2)

        # filter out functions-only and classes-only options if present
        oplines = ops.split('\n')
        toskip = []
        allowedpkgnms = []
        funcsonly = clssonly = False
        # BUGFIX: this inner loop index used to be named ``i`` as well, which
        # clobbered the outer directive index and made the warning line number
        # computed below (from ``i * 5 + 1``) wrong. Use a distinct name.
        for opi, ln in reversed(list(enumerate(oplines))):
            if ':functions-only:' in ln:
                funcsonly = True
                del oplines[opi]
            if ':classes-only:' in ln:
                clssonly = True
                del oplines[opi]
            if ':skip:' in ln:
                toskip.extend(_str_list_converter(ln.replace(':skip:', '')))
                del oplines[opi]
            if ':allowed-package-names:' in ln:
                allowedpkgnms.extend(
                    _str_list_converter(ln.replace(':allowed-package-names:', '')))
                del oplines[opi]
        if funcsonly and clssonly:
            msg = ('Defined both functions-only and classes-only options. '
                   'Skipping this directive.')
            # Line number of this directive: newlines in everything that
            # precedes its header group in the split output.
            lnnum = sum([spl[j].count('\n') for j in range(i * 5 + 1)])
            app.warn('[automodsumm]' + msg, (fn, lnnum))
            continue

        # Use the currentmodule directive so we can just put the local names
        # in the autosummary table. Note that this doesn't always seem to
        # actually "take" in Sphinx's eyes, so in `Automodsumm.run`, we have to
        # force it internally, as well.
        newlines.extend([i1 + '.. currentmodule:: ' + modnm,
                         '',
                         '.. autosummary::'])
        newlines.extend(oplines)

        ols = True if len(allowedpkgnms) == 0 else allowedpkgnms
        for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=ols)):
            if nm in toskip:
                continue
            if funcsonly and not inspect.isroutine(obj):
                continue
            if clssonly and not inspect.isclass(obj):
                continue
            newlines.append(allindent + nm)

        # add one newline at the end of the autosummary block
        newlines.append('')

    return newlines
def generate_automodsumm_docs(lines, srcfn, suffix='.rst', warn=None,
                              info=None, base_path=None, builder=None,
                              template_dir=None):
    """
    This function is adapted from
    `sphinx.ext.autosummary.generate.generate_autosummmary_docs` to
    generate source for the automodsumm directives that should be
    autosummarized. Unlike generate_autosummary_docs, this function is
    called one file at a time.

    Parameters
    ----------
    lines : list of str
        Autosummary-formatted lines for one source file, as produced by
        `automodsumm_to_autosummary_lines`.
    srcfn : str
        Name of the source file the lines came from (used in messages and
        passed to ``find_autosummary_in_lines``).
    suffix : str
        File suffix for the generated stub documents.
    warn, info : callable or None
        Logging callbacks; autosummary's simple ones are used when None.
    base_path : str or None
        Root of the documentation source tree.
    builder : sphinx builder or None
        When given, templates are resolved through the builder's template
        loader (which also consults ``template_dirs``).
    template_dir : str or None
        Extra template directory, only consulted when *builder* is None.
    """
    # Imports are deferred so merely importing this module does not require a
    # fully initialized sphinx/jinja2 environment.
    from sphinx.jinja2glue import BuiltinTemplateLoader
    from sphinx.ext.autosummary import import_by_name, get_documenter
    from sphinx.ext.autosummary.generate import (find_autosummary_in_lines,
                                                 _simple_info, _simple_warn)
    from sphinx.util.osutil import ensuredir
    from sphinx.util.inspect import safe_getattr
    from jinja2 import FileSystemLoader, TemplateNotFound
    from jinja2.sandbox import SandboxedEnvironment

    if info is None:
        info = _simple_info
    if warn is None:
        warn = _simple_warn

    #info('[automodsumm] generating automodsumm for: ' + srcfn)

    # Create our own templating environment - here we use Astropy's
    # templates rather than the default autosummary templates, in order to
    # allow docstrings to be shown for methods.
    template_dirs = [os.path.join(os.path.dirname(__file__), 'templates'),
                     os.path.join(base_path, '_templates')]
    if builder is not None:
        # allow the user to override the templates
        template_loader = BuiltinTemplateLoader()
        template_loader.init(builder, dirs=template_dirs)
    else:
        if template_dir:
            template_dirs.insert(0, template_dir)
        template_loader = FileSystemLoader(template_dirs)
    template_env = SandboxedEnvironment(loader=template_loader)

    # read
    #items = find_autosummary_in_files(sources)
    items = find_autosummary_in_lines(lines, filename=srcfn)
    if len(items) > 0:
        msg = '[automodsumm] {1}: found {0} automodsumm entries to generate'
        info(msg.format(len(items), srcfn))

#    gennms = [item[0] for item in items]
#    if len(gennms) > 20:
#        gennms = gennms[:10] + ['...'] + gennms[-10:]
#    info('[automodsumm] generating autosummary for: ' + ', '.join(gennms))

    # remove possible duplicates (insertion order is discarded here, but the
    # items are sorted again before being written out below)
    items = dict([(item, True) for item in items]).keys()

    # keep track of new files
    new_files = []

    # write
    for name, path, template_name in sorted(items):
        if path is None:
            # The corresponding autosummary:: directive did not have
            # a :toctree: option
            continue

        path = os.path.abspath(path)
        ensuredir(path)

        try:
            import_by_name_values = import_by_name(name)
        except ImportError as e:
            warn('[automodsumm] failed to import %r: %s' % (name, e))
            continue

        # if block to accommodate Sphinx's v1.2.2 and v1.2.3 respectively
        if len(import_by_name_values) == 3:
            name, obj, parent = import_by_name_values
        elif len(import_by_name_values) == 4:
            name, obj, parent, module_name = import_by_name_values

        fn = os.path.join(path, name + suffix)

        # skip it if it exists
        if os.path.isfile(fn):
            continue

        new_files.append(fn)

        f = open(fn, 'w')

        try:
            doc = get_documenter(obj, parent)

            if template_name is not None:
                template = template_env.get_template(template_name)
            else:
                # Pick a template matching the documenter's objtype, falling
                # back to the generic 'base' template.
                tmplstr = 'autosummary/%s.rst'
                try:
                    template = template_env.get_template(tmplstr % doc.objtype)
                except TemplateNotFound:
                    template = template_env.get_template(tmplstr % 'base')

            def get_members_mod(obj, typ, include_public=[]):
                """
                typ = None -> all

                Returns a ``(public, all)`` pair of attribute-name lists for a
                module. NOTE(review): the mutable default is never mutated
                here, so it is safe as written.
                """
                items = []
                for name in dir(obj):
                    try:
                        documenter = get_documenter(safe_getattr(obj, name),
                                                    obj)
                    except AttributeError:
                        continue
                    if typ is None or documenter.objtype == typ:
                        items.append(name)
                public = [x for x in items
                          if x in include_public or not x.startswith('_')]
                return public, items

            def get_members_class(obj, typ, include_public=[],
                                  include_base=False):
                """
                typ = None -> all
                include_base -> include attrs that are from a base class
                """
                items = []

                # using dir gets all of the attributes, including the elements
                # from the base class, otherwise use __slots__ or __dict__
                if include_base:
                    names = dir(obj)
                else:
                    if hasattr(obj, '__slots__'):
                        names = tuple(getattr(obj, '__slots__'))
                    else:
                        names = getattr(obj, '__dict__').keys()

                for name in names:
                    try:
                        documenter = get_documenter(safe_getattr(obj, name),
                                                    obj)
                    except AttributeError:
                        continue
                    if typ is None or documenter.objtype == typ:
                        items.append(name)
                public = [x for x in items
                          if x in include_public or not x.startswith('_')]
                return public, items

            # Build the template namespace for this object.
            ns = {}

            if doc.objtype == 'module':
                ns['members'] = get_members_mod(obj, None)
                ns['functions'], ns['all_functions'] = \
                    get_members_mod(obj, 'function')
                ns['classes'], ns['all_classes'] = \
                    get_members_mod(obj, 'class')
                ns['exceptions'], ns['all_exceptions'] = \
                    get_members_mod(obj, 'exception')
            elif doc.objtype == 'class':
                # always document these dunders when present on the class
                api_class_methods = ['__init__', '__call__']
                ns['members'] = get_members_class(obj, None)
                ns['methods'], ns['all_methods'] = \
                    get_members_class(obj, 'method', api_class_methods)
                ns['attributes'], ns['all_attributes'] = \
                    get_members_class(obj, 'attribute')
                ns['methods'].sort()
                ns['attributes'].sort()

            # Split the dotted name into module / class / object parts.
            parts = name.split('.')
            if doc.objtype in ('method', 'attribute'):
                mod_name = '.'.join(parts[:-2])
                cls_name = parts[-2]
                obj_name = '.'.join(parts[-2:])
                ns['class'] = cls_name
            else:
                mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]

            ns['fullname'] = name
            ns['module'] = mod_name
            ns['objname'] = obj_name
            ns['name'] = parts[-1]

            ns['objtype'] = doc.objtype
            ns['underline'] = len(name) * '='

            # We now check whether a file for reference footnotes exists for
            # the module being documented. We first check if the
            # current module is a file or a directory, as this will give a
            # different path for the reference file. For example, if
            # documenting astropy.wcs then the reference file is at
            # ../wcs/references.txt, while if we are documenting
            # astropy.config.logging_helper (which is at
            # astropy/config/logging_helper.py) then the reference file is set
            # to ../config/references.txt
            if '.' in mod_name:
                mod_name_dir = mod_name.replace('.', '/').split('/', 1)[1]
            else:
                mod_name_dir = mod_name
            if not os.path.isdir(os.path.join(base_path, mod_name_dir)) \
                    and os.path.isdir(os.path.join(base_path, mod_name_dir.rsplit('/', 1)[0])):
                mod_name_dir = mod_name_dir.rsplit('/', 1)[0]

            # We then have to check whether it exists, and if so, we pass it
            # to the template.
            if os.path.exists(os.path.join(base_path, mod_name_dir, 'references.txt')):
                # An important subtlety here is that the path we pass in has
                # to be relative to the file being generated, so we have to
                # figure out the right number of '..'s
                ndirsback = path.replace(base_path, '').count('/')
                ref_file_rel_segments = ['..'] * ndirsback
                ref_file_rel_segments.append(mod_name_dir)
                ref_file_rel_segments.append('references.txt')
                ns['referencefile'] = os.path.join(*ref_file_rel_segments)

            rendered = template.render(**ns)
            f.write(rendered)
        finally:
            f.close()
def setup(app):
    """Register the automodsumm machinery with a Sphinx application."""
    # Our autosummary subclass and sphinx's inheritance-diagram support must
    # both be loaded before the directives below can work.
    for extension in ('astropy_helpers.sphinx.ext.astropyautosummary',
                      'sphinx.ext.inheritance_diagram'):
        app.setup_extension(extension)

    for directive_name, directive in (('automod-diagram', Automoddiagram),
                                      ('automodsumm', Automodsumm)):
        app.add_directive(directive_name, directive)

    app.connect('builder-inited', process_automodsumm_generation)
    app.add_config_value('automodsumm_writereprocessed', False, True)
|
|
# coding: utf-8
"""
Custom luigi base task definitions.
"""
__all__ = ["Task", "WrapperTask", "ExternalTask"]
import sys
import socket
import time
import logging
from collections import OrderedDict
from contextlib import contextmanager
from abc import ABCMeta, abstractmethod
from inspect import getargspec
import luigi
import six
from law.config import Config
from law.parameter import NO_STR, CSVParameter
from law.target.file import localize_file_targets
from law.parser import root_task, global_cmdline_values
from law.logger import setup_logger
from law.util import (
no_value, abort, law_run, common_task_params, colored, uncolored, make_list, multi_match,
flatten, BaseStream, human_duration, patch_object, round_discrete,
)
from law.logger import get_logger
logger = get_logger(__name__)
class BaseRegister(luigi.task_register.Register):
    # Custom task metaclass on top of luigi's register: merges
    # "exclude_params_*" sets down the inheritance chain, honors
    # "include_params_*" overrides, and supports re-registration via the
    # "update_register" class attribute.

    def __new__(metacls, classname, bases, classdict):
        """
        Create the task class, normalizing its exclude/include parameter
        sets, and append it to luigi's task register.
        """
        # default attributes, irrespective of inheritance
        classdict.setdefault("exclude_index", False)

        # unite "exclude_params_*" sets with those of all base classes
        for base in bases:
            for attr, base_params in vars(base).items():
                if attr.startswith("exclude_params_") and isinstance(base_params, set):
                    params = classdict.setdefault(attr, set())
                    if isinstance(params, set):
                        params.update(base_params)

        # remove those parameter names from "exclude_params_*" sets which are explicitly
        # listed in corresponding "include_params_*" sets defined on the class itself
        # (only existing keys are rebound, so iterating classdict here is safe)
        for attr, include_params in classdict.items():
            if attr.startswith("include_params_") and isinstance(include_params, set):
                exclude_attr = "exclude" + attr[len("include"):]
                if exclude_attr in classdict and isinstance(classdict[exclude_attr], set):
                    classdict[exclude_attr] -= include_params

        # create the class via ABCMeta directly, bypassing luigi's
        # Register.__new__; its registration side effect is mimicked below
        cls = ABCMeta.__new__(metacls, classname, bases, classdict)

        # default attributes, apart from inheritance
        if getattr(cls, "update_register", None) is None:
            cls.update_register = False

        # deregister when requested, so a redefined class replaces the old one
        if cls.update_register:
            cls.deregister()

        # add to register (mimic luigi.task_register.Register.__new__)
        cls._namespace_at_class_time = metacls._get_namespace(cls.__module__)
        metacls._reg.append(cls)

        return cls
class BaseTask(six.with_metaclass(BaseRegister, luigi.Task)):
    """
    Common base class for all law tasks on top of ``luigi.Task``, adding
    parameter-exclusion bookkeeping, a convenient ``req`` mechanism to build
    requirements from existing task instances, a per-task logger, and
    dependency-tree helpers.
    """

    # this base class itself should not show up in task indexes
    exclude_index = True

    # parameter names (patterns) excluded from indexing and from being
    # forwarded by req_params; "*_req_get" applies to the receiving class,
    # "*_req_set" to the instance parameters are taken from
    exclude_params_index = set()
    exclude_params_req = set()
    exclude_params_req_set = set()
    exclude_params_req_get = set()

    # parameter names that, when given on the command line for this task
    # family, are preferred over values forwarded from another instance
    prefer_params_cli = set()

    @staticmethod
    def resource_name(name, host=None):
        """
        Returns a host-qualified resource name, ``"<host>_<name>"``. When
        *host* is *None*, the short local hostname is used.
        """
        if host is None:
            host = socket.gethostname().partition(".")[0]
        return "{}_{}".format(host, name)

    @classmethod
    def deregister(cls, task_cls=None):
        """
        Removes a task class *task_cls* from the luigi task register. When *None*, *this* class is
        used. Task family strings and patterns are accepted as well. *True* is returned when at
        least one class was successfully removed, and *False* otherwise.
        """
        # always compare task families
        if task_cls is None:
            task_family = cls.get_task_family()
        elif isinstance(task_cls, six.string_types):
            task_family = task_cls
        else:
            task_family = task_cls.get_task_family()

        success = False

        # remove from the register
        # (manual index loop since the register shrinks while iterating;
        # "Register" resolves at call time to the metaclass defined below)
        i = -1
        while True:
            i += 1
            if i >= len(Register._reg):
                break

            registered_cls = Register._reg[i]

            if multi_match(registered_cls.get_task_family(), task_family, mode=any):
                Register._reg.pop(i)
                i -= 1
                success = True
                logger.debug("removed task class {} from register".format(registered_cls))

        return success

    @classmethod
    def get_param_values(cls, params, args, kwargs):
        """
        Extends luigi's parameter-value resolution with the optional
        ``modify_param_args`` / ``modify_param_values`` hooks below.
        """
        # try to modify the values before values are assigned
        if callable(cls.modify_param_args):
            params, args, kwargs = cls.modify_param_args(params, args, kwargs)

        # assign to actual parameters
        values = super(BaseTask, cls).get_param_values(params, args, kwargs)

        # try to modify the values afterwards
        if callable(cls.modify_param_values):
            values = list(cls.modify_param_values(OrderedDict(values)).items())

        return values

    # method that can be implemented to update parameter objects, args and kwargs before before task
    # instantiation and before parameter values are assined in the super get_param_values
    # example:
    # @classmethod
    # def modify_param_args(cls, params, args, kwargs):
    #     return params, args, kwargs
    modify_param_args = None

    # method that can be implemented to update parameters via get_param_values before instantiation
    # example:
    # @classmethod
    # def modify_param_values(cls, values):
    #     values["some_name"] = "some_value"
    #     return values
    modify_param_values = None

    @classmethod
    def req(cls, *args, **kwargs):
        """
        Instantiates this class with parameters derived from an existing task
        instance via :py:meth:`req_params`.
        """
        return cls(**cls.req_params(*args, **kwargs))

    @classmethod
    def req_params(cls, inst, _exclude=None, _prefer_cli=None, _skip_task_excludes=False,
            _skip_task_excludes_get=None, _skip_task_excludes_set=None, **kwargs):
        """
        Builds the parameter dictionary for instantiating this class from the
        parameters of an existing task instance *inst*, honoring the various
        ``exclude_params_req*`` sets, explicit *_exclude* patterns,
        *kwargs* overrides, and command-line preferences.
        """
        # common/intersection params
        params = common_task_params(inst, cls)

        # determine parameters to exclude
        _exclude = set() if _exclude is None else set(make_list(_exclude))

        # also use this class' req and req_get sets
        # and the req and req_set sets of the instance's class
        # unless explicitly skipped
        if _skip_task_excludes_get is None:
            _skip_task_excludes_get = _skip_task_excludes
        if not _skip_task_excludes_get:
            _exclude.update(cls.exclude_params_req, cls.exclude_params_req_get)
        if _skip_task_excludes_set is None:
            _skip_task_excludes_set = _skip_task_excludes
        if not _skip_task_excludes_set:
            _exclude.update(inst.exclude_params_req, inst.exclude_params_req_set)

        # remove excluded parameters
        for name in list(params.keys()):
            if multi_match(name, _exclude, any):
                del params[name]

        # add kwargs
        params.update(kwargs)

        # remove params that are preferably set via cli class arguments
        prefer_cli = set(cls.prefer_params_cli or ()) if _prefer_cli is None else set(_prefer_cli)
        if prefer_cli:
            cls_args = []
            prefix = cls.get_task_family() + "_"
            if luigi.cmdline_parser.CmdlineParser.get_instance():
                for key in global_cmdline_values().keys():
                    if key.startswith(prefix):
                        cls_args.append(key[len(prefix):])
            for name in make_list(prefer_cli):
                if name in params and name in cls_args:
                    del params[name]

        return params

    def __init__(self, *args, **kwargs):
        super(BaseTask, self).__init__(*args, **kwargs)

        # task level logger, created lazily
        self._task_logger = None

    def complete(self):
        """
        Returns *True* when all non-optional outputs exist. Warns (and
        reports incomplete) when the task declares no non-optional outputs.
        """
        outputs = [t for t in flatten(self.output()) if not t.optional]

        if len(outputs) == 0:
            logger.warning("task {!r} has either no non-optional outputs or no custom complete() "
                "method".format(self))
            return False

        return all(t.exists() for t in outputs)

    @abstractmethod
    def run(self):
        # to be implemented by concrete tasks
        return

    def get_logger_name(self):
        # default logger name; subclasses may override to group loggers
        return self.task_id

    def _create_logger(self, name, level=None):
        # lazily invoked by the "logger" property below
        return setup_logger(name, level=level)

    @property
    def logger(self):
        # lazily created task-level logger; reuses an already-configured
        # logger of the same name when one exists
        if not self._task_logger:
            name = self.get_logger_name()
            existing = name in logging.root.manager.loggerDict
            self._task_logger = logging.getLogger(name) if existing else self._create_logger(name)
        return self._task_logger

    @property
    def live_task_id(self):
        """
        The task id depends on the task family and parameters, and is generated by luigi once in the
        constructor. As the latter may change, this property returns to the id with the current set
        of parameters.
        """
        # create a temporary dictionary of param_kwargs that is patched for the duration of the
        # call to create the string representation of the parameters
        param_kwargs = {attr: getattr(self, attr) for attr in self.param_kwargs}

        # only_public was introduced in 2.8.0, so check if that arg exists
        # NOTE(review): inspect.getargspec was removed in Python 3.11; both
        # this call and the module-level "from inspect import getargspec"
        # should migrate to inspect.getfullargspec - TODO confirm supported
        # Python versions before changing
        str_params_kwargs = {"only_significant": True}
        if "only_public" in getargspec(self.to_str_params).args:
            str_params_kwargs["only_public"] = True
        with patch_object(self, "param_kwargs", param_kwargs):
            str_params = self.to_str_params(**str_params_kwargs)

        # create the task id
        task_id = luigi.task.task_id_str(self.get_task_family(), str_params)

        return task_id

    def walk_deps(self, max_depth=-1, order="level"):
        """
        Generator that walks the dependency tree of this task up to
        *max_depth* (-1 for unlimited), yielding ``(task, deps, depth)``
        tuples in breadth-first ("level") or depth-first ("pre") order.
        """
        # see https://en.wikipedia.org/wiki/Tree_traversal
        if order not in ("level", "pre"):
            raise ValueError("unknown traversal order '{}', use 'level' or 'pre'".format(order))

        tasks = [(self, 0)]
        while len(tasks):
            task, depth = tasks.pop(0)
            if max_depth >= 0 and depth > max_depth:
                continue

            deps = flatten(task.requires())
            yield (task, deps, depth)

            deps = ((d, depth + 1) for d in deps)
            if order == "level":
                tasks[len(tasks):] = deps
            elif order == "pre":
                tasks[:0] = deps

    def cli_args(self, exclude=None, replace=None):
        """
        Returns an ordered mapping of command-line arguments
        (``"--param-name" -> serialized value``) representing this task's
        parameters, optionally excluding names matching *exclude* patterns
        and overriding raw values via *replace*.
        """
        exclude = set() if exclude is None else set(make_list(exclude))
        if replace is None:
            replace = {}

        args = OrderedDict()
        for name, param in self.get_params():
            if multi_match(name, exclude, any):
                continue
            raw = replace.get(name, getattr(self, name))
            val = param.serialize(raw)
            args["--" + name.replace("_", "-")] = str(val)

        return args
class Register(BaseRegister):
    # Metaclass for concrete law tasks: after instantiation it evaluates any
    # set "interactive" parameters (e.g. --print-deps) by dispatching to the
    # underscore-prefixed handler method, then usually aborts the process so
    # no task actually runs.

    def __call__(cls, *args, **kwargs):
        inst = super(Register, cls).__call__(*args, **kwargs)

        # check for interactive parameters
        for param in inst.interactive_params:
            value = getattr(inst, param)
            if value:
                skip_abort = False
                try:
                    logger.debug("evaluating interactive parameter '{}' with value {}".format(
                        param, value))
                    # handler "_<param>" may return True to continue running
                    skip_abort = getattr(inst, "_" + param)(value)

                    # reset the interactive parameter
                    setattr(inst, param, ())
                except KeyboardInterrupt:
                    print("\naborted")

                # abort the process if not explicitly skipped
                if not skip_abort:
                    abort(exitcode=0)

                print("")

        return inst
class Task(six.with_metaclass(Register, BaseTask)):
    """
    Standard task class with support for interactive parameters (evaluated by
    the :py:class:`Register` meta class right after instantiation), messages
    and progress updates published to the central scheduler, and programmatic
    "law run" invocation.
    """

    log_file = luigi.Parameter(default=NO_STR, significant=False, description="a custom log file; "
        "default: <task.default_log_file>")
    print_deps = CSVParameter(default=(), significant=False, description="print task dependencies "
        "but do not run any task; this CSV parameter accepts a single integer value which sets the "
        "task recursion depth (0 means non-recursive)")
    print_status = CSVParameter(default=(), significant=False, description="print the task status "
        "but do not run any task; this CSV parameter accepts up to three values: 1. the task "
        "recursion depth (0 means non-recursive), 2. the depth of the status text of target "
        "collections (default: 0), 3. a flag that is passed to the status text creation (default: "
        "'')")
    print_output = CSVParameter(default=(), significant=False, description="print a flat list of "
        "output targets but do not run any task; this CSV parameter accepts up to two values: 1. "
        "the task recursion depth (0 means non-recursive), 2. a boolean flag that decides whether "
        "paths of file targets should contain file system schemes (default: True)")
    remove_output = CSVParameter(default=(), significant=False, description="remove task outputs "
        "but do not run any task by default; this CSV parameter accepts up to three values: 1. the "
        "task recursion depth (0 means non-recursive), 2. one of the modes 'i' (interactive), 'a' "
        "(all), 'd' (dry run) (default: 'i'), 3. a boolean flag that decides whether the task is "
        "run after outputs were removed (default: False)")
    fetch_output = CSVParameter(default=(), significant=False, description="copy all task outputs "
        "into a local directory but do not run any task; this CSV parameter accepts up to four "
        "values: 1. the task recursion depth (0 means non-recursive), 2. one of the modes 'i' "
        "(interactive), 'a' (all), 'd' (dry run) (default: 'i'), 3. the target directory (default: "
        "'.'), 4. a boolean flag that decides whether external outputs and outputs of external "
        "tasks should be fetched (default: False)")

    # names of parameters that trigger interactive behavior, evaluated by the Register meta class
    interactive_params = [
        "print_deps", "print_status", "print_output", "fetch_output", "remove_output",
    ]

    # cache size for published messages
    message_cache_size = 10

    # force skipping this task when remove_output is set to "all" mode
    skip_output_removal = False

    # do not show this base class in the task index
    exclude_index = True

    # parameter names (patterns) excluded from requirement propagation and from repr
    exclude_params_req = set()
    exclude_params_repr = set()

    @classmethod
    def req_params(cls, inst, _exclude=None, _prefer_cli=None, **kwargs):
        """
        Returns the parameters used to create a required instance of this task
        from *inst*, always excluding interactive parameters in addition to
        *_exclude*.
        """
        _exclude = set() if _exclude is None else set(make_list(_exclude))

        # always exclude interactive parameters
        _exclude |= set(inst.interactive_params)

        return super(Task, cls).req_params(inst, _exclude=_exclude, _prefer_cli=_prefer_cli,
            **kwargs)

    def __init__(self, *args, **kwargs):
        super(Task, self).__init__(*args, **kwargs)

        # cache for messages published to the scheduler
        self._message_cache = []

        # cache for the last progress published to the scheduler
        self._last_progress_percentage = None

    @property
    def default_log_file(self):
        # default log file, "-" denotes stdout
        return "-"

    def is_root_task(self):
        # whether this instance is the root task of the current process
        return root_task() == self

    def publish_message(self, msg, stdout=sys.stdout, scheduler=True, **kwargs):
        """
        Writes *msg* to *stdout* (when set) and optionally forwards it to the
        central scheduler; *kwargs* are passed to :py:meth:`_publish_message`.
        """
        msg = str(msg)

        # write to stdout
        if stdout:
            stdout.write(msg + "\n")
            stdout.flush()

        # publish to the scheduler
        if scheduler:
            self._publish_message(msg, **kwargs)

    def _publish_message(self, msg, flush_cache=False, silent=False):
        # forwards the uncolored message to the scheduler through
        # set_status_message, keeping a bounded cache of recent messages
        msg = uncolored(str(msg))

        # flush the message cache?
        if flush_cache:
            del self._message_cache[:]

        # add to message cache and handle overflow
        self._message_cache.append(msg)
        if self.message_cache_size >= 0:
            end = max(len(self._message_cache) - self.message_cache_size, 0)
            del self._message_cache[:end]

        # set status message based on the full, current message cache
        if callable(getattr(self, "set_status_message", None)):
            self.set_status_message("\n".join(self._message_cache))
        elif not silent:
            logger.warning("set_status_message not set, cannot send task message to scheduler")

    def _create_message_stream(self, *args, **kwargs):
        # file-like object whose writes are forwarded to publish_message
        return TaskMessageStream(self, *args, **kwargs)

    def _create_logger(self, name, level=None, **kwargs):
        # logger whose console handler writes into a task message stream
        return setup_logger(name, level=level, add_console_handler={
            "handler_kwargs": {"stream": self._create_message_stream(**kwargs)},
        })

    @contextmanager
    def publish_step(self, msg, success_message="done", fail_message="failed", runtime=True,
            scheduler=True, flush_cache=False):
        """
        Context manager that publishes *msg* on entry and either
        *success_message* or *fail_message* on exit, optionally appending the
        elapsed runtime.
        """
        self.publish_message(msg, scheduler=scheduler, flush_cache=flush_cache)
        success = False
        t0 = time.time()
        try:
            yield
            success = True
        finally:
            msg = success_message if success else fail_message
            if runtime:
                diff = time.time() - t0
                msg = "{} (took {})".format(msg, human_duration(seconds=diff))
            self.publish_message(msg, scheduler=scheduler, flush_cache=flush_cache)

    def publish_progress(self, percentage, precision=1):
        """
        Publishes *percentage*, rounded down to steps of *precision*, to the
        scheduler; unchanged values are not re-published.
        """
        percentage = int(round_discrete(percentage, precision, "floor"))
        if percentage != self._last_progress_percentage:
            self._last_progress_percentage = percentage

            if callable(getattr(self, "set_progress_percentage", None)):
                self.set_progress_percentage(percentage)
            else:
                logger.warning("set_progress_percentage not set, cannot send task progress to "
                    "scheduler")

    def create_progress_callback(self, n_total, reach=(0, 100), precision=1):
        """
        Creates a callback that maps an iteration index onto the progress range
        *reach* and publishes it. When *n_total* is a list or tuple, one
        callback per entry is returned, each covering an equal share of 0-100.
        """
        def make_callback(n, start, end):
            def callback(i):
                self.publish_progress(start + (i + 1) / float(n) * (end - start), precision)
            return callback

        if isinstance(n_total, (list, tuple)):
            width = 100. / len(n_total)
            reaches = [(width * i, width * (i + 1)) for i in range(len(n_total))]
            return n_total.__class__(make_callback(n, *r) for n, r in zip(n_total, reaches))
        else:
            return make_callback(n_total, *reach)

    def cli_args(self, exclude=None, replace=None):
        # same as the base implementation, but never include interactive parameters
        exclude = set() if exclude is None else set(make_list(exclude))

        # always exclude interactive parameters
        exclude |= set(self.interactive_params)

        return super(Task, self).cli_args(exclude=exclude, replace=replace)

    def __repr__(self):
        color = Config.instance().get_expanded_boolean("task", "colored_repr")
        return self.repr(color=color)

    def __str__(self):
        color = Config.instance().get_expanded_boolean("task", "colored_str")
        return self.repr(color=color)

    def repr(self, all_params=False, color=None, **kwargs):
        """
        Builds the task representation "Family(param=value, ..., flags)";
        coloring is controlled by *color* or, when *None*, the law config.
        """
        if color is None:
            color = Config.instance().get_expanded_boolean("task", "colored_repr")

        family = self._repr_family(self.get_task_family(), color=color, **kwargs)

        parts = [
            self._repr_param(name, value, color=color, **kwargs)
            for name, value in six.iteritems(self._repr_params(all_params=all_params))
        ] + [
            self._repr_flag(flag, color=color, **kwargs)
            for flag in self._repr_flags()
        ]

        return "{}({})".format(family, ", ".join(parts))

    def _repr_params(self, all_params=False):
        # determine parameters to exclude
        exclude = set()
        if not all_params:
            exclude |= self.exclude_params_repr
            exclude |= set(self.interactive_params)

        # build a map "name -> value" for all significant parameters
        params = OrderedDict()
        for name, param in self.get_params():
            if param.significant and not multi_match(name, exclude):
                params[name] = getattr(self, name)

        return params

    def _repr_flags(self):
        # flags appended to the repr, subclasses may add entries
        return []

    def _repr_family(self, family, color=False, **kwargs):
        return colored(family, "green") if color else family

    def _repr_param(self, name, value, color=False, serialize=True, **kwargs):
        # try to serialize first unless explicitly disabled
        if serialize:
            param = getattr(self.__class__, name, no_value)
            if param != no_value:
                value = param.serialize(value)
        return "{}={}".format(colored(name, color="blue", style="bright") if color else name, value)

    def _repr_flag(self, name, color=False, **kwargs):
        return colored(name, color="magenta") if color else name

    # handlers of the interactive parameters above, dispatched via
    # getattr(inst, "_" + param) by the Register meta class

    def _print_deps(self, args):
        return print_task_deps(self, *args)

    def _print_status(self, args):
        return print_task_status(self, *args)

    def _print_output(self, args):
        return print_task_output(self, *args)

    def _remove_output(self, args):
        return remove_task_output(self, *args)

    def _fetch_output(self, args):
        return fetch_task_output(self, *args)

    @classmethod
    def _law_run_inst(cls, inst, _exclude=None, _replace=None, _global=None, _run_kwargs=None):
        """
        Runs the task instance *inst* through the "law run" entry point,
        building the command line from its cli arguments.
        """
        # get the cli arguments
        args = inst.cli_args(exclude=_exclude, replace=_replace)
        args = sum((make_list(tpl) for tpl in args.items()), [])

        # add global parameters when given
        if _global:
            args.extend([str(arg) for arg in make_list(_global)])

        # build the full command
        cmd = [cls.get_task_family()] + args

        # run it
        return law_run(cmd, **(_run_kwargs or {}))

    @classmethod
    def law_run_inst(cls, _exclude=None, _replace=None, _global=None, _run_kwargs=None, **kwargs):
        # create a new instance
        inst = cls(**kwargs)

        return cls._law_run_inst(inst, _exclude=_exclude, _replace=_replace, _global=_global,
            _run_kwargs=_run_kwargs)

    def law_run(self, _exclude=None, _replace=None, _global=None, _run_kwargs=None, **kwargs):
        # when kwargs are given, create a new instance
        inst = self.req(self, **kwargs) if kwargs else self

        return self._law_run_inst(inst, _exclude=_exclude, _replace=_replace, _global=_global,
            _run_kwargs=_run_kwargs)

    def localize_input(self, *args, **kwargs):
        # forward this task's inputs to localize_file_targets
        return localize_file_targets(self.input(), *args, **kwargs)

    def localize_output(self, *args, **kwargs):
        # forward this task's outputs to localize_file_targets
        return localize_file_targets(self.output(), *args, **kwargs)
class WrapperTask(Task):
    """
    Base class for tasks that merely wrap other tasks: such a task is complete
    exactly when all of its requirements are complete, and running it is a
    no-op.
    """

    # wrapper tasks should not show up in the task index
    exclude_index = True

    def _repr_flags(self):
        flags = super(WrapperTask, self)._repr_flags()
        return flags + ["wrapper"]

    def complete(self):
        # done as soon as every required task is done
        for task in flatten(self.requires()):
            if not task.complete():
                return False
        return True

    def run(self):
        # nothing to do
        return
class ExternalTask(Task):
    """
    Base class for tasks whose outputs are produced externally and that can
    therefore not be run themselves.
    """

    # external tasks should not show up in the task index
    exclude_index = True

    # disable running entirely
    run = None

    def _repr_flags(self):
        flags = super(ExternalTask, self)._repr_flags()
        return flags + ["external"]
class TaskMessageStream(BaseStream):
    """
    File-like stream whose writes are forwarded to the owning task's
    :py:meth:`Task.publish_message`.
    """

    def __init__(self, task, stdout=sys.stdout, scheduler=True, flush_cache=False, **kwargs):
        super(TaskMessageStream, self).__init__(**kwargs)

        # store attributes used when forwarding messages
        self.task = task
        self.stdout = stdout
        self.scheduler = scheduler
        self.flush_cache = flush_cache

    def _write(self, msg):
        # strip the trailing newline and forward to publish_message
        text = msg.rstrip("\n")
        self.task.publish_message(text, stdout=self.stdout, scheduler=self.scheduler,
            flush_cache=self.flush_cache, silent=True)
# trailing imports
from law.task.interactive import (
print_task_deps, print_task_status, print_task_output, remove_task_output, fetch_task_output,
)
|
|
# PythonJS Low Level Runtime
# by Amirouche Boubekki and Brett Hartshorn - copyright 2013
# License: "New BSD"
# Runtime environment detection flags for the PythonJS low level runtime.
# __NULL_OBJECT__ is a prototype-less JavaScript object, used below as an
# empty keyword-arguments sentinel.
__NULL_OBJECT__ = Object.create( null )
__WEBWORKER__ = False
__NODEJS__ = False
__BROWSER__ = False
## note browser and nodejs can both be true in the case of NodeWebkit
if typeof(process) != 'undefined': ## TODO check if this is true inside a nodejs webworker
    __NODEJS__ = True
if typeof(window) != 'undefined':
    __BROWSER__ = True
if typeof(importScripts) == 'function':
    __WEBWORKER__ = True
def __create_array__(): ## DEPRECATED
    """Used to fix a bug/feature of Javascript where new Array(number)
    created a array with number of undefined elements which is not
    what we want"""
    # copy the implicit JavaScript `arguments` object into a real array
    var(i, array)
    array = []
    i = 0
    while i < arguments.length:
        array.push(arguments[i])
        i += 1
    return array
def __get__(object, attribute, error_message):
    """Retrieve an attribute, method, property, or wrapper function.

    method are actually functions which are converted to methods by
    prepending their arguments with the current object. Properties are
    not functions!

    DOM support:
        http://stackoverflow.com/questions/14202699/document-createelement-not-working
        https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/instanceof

    Direct JavaScript Calls:
        if an external javascript function is found, and it was not a wrapper that was generated here,
        check the function for a 'cached_wrapper' attribute, if none is found then generate a new
        wrapper, cache it on the function, and return the wrapper.
    """
    ## null and undefined receivers can never have attributes - fail eagerly ##
    if object is None:
        if error_message:
            raise AttributeError('(`null` has no attributes) ' +error_message)
        else:
            raise AttributeError('null object (None) has no attribute: '+attribute)
    elif object is undefined:
        if error_message:
            raise AttributeError('(`undefined` has no attributes) ' +error_message)
        else:
            raise AttributeError('undefined has no attribute: ' +attribute)

    ## special case: asking a callable for its __call__ slot ##
    if attribute == '__call__':
        if object.pythonscript_function or object.is_wrapper: ## common case
            return object
        elif object.cached_wrapper: ## rare case
            return object.cached_wrapper
        elif JS("{}.toString.call(object) === '[object Function]'"):
            ## TODO double check that this is not a pythonjs function
            def wrapper(args,kwargs): ## dyanmically wrap external javascript function
                # convert pythonjs objects in args/kwargs to plain javascript
                # via their jsify method before calling the external function
                var(i, arg, keys)
                if args != None:
                    i = 0
                    while i < args.length:
                        arg = args[i]
                        #if instanceof(arg, Object): ## fails on objects created by Object.create(null)
                        if arg and typeof(arg) == 'object':
                            if arg.jsify:
                                args[i] = arg.jsify()
                        i += 1
                if kwargs != None:
                    keys = __object_keys__(kwargs)
                    if keys.length != 0:
                        # keyword arguments are passed as a trailing plain object
                        args.push( kwargs )
                        i = 0
                        while i < keys.length:
                            arg = kwargs[ keys[i] ]
                            if arg and typeof(arg) == 'object':
                                if arg.jsify:
                                    kwargs[ keys[i] ] = arg.jsify()
                            i += 1
                return object.apply(None, args)
            wrapper.is_wrapper = True
            # cache the wrapper on the function itself for later lookups
            object.cached_wrapper = wrapper
            return wrapper

    ## user-defined __getattribute__ intercepts everything else ##
    if Object.hasOwnProperty.call(object, '__getattribute__'):
        return object.__getattribute__( attribute )

    var(attr)
    attr = object[attribute] ## this could be a javascript object with cached method

    ## DOM objects get their methods bound to the receiver on the fly
    ## (only relevant in a browser main thread)
    if __NODEJS__ is False and __WEBWORKER__ is False:
        if JS("object instanceof HTMLDocument"):
            #print 'DYNAMIC wrapping HTMLDocument'
            if JS("typeof(attr) === 'function'"):
                def wrapper(args,kwargs): return attr.apply(object, args)
                wrapper.is_wrapper = True
                return wrapper
            else:
                return attr
        elif JS("object instanceof HTMLElement"):
            #print 'DYNAMIC wrapping HTMLElement'
            if JS("typeof(attr) === 'function'"):
                def wrapper(args,kwargs): return attr.apply(object, args)
                wrapper.is_wrapper = True
                return wrapper
            else:
                return attr

    ## attr can be null and will return, undefined will raise AttributeError ##
    if attr is not undefined:
        if typeof(attr) == 'function':
            if JS("attr.pythonscript_function === undefined && attr.is_wrapper === undefined"):
                ## if there is a prototype with methods, then we can be sure that the user indends to call `new` on it,
                ## however rare, it is still possible that it is a constructor without a prototype of any length,
                ## in that case the user must call `new` and using the full scope, because things inside a `new`
                ## call are not wrapped, ie: `new(A.B.C.xxx(args))`
                if instanceof(attr.prototype, Object) and Object.keys(attr.prototype).length > 0:
                    return attr
                def wrapper(args,kwargs):
                    #if instanceof(args, Array):
                    # same jsify conversion as for the direct-call wrapper above
                    var(i, arg, keys)
                    if args != None:
                        i = 0
                        while i < args.length:
                            arg = args[i]
                            if arg and typeof(arg) == 'object':
                                if arg.jsify:
                                    args[i] = arg.jsify()
                            i += 1
                    if kwargs != None:
                        keys = __object_keys__(kwargs)
                        if keys.length != 0:
                            args.push( kwargs )
                            i = 0
                            while i < keys.length:
                                arg = kwargs[ keys[i] ]
                                if arg and typeof(arg) == 'object':
                                    if arg.jsify:
                                        kwargs[ keys[i] ] = arg.jsify()
                                i += 1
                    return attr.apply(object, args)
                    #else: ## TODO are there cases where this is needed?
                    #	return attr.apply(object, arguments)
                wrapper.is_wrapper = True
                wrapper.wrapped = attr ## this is required because some javascript API's `class-method-style` helper functions on the constructor
                return wrapper
            elif attr.is_classmethod:
                def method():
                    # normalize the call into ([args...], {kwargs}) form and
                    # prepend the class (or the object itself) as first argument
                    var(args)
                    args = Array.prototype.slice.call(arguments)
                    if (JS('args[0] instanceof Array') and JS("{}.toString.call(args[1]) === '[object Object]'") and args.length == 2):
                        pass
                    else:
                        args = [args, JSObject()]
                    if object.__class__: ## if classmethod is called from an instance, force class as first argument
                        args[0].splice(0, 0, object.__class__)
                    else:
                        args[0].splice(0, 0, object)
                    return attr.apply(this, args) ## this is bound so that callback methods can use `this` from the caller
                method.is_wrapper = True
                object[attribute] = method ## cache method - we assume that class methods do not change
                return method
            else:
                return attr
        else:
            return attr

    var(__class__, bases)
    #attr = object[ attribute ]
    #if attr != None:
    #	return attr
    # next check for object.__class__
    __class__ = object.__class__
    if __class__: ## at this point we can assume we are dealing with a pythonjs class instance
        if attribute in __class__.__properties__: ## @property decorators - TODO support PythonJSJS classes
            return __class__.__properties__[ attribute ]['get']( [object], JSObject() )

        ## unbound methods get bound by prepending the instance to the args ##
        if attribute in __class__.__unbound_methods__:
            attr = __class__.__unbound_methods__[ attribute ]
            if attr.fastdef:
                def method(args,kwargs):
                    if arguments and arguments[0]:
                        arguments[0].splice(0,0,object)
                        return attr.apply(this, arguments)
                    else:
                        return attr( [object], {} )
            else:
                def method(args,kwargs):
                    if arguments.length == 0:
                        return attr( [object], __NULL_OBJECT__ )
                    elif instanceof(args,Array) and typeof(kwargs) is "object" and arguments.length==2:
                        args.splice(0, 0, object)
                        if kwargs is undefined:
                            return attr( args, __NULL_OBJECT__ )
                        else:
                            return attr( args, kwargs )
                    else:
                        # called directly from javascript: repack raw arguments
                        args = Array.prototype.slice.call(arguments)
                        args.splice(0, 0, object)
                        args = [args, __NULL_OBJECT__] ## TODO - way to pass keyword args from javascript?
                        return attr.apply(this, args) ## this is bound here so that callback methods can use `this` from the caller
            method.is_wrapper = True
            object[attribute] = method ## cache method - we assume that methods do not change
            return method

        ## methods stored directly on the class object ##
        attr = __class__[ attribute ]
        if attribute in __class__:
            if JS("{}.toString.call(attr) === '[object Function]'"):
                if attr.is_wrapper:
                    return attr
                elif attr.fastdef:
                    def method(args,kwargs):
                        if arguments and arguments[0]:
                            arguments[0].splice(0,0,object)
                            return attr.apply(this, arguments)
                        else:
                            return attr( [object], {} )
                else:
                    def method(args,kwargs):
                        if arguments.length == 0:
                            return attr( [object], __NULL_OBJECT__ )
                        elif instanceof(args,Array) and typeof(kwargs) is "object" and arguments.length==2:
                            args.splice(0, 0, object)
                            if kwargs is undefined:
                                return attr( args, __NULL_OBJECT__ )
                            else:
                                return attr( args, kwargs )
                        else:
                            args = Array.prototype.slice.call(arguments)
                            args.splice(0, 0, object)
                            args = [args, __NULL_OBJECT__] ## TODO - way to pass keyword args from javascript?
                            return attr.apply(this, args) ## this is bound here so that callback methods can use `this` from the caller
                method.is_wrapper = True
                object[attribute] = method ## cache method - we assume that methods do not change
                return method
            else:
                return attr

        ## walk the base classes for inherited methods ##
        bases = __class__.__bases__
        for base in bases:
            attr = _get_upstream_attribute(base, attribute)
            if attr is not undefined:
                if JS("{}.toString.call(attr) === '[object Function]'"):
                    if attr.fastdef:
                        def method(args,kwargs):
                            if arguments and arguments[0]:
                                arguments[0].splice(0,0,object)
                                return attr.apply(this, arguments)
                            else:
                                return attr( [object], {} )
                    else:
                        def method(args,kwargs):
                            if arguments.length == 0:
                                return attr( [object], __NULL_OBJECT__ )
                            elif instanceof(args,Array) and typeof(kwargs) is "object" and arguments.length==2:
                                args.splice(0, 0, object)
                                if kwargs is undefined:
                                    return attr( args, __NULL_OBJECT__ )
                                else:
                                    return attr( args, kwargs )
                            else:
                                args = Array.prototype.slice.call(arguments)
                                args.splice(0, 0, object)
                                args = [args, __NULL_OBJECT__] ## TODO - way to pass keyword args from javascript?
                                return attr.apply(this, args) ## this is bound here so that callback methods can use `this` from the caller
                    method.is_wrapper = True
                    object[attribute] = method ## cache method - we assume that methods do not change
                    return method
                else:
                    return attr

        for base in bases: ## upstream property getters come before __getattr__
            var( prop )
            prop = _get_upstream_property(base, attribute)
            if prop is not undefined:
                return prop['get']( [object], JSObject() )

        ## user-defined __getattr__ fallback, own class first, then bases ##
        if '__getattr__' in __class__:
            return __class__['__getattr__']( [object, attribute], JSObject() )

        for base in bases:
            var( f )
            f = _get_upstream_attribute(base, '__getattr__')
            if f is not undefined:
                return f( [object, attribute], JSObject() )

    ## getting/setting from a normal JavaScript Object ##
    if attribute == '__getitem__':
        def wrapper(args,kwargs): return object[ args[0] ]
        wrapper.is_wrapper = True
        return wrapper
    elif attribute == '__setitem__':
        def wrapper(args,kwargs): object[ args[0] ] = args[1]
        wrapper.is_wrapper = True
        return wrapper

    ## wrapped external functions expose the attributes of the wrapped function ##
    if typeof(object, 'function') and object.is_wrapper:
        return object.wrapped[ attribute ]

    ## iteration and membership tests over plain javascript objects use their keys ##
    if attribute == '__iter__' and instanceof(object, Object):
        def wrapper(args, kwargs): return new( __ArrayIterator(Object.keys( object ),0) )
        wrapper.is_wrapper = True
        return wrapper

    if attribute == '__contains__' and instanceof(object, Object):
        def wrapper(args, kwargs): return (Object.keys( object )).indexOf( args[0] ) != -1
        wrapper.is_wrapper = True
        return wrapper

    ## nothing found anywhere - raise, or return the (possibly null) raw value ##
    if attr is undefined:
        if error_message:
            raise AttributeError(error_message)
        else:
            raise AttributeError(attribute)
    else:
        return attr
def _get_upstream_attribute(base, attr):
    # Look up `attr` on `base` itself or, recursively, on its parent classes.
    if attr in base:
        return base[ attr ]
    for parent in base.__bases__:
        # NOTE(review): this returns unconditionally on the first parent, so
        # only the first inheritance branch is ever searched and an undefined
        # result is not retried on later parents -- confirm this is intended.
        return _get_upstream_attribute(parent, attr)
def _get_upstream_property(base, attr): ## no longer required
    # Look up the @property descriptor `attr` on `base` or, recursively, on
    # its parent classes.
    if attr in base.__properties__:
        return base.__properties__[ attr ]
    for parent in base.__bases__:
        # NOTE(review): same first-parent-only recursion as in
        # _get_upstream_attribute above -- confirm this is intended.
        return _get_upstream_property(parent, attr)
def __set__(object, attribute, value):
    '''
    __setattr__ is always called when an attribute is set,
    unlike __getattr__ that only triggers when an attribute is not found,
    this asymmetry is in fact part of the Python spec.
    note there is no __setattribute__

    In normal Python a property setter is not called before __setattr__,
    this is bad language design because the user has been more explicit
    in having the property setter.

    In PythonJS, property setters are called instead of __setattr__.
    '''
    # property setters registered on the class win over __setattr__;
    # plain assignment here presumably triggers the JavaScript setter
    # generated for the class -- confirm against the translator output
    if '__class__' in object and object.__class__.__setters__.indexOf(attribute) != -1:
        object[attribute] = value
    elif '__setattr__' in object:
        object.__setattr__( attribute, value )
    else:
        # plain javascript assignment for everything else
        object[attribute] = value
def __getargs__(func_name, signature, args, kwargs):
    """Based on ``signature`` and ``args``, ``kwargs`` parameters retrieve
    the actual parameters.

    This will set default keyword arguments and retrieve positional arguments
    in kwargs if their called as such"""
    # normalize missing argument containers
    if args is None: args = []
    if kwargs is None: kwargs = {}

    out = {}

    # if the caller did not specify supplemental positional arguments e.g. *args in the signature
    # raise an error
    if args.length > signature.args.length:
        if signature.vararg:
            pass
        else:
            print 'Error in function->' + func_name
            print 'args:', args, 'kwargs:', kwargs, 'sig:', signature
            raise TypeError("Supplemental positional arguments provided but signature doesn't accept them")

    # resolve each declared parameter: explicit keyword > positional > declared default
    j = 0
    while j < signature.args.length:
        name = signature.args[j]
        if name in kwargs:
            # value is provided as a keyword argument
            out[name] = kwargs[name]
        elif j < args.length:
            # value is positional and within the signature length
            out[name] = args[j]
        elif name in signature.kwargs:
            # value is not found before and is in signature.length
            out[name] = signature.kwargs[name]
        j += 1

    # remaining positionals feed *vararg, remaining keywords feed **varkwarg
    args = args.slice(j) ## note that if this fails because args is not an array, then a pythonjs function was called from javascript in a bad way.
    #args = Array.prototype.slice.call(args, j) ## this fix should not be required

    if signature.vararg:
        out[signature.vararg] = args
    if signature.varkwarg:
        out[signature.varkwarg] = kwargs
    return out
|
|
# Copyright (c) 2010-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2012-2014 Mark D. Hill and David A. Wood
# Copyright (c) 2009-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Brad Beckmann
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
import Ruby
from FSConfig import *
from SysPaths import *
from Benchmarks import *
import Simulation
import CacheConfig
import MemConfig
from Caches import *
import Options
# Check if KVM support has been enabled (i.e. the BaseKvmCPU model was pulled
# in by the wildcard import from m5.objects); we might need to do VM
# configuration if that's the case.
have_kvm_support = 'BaseKvmCPU' in globals()
def is_kvm_cpu(cpu_class):
    """
    Return True when *cpu_class* is a KVM-based CPU model and this gem5 build
    provides KVM support (see have_kvm_support above).
    """
    # use identity comparison with None instead of "!=" (PEP 8 idiom)
    return have_kvm_support and cpu_class is not None and \
        issubclass(cpu_class, BaseKvmCPU)
def initO3params(options, num_cpus, testsys):
    """
    Copy the per-structure scaling flags from the parsed command line options
    onto every one of the first *num_cpus* CPUs of *testsys* and echo the
    applied value for each flag.
    """
    # names of the scaling knobs forwarded 1:1 from options to each cpu;
    # the table replaces ten copy-pasted assign-and-print pairs
    scale_flags = (
        'rob_scale_enabled',
        'btb_scale_enabled',
        'tlb_scale_enabled',
        'iq_scale_enabled',
        'regfile_scale_enabled',
        'lsq_scale_enabled',
        'alu_scale_enabled',
        'fpu_scale_enabled',
        'dcache_scale_enabled',
        'icache_scale_enabled',
    )
    for i in range(num_cpus):
        cpu = testsys.cpu[i]
        for flag in scale_flags:
            setattr(cpu, flag, getattr(options, flag))
            # read back from the cpu so any coercion applied by the parameter
            # machinery is reflected in the output (parenthesized print works
            # identically under Python 2 and 3 for a single argument)
            print('for cpu:%d %s:%d' % (i, flag, getattr(cpu, flag)))
def build_test_system(np):
    """
    Build and return the full-system "test" system for np CPUs, dispatching on
    the target ISA and wiring up clock/voltage domains, CPUs, and either a
    Ruby or a classic memory system.

    NOTE(review): relies on several globals defined elsewhere in this script
    (options, bm, test_mem_mode, TestCPUClass, FutureClass, TestMemClass) --
    confirm they are initialized before this function is called.
    """
    # instantiate the ISA-specific base system around benchmark bm[0]
    if buildEnv['TARGET_ISA'] == "alpha":
        test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby)
    elif buildEnv['TARGET_ISA'] == "mips":
        test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "sparc":
        test_sys = makeSparcSystem(test_mem_mode, bm[0])
    elif buildEnv['TARGET_ISA'] == "x86":
        test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
                                      options.ruby)
    elif buildEnv['TARGET_ISA'] == "arm":
        test_sys = makeArmSystem(test_mem_mode, options.machine_type, bm[0],
                                 options.dtb_filename,
                                 bare_metal=options.bare_metal,
                                 sdcard_image=options.sdcard_image)
        if options.enable_context_switch_stats_dump:
            test_sys.enable_context_switch_stats_dump = True
    else:
        fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

    # Set the cache line size for the entire system
    test_sys.cache_line_size = options.cacheline_size

    # Create a top-level voltage domain
    test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)

    # Create a source clock for the system and set the clock period
    test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
            voltage_domain = test_sys.voltage_domain)

    #Create a clk running contantly at 1.4GHz for L2
    test_sys.clk_domain_const = SrcClockDomain(clock = ["1.4GHz"],
            voltage_domain = test_sys.voltage_domain)

    # Create a CPU voltage domain
    test_sys.cpu_voltage_domain = VoltageDomain()

    # Create a source clock for the CPUs and set the clock period
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
    #                                         voltage_domain =
    #                                         test_sys.cpu_voltage_domain)
    #test_sys.cpu_clk_domain = SrcClockDomain(clock = ["3GHz","2GHz","1GHz"],
    # NOTE(review): four separate per-CPU clock domains, all starting at
    # 0.9GHz, presumably one per socket for independent DVFS -- see the
    # dvfs_handler.domains assignment below.
    test_sys.cpu_clk_domain = SrcClockDomain(clock = ["0.9GHz"],
                                             voltage_domain =
                                             test_sys.cpu_voltage_domain,
                                             domain_id=0)
    test_sys.cpu_clk_domain1 = SrcClockDomain(clock = ["0.9GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=1)
    test_sys.cpu_clk_domain2 = SrcClockDomain(clock = ["0.9GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=2)
    test_sys.cpu_clk_domain3 = SrcClockDomain(clock = ["0.9GHz"],
                                              voltage_domain =
                                              test_sys.cpu_voltage_domain,
                                              domain_id=3)

    # optional kernel, boot script and ARM feature flags from the command line
    if options.kernel is not None:
        test_sys.kernel = binary(options.kernel)

    if options.script is not None:
        test_sys.readfile = options.script

    if options.lpae:
        test_sys.have_lpae = True

    if options.virtualisation:
        test_sys.have_virtualization = True

    test_sys.init_param = options.init_param

    # For now, assign all the CPUs to the same clock domain
    #test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
    #                for i in xrange(np)]
    # NOTE(review): hard-coded to exactly four CPUs, one per clock domain /
    # socket, ignoring np -- confirm np is always 4 for this configuration.
    test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=0, socket_id=0), TestCPUClass(clk_domain=test_sys.cpu_clk_domain1, cpu_id=1, socket_id=1), TestCPUClass(clk_domain=test_sys.cpu_clk_domain2, cpu_id=2, socket_id=2), TestCPUClass(clk_domain=test_sys.cpu_clk_domain3, cpu_id=3, socket_id=3)]

    if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
        test_sys.vm = KvmVM()

    # register all four cpu clock domains with the DVFS handler
    test_sys.dvfs_handler.transition_latency = '40us'
    test_sys.dvfs_handler.enable = True
    test_sys.dvfs_handler.transform_enable = False # We do not want atomic CPU to transform
    test_sys.dvfs_handler.domains = [test_sys.cpu_clk_domain, test_sys.cpu_clk_domain1, test_sys.cpu_clk_domain2, test_sys.cpu_clk_domain3]

    if options.ruby:
        # Check for timing mode because ruby does not support atomic accesses
        if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
            print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
            sys.exit(1)

        Ruby.create_system(options, test_sys, test_sys.iobus, test_sys._dma_ports)

        # Create a seperate clock domain for Ruby
        test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
                voltage_domain = test_sys.voltage_domain)

        for (i, cpu) in enumerate(test_sys.cpu):
            #
            # Tie the cpu ports to the correct ruby system ports
            #
            cpu.clk_domain = test_sys.cpu_clk_domain
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
            cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave

            if buildEnv['TARGET_ISA'] == "x86":
                # x86 additionally routes page-table walkers and interrupts
                # through the ruby ports
                cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
                cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave

                cpu.interrupts.pio = test_sys.ruby._cpu_ports[i].master
                cpu.interrupts.int_master = test_sys.ruby._cpu_ports[i].slave
                cpu.interrupts.int_slave = test_sys.ruby._cpu_ports[i].master

            test_sys.ruby._cpu_ports[i].access_phys_mem = True

        # Create the appropriate memory controllers
        # and connect them to the IO bus
        test_sys.mem_ctrls = [TestMemClass(range = r) for r in test_sys.mem_ranges]
        for i in xrange(len(test_sys.mem_ctrls)):
            test_sys.mem_ctrls[i].port = test_sys.iobus.master
    else:
        # classic memory system: bridge I/O either through an IOCache or a
        # plain bus bridge
        if options.caches or options.l2cache:
            # By default the IOCache runs at the system clock
            test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
            test_sys.iocache.cpu_side = test_sys.iobus.master
            test_sys.iocache.mem_side = test_sys.membus.slave
        else:
            test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
            test_sys.iobridge.slave = test_sys.iobus.master
            test_sys.iobridge.master = test_sys.membus.slave

        # Sanity check
        if options.fastmem:
            if TestCPUClass != AtomicSimpleCPU:
                fatal("Fastmem can only be used with atomic CPU!")
            if (options.caches or options.l2cache):
                fatal("You cannot use fastmem in combination with caches!")

        for i in xrange(np):
            if options.fastmem:
                test_sys.cpu[i].fastmem = True
            if options.checker:
                test_sys.cpu[i].addCheckerCpu()
            test_sys.cpu[i].createThreads()

        # build cache hierarchy and memory controllers from the options
        CacheConfig.config_cache(options, test_sys)
        MemConfig.config_mem(options, test_sys)

    return test_sys
def build_drive_system(np):
    """Build the driver (traffic-generating) system for a dual simulation.

    The driver system always uses an atomic simple CPU and simple memory,
    regardless of the CPU model chosen for the system under test.  Relies on
    module-level globals: `options`, `bm` (benchmark list, uses bm[1]),
    `buildEnv`, and the make*System / binary / is_kvm_cpu helpers.
    Returns the fully wired drive_sys object.
    """
    # driver system CPU is always simple, so is the memory
    # Note this is an assignment of a class, not an instance.
    DriveCPUClass = AtomicSimpleCPU
    drive_mem_mode = 'atomic'
    DriveMemClass = SimpleMemory
    # Instantiate the ISA-appropriate full-system skeleton for the driver.
    if buildEnv['TARGET_ISA'] == 'alpha':
        drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'mips':
        drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'sparc':
        drive_sys = makeSparcSystem(drive_mem_mode, bm[1])
    elif buildEnv['TARGET_ISA'] == 'x86':
        drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1])
    elif buildEnv['TARGET_ISA'] == 'arm':
        drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, bm[1])
    # NOTE(review): an unknown TARGET_ISA leaves drive_sys unbound and raises
    # NameError below — presumably unreachable given gem5's build system.
    # Create a top-level voltage domain
    drive_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
    # Create a source clock for the system and set the clock period
    drive_sys.clk_domain = SrcClockDomain(clock =  options.sys_clock,
                                          voltage_domain = drive_sys.voltage_domain)
    # Create a CPU voltage domain
    drive_sys.cpu_voltage_domain = VoltageDomain()
    # Create a source clock for the CPUs and set the clock period
    drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
                                              voltage_domain =
                                              drive_sys.cpu_voltage_domain)
    # Single driver CPU, clocked on its own domain.
    drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
                                  cpu_id=0)
    drive_sys.cpu.createThreads()
    drive_sys.cpu.createInterruptController()
    drive_sys.cpu.connectAllPorts(drive_sys.membus)
    if options.fastmem:
        drive_sys.cpu.fastmem = True
    if options.kernel is not None:
        drive_sys.kernel = binary(options.kernel)
    # KVM CPUs need a VM object to host the guest.
    if is_kvm_cpu(DriveCPUClass):
        drive_sys.vm = KvmVM()
    # Bridge the I/O bus into the memory bus for device accesses.
    drive_sys.iobridge = Bridge(delay='50ns',
                                ranges = drive_sys.mem_ranges)
    drive_sys.iobridge.slave = drive_sys.iobus.master
    drive_sys.iobridge.master = drive_sys.membus.slave
    # Create the appropriate memory controllers and connect them to the
    # memory bus
    drive_sys.mem_ctrls = [DriveMemClass(range = r)
                           for r in drive_sys.mem_ranges]
    for i in xrange(len(drive_sys.mem_ctrls)):
        drive_sys.mem_ctrls[i].port = drive_sys.membus.master
    drive_sys.init_param = options.init_param
    return drive_sys
# --- Top-level script: parse options, build systems, run the simulation ---
# Add options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
# Add the ruby specific and protocol specific options
if '--ruby' in sys.argv:
    Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
#lokeshjindal15
if (TestCPUClass == DerivO3CPU):
    print ("**** TestCpuClass is: DerivO3CPU")
else:
    print ("**** TestCpuClass is NOT DerivO3CPU")
# Match the memories with the CPUs, based on the options for the test system
TestMemClass = Simulation.setMemClass(options)
# Select the benchmark configuration: a named entry from Benchmarks, or a
# default single/dual SysConfig built from the disk image and memory size.
if options.benchmark:
    try:
        bm = Benchmarks[options.benchmark]
    except KeyError:
        print "Error benchmark %s has not been defined." % options.benchmark
        print "Valid benchmarks are: %s" % DefinedBenchmarks
        sys.exit(1)
else:
    if options.dual:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size),
              SysConfig(disk=options.disk_image, mem=options.mem_size)]
    else:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size)]
np = options.num_cpus
test_sys = build_test_system(np)
print "cpu_type is: " + options.cpu_type
# NOTE(review): 'atomic' also triggers initO3params here even though the
# params are O3-oriented — confirm this is intended.
if (options.cpu_type == "detailed" or options.cpu_type == "arm_detailed" or options.cpu_type == "DerivO3CPU" or options.cpu_type == "atomic"):
    print "########## Running initO3params for various scaling switches"
    initO3params(options, np, test_sys)
else:
    print "########## NOT Running initO3params for various scaling switches"
# One benchmark entry => single system; two => dual (test + drive) systems.
if len(bm) == 2:
    drive_sys = build_drive_system(np)
    root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1:
    root = Root(full_system=True, system=test_sys)
else:
    print "Error I don't know how to create more than 2 systems."
    sys.exit(1)
if options.timesync:
    root.time_sync_enable = True
if options.frame_capture:
    VncServer.frame_capture = True
#m5.disableAllListeners()#lokesh to suppress gdb read error
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
|
|
# /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Vojtech Burian
@summary: Common selenium webdriver related functions.
Helper functions that abstract often basic webdriver operations into more usable functional blocks.
"""
import inspect
import time
import os
import glob
import requests
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException, \
ElementNotVisibleException, TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from unittestzero import Assert
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from shishito.runtime.shishito_support import ShishitoSupport
class SeleniumTest(object):
    """Thin convenience wrapper around a Selenium WebDriver.

    Bundles common waiting, lookup, navigation and file helpers so tests
    don't talk to the raw driver API directly.  Configuration (base_url,
    implicit wait, timeout) is read from ShishitoSupport options.
    """

    def __init__(self, driver):
        self.driver = driver
        self.shishito_support = ShishitoSupport()
        self.base_url = self.shishito_support.get_opt('base_url')
        self.default_implicit_wait = int(self.shishito_support.get_opt('default_implicit_wait'))
        self.timeout = int(self.shishito_support.get_opt('timeout'))

    def save_screenshot(self, name=None, project_root=None):
        """ Saves application screenshot """
        if not name:
            # Use the name of browser and caller function (e.g. 'chrome_test_google_search')
            name = self.driver.name + "_" + inspect.stack()[1][3]
        if not project_root:
            project_root = self.shishito_support.project_root
        screenshot_folder = os.path.join(project_root, 'screenshots')
        if not os.path.exists(screenshot_folder):
            os.makedirs(screenshot_folder)
        # Number screenshots per name so repeated calls don't overwrite each other.
        existing_images = glob.glob(os.path.join(screenshot_folder, name + '_*.png'))
        actual_pic_nr = len(existing_images) + 1
        self.driver.save_screenshot(os.path.join(screenshot_folder, '{}_{}.png'.format(name, actual_pic_nr)))

    def save_file_from_url(self, file_path, url):
        """ Saves file from url """
        if os.path.isfile(file_path):
            print('File %s already exists.' % file_path)
            return
        response = requests.get(url, stream=True)
        response.raise_for_status()
        with open(file_path, 'wb') as save_file:
            for block in response.iter_content(1024):
                if not block:
                    break
                save_file.write(block)

    # Deprecated use property directly
    def get_base_url(self):
        return self.base_url

    # Deprecated use property directly
    def get_current_url(self):
        return self.current_url

    @property
    def current_url(self):
        """ Return the url for the current page."""
        return self.driver.current_url

    def hover_on(self, element):
        """ Mouse over specific element """
        mouse_over = ActionChains(self.driver).move_to_element(element)
        mouse_over.perform()

    def go_to_page(self, url):
        """ Opens url in currently active window """
        self.driver.get(url)
        self.driver.implicitly_wait(self.default_implicit_wait)

    def click_and_wait(self, element, locator=None):
        """ clicks on a element and then waits for specific element to be present or simply waits implicitly """
        element.click()
        if locator:
            self.wait_for_element_ready(locator)
        else:
            self.driver.implicitly_wait(10)

    def check_images_are_loaded(self):
        """ checks all images on the pages and verifies if they are properly loaded """
        script = 'return arguments[0].complete && typeof arguments[0].naturalWidth' \
                 ' != "undefined" && arguments[0].naturalWidth > 0'
        images_not_loaded = []
        for image in self.driver.find_elements_by_tag_name('img'):
            loaded = self.driver.execute_script(script, image)
            if not loaded and image.get_attribute('src'):
                images_not_loaded.append('%s: %s' % (self.driver.title, image.get_attribute('src')))
        return images_not_loaded

    def is_element_present(self, locator):
        """
        True if the element at the specified locator is present in the DOM.
        Note: It returns false immediately if the element is not found.
        """
        self.driver.implicitly_wait(0)
        try:
            self.driver.find_element(*locator)
            return True
        except NoSuchElementException:
            return False
        finally:
            # set the implicit wait back
            self.driver.implicitly_wait(self.default_implicit_wait)

    def is_element_visible(self, locator):
        """
        True if the element at the specified locator is visible in the browser.
        Note: It uses an implicit wait if element is not immediately found.
        """
        try:
            return self.driver.find_element(*locator).is_displayed()
        except (NoSuchElementException, ElementNotVisibleException):
            return False

    def is_element_not_visible(self, locator):
        """
        True if the element at the specified locator is not visible.
        Note: It returns true immediately if the element is not found.
        """
        self.driver.implicitly_wait(0)
        try:
            return not self.driver.find_element(*locator).is_displayed()
        except (NoSuchElementException, ElementNotVisibleException):
            return True
        finally:
            # set the implicit wait back
            self.driver.implicitly_wait(self.default_implicit_wait)

    def wait_for_element_present(self, locator, timeout=None):
        """ Wait for the element at the specified locator
        to be present in the DOM. """
        timeout = timeout or self.timeout
        count = 0
        while not self.is_element_present(locator):
            time.sleep(1)
            count += 1
            if count == timeout:
                raise Exception('{0} has not loaded'.format(locator))

    def wait_for_element_visible(self, locator, timeout=None):
        """
        Wait for the element at the specified locator to be visible.
        """
        timeout = timeout or self.timeout
        count = 0
        while not self.is_element_visible(locator):
            time.sleep(1)
            count += 1
            if count == timeout:
                raise Exception("{0} is not visible".format(locator))

    def wait_for_element_not_visible(self, locator, timeout=None):
        """
        Wait for the element at the specified locator not to be visible anymore.
        """
        timeout = timeout or self.timeout
        count = 0
        while self.is_element_visible(locator):
            time.sleep(1)
            count += 1
            if count == timeout:
                raise Exception("{0} is still visible".format(locator))

    def wait_for_element_not_present(self, locator, timeout=None):
        """ Wait for the element at the specified locator
        not to be present in the DOM. """
        timeout = timeout or self.timeout
        self.driver.implicitly_wait(0)
        try:
            # BUG FIX: find_elements() unpacks the locator itself; the original
            # passed *locator here, which raised TypeError on every call.
            WebDriverWait(self.driver, timeout).until(
                lambda s: len(self.find_elements(locator)) < 1)
            return True
        except TimeoutException:
            Assert.fail(TimeoutException)
        finally:
            self.driver.implicitly_wait(self.default_implicit_wait)

    def wait_for_text_to_match(self, text, locator, max_count=20, delay=0.25):
        """ Waits for element text to match specified text, until certain deadline """
        element = self.driver.find_element(*locator)
        counter = 0
        while element.text != text:
            if counter < max_count:
                time.sleep(delay)
                counter += 1
                # re-fetch to avoid stale element references
                element = self.driver.find_element(*locator)
            else:
                Assert.fail('"' + text + '" text did not match "' + element.text
                            + '" after ' + str(counter * delay) + ' seconds')
                break

    def wait_for_attribute_value(self, attribute, attribute_text, locator, max_count=20, delay=0.25):
        """ Waits for element attribute value to match specified text, until certain deadline """
        element = self.driver.find_element(*locator)
        counter = 0
        while element.get_attribute(attribute) != attribute_text:
            if counter < max_count:
                time.sleep(delay)
                counter += 1
            else:
                Assert.fail('"' + attribute_text + '" text did not match "' + element.get_attribute(attribute)
                            + '" after ' + str(counter * delay) + ' seconds')
                break

    def wait_for_element_ready(self, locator, timeout=None):
        """ Waits until certain element is present and clickable """
        timeout = timeout or self.timeout
        WebDriverWait(self.driver, timeout).until(EC.presence_of_element_located(locator),
                                                  'Element specified by {0} was not present!'.format(locator))
        WebDriverWait(self.driver, timeout).until(EC.element_to_be_clickable(locator),
                                                  'Element specified by {0} did not become clickable!'.format(locator))

    def find_element(self, locator):
        """ Return the element at the specified locator."""
        return self.driver.find_element(*locator)

    def find_elements(self, locator):
        """ Return a list of elements at the specified locator."""
        return self.driver.find_elements(*locator)

    def find_elements_with_text(self, text, locator):
        """ Find elements that have specified text """
        elements = self.driver.find_elements(*locator)
        selected = [item for item in elements if item.text == text]
        # single match is returned unwrapped; otherwise the (possibly empty) list
        return selected[0] if len(selected) == 1 else selected

    def link_destination(self, locator):
        """ Return the href attribute of the element at the specified locator."""
        link = self.driver.find_element(*locator)
        return link.get_attribute('href')

    def image_source(self, locator):
        """ Return the src attribute of the element at the specified locator."""
        link = self.driver.find_element(*locator)
        return link.get_attribute('src')

    def select_dropdown_value(self, select, value):
        """ Set 'select' dropdown value """
        select = Select(select)
        option = [option for option in select.options if option.text == value][0]
        option.click()

    def upload_file(self, file_path, input_field_locator, delay=5):
        """ uploads file through the file input field
        @file_path: path to file (including the file name) relative to test project root
        @input_field_locator: locator of input element with type="file"
        @delay: seconds to wait for file to upload
        """
        file_path = os.path.join(self.shishito_support.project_root, file_path)
        self.driver.find_element(*input_field_locator).send_keys(file_path)
        time.sleep(delay)

    def download_path(self):
        """:return value of variable download_path"""
        return self.shishito_support.get_opt('download_path')

    def wait_for_file_to_be_downloaded(self, file_path: str, timeout: int = None):
        """ Polls every 0.5s until file_path exists; raises after `timeout` seconds. """
        timeout = timeout or self.timeout
        remaining = timeout
        while not os.path.exists(file_path):
            if remaining < 0:
                # BUG FIX: report the configured timeout; the original printed the
                # decremented counter, which is always negative at this point.
                raise FileNotFoundError(f'file not found in {timeout} seconds, make sure you specified download_path')
            time.sleep(0.5)
            remaining -= 0.5

    def execute_js_script(self, script, arguments=None):
        """execute any js command with arguments or without it"""
        script_value = self.driver.execute_script(script, arguments)
        return script_value

    def open_new_tab(self, url):
        """Open new tab using keyboard, for now work only in Firefox and IE, in Chrome use js script to open tab """
        ActionChains(self.driver).send_keys(Keys.CONTROL, "t").perform()
        windows = self.driver.window_handles
        self.driver.switch_to_window(windows[-1])
        self.driver.get(url)

    def switch_new_tab(self):
        """switch to new tab/window"""
        windows = self.driver.window_handles
        self.driver.switch_to_window(windows[-1])

    def switch_first_tab(self):
        """Close current tab, switch to first tab/window"""
        windows = self.driver.window_handles
        self.driver.close()
        self.driver.switch_to_window(windows[0])
class ClickDelay:
    """
    Proxy wrapping a web element: intercepts .click() so that every click is
    followed by a sleep; every other attribute is delegated to the wrapped
    object untouched.  See the click_delay() decorator for intended usage.
    """

    def __init__(self, obj):
        self.obj = obj

    def click_delay(self, delay=2):
        """Click the wrapped element, then pause for `delay` seconds."""
        self.obj.click()
        time.sleep(delay)

    def __getattr__(self, name):
        # Redirect 'click' to the delayed variant; pass everything else through.
        return self.click_delay if name == 'click' else getattr(self.obj, name)
def click_delay(function):
    """
    Function to be used as a decorator that would add sleep after click() calls.

    Usage scenario:
    this function will decorate a PageDefiniton property/method that returns a webpage Element object.
    The Element object is encapsulated by ClickDelay class (see above) which redefines click() method by adding sleep() to it.

    e.g:
        class LoginPage:
            @property
            @click_delay
            def submit_button(self):
                return self.driver.find_element_by_css_selector('form button')

        class TestLogin:
            def test_login_err(self):
                loginPage.submit_button.click()  # -- will click and sleep(2)

    Raises:
        Exception: if the decorated function returns None (nothing to wrap).
    """
    name = function.__name__

    def wrapper(*args, **kwargs):
        obj = function(*args, **kwargs)
        # Idiom fix: identity comparison with None instead of '=='.
        if obj is None:
            raise Exception("Error: " + name + "() returned None")
        return ClickDelay(obj)
    return wrapper
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library handling the model agnostic TensorFlow graph.
Model Agnostic Prediction is the flow to generate FeaturesPredictionLabels
when the training eval model is not available. Currently, this flow supports
converting tf.Example protos to FPL provided an explicit key -> [F,P,L] mapping
and a parsing spec. This represents the minimum amount of information needed
to derive FeaturesPredictionLabels. This feature is useful when a user wants to
run tf.Metrics or postExportMetrics when the training eval model is not
available.
An example set of inputs is:
tf.Example{ features {
feature {
key: "age" value { float_list { value: 29.0 } } }
feature {
key: "language" value { bytes_list { value: "English" } } }
feature {
key: "predictions" value { float_list { value: 1.0 } } }
feature {
key: "labels" value { float_list { value: 2.0 } } }
}
}
feature_spec = {
'age':
tf.FixedLenFeature([], tf.float32),
'language':
tf.VarLenFeature(tf.string),
'predictions':
tf.FixedLenFeature([], tf.float32),
'labels':
tf.FixedLenFeature([], tf.float32)
}
model_agnostic_config = model_agnostic_predict.ModelAgnosticConfig(
label_keys=['labels'],
prediction_keys=['predictions'],
feature_spec=feature_spec)
Then the expected output is:
FPL.features = {'age' : np.array[29.0],
'language': SparseTensorValue('English')}
FPL.predictions = {'predictions' : np.array[1.0]}
FPL.labels = {'labels' : np.array[2.0]}
"""
from typing import Any, Dict, List, NamedTuple # pytype: disable=not-supported-yet
import tensorflow as tf
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import encoding
from tensorflow_model_analysis.eval_saved_model import util
from tensorflow_model_analysis.utils import util as general_util
class ModelAgnosticConfig(
    NamedTuple(  # pylint: disable=invalid-name
        'ModelAgnosticConfig', [
            ('label_keys', List[str]),
            ('prediction_keys', List[str]),
            ('feature_spec', Dict[str, Any]),
        ])):
  """A config spec for running ModelAgnostic evaluation."""

  def __new__(cls, label_keys: List[str], prediction_keys: List[str],
              feature_spec: Dict[str, Any]):
    """Creates a ModelAgnosticConfig instance.

    Creates a config spec for doing ModelAgnostic evaluation (Model evaluation
    without the training eval saved model). This spec defines the basic
    parameters with which to define Features, Predictions, and Labels from
    input Examples.

    Args:
      label_keys: A list of Text, the keys in the input examples which should be
        treated as labels. Currently, this cannot be empty.
      prediction_keys: A list of Text, the keys in the input examples which
        should be treated as predictions. Currently, this cannot be empty.
      feature_spec: In the case only FPL is provided (via Examples), a dict
        defining how to parse the example. This should be of the form "key" ->
        FixedLenFeature or VarLenFeature. This is required to parse input
        examples.

    Returns:
      A ModelAgnosticConfig instance.

    Raises:
      ValueError: If any of the inputs are empty, or if a label/prediction key
        is not covered by the feature_spec.
    """
    if not label_keys:
      raise ValueError('ModelAgnosticConfig must have label keys set.')
    if not prediction_keys:
      raise ValueError('ModelAgnosticConfig must have prediction keys set.')
    if not feature_spec:
      raise ValueError('ModelAgnosticConfig must have feature_spec set.')
    # Every prediction/label key must be parseable, i.e. present in the spec.
    for key in prediction_keys:
      if key not in feature_spec:
        raise ValueError('Prediction key %s not defined in feature_spec.' % key)
    for key in label_keys:
      if key not in feature_spec:
        raise ValueError('Label key %s not defined in feature_spec.' % key)

    return super().__new__(
        cls,
        label_keys=label_keys,
        prediction_keys=prediction_keys,
        feature_spec=feature_spec)
class ModelAgnosticPredict:
  """Abstraction for using a model agnostic evaluation.

  This class is an API interface to interact with the with Model Agnostic graph
  to do evaluation without needing an eval_saved_model. It serves two primary
  functions:
    1) Be able to generate an FPL given FPLs encoded in the tf.Examples input.
    2) Be able to do metric evaluations against the FPLs generated.

  Design Doc: go/model-agnostic-tfma
  """

  def __init__(self, model_agnostic_config: ModelAgnosticConfig):
    self._graph = tf.Graph()
    self._session = tf.compat.v1.Session(graph=self._graph)
    self._config = model_agnostic_config
    try:
      self._create_graph()
    except (RuntimeError, ValueError) as exception:
      # Re-raise with extra context so graph-construction failures are
      # attributable to the agnostic-model setup.
      general_util.reraise_augmented(exception,
                                     'Failed to initialize agnostic model')

  def _create_graph(self):
    """Creates the graph for which we use to generate FPL and metrics.

    Create a pass-through graph which parses the input examples using the
    feature spec.
    """
    with self._graph.as_default():
      serialized_example = tf.compat.v1.placeholder(dtype=tf.string)
      features = tf.io.parse_example(
          serialized=serialized_example, features=self._config.feature_spec)
      # Pre-bind the fetch/feed into a callable for cheap repeated invocation.
      self._get_features_fn = self._session.make_callable(
          fetches=features, feed_list=[serialized_example])

  def get_fpls_from_examples(self, input_example_bytes_list: List[bytes]
                            ) -> List[Any]:
    """Generates FPLs from serialized examples using a ModelAgnostic graph.

    Args:
      input_example_bytes_list: A string representing the serialized tf.example
        protos to be parsed by the graph.

    Returns:
      A list of FeaturesPredictionsLabels generated from the input examples.

    Raises:
      ValueError: If the parsed feature keys disagree on the number of
        examples they contain.
    """
    # Call the graph via the created session callable _get_features_fn and
    # get the tensor representation of the features.
    features = self._get_features_fn(input_example_bytes_list)
    split_features = {}
    num_examples = 0

    # Split the features by the example keys. Also verify all each example
    # key has the same number of total examples.
    for key in features.keys():
      split_features[key] = util.split_tensor_value(features[key])
      if num_examples == 0:
        num_examples = len(split_features[key])
      elif num_examples != len(split_features[key]):
        # BUG FIX: the original applied '%' to `key` alone and passed the
        # length as a second positional argument to ValueError, which raised
        # TypeError instead of the intended message. Format both as a tuple.
        raise ValueError(
            'Different keys unexpectedly had different number of '
            'examples. Key %s unexpectedly had %s elements.' %
            (key, len(split_features[key])))

    # Sort out the examples into individual FPLs: one example -> one FPL.
    # Sort them into Features, Predictions, or Labels according to the input
    # config.
    result = []
    for i in range(num_examples):
      labels = {}
      predictions = {}
      features = {}
      for key in split_features:
        if key in self._config.label_keys:
          labels[key] = {encoding.NODE_SUFFIX: split_features[key][i]}
        if key in self._config.prediction_keys:
          predictions[key] = {encoding.NODE_SUFFIX: split_features[key][i]}
        # Every key (including labels/predictions) is also kept as a feature.
        features[key] = {encoding.NODE_SUFFIX: split_features[key][i]}
      result.append(
          types.FeaturesPredictionsLabels(
              input_ref=i,
              features=features,
              predictions=predictions,
              labels=labels))

    return result
|
|
# -*- coding: utf-8 -*-
import datetime
import json
import requests
from reversion import revisions as reversion
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext as _
from django.utils.timezone import make_aware
from django.db.models import Sum
from django.views.decorators.http import require_http_methods
from anycontest.common import get_contest_info
from common.timezone import get_datetime_with_tz, convert_datetime
from courses.models import Course
from groups.models import Group
from issues.model_issue_status import IssueStatus
from issues.models import Issue
from tasks.models import Task
from pytz import timezone
HEADERS = {'Authorization': 'OAuth ' + settings.CONTEST_OAUTH}
PROBLEMS_API = settings.CONTEST_API_URL + 'problems?locale={lang}&contestId={cont_id}'
def merge_two_dicts(x, y):
    """Return a new dict containing x's entries overridden by y's."""
    merged = dict(x)
    merged.update(y)
    return merged
@login_required
def task_create_page(request, course_id):
    """Render the task creation form for a course; on POST, delegate to task_create_or_edit."""
    course = get_object_or_404(Course, id=course_id)
    if not course.user_is_teacher(request.user):
        return HttpResponseForbidden()
    if request.method == 'POST':
        return task_create_or_edit(request, course)

    course_schools = course.school_set.all()
    seminar_qs = Task.objects.filter(type=Task().TYPE_SEMINAR).filter(course=course)
    regular_qs = Task.objects.filter(~Q(type=Task().TYPE_SEMINAR)).filter(course=course)
    course_has_seminar = course.issue_status_system.statuses.filter(tag=IssueStatus.STATUS_SEMINAR).count()

    available_types = Task.TASK_TYPE_CHOICES
    if not course_has_seminar:
        # Hide the seminar task type when the course's status system lacks a seminar status.
        available_types = filter(lambda choice: not choice[0] == Task.TYPE_SEMINAR, available_types)

    context = {
        'is_create': True,
        'course': course,
        'task_types': available_types,
        'seminar_tasks': seminar_qs,
        'not_seminar_tasks': regular_qs,
        'contest_integrated': course.contest_integrated,
        'rb_integrated': course.rb_integrated,
        'hide_contest_settings': not course.contest_integrated,
        'school': course_schools[0] if course_schools else '',
        'user_location': request.user.profile.location,
        'geo_suggest_url': settings.GEO_SUGGEST_URL,
    }
    return render(request, 'task_create.html', context)
@login_required
def task_import_page(request, course_id):
    """Render the task import form for a course (teachers only)."""
    course = get_object_or_404(Course, id=course_id)
    if not course.user_is_teacher(request.user):
        return HttpResponseForbidden()

    course_schools = course.school_set.all()
    seminar_qs = Task.objects.filter(type=Task().TYPE_SEMINAR).filter(course=course)

    context = {
        'is_create': True,
        'course': course,
        'rb_integrated': course.rb_integrated,
        'school': course_schools[0] if course_schools else '',
        'seminar_tasks': seminar_qs,
        'user_location': request.user.profile.location,
        'geo_suggest_url': settings.GEO_SUGGEST_URL,
    }
    return render(request, 'task_import.html', context)
@login_required
def contest_import_page(request, course_id):
    """Render the contest import form for a course (teachers only)."""
    course = get_object_or_404(Course, id=course_id)
    if not course.user_is_teacher(request.user):
        return HttpResponseForbidden()

    course_schools = course.school_set.all()
    seminar_qs = Task.objects.filter(type=Task().TYPE_SEMINAR).filter(course=course)

    context = {
        'is_create': True,
        'course': course,
        'rb_integrated': course.rb_integrated,
        'seminar_tasks': seminar_qs,
        'school': course_schools[0] if course_schools else '',
        'contest_import': True,
        'user_location': request.user.profile.location,
        'geo_suggest_url': settings.GEO_SUGGEST_URL,
    }
    return render(request, 'contest_import.html', context)
@login_required
def task_edit_page(request, task_id):
    """Render the task editing form; on POST, delegate to task_create_or_edit."""
    task = get_object_or_404(Task, id=task_id)
    if not task.course.user_is_teacher(request.user):
        return HttpResponseForbidden()
    if request.method == 'POST':
        return task_create_or_edit(request, task.course, task_id)

    assigned_groups = task.groups.all()
    if task.type == task.TYPE_SEMINAR:
        # Groups shared between the seminar and any of its child tasks must stay assigned.
        child_groups = [group
                        for child in task.children.all()
                        for group in child.groups.all()]
        groups_required = set(child_groups).intersection(assigned_groups)
    else:
        # Groups with at least one existing issue on this task must stay assigned.
        groups_required = [group for group in assigned_groups
                           if Issue.objects.filter(task=task, student__in=group.students.all()).count()]

    course_schools = task.course.school_set.all()
    seminar_qs = Task.objects.filter(type=Task().TYPE_SEMINAR).filter(course=task.course)
    regular_qs = Task.objects.filter(~Q(type=Task().TYPE_SEMINAR)).filter(course=task.course)

    available_types = task.TASK_TYPE_CHOICES
    if task.type == task.TYPE_SEMINAR:
        # A seminar task cannot be turned into a full task, and vice versa.
        available_types = filter(lambda choice: not choice[0] == task.TYPE_FULL, available_types)
    else:
        available_types = filter(lambda choice: not choice[0] == task.TYPE_SEMINAR, available_types)

    context = {
        'is_create': False,
        'course': task.course,
        'task': task,
        'task_types': available_types,
        'groups_required': groups_required,
        'show_help_msg_task_group': bool(groups_required),
        'seminar_tasks': seminar_qs,
        'not_seminar_tasks': regular_qs,
        'contest_integrated': task.contest_integrated,
        'rb_integrated': task.rb_integrated,
        'hide_contest_settings': not task.contest_integrated or task.type in [task.TYPE_SIMPLE,
                                                                              task.TYPE_MATERIAL],
        'school': course_schools[0] if course_schools else '',
        'user_location': request.user.profile.location,
        'geo_suggest_url': settings.GEO_SUGGEST_URL,
    }
    return render(request, 'task_edit.html', context)
def get_task_params(request, check_score_after_deadline=False):
    """Collect and normalize task attributes from a create/edit POST request.

    Returns a dict with three entries:
      'attrs'    - keyword attributes to be set on a Task instance,
      'children' - list of child task ids (strings) or None,
      'groups'   - queryset of Groups selected in the form.
    """
    user = request.user
    task_title = request.POST.get('task_title', '').strip()
    # short title falls back to the full title when not provided
    task_short_title = request.POST.get('task_short_title', task_title).strip()
    max_score = int(request.POST.get('max_score') or 0)
    max_students = int(request.POST.get('max_students') or 0)
    # NOTE(review): raises KeyError if 'task_group_id[]' is absent — presumably
    # the form always sends at least one group; confirm against the template.
    task_groups = Group.objects.filter(id__in=dict(request.POST)['task_group_id[]'])
    parent_id = request.POST.get('parent_id')
    parent = None
    if parent_id and parent_id != 'null':
        parent = get_object_or_404(Task, id=int(parent_id))
    children = request.POST.getlist('children[]') or None
    # NOTE(review): getlist() returns a list (or None above), so this
    # comparison with the string 'null' can never be true — confirm whether
    # request.POST.get('children[]') was intended here.
    if children == 'null':
        children = None
    task_deadline = request.POST.get('deadline') or None
    if task_deadline:
        # localize the submitted deadline to the user's/geoid's timezone
        task_deadline = get_datetime_with_tz(task_deadline, request.POST.get('geoid', None), user)
    score_after_deadline = True
    if check_score_after_deadline:
        score_after_deadline = 'score_after_deadline' in request.POST
    changed_task = 'changed_task' in request.POST
    task_type = request.POST.get('task_type', Task().TYPE_FULL).strip()
    contest_integrated = False
    contest_id = 0
    problem_id = None
    # Simple/material tasks never carry contest or review-board integration.
    simple_task_types = [Task().TYPE_SIMPLE, Task().TYPE_MATERIAL]
    if 'contest_integrated' in request.POST and task_type not in simple_task_types:
        contest_integrated = True
        contest_id = int(request.POST['contest_id'])
        problem_id = request.POST['problem_id'].strip()
    rb_integrated = 'rb_integrated' in request.POST and task_type not in simple_task_types
    one_file_upload = 'one_file_upload' in request.POST and rb_integrated
    accepted_after_contest_ok = 'accepted_after_contest_ok' in request.POST
    hidden_task = 'hidden_task' in request.POST
    task_text = request.POST.get('task_text', '').strip()
    nb_assignment_name = request.POST.get('nb_assignment_name')
    return {'attrs': {
        'updated_by': user,
        'title': task_title,
        'short_title': task_short_title,
        'score_max': max_score,
        'max_students': max_students,
        'parent_task': parent,
        'deadline_time': task_deadline,
        # users get notified only when the task was marked as changed
        'send_to_users': changed_task,
        'sended_notify': not changed_task,
        'type': task_type,
        'contest_integrated': contest_integrated,
        'contest_id': contest_id,
        'problem_id': problem_id,
        'rb_integrated': rb_integrated,
        'one_file_upload': one_file_upload,
        'accepted_after_contest_ok': accepted_after_contest_ok,
        'score_after_deadline': score_after_deadline,
        'is_hidden': hidden_task,
        'task_text': task_text,
        'nb_assignment_name': nb_assignment_name,
    },
        'children': children,
        'groups': task_groups
    }
def task_create_or_edit(request, course, task_id=None):
    """Create a new task or update an existing one from form POST data.

    When task_id is given, the task is updated (multilingual title/text JSON
    is merged per the user's language); otherwise a new Task is created.
    Also maintains parent/child relations, group assignment, and seminar
    issue marks.  Returns a JSON HttpResponse with the page title and, for a
    newly created task, the redirect URL.
    """
    params = get_task_params(request, course.issue_status_system.has_accepted_after_deadline())
    lang = request.user.profile.language

    changed_score_after_deadline = False
    if task_id:
        task = get_object_or_404(Task, id=task_id)
        task_text = task.is_text_json()
        if task_text:
            # Multilingual task: merge the submitted title/text into the
            # existing per-language JSON blobs for the current language only.
            task_title = json.loads(task.title, strict=False)
            task_title[lang] = params['attrs']['title']
            task_text[lang] = params['attrs']['task_text']
            params['attrs']['title'] = json.dumps(task_title, ensure_ascii=False)
            params['attrs']['task_text'] = json.dumps(task_text, ensure_ascii=False)
        changed_score_after_deadline = task.score_after_deadline != params['attrs']['score_after_deadline']
        params['attrs']['nb_assignment_name'] = task.nb_assignment_name
    else:
        task = Task()
        task.course = course

    for attr_name, attr_value in params['attrs'].items():
        setattr(task, attr_name, attr_value)

    # A child of a hidden parent must be hidden as well.
    if task.parent_task:
        if task.parent_task.is_hidden:
            task.is_hidden = True
    task.save()

    # Propagate visibility down to all existing subtasks.
    for subtask in Task.objects.filter(parent_task=task):
        subtask.is_hidden = task.is_hidden
        subtask.save()

    # Re-link children: selected tasks get this task as parent, previously
    # linked but now unselected tasks are detached.
    children = params['children']
    for course_task in Task.objects.filter(course=course):
        if children and course_task.id in map(int, children):
            course_task.parent_task = task
            course_task.save()
        elif course_task.parent_task == task:
            course_task.parent_task = None
            course_task.save()

    task_groups = params['groups']
    task.groups = task_groups
    task.set_position_in_new_group(task_groups)

    if task_id and changed_score_after_deadline and task.parent_task:
        # The score_after_deadline flag flipped: adjust every student's mark on
        # the parent (seminar) issue by this task's accumulated marks.
        student_ids = User.objects.filter(group__in=task_groups).values_list('id', flat=True)
        for student_id in student_ids:
            parent_issue, created = Issue.objects.get_or_create(task_id=task.parent_task.id, student_id=student_id)
            # BUG FIX: the exclude used the misspelled field 'task__is_hiddne',
            # which makes Django raise FieldError; the parallel seminar query
            # below correctly uses 'task__is_hidden'.
            total_mark = Issue.objects \
                .filter(task=task, student_id=student_id) \
                .exclude(task__is_hidden=True) \
                .exclude(
                    task__score_after_deadline=False,
                    status_field__tag=IssueStatus.STATUS_ACCEPTED_AFTER_DEADLINE) \
                .aggregate(Sum('mark'))['mark__sum'] or 0
            if task.score_after_deadline:
                parent_issue.mark += total_mark
            else:
                parent_issue.mark -= total_mark
            # NOTE(review): set_status_seminar presumably persists the issue —
            # there is no explicit parent_issue.save() here; confirm.
            parent_issue.set_status_seminar()

    if task.type == task.TYPE_SEMINAR:
        # Recompute every student's seminar mark as the sum over visible,
        # deadline-eligible child-task issues.
        student_ids = User.objects.filter(group__in=task_groups).values_list('id', flat=True)
        for student_id in student_ids:
            issue, created = Issue.objects.get_or_create(task_id=task.id, student_id=student_id)
            issue.mark = Issue.objects \
                .filter(task__parent_task=task, student_id=student_id) \
                .exclude(task__is_hidden=True) \
                .exclude(
                    task__score_after_deadline=False,
                    status_field__tag=IssueStatus.STATUS_ACCEPTED_AFTER_DEADLINE) \
                .aggregate(Sum('mark'))['mark__sum'] or 0
            issue.set_status_seminar()

    task.save()

    reversion.set_user(request.user)
    if task_id:
        reversion.set_comment("Edit task")
    else:
        reversion.set_comment("Create task")

    return HttpResponse(json.dumps({'page_title': task.get_title(lang) + ' | ' + course.name + ' | ' + str(course.year),
                                    'redirect_page': '/task/edit/' + str(task.id) if not task_id else None}),
                        content_type="application/json")
@login_required
def get_contest_problems(request):
    """Return the problem list of a Contest for the task-creation UI.

    Expects POST with 'course_id' and 'contest_id'. Responds with JSON of the
    form {'problems': [...], 'is_error': bool, 'error': str}; on success the
    problem list is extended with the contest's own problem entries plus a
    trailing {'deadline': ...} marker element.
    """
    if request.method != 'POST':
        return HttpResponseForbidden()

    lang = request.user.profile.language
    course = get_object_or_404(Course, id=request.POST['course_id'])
    if not course.user_can_edit_course(request.user):
        return HttpResponseForbidden()

    contest_id = request.POST['contest_id']
    problems = []

    # NOTE(review): get_contest_info appears to return an error string when
    # access is denied -- the membership test below relies on that. Confirm.
    got_info, contest_info = get_contest_info(contest_id, lang=lang)
    if "You're not allowed to view this contest." in contest_info:
        payload = {'problems': problems,
                   'is_error': True,
                   'error': _(u"net_prav_na_kontest")}
        return HttpResponse(json.dumps(payload),
                            content_type="application/json")

    response = requests.get(
        PROBLEMS_API.format(lang=lang, cont_id=str(contest_id)),
        headers=HEADERS)
    response_json = response.json()

    is_error = 'error' in response_json
    error = ''
    if is_error:
        if 'IndexOutOfBoundsException' in response_json['error']['name']:
            error = _(u'kontesta_ne_sushestvuet')
        else:
            error = _(u'oshibka_kontesta') + ' ' + response_json['error']['message']
    else:
        problems = response_json['result']['problems']
        if 'endTime' in contest_info:
            # Strip the trailing timezone/millisecond suffix before parsing,
            # then convert from the contest timezone to the user's timezone.
            deadline_msk = datetime.datetime.strptime(
                contest_info['endTime'][:-9], '%Y-%m-%dT%H:%M:%S')
            contest_info_deadline = convert_datetime(
                deadline_msk, settings.CONTEST_TIME_ZONE,
                request.user.profile.time_zone).strftime('%Y,%m,%d,%H,%M')
        else:
            contest_info_deadline = None
        problems = (problems + contest_info['problems']
                    + [{'deadline': contest_info_deadline}])

    return HttpResponse(json.dumps({'problems': problems,
                                    'is_error': is_error,
                                    'error': error}),
                        content_type="application/json")
@login_required
def contest_task_import(request):
    """Import the selected problems of a Contest contest as course tasks.

    Expects POST with 'course_id', 'contest_id_for_task' and
    'contest_problems[]' (the problem ids to import). Creates one Task per
    selected problem, copying title/statement/score/deadline from the
    contest, and returns "OK" on success.
    """
    if not request.method == 'POST':
        return HttpResponseForbidden()
    course_id = int(request.POST['course_id'])
    course = get_object_or_404(Course, id=course_id)
    # Check permissions up front: the original version only checked after
    # fetching contest data, doing useless network I/O for forbidden users.
    if not course.user_can_edit_course(request.user):
        return HttpResponseForbidden()
    contest_id = int(request.POST['contest_id_for_task'])
    tasks = []
    common_params = get_task_params(
        request, course.issue_status_system.has_accepted_after_deadline())
    got_info, contest_info = get_contest_info(contest_id)
    problem_req = requests.get(PROBLEMS_API.format(lang='ru', cont_id=str(contest_id)),
                               headers=HEADERS)
    problem_data = problem_req.json()
    problems = []
    # Bug fix: bind unconditionally. Previously this name was only assigned
    # inside the 'result' branch, so a response without 'result' raised
    # NameError at the score lookup below.
    problems_with_score = {}
    if 'result' in problem_data:
        problems = problem_data['result']['problems']
        problems_with_score = {
            problem['id']: problem.get('score') for problem in problems}
    if got_info:
        if problems:
            # Keep the contest's own problem ordering.
            sort_order = [problem['id'] for problem in problems]
            contest_info['problems'].sort(
                key=lambda x: sort_order.index(x['problemId']))
        contest_problems = dict(request.POST)['contest_problems[]']
        for problem in contest_info['problems']:
            if problem['problemId'] not in contest_problems:
                continue
            current_params = common_params['attrs'].copy()
            current_params.update({
                'title': problem['problemTitle'],
                'task_text': problem['statement'],
                'short_title': current_params['short_title'] or problem['alias'],
                'contest_integrated': True,
                'contest_id': contest_id,
                'problem_id': problem['alias']
            })
            if not current_params['score_max'] and problems_with_score:
                # .get() guards against a problem id missing from the score
                # map (previously a possible KeyError).
                current_params['score_max'] = (
                    problems_with_score.get(problem['problemId']) or 0)
            if not current_params['deadline_time'] and 'endTime' in contest_info:
                current_params['deadline_time'] = make_aware(
                    datetime.datetime.strptime(contest_info['endTime'][:-12], '%Y-%m-%dT%H:%M'),
                    timezone(settings.CONTEST_TIME_ZONE)
                )
            tasks.append(current_params)
    elif "You're not allowed to view this contest." in contest_info:
        return HttpResponse(json.dumps({'is_error': True,
                                        'error': _(u"net_prav_na_kontest")}),
                            content_type="application/json")
    else:
        return HttpResponseForbidden()
    for task in tasks:
        real_task = Task()
        real_task.course = course
        for attr_name, attr_value in task.items():
            setattr(real_task, attr_name, attr_value)
        real_task.save()
        task_groups = common_params['groups']
        real_task.groups = task_groups
        real_task.set_position_in_new_group(task_groups)
    reversion.set_user(request.user)
    reversion.set_comment("Import task")
    return HttpResponse("OK")
def get_task_text_popup(request, task_id):
    """Render the popup window showing the full text of a single task."""
    task = get_object_or_404(Task, id=task_id)
    return render(request, 'task_text_popup.html', {'task': task})
@login_required
@require_http_methods(['GET'])
def validate_nb_assignment_name(request):
    """AJAX validator: is the nbgrader assignment name still free?

    Returns JSON true when no task uses the name yet, false otherwise
    (the jQuery-validation "remote" protocol expects a bare JSON boolean).
    """
    name = request.GET['nb_assignment_name']
    # Removed dead commented-out code and collapsed the redundant branch:
    # the response is simply the negation of the existence check.
    is_free = not Task.objects.filter(nb_assignment_name=name).exists()
    return HttpResponse(json.dumps(is_free))
|
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on skills."""
from __future__ import annotations
import collections
import logging
from core import feconf
from core.constants import constants
from core.domain import caching_services
from core.domain import config_domain
from core.domain import html_cleaner
from core.domain import opportunity_services
from core.domain import role_services
from core.domain import skill_domain
from core.domain import skill_fetchers
from core.domain import state_domain
from core.domain import suggestion_services
from core.domain import taskqueue_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
(skill_models, user_models, question_models, topic_models) = (
models.Registry.import_models([
models.NAMES.skill, models.NAMES.user, models.NAMES.question,
models.NAMES.topic]))
datastore_services = models.Registry.import_datastore_services()
# Repository GET methods.
def get_merged_skill_ids():
    """Returns the skill IDs of skills that have been merged.

    Returns:
        list(str). List of skill IDs of merged skills.
    """
    merged_skills = skill_models.SkillModel.get_merged_skills()
    return [merged_skill.id for merged_skill in merged_skills]
def get_all_skill_summaries():
    """Returns the summaries of all skills present in the datastore.

    Returns:
        list(SkillSummary). The list of summaries of all skills present in
        the datastore.
    """
    return [
        get_skill_summary_from_model(summary_model)
        for summary_model in skill_models.SkillSummaryModel.get_all()]
def _get_skill_summaries_in_batches(
        num_skills_to_fetch, urlsafe_start_cursor, sort_by):
    """Returns the summaries of skills present in the datastore.

    Args:
        num_skills_to_fetch: int. Number of skills to fetch.
        urlsafe_start_cursor: str or None. The cursor to the next page.
        sort_by: str. A string indicating how to sort the result.

    Returns:
        3-tuple(skill_summaries, new_urlsafe_start_cursor, more). Where:
            skill_summaries: list(SkillSummary). The list of skill summaries.
                May contain more than the requested number of items; the
                returned cursor reflects the point actually fetched.
            new_urlsafe_start_cursor: str or None. A query cursor pointing to
                the next batch of results, or None when exhausted.
            more: bool. Whether more results (probably) follow this batch.
    """
    # The caller filters the fetched skills afterwards, which can shrink the
    # batch below the requested size. Over-fetching by a factor of two keeps
    # the number of datastore round trips down.
    fetch_count = 2 * num_skills_to_fetch
    summary_models, next_cursor, more = (
        skill_models.SkillSummaryModel.fetch_page(
            fetch_count, urlsafe_start_cursor, sort_by))
    summaries = [
        get_skill_summary_from_model(summary_model)
        for summary_model in summary_models]
    return summaries, next_cursor, more
def get_filtered_skill_summaries(
        num_skills_to_fetch, status, classroom_name, keywords,
        sort_by, urlsafe_start_cursor):
    """Returns all the skill summary dicts after filtering.

    Args:
        num_skills_to_fetch: int. Number of skills to fetch.
        status: str. The status of the skill.
        classroom_name: str. The classroom_name of the topic to which the
            skill is assigned to.
        keywords: list(str). The keywords to look for in the skill
            description.
        sort_by: str. A string indicating how to sort the result.
        urlsafe_start_cursor: str or None. The cursor to the next page.

    Returns:
        3-tuple(augmented_skill_summaries, new_urlsafe_start_cursor, more).
        Where:
            augmented_skill_summaries: list(AugmentedSkillSummary). The list
                of augmented skill summaries; may contain more than the
                requested number of items.
            new_urlsafe_start_cursor: str or None. A query cursor pointing to
                the next batch of results, or None when exhausted.
            more: bool. Whether more results (probably) follow this batch.
    """
    results = []
    cursor = urlsafe_start_cursor
    more = True
    # Keep paging until we have enough post-filter results or the datastore
    # runs out of pages.
    while more and len(results) < num_skills_to_fetch:
        batch, cursor, more = _get_augmented_skill_summaries_in_batches(
            num_skills_to_fetch, cursor, sort_by)
        batch = _filter_skills_by_status(batch, status)
        batch = _filter_skills_by_classroom(batch, classroom_name)
        batch = _filter_skills_by_keywords(batch, keywords)
        results.extend(batch)
    return results, cursor, more
def _get_augmented_skill_summaries_in_batches(
        num_skills_to_fetch, urlsafe_start_cursor, sort_by):
    """Returns all the Augmented skill summaries after attaching
    topic and classroom.

    Args:
        num_skills_to_fetch: int. Number of skills to fetch.
        urlsafe_start_cursor: str or None. The cursor to the next page.
        sort_by: str. A string indicating how to sort the result.

    Returns:
        3-tuple(augmented_skill_summaries, urlsafe_start_cursor, more). Where:
            augmented_skill_summaries: list(AugmentedSkillSummary). The list
                of skill summaries.
            urlsafe_start_cursor: str or None. A query cursor pointing to the
                next batch of results. If there are no more results, this
                might be None.
            more: bool. If True, there are (probably) more results after this
                batch. If False, there are no further results after this
                batch.
    """
    skill_summaries, new_urlsafe_start_cursor, more = (
        _get_skill_summaries_in_batches(
            num_skills_to_fetch, urlsafe_start_cursor, sort_by))
    # Maps a skill id to the names of the topics (and their classrooms) the
    # skill is assigned to.
    assigned_skill_ids = collections.defaultdict(lambda: {
        'topic_names': [],
        'classroom_names': []
    })
    # Bug fix: the previous version mapped missing topic models to None and
    # then dereferenced them in the loop below (AttributeError). Skip missing
    # models instead of carrying None through.
    all_topics = [
        topic_fetchers.get_topic_from_model(topic_model)
        for topic_model in topic_models.TopicModel.get_all()
        if topic_model is not None]
    # Invert the classroom config: topic id -> classroom name.
    topic_classroom_dict = {}
    all_classrooms_dict = config_domain.CLASSROOM_PAGES_DATA.value
    for classroom in all_classrooms_dict:
        for topic_id in classroom['topic_ids']:
            topic_classroom_dict[topic_id] = classroom['name']
    for topic in all_topics:
        for skill_id in topic.get_all_skill_ids():
            assigned_skill_ids[skill_id]['topic_names'].append(topic.name)
            assigned_skill_ids[skill_id]['classroom_names'].append(
                topic_classroom_dict.get(topic.id, None))
    augmented_skill_summaries = []
    for skill_summary in skill_summaries:
        topic_names = []
        classroom_names = []
        # Membership check first so the defaultdict is not polluted with
        # entries for unassigned skills.
        if skill_summary.id in assigned_skill_ids:
            topic_names = assigned_skill_ids[skill_summary.id]['topic_names']
            classroom_names = (
                assigned_skill_ids[skill_summary.id]['classroom_names'])
        augmented_skill_summaries.append(skill_domain.AugmentedSkillSummary(
            skill_summary.id,
            skill_summary.description,
            skill_summary.language_code,
            skill_summary.version,
            skill_summary.misconception_count,
            skill_summary.worked_examples_count,
            topic_names,
            classroom_names,
            skill_summary.skill_model_created_on,
            skill_summary.skill_model_last_updated))
    return augmented_skill_summaries, new_urlsafe_start_cursor, more
def _filter_skills_by_status(augmented_skill_summaries, status):
    """Returns the skill summary dicts after filtering by status.

    Args:
        augmented_skill_summaries: list(AugmentedSkillSummary). The list
            of augmented skill summaries.
        status: str. The status of the skill.

    Returns:
        list(AugmentedSkillSummary). The list of AugmentedSkillSummaries
        matching the given status.
    """
    if status is None or status == constants.SKILL_STATUS_OPTIONS['ALL']:
        return augmented_skill_summaries
    elif status == constants.SKILL_STATUS_OPTIONS['UNASSIGNED']:
        return [
            summary for summary in augmented_skill_summaries
            if not summary.topic_names]
    elif status == constants.SKILL_STATUS_OPTIONS['ASSIGNED']:
        return [
            summary for summary in augmented_skill_summaries
            if summary.topic_names]
    # NOTE(review): an unrecognized status falls through and yields None,
    # matching the original behavior -- confirm callers never pass one.
def _filter_skills_by_classroom(augmented_skill_summaries, classroom_name):
    """Returns the skill summary dicts after filtering by classroom_name.

    Args:
        augmented_skill_summaries: list(AugmentedSkillSummary).
            The list of augmented skill summaries.
        classroom_name: str. The classroom_name of the topic to which the
            skill is assigned to.

    Returns:
        list(AugmentedSkillSummary). The list of augmented skill summaries
        with the given classroom name.
    """
    # 'All' (or no filter at all) keeps every summary.
    if classroom_name is None or classroom_name == 'All':
        return augmented_skill_summaries
    return [
        summary for summary in augmented_skill_summaries
        if classroom_name in summary.classroom_names]
def _filter_skills_by_keywords(augmented_skill_summaries, keywords):
    """Returns the skill summaries whose descriptions match the keywords.

    Args:
        augmented_skill_summaries: list(AugmentedSkillSummary). The augmented
            skill summaries.
        keywords: list(str). The keywords to match.

    Returns:
        list(AugmentedSkillSummary). The list of augmented skill summaries
        matching the given keywords.
    """
    if not keywords:
        return augmented_skill_summaries
    # Case-insensitive substring match; lower-case the keywords once.
    lowered_keywords = [keyword.lower() for keyword in keywords]
    return [
        summary for summary in augmented_skill_summaries
        if any(
            keyword in summary.description.lower()
            for keyword in lowered_keywords)]
def get_multi_skill_summaries(skill_ids):
    """Returns a list of skill summaries matching the skill IDs provided.

    Args:
        skill_ids: list(str). List of skill IDs to get skill summaries for.

    Returns:
        list(SkillSummary). The list of summaries of skills matching the
        provided IDs. Missing (deleted) skills are silently dropped.
    """
    summary_models = skill_models.SkillSummaryModel.get_multi(skill_ids)
    return [
        get_skill_summary_from_model(summary_model)
        for summary_model in summary_models
        if summary_model is not None]
def get_rubrics_of_skills(skill_ids):
    """Returns a list of rubrics corresponding to given skills.

    Args:
        skill_ids: list(str). The list of skill IDs.

    Returns:
        dict, list(str). The skill rubrics of skills keyed by their
        corresponding ids and the list of deleted skill ids, if any.
    """
    skill_id_to_rubrics_dict = {}
    for skill in skill_fetchers.get_multi_skills(skill_ids, strict=False):
        if skill is None:
            continue
        skill_id_to_rubrics_dict[skill.id] = [
            rubric.to_dict() for rubric in skill.rubrics]
    # Any requested id without a fetched skill is treated as deleted and
    # mapped to None so the result covers every input id.
    deleted_skill_ids = []
    for skill_id in skill_ids:
        if skill_id not in skill_id_to_rubrics_dict:
            skill_id_to_rubrics_dict[skill_id] = None
            deleted_skill_ids.append(skill_id)
    return skill_id_to_rubrics_dict, deleted_skill_ids
def get_descriptions_of_skills(skill_ids):
    """Returns a list of skill descriptions corresponding to the given skills.

    Args:
        skill_ids: list(str). The list of skill ids.

    Returns:
        dict, list(str). The skill descriptions of skills keyed by their
        corresponding ids and the list of deleted skill ids, if any.
    """
    skill_id_to_description_dict = {
        summary.id: summary.description
        for summary in get_multi_skill_summaries(skill_ids)
        if summary is not None}
    # Any requested id with no summary is treated as deleted and mapped to
    # None so the result covers every input id.
    deleted_skill_ids = []
    for skill_id in skill_ids:
        if skill_id not in skill_id_to_description_dict:
            skill_id_to_description_dict[skill_id] = None
            deleted_skill_ids.append(skill_id)
    return skill_id_to_description_dict, deleted_skill_ids
def get_skill_summary_from_model(skill_summary_model):
    """Returns a domain object for an Oppia skill summary given a
    skill summary model.

    Args:
        skill_summary_model: SkillSummaryModel. The skill summary model
            object to get the corresponding domain object for.

    Returns:
        SkillSummary. The domain object corresponding to the given skill
        summary model.
    """
    # Field-for-field copy; SkillSummary's constructor takes the same
    # attributes in this positional order.
    return skill_domain.SkillSummary(
        skill_summary_model.id, skill_summary_model.description,
        skill_summary_model.language_code,
        skill_summary_model.version,
        skill_summary_model.misconception_count,
        skill_summary_model.worked_examples_count,
        skill_summary_model.skill_model_created_on,
        skill_summary_model.skill_model_last_updated
    )
def get_image_filenames_from_skill(skill):
    """Get the image filenames from the skill.

    Args:
        skill: Skill. The skill itself.

    Returns:
        list(str). List containing the name of the image files in skill.
    """
    return html_cleaner.get_image_filenames_from_html_strings(
        skill.get_all_html_content_strings())
def get_all_topic_assignments_for_skill(skill_id):
    """Returns a list containing all the topics to which the given skill is
    assigned along with topic details.

    Args:
        skill_id: str. ID of the skill.

    Returns:
        list(TopicAssignment). A list of TopicAssignment domain objects.
    """
    topic_assignments = []
    for topic in topic_fetchers.get_all_topics():
        if skill_id not in topic.get_all_skill_ids():
            continue
        # First subtopic containing the skill, or None when the skill is
        # uncategorized within this topic.
        subtopic_id = next(
            (subtopic.id for subtopic in topic.subtopics
             if skill_id in subtopic.skill_ids),
            None)
        topic_assignments.append(skill_domain.TopicAssignment(
            topic.id, topic.name, topic.version, subtopic_id))
    return topic_assignments
def replace_skill_id_in_all_topics(user_id, old_skill_id, new_skill_id):
    """Replaces the old skill id with the new one in all the associated topics.

    Args:
        user_id: str. The unique user ID of the user.
        old_skill_id: str. The old skill id.
        new_skill_id: str. The new skill id.

    Raises:
        Exception. The new skill already present.
    """
    all_topics = topic_fetchers.get_all_topics()
    for topic in all_topics:
        change_list = []
        if old_skill_id in topic.get_all_skill_ids():
            # A topic may not contain both skills: merging them would leave
            # duplicate content inside one topic.
            if new_skill_id in topic.get_all_skill_ids():
                raise Exception(
                    'Found topic \'%s\' contains the two skills to be merged. '
                    'Please unassign one of these skills from topic '
                    'and retry this operation.' % topic.name)
            # Case 1: the old skill is uncategorized -- swap it directly.
            if old_skill_id in topic.uncategorized_skill_ids:
                change_list.extend([topic_domain.TopicChange({
                    'cmd': 'remove_uncategorized_skill_id',
                    'uncategorized_skill_id': old_skill_id
                }), topic_domain.TopicChange({
                    'cmd': 'add_uncategorized_skill_id',
                    'new_uncategorized_skill_id': new_skill_id
                })])
            # Case 2: the old skill lives in a subtopic -- remove it there,
            # add the new skill as uncategorized, then move the new skill
            # into the same subtopic. The command order matters: each step
            # must leave the topic in a state the next command accepts.
            for subtopic in topic.subtopics:
                if old_skill_id in subtopic.skill_ids:
                    change_list.extend([topic_domain.TopicChange({
                        'cmd': topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC,
                        'subtopic_id': subtopic.id,
                        'skill_id': old_skill_id
                    }), topic_domain.TopicChange({
                        'cmd': 'remove_uncategorized_skill_id',
                        'uncategorized_skill_id': old_skill_id
                    }), topic_domain.TopicChange({
                        'cmd': 'add_uncategorized_skill_id',
                        'new_uncategorized_skill_id': new_skill_id
                    }), topic_domain.TopicChange({
                        'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
                        'old_subtopic_id': None,
                        'new_subtopic_id': subtopic.id,
                        'skill_id': new_skill_id
                    })])
                    # A skill can only be in one subtopic of a topic.
                    break
            topic_services.update_topic_and_subtopic_pages(
                user_id, topic.id, change_list,
                'Replace skill id %s with skill id %s in the topic' % (
                    old_skill_id, new_skill_id))
def remove_skill_from_all_topics(user_id, skill_id):
    """Deletes the skill with the given id from all the associated topics.

    Args:
        user_id: str. The unique user ID of the user.
        skill_id: str. ID of the skill.
    """
    all_topics = topic_fetchers.get_all_topics()
    for topic in all_topics:
        change_list = []
        if skill_id in topic.get_all_skill_ids():
            # If the skill sits inside a subtopic, first pull it out of the
            # subtopic (which makes it uncategorized)...
            for subtopic in topic.subtopics:
                if skill_id in subtopic.skill_ids:
                    change_list.append(topic_domain.TopicChange({
                        'cmd': 'remove_skill_id_from_subtopic',
                        'subtopic_id': subtopic.id,
                        'skill_id': skill_id
                    }))
                    # A skill can only be in one subtopic of a topic.
                    break
            # ...then remove it from the topic's uncategorized list. This
            # runs unconditionally, so the order of the two commands matters.
            change_list.append(topic_domain.TopicChange({
                'cmd': 'remove_uncategorized_skill_id',
                'uncategorized_skill_id': skill_id
            }))
            # Look up the description for the commit message only.
            skill_name = get_skill_summary_by_id(skill_id).description
            topic_services.update_topic_and_subtopic_pages(
                user_id, topic.id, change_list,
                'Removed skill with id %s and name %s from the topic' % (
                    skill_id, skill_name))
def get_skill_summary_by_id(skill_id, strict=True):
    """Returns a domain object representing a skill summary.

    Args:
        skill_id: str. ID of the skill summary.
        strict: bool. Whether to fail noisily if no skill summary with the
            given id exists in the datastore.

    Returns:
        SkillSummary|None. The skill summary domain object corresponding to
        a skill with the given skill_id, or None if it does not exist and
        strict is False.
    """
    summary_model = skill_models.SkillSummaryModel.get(
        skill_id, strict=strict)
    if not summary_model:
        return None
    return get_skill_summary_from_model(summary_model)
def get_new_skill_id():
    """Generates a fresh, unused skill id.

    Returns:
        str. A new skill id.
    """
    new_id = skill_models.SkillModel.get_new_id('')
    return new_id
def _create_skill(committer_id, skill, commit_message, commit_cmds):
    """Creates a new skill.

    Args:
        committer_id: str. ID of the committer.
        skill: Skill. The skill domain object.
        commit_message: str. A description of changes made to the skill.
        commit_cmds: list(SkillChange). A list of change commands made to the
            given skill.
    """
    # Validate before touching the datastore so invalid skills never persist.
    skill.validate()
    # Serialize each nested domain object into the plain dicts the
    # datastore model stores.
    model = skill_models.SkillModel(
        id=skill.id,
        description=skill.description,
        language_code=skill.language_code,
        misconceptions=[
            misconception.to_dict()
            for misconception in skill.misconceptions
        ],
        rubrics=[
            rubric.to_dict()
            for rubric in skill.rubrics
        ],
        skill_contents=skill.skill_contents.to_dict(),
        next_misconception_id=skill.next_misconception_id,
        misconceptions_schema_version=skill.misconceptions_schema_version,
        rubric_schema_version=skill.rubric_schema_version,
        skill_contents_schema_version=skill.skill_contents_schema_version,
        superseding_skill_id=skill.superseding_skill_id,
        all_questions_merged=skill.all_questions_merged,
        prerequisite_skill_ids=skill.prerequisite_skill_ids
    )
    commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
    model.commit(committer_id, commit_message, commit_cmd_dicts)
    # Keep the in-memory domain object in sync with the version the commit
    # just wrote; summary/opportunity creation must follow the commit.
    skill.version += 1
    create_skill_summary(skill.id)
    opportunity_services.create_skill_opportunity(
        skill.id,
        skill.description)
def does_skill_with_description_exist(description):
    """Checks if a skill with the provided description exists.

    Args:
        description: str. The description for the skill.

    Returns:
        bool. Whether a skill with the given description exists.
    """
    return skill_fetchers.get_skill_by_description(description) is not None
def save_new_skill(committer_id, skill):
    """Saves a new skill.

    Args:
        committer_id: str. ID of the committer.
        skill: Skill. Skill to be saved.
    """
    create_cmd = skill_domain.SkillChange(
        {'cmd': skill_domain.CMD_CREATE_NEW})
    _create_skill(committer_id, skill, 'New skill created.', [create_cmd])
def apply_change_list(skill_id, change_list, committer_id):
    """Applies a changelist to a skill and returns the result.

    Args:
        skill_id: str. ID of the given skill.
        change_list: list(SkillChange). A change list to be applied to the
            given skill.
        committer_id: str. The ID of the committer of this change list.

    Returns:
        Skill. The resulting skill domain object.

    Raises:
        Exception. The user does not have enough rights to edit the
            skill description.
        Exception. Invalid change dict.
    """
    skill = skill_fetchers.get_skill_by_id(skill_id)
    user = user_services.get_user_actions_info(committer_id)
    try:
        for change in change_list:
            # Top-level skill property updates.
            if change.cmd == skill_domain.CMD_UPDATE_SKILL_PROPERTY:
                if (change.property_name ==
                        skill_domain.SKILL_PROPERTY_DESCRIPTION):
                    # Editing the description requires a dedicated role
                    # action, since it is mirrored into the contributor
                    # opportunity below.
                    if role_services.ACTION_EDIT_SKILL_DESCRIPTION not in (
                            user.actions):
                        raise Exception(
                            'The user does not have enough rights to edit the '
                            'skill description.')
                    skill.update_description(change.new_value)
                    (
                        opportunity_services
                        .update_skill_opportunity_skill_description(
                            skill.id, change.new_value))
                elif (change.property_name ==
                        skill_domain.SKILL_PROPERTY_LANGUAGE_CODE):
                    skill.update_language_code(change.new_value)
                elif (change.property_name ==
                        skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID):
                    skill.update_superseding_skill_id(change.new_value)
                elif (change.property_name ==
                        skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED):
                    skill.record_that_all_questions_are_merged(change.new_value)
            # Updates to the skill's contents (explanation/worked examples).
            elif change.cmd == skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY:
                if (change.property_name ==
                        skill_domain.SKILL_CONTENTS_PROPERTY_EXPLANATION):
                    explanation = (
                        state_domain.SubtitledHtml.from_dict(change.new_value))
                    explanation.validate()
                    skill.update_explanation(explanation)
                elif (change.property_name ==
                        skill_domain.SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES):
                    worked_examples_list = [
                        skill_domain.WorkedExample.from_dict(worked_example)
                        for worked_example in change.new_value]
                    skill.update_worked_examples(worked_examples_list)
            # Misconception and prerequisite-skill list edits.
            elif change.cmd == skill_domain.CMD_ADD_SKILL_MISCONCEPTION:
                misconception = skill_domain.Misconception.from_dict(
                    change.new_misconception_dict)
                skill.add_misconception(misconception)
            elif change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION:
                skill.delete_misconception(change.misconception_id)
            elif change.cmd == skill_domain.CMD_ADD_PREREQUISITE_SKILL:
                skill.add_prerequisite_skill(change.skill_id)
            elif change.cmd == skill_domain.CMD_DELETE_PREREQUISITE_SKILL:
                skill.delete_prerequisite_skill(change.skill_id)
            elif change.cmd == skill_domain.CMD_UPDATE_RUBRICS:
                skill.update_rubric(
                    change.difficulty, change.explanations)
            # Updates to a single misconception's fields.
            elif (change.cmd ==
                    skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY):
                if (change.property_name ==
                        skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME):
                    skill.update_misconception_name(
                        change.misconception_id, change.new_value)
                elif (change.property_name ==
                        skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES):
                    skill.update_misconception_notes(
                        change.misconception_id, change.new_value)
                elif (change.property_name ==
                        skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK):
                    skill.update_misconception_feedback(
                        change.misconception_id, change.new_value)
                elif (change.property_name ==
                        skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED):  # pylint: disable=line-too-long
                    skill.update_misconception_must_be_addressed(
                        change.misconception_id, change.new_value)
                else:
                    raise Exception('Invalid change dict.')
            elif (change.cmd in (
                    skill_domain.CMD_MIGRATE_CONTENTS_SCHEMA_TO_LATEST_VERSION,
                    skill_domain.CMD_MIGRATE_MISCONCEPTIONS_SCHEMA_TO_LATEST_VERSION,  # pylint: disable=line-too-long
                    skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION
            )):
                # Loading the skill model from the datastore into a
                # skill domain object automatically converts it to use the
                # latest schema version. As a result, simply resaving the
                # skill is sufficient to apply the schema migration.
                continue
        return skill

    except Exception as e:
        # Log full context (skill id and change list) before re-raising so
        # failures are diagnosable from the server logs.
        logging.error(
            '%s %s %s %s' % (
                e.__class__.__name__, e, skill_id, change_list)
        )
        raise e
def populate_skill_model_fields(skill_model, skill):
    """Populate skill model with the data from skill object.

    Args:
        skill_model: SkillModel. The model to populate.
        skill: Skill. The skill domain object which should be used to
            populate the model.

    Returns:
        SkillModel. Populated model.
    """
    # Straight field-for-field copy from the domain object; nested domain
    # objects (skill contents, misconceptions, rubrics) are serialized to
    # the plain dicts the model stores.
    skill_model.description = skill.description
    skill_model.language_code = skill.language_code
    skill_model.superseding_skill_id = skill.superseding_skill_id
    skill_model.all_questions_merged = skill.all_questions_merged
    skill_model.prerequisite_skill_ids = skill.prerequisite_skill_ids
    skill_model.misconceptions_schema_version = (
        skill.misconceptions_schema_version)
    skill_model.rubric_schema_version = (
        skill.rubric_schema_version)
    skill_model.skill_contents_schema_version = (
        skill.skill_contents_schema_version)
    skill_model.skill_contents = skill.skill_contents.to_dict()
    skill_model.misconceptions = [
        misconception.to_dict() for misconception in skill.misconceptions
    ]
    skill_model.rubrics = [
        rubric.to_dict() for rubric in skill.rubrics
    ]
    skill_model.next_misconception_id = skill.next_misconception_id
    return skill_model
def _save_skill(committer_id, skill, commit_message, change_list):
    """Validates a skill and commits it to persistent storage. If
    successful, increments the version number of the incoming skill domain
    object by 1.

    Args:
        committer_id: str. ID of the given committer.
        skill: Skill. The skill domain object to be saved.
        commit_message: str. The commit message.
        change_list: list(SkillChange). List of changes applied to a skill.

    Raises:
        Exception. The skill model and the incoming skill domain object have
            different version numbers.
        Exception. Received invalid change list.
    """
    if not change_list:
        raise Exception(
            'Unexpected error: received an invalid change list when trying to '
            'save skill %s: %s' % (skill.id, change_list))
    skill.validate()

    # Skill model cannot be None as skill is passed as parameter here and that
    # is only possible if a skill model with that skill id exists.
    skill_model = skill_models.SkillModel.get(
        skill.id, strict=False)

    # Both version checks guard against concurrent edits: the in-memory
    # domain object must be exactly one step behind nothing -- i.e. its
    # version must equal the stored model's version.
    if skill.version > skill_model.version:
        raise Exception(
            'Unexpected error: trying to update version %s of skill '
            'from version %s. Please reload the page and try again.'
            % (skill_model.version, skill.version))

    if skill.version < skill_model.version:
        raise Exception(
            'Trying to update version %s of skill from version %s, '
            'which is too old. Please reload the page and try again.'
            % (skill_model.version, skill.version))

    skill_model = populate_skill_model_fields(skill_model, skill)
    change_dicts = [change.to_dict() for change in change_list]
    skill_model.commit(committer_id, commit_message, change_dicts)
    # Invalidate the cached copy so subsequent reads see the new version.
    caching_services.delete_multi(
        caching_services.CACHE_NAMESPACE_SKILL, None, [skill.id])
    # Mirror the version bump the commit performed on the stored model.
    skill.version += 1
def update_skill(committer_id, skill_id, change_list, commit_message):
    """Updates a skill. Commits changes.

    Args:
        committer_id: str. The id of the user who is performing the update
            action.
        skill_id: str. The skill id.
        change_list: list(SkillChange). These changes are applied in sequence
            to produce the resulting skill.
        commit_message: str or None. A description of changes made to the
            skill. For published skills, this must be present; for
            unpublished skills, it may be equal to None.

    Raises:
        ValueError. No commit message was provided.
    """
    if not commit_message:
        raise ValueError(
            'Expected a commit message, received none.')

    skill = apply_change_list(skill_id, change_list, committer_id)
    _save_skill(committer_id, skill, commit_message, change_list)
    create_skill_summary(skill.id)

    # Collect the deleted misconception ids in a single pass (the previous
    # version scanned change_list twice for the same predicate).
    deleted_skill_misconception_ids = [
        skill.generate_skill_misconception_id(change.misconception_id)
        for change in change_list
        if change.cmd == skill_domain.CMD_DELETE_SKILL_MISCONCEPTION
    ]
    if deleted_skill_misconception_ids:
        # Untagging questions is deferred to a background task queue job.
        taskqueue_services.defer(
            taskqueue_services.FUNCTION_ID_UNTAG_DELETED_MISCONCEPTIONS,
            taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS,
            committer_id, skill_id, skill.description,
            deleted_skill_misconception_ids)
def delete_skill(committer_id, skill_id, force_deletion=False):
    """Deletes the skill with the given skill_id.

    The order of the operations below matters: the datastore models are
    deleted first, then the cache entry, then derived data (summary,
    opportunities, suggestions) keyed on the skill id.

    Args:
        committer_id: str. ID of the committer.
        skill_id: str. ID of the skill to be deleted.
        force_deletion: bool. If true, the skill and its history are fully
            deleted and are unrecoverable. Otherwise, the skill and all
            its history are marked as deleted, but the corresponding models are
            still retained in the datastore. This last option is the preferred
            one.
    """
    # An empty commit message is passed since deletions carry no
    # user-supplied message.
    skill_models.SkillModel.delete_multi(
        [skill_id], committer_id, '', force_deletion=force_deletion)
    # This must come after the skill is retrieved. Otherwise the memcache
    # key will be reinstated.
    caching_services.delete_multi(
        caching_services.CACHE_NAMESPACE_SKILL, None, [skill_id])
    # Delete the summary of the skill (regardless of whether
    # force_deletion is True or not).
    delete_skill_summary(skill_id)
    # Opportunities and pending question suggestions tied to this skill are
    # no longer actionable once it is gone.
    opportunity_services.delete_skill_opportunity(skill_id)
    suggestion_services.auto_reject_question_suggestions_for_skill_id(
        skill_id)
def delete_skill_summary(skill_id):
    """Deletes the skill summary model for the given skill, if present.

    Args:
        skill_id: str. ID of the skill whose skill summary is to
            be deleted.
    """
    summary_model = skill_models.SkillSummaryModel.get(skill_id, False)
    if summary_model is not None:
        summary_model.delete()
def compute_summary_of_skill(skill):
    """Builds a SkillSummary domain object for the given Skill.

    Args:
        skill: Skill. The skill object, for which the summary is to be
            computed.

    Returns:
        SkillSummary. The computed summary for the given skill.
    """
    misconception_count = len(skill.misconceptions)
    worked_examples_count = len(skill.skill_contents.worked_examples)
    return skill_domain.SkillSummary(
        skill.id, skill.description, skill.language_code, skill.version,
        misconception_count, worked_examples_count,
        skill.created_on, skill.last_updated)
def create_skill_summary(skill_id):
    """Computes and stores the summary of the given skill.

    Args:
        skill_id: str. ID of the skill.
    """
    skill = skill_fetchers.get_skill_by_id(skill_id)
    save_skill_summary(compute_summary_of_skill(skill))
def populate_skill_summary_model_fields(skill_summary_model, skill_summary):
"""Populate skill summary model with the data from skill summary object.
Args:
skill_summary_model: SkillSummaryModel. The model to populate.
skill_summary: SkillSummary. The skill summary domain object which
should be used to populate the model.
Returns:
SkillSummaryModel. Populated model.
"""
skill_summary_dict = {
'description': skill_summary.description,
'language_code': skill_summary.language_code,
'version': skill_summary.version,
'misconception_count': skill_summary.misconception_count,
'worked_examples_count': skill_summary.worked_examples_count,
'skill_model_last_updated': skill_summary.skill_model_last_updated,
'skill_model_created_on': skill_summary.skill_model_created_on
}
if skill_summary_model is not None:
skill_summary_model.populate(**skill_summary_dict)
else:
skill_summary_dict['id'] = skill_summary.id
skill_summary_model = skill_models.SkillSummaryModel(
**skill_summary_dict)
return skill_summary_model
def save_skill_summary(skill_summary):
    """Persists a skill summary domain object as a SkillSummaryModel.

    Args:
        skill_summary: SkillSummaryModel. The skill summary object to be
            saved in the datastore.
    """
    existing_model = skill_models.SkillSummaryModel.get_by_id(
        skill_summary.id)
    summary_model = populate_skill_summary_model_fields(
        existing_model, skill_summary)
    summary_model.update_timestamps()
    summary_model.put()
def create_user_skill_mastery(user_id, skill_id, degree_of_mastery):
    """Creates and stores the skill mastery of a user.

    Args:
        user_id: str. The user ID of the user for whom to create the model.
        skill_id: str. The unique id of the skill.
        degree_of_mastery: float. The degree of mastery of user in the
            skill.
    """
    save_user_skill_mastery(
        skill_domain.UserSkillMastery(user_id, skill_id, degree_of_mastery))
def save_user_skill_mastery(user_skill_mastery):
    """Stores the skill mastery of a user in the datastore.

    Args:
        user_skill_mastery: UserSkillMastery. The user skill mastery domain
            object of a user.
    """
    # The model id is deterministically derived from (user_id, skill_id).
    model_id = user_models.UserSkillMasteryModel.construct_model_id(
        user_skill_mastery.user_id, user_skill_mastery.skill_id)
    mastery_model = user_models.UserSkillMasteryModel(
        id=model_id,
        user_id=user_skill_mastery.user_id,
        skill_id=user_skill_mastery.skill_id,
        degree_of_mastery=user_skill_mastery.degree_of_mastery)
    mastery_model.update_timestamps()
    mastery_model.put()
def create_multi_user_skill_mastery(user_id, degrees_of_mastery):
    """Creates the mastery of a user in multiple skills in one batch.

    Args:
        user_id: str. The user ID of the user.
        degrees_of_mastery: dict(str, float). The keys are the requested
            skill IDs. The values are the corresponding mastery degree of
            the user.
    """
    mastery_models = [
        user_models.UserSkillMasteryModel(
            id=user_models.UserSkillMasteryModel.construct_model_id(
                user_id, skill_id),
            user_id=user_id, skill_id=skill_id,
            degree_of_mastery=degree)
        for skill_id, degree in degrees_of_mastery.items()
    ]
    user_models.UserSkillMasteryModel.update_timestamps_multi(mastery_models)
    user_models.UserSkillMasteryModel.put_multi(mastery_models)
def get_user_skill_mastery(user_id, skill_id):
    """Fetches the mastery of a user in a particular skill.

    Args:
        user_id: str. The user ID of the user.
        skill_id: str. Unique id of the skill for which mastery degree is
            requested.

    Returns:
        float or None. Mastery degree of the user for the requested skill,
        or None if UserSkillMasteryModel does not exist for the skill.
    """
    mastery_model = user_models.UserSkillMasteryModel.get(
        user_models.UserSkillMasteryModel.construct_model_id(
            user_id, skill_id),
        strict=False)
    return mastery_model.degree_of_mastery if mastery_model else None
def get_multi_user_skill_mastery(user_id, skill_ids):
    """Fetches the mastery of a user in multiple skills.

    Args:
        user_id: str. The user ID of the user.
        skill_ids: list(str). Skill IDs of the skills for which mastery
            degrees are requested.

    Returns:
        dict(str, float|None). The keys are the requested skill IDs. The
        values are the corresponding mastery degree of the user or None if
        UserSkillMasteryModel does not exist for the skill.
    """
    model_ids = [
        user_models.UserSkillMasteryModel.construct_model_id(
            user_id, skill_id)
        for skill_id in skill_ids
    ]
    mastery_models = user_models.UserSkillMasteryModel.get_multi(model_ids)
    # get_multi preserves order, so models line up with skill_ids.
    return {
        skill_id: (model.degree_of_mastery if model is not None else None)
        for skill_id, model in zip(skill_ids, mastery_models)
    }
def skill_has_associated_questions(skill_id):
    """Returns whether or not any question has this skill attached.

    Args:
        skill_id: str. The skill ID of the user.

    Returns:
        bool. Whether any question has this skill attached.
    """
    question_ids = (
        question_models.QuestionSkillLinkModel
        .get_all_question_ids_linked_to_skill_id(skill_id))
    # Truthiness is the idiomatic emptiness test; it replaces the redundant
    # `len(question_ids) > 0` comparison.
    return bool(question_ids)
def get_sorted_skill_ids(degrees_of_mastery):
    """Sorts skill ids by ascending mastery, None-mastery skills first.

    Args:
        degrees_of_mastery: dict(str, float|None). Dict mapping skill ids
            to mastery level. The mastery level can be float or None.

    Returns:
        list. At most feconf.MAX_NUMBER_OF_SKILL_IDS skill ids, those with
        no recorded mastery first, then the rest in ascending mastery order.
    """
    with_mastery = {
        skill_id: degree
        for skill_id, degree in degrees_of_mastery.items()
        if degree is not None
    }
    without_mastery = [
        skill_id for skill_id, degree in degrees_of_mastery.items()
        if degree is None
    ]
    ordered_ids = without_mastery + sorted(
        with_mastery, key=with_mastery.get)
    return ordered_ids[:feconf.MAX_NUMBER_OF_SKILL_IDS]
def filter_skills_by_mastery(user_id, skill_ids):
    """Given a list of skill_ids, it returns a list of
    feconf.MAX_NUMBER_OF_SKILL_IDS skill_ids in which the user has
    the least mastery. (Please note that None mastery values sort before
    any float, so skills with no recorded mastery are returned first.)

    Args:
        user_id: str. The unique user ID of the user.
        skill_ids: list(str). The skill_ids that are to be filtered.

    Returns:
        list(str). A list of the filtered skill_ids, in the same relative
        order as they were received.
    """
    degrees_of_mastery = get_multi_user_skill_mastery(user_id, skill_ids)
    # Use a set for O(1) membership tests in the loop below; the previous
    # list membership made this quadratic in len(skill_ids).
    filtered_skill_ids = set(get_sorted_skill_ids(degrees_of_mastery))
    # Arranges the skill_ids in the order as they were received.
    return [
        skill_id for skill_id in skill_ids
        if skill_id in filtered_skill_ids]
|
|
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str, super, zip
import configparser
import collections
import numpy as np
import scipy.stats
import time
import timeit
import json
import sys
from lib.langmod_tools import *
from lib.seqs import *
from lib.dates import *
from lib.files import *
from architecture_list import *
# Flags selecting which measurement sections below are run.
calc_prb = True
calc_gen = True
calc_div = True
calc_ret = True
# All directory paths and the number of experiment repetitions come from
# config.ini.
cfg = configparser.ConfigParser()
cfg.read('config.ini')
raw_input_data_dir = cfg.get('DIRS', 'RawInputDataDir')
mscoco_dir = cfg.get('DIRS', 'MSCOCODir')
processed_input_data_dir = cfg.get('DIRS', 'ProcessedInputDataDir')
prb_generated_data_dir = cfg.get('DIRS', 'PrbGeneratedDataDir')
gen_generated_data_dir = cfg.get('DIRS', 'GenGeneratedDataDir')
ret_generated_data_dir = cfg.get('DIRS', 'RetGeneratedDataDir')
results_dir = cfg.get('DIRS', 'ResultsDir')
num_runs = cfg.getint('TRAIN', 'NumRuns')
create_dir(results_dir)
# The MSCOCO evaluation toolkit lives inside the MSCOCO directory, so it is
# only importable after extending sys.path.
sys.path.append(mscoco_dir)
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
################################################################
print('============================================')
print('Loading processed data...')
print()
sys.stdout.flush()
# One human caption per line, tokens separated by single spaces.
with open(processed_input_data_dir+'/test_humancaptions.txt', 'r', encoding='utf-8') as f:
	test_humancaptions = [ caption.split(' ') for caption in f.read().strip().split('\n') ]
# Vocabulary size, used later to compute the fraction of known words used.
with open(processed_input_data_dir+'/vocabulary.txt', 'r', encoding='utf-8') as f:
	num_known_tokens = len(f.read().split('\n'))
################################################################
print('============================================')
print('Calculating results...')
print()
sys.stdout.flush()
################################################################
# Probability measures: per-caption probability and perplexity of the human
# test captions under each trained model.
if calc_prb:
	print('============================================')
	print('Probability measures')
	print()
	sys.stdout.flush()
	with open(results_dir+'/results_prb.txt', 'w', encoding='utf-8') as f_out:
		print('architecture', 'rnn', 'run', 'pplx_geomean', 'pplx_artmean', 'pplx_median', 'prob_geomean', 'prob_artmean', 'prob_median', sep='\t', file=f_out)
		for run in range(1, num_runs+1):
			for (architecture_name, rnn_name) in testable_architectures:
				# Skip model/run combinations whose data was never generated.
				if not file_exists(prb_generated_data_dir+'/{}_{}_{}.txt'.format(rnn_name, architecture_name, run)):
					print('SKIPPING (no data):', 'run {0:>2} - {1:<8} {2:<30}'.format(run, rnn_name, architecture_name))
					continue
				run_start_time = timeit.default_timer()
				print('run {0:>2} - {1:<8} {2:<30} | {3}'.format(run, rnn_name, architecture_name, time.strftime("%Y/%m/%d %H:%M:%S")))
				sys.stdout.flush()
				# Each input line holds one caption's per-token probabilities,
				# tab-separated.
				with open(prb_generated_data_dir+'/{}_{}_{}.txt'.format(rnn_name, architecture_name, run), 'r', encoding='utf-8') as f:
					caption_probs = list()
					caption_pplxs = list()
					for line in f:
						token_probs = [ float(x) for x in line.strip().split('\t') ]
						caption_prob = sequence_probability(token_probs)
						caption_probs.append(caption_prob)
						caption_pplx = sequence_perplexity(token_probs)
						caption_pplxs.append(caption_pplx)
				# Aggregate the per-caption values three ways (geometric mean,
				# arithmetic mean, median).
				prob_artmean = np.mean(caption_probs, dtype=np.float64)
				prob_geomean = scipy.stats.gmean(caption_probs, dtype=np.float64)
				prob_median = np.median(caption_probs)
				pplx_artmean = np.mean(caption_pplxs, dtype=np.float64)
				pplx_geomean = scipy.stats.gmean(caption_pplxs, dtype=np.float64)
				pplx_median = np.median(caption_pplxs)
				prb_result = [ pplx_geomean, pplx_artmean, pplx_median, prob_geomean, prob_artmean, prob_median ]
				print(*[ str(x) for x in [architecture_name, rnn_name, run]+prb_result ], sep='\t', file=f_out)
				run_end_time = timeit.default_timer()
				print(format_duration(round(run_end_time-run_start_time)))
				print()
				sys.stdout.flush()
################################################################
# Generation measures: standard MSCOCO caption metrics on each model's
# generated captions.
if calc_gen:
	print('============================================')
	print('Generation measures')
	print()
	sys.stdout.flush()
	with open(results_dir+'/results_gen.txt', 'w', encoding='utf-8') as f_out:
		# NOTE(review): the header names 11 columns (including
		# 'wrong_word_pos') but each data row below writes only 10 values --
		# confirm whether the last column was dropped intentionally.
		print('architecture', 'rnn', 'run', 'cider', 'bleu1', 'bleu2', 'bleu3', 'bleu4', 'meteor', 'rougel', 'wrong_word_pos', sep='\t', file=f_out)
		for run in range(1, num_runs+1):
			for (architecture_name, rnn_name) in testable_architectures:
				if not file_exists(gen_generated_data_dir+'/{}_{}_{}.txt'.format(rnn_name, architecture_name, run)):
					print('SKIPPING (no data):', 'run {0:>2} - {1:<8} {2:<30}'.format(run, rnn_name, architecture_name))
					continue
				run_start_time = timeit.default_timer()
				print('run {0:>2} - {1:<8} {2:<30} | {3}'.format(run, rnn_name, architecture_name, time.strftime("%Y/%m/%d %H:%M:%S")))
				sys.stdout.flush()
				with open(gen_generated_data_dir+'/{}_{}_{}.txt'.format(rnn_name, architecture_name, run), 'r', encoding='utf-8') as f:
					generated_captions = f.read().strip().split('\n')
				# The COCO evaluation toolkit reads results from a JSON file,
				# so dump the generated captions in the expected format first.
				with open(mscoco_dir+'/results/generated_captions.json', 'w', encoding='utf-8') as f_out_tmp:
					print(str(json.dumps([
							{
								'image_id': image_id,
								'caption': caption
							}
							for (image_id, caption) in enumerate(generated_captions)
						])), file=f_out_tmp)
				coco = COCO(mscoco_dir+'/annotations/captions.json')
				cocoRes = coco.loadRes(mscoco_dir+'/results/generated_captions.json')
				cocoEval = COCOEvalCap(coco, cocoRes)
				cocoEval.evaluate()
				gen_result = [ cocoEval.eval[metric] for metric in [ 'CIDEr', 'Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'METEOR', 'ROUGE_L' ] ]
				print(*[ str(x) for x in [architecture_name, rnn_name, run]+gen_result ], sep='\t', file=f_out)
				run_end_time = timeit.default_timer()
				print(format_duration(round(run_end_time-run_start_time)))
				print()
				sys.stdout.flush()
################################################################
# Diversity measures: vocabulary coverage and n-gram entropy of the captions
# each model generated, with human captions as reference rows.
if calc_div:
	print('============================================')
	print('Diversity measures')
	print()
	sys.stdout.flush()
	with open(results_dir+'/results_div.txt', 'w', encoding='utf-8') as f_out:
		print('architecture', 'rnn', 'run', 'known_vocab_used', 'unigram_entropy', 'bigram_entropy', sep='\t', file=f_out)
		for run in range(1, num_runs+1):
			for (architecture_name, rnn_name) in testable_architectures:
				if not file_exists(gen_generated_data_dir+'/{}_{}_{}.txt'.format(rnn_name, architecture_name, run)):
					print('SKIPPING (no data):', 'run {0:>2} - {1:<8} {2:<30}'.format(run, rnn_name, architecture_name))
					continue
				run_start_time = timeit.default_timer()
				print('run {0:>2} - {1:<8} {2:<30} | {3}'.format(run, rnn_name, architecture_name, time.strftime("%Y/%m/%d %H:%M:%S")))
				sys.stdout.flush()
				# Count unigram and bigram frequencies over all generated
				# captions of this model/run.
				with open(gen_generated_data_dir+'/{}_{}_{}.txt'.format(rnn_name, architecture_name, run), 'r', encoding='utf-8') as f:
					unigram_freqs = collections.defaultdict(lambda:0)
					bigram_freqs = collections.defaultdict(lambda:0)
					for line in f:
						caption = line.strip().split(' ')
						for unigram in caption:
							unigram_freqs[unigram] += 1
						for bigram in get_bigrams(caption):
							bigram_freqs[bigram] += 1
				# Fraction of the known vocabulary used, plus Shannon entropy
				# (bits) of the unigram and bigram distributions.
				known_vocab_used = len(unigram_freqs) / num_known_tokens
				unigram_freqs = np.array(list(unigram_freqs.values()))
				unigram_probs = unigram_freqs/unigram_freqs.sum()
				unigram_entropy = -(unigram_probs*np.log2(unigram_probs)).sum()
				bigram_freqs = np.array(list(bigram_freqs.values()))
				bigram_probs = bigram_freqs/bigram_freqs.sum()
				bigram_entropy = -(bigram_probs*np.log2(bigram_probs)).sum()
				div_result = [ known_vocab_used, unigram_entropy, bigram_entropy ]
				print(*[ str(x) for x in [architecture_name, rnn_name, run]+div_result ], sep='\t', file=f_out)
				run_end_time = timeit.default_timer()
				print(format_duration(round(run_end_time-run_start_time)))
				print()
				sys.stdout.flush()
		# Human reference rows: 'human-one' keeps one of the (presumably
		# five) captions per image via the [::skip] stride, 'human-all'
		# keeps all of them.
		for (name, skip) in [ ('human-one', 5), ('human-all', 1) ]:
			run_start_time = timeit.default_timer()
			print('run {0:>2} - {1:<8} {2:<30} | {3}'.format(1, '', name, time.strftime("%Y/%m/%d %H:%M:%S")))
			sys.stdout.flush()
			unigram_freqs = collections.defaultdict(lambda:0)
			bigram_freqs = collections.defaultdict(lambda:0)
			for caption in test_humancaptions[::skip]:
				for unigram in caption:
					unigram_freqs[unigram] += 1
				for bigram in get_bigrams(caption):
					bigram_freqs[bigram] += 1
			known_vocab_used = len(unigram_freqs) / num_known_tokens
			unigram_freqs = np.array(list(unigram_freqs.values()))
			unigram_probs = unigram_freqs/unigram_freqs.sum()
			unigram_entropy = -(unigram_probs*np.log2(unigram_probs)).sum()
			bigram_freqs = np.array(list(bigram_freqs.values()))
			bigram_probs = bigram_freqs/bigram_freqs.sum()
			bigram_entropy = -(bigram_probs*np.log2(bigram_probs)).sum()
			div_result = [ known_vocab_used, unigram_entropy, bigram_entropy ]
			print(*[ str(x) for x in [name, '', 1]+div_result ], sep='\t', file=f_out)
			run_end_time = timeit.default_timer()
			print(format_duration(round(run_end_time-run_start_time)))
			print()
			sys.stdout.flush()
################################################################
# Retrieval measures: recall@k for matching each caption back to its image
# using the model's caption-image scores.
if calc_ret:
	print('============================================')
	print('Retrieval measures')
	print()
	sys.stdout.flush()
	with open(results_dir+'/results_ret.txt', 'w', encoding='utf-8') as f_out:
		print('architecture', 'rnn', 'run', 'R@1', 'R@5', 'R@10', sep='\t', file=f_out)
		for run in range(1, num_runs+1):
			for (architecture_name, rnn_name) in testable_architectures:
				# A pure language model has no image conditioning, so
				# retrieval is undefined for it.
				if architecture_name == 'langmodel':
					continue
				if not file_exists(ret_generated_data_dir+'/{}_{}_{}.npy'.format(rnn_name, architecture_name, run)):
					print('SKIPPING (no data):', 'run {0:>2} - {1:<8} {2:<30}'.format(run, rnn_name, architecture_name))
					continue
				run_start_time = timeit.default_timer()
				print('run {0:>2} - {1:<8} {2:<30} | {3}'.format(run, rnn_name, architecture_name, time.strftime("%Y/%m/%d %H:%M:%S")))
				sys.stdout.flush()
				# Matrix of scores: one row per caption, one column per image.
				with open(ret_generated_data_dir+'/{}_{}_{}.npy'.format(rnn_name, architecture_name, run), 'rb') as f:
					captionimages_probs = np.load(f)
				(r1, r5, r10) = (0, 0, 0)
				num_captions = 0
				for (row_num, captionimage_probs) in enumerate(captionimages_probs):
					# Captions come in groups of 5 per image, so the correct
					# image index for this caption row is row_num // 5.
					target_index = row_num//5
					# Rank images by descending score and find where the true
					# image landed.
					ordered = sorted(range(len(captionimage_probs)), key=lambda i:captionimage_probs[i], reverse=True)
					target_found = ordered.index(target_index)
					if target_found < 1:
						r1 += 1
						r5 += 1
						r10 += 1
					elif target_found < 5:
						r5 += 1
						r10 += 1
					elif target_found < 10:
						r10 += 1
					num_captions += 1
				ret_result = [ r1/num_captions, r5/num_captions, r10/num_captions ]
				print(*[ str(x) for x in [architecture_name, rnn_name, run]+ret_result ], sep='\t', file=f_out)
				run_end_time = timeit.default_timer()
				print(format_duration(round(run_end_time-run_start_time)))
				print()
				sys.stdout.flush()
print(' '*50, time.strftime('%Y/%m/%d %H:%M:%S'))
sys.stdout.flush()
|
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes.
Email: danaukes<at>seas.harvard.edu.
Please see LICENSE.txt for full license.
"""
import popupcad
import shapely.geometry
import numpy
import PySide.QtCore as qc
import PySide.QtGui as qg
import scipy.linalg
try: #Hack to ensure Python 2 & 3 support
import itertools.izip as zip
except ImportError:
pass
import shapely.geometry as sg
from popupcad.filetypes.genericshapebase import GenericShapeBase
class GenericLine(GenericShapeBase):
    """An open line segment shape."""

    @classmethod
    def condition_loop(cls, loop):
        # Lines keep every vertex: neither loop nor forward redundancy is
        # removed.
        return cls._condition_loop(
            loop,
            remove_loop_reduncancy=False,
            remove_forward_redundancy=False)

    def outputinteractive(self):
        from popupcad.graphics2d.interactive import InteractiveLine
        return InteractiveLine(self)

    def outputstatic(self, *args, **kwargs):
        from popupcad.graphics2d.static import StaticLine
        return StaticLine(self, *args, **kwargs)

    def gen_painterpath(self, exterior, interiors):
        # Only the exterior matters for a line; interiors are ignored.
        painter_path = qg.QPainterPath()
        painter_path.addPolygon(self.generateQPolygon(exterior))
        return painter_path

    def to_shapely(self):
        scaled_points = self.exteriorpoints(
            scaling=popupcad.csg_processing_scaling)
        return sg.LineString(scaled_points)

    def segments(self):
        return self.segments_open()

    def output_dxf(self, model_space, layer=None):
        dxfattribs = {} if layer is None else {'layer': layer}
        model_space.add_lwpolyline(
            self.exteriorpoints(), dxfattribs=dxfattribs)
class GenericPolyline(GenericShapeBase):
    """An open chain of connected line segments."""

    @classmethod
    def condition_loop(cls, loop):
        return cls._condition_loop(loop, remove_loop_reduncancy=False)

    @classmethod
    def remove_redundant_points(cls, points, scaling=1):
        # Open paths must not be simplified as if they were closed loops.
        return GenericShapeBase.remove_redundant_points(
            points, scaling, loop_test=False)

    def outputinteractive(self):
        from popupcad.graphics2d.interactive import InteractivePath
        return InteractivePath(self)

    def outputstatic(self, *args, **kwargs):
        from popupcad.graphics2d.static import StaticPath
        return StaticPath(self, *args, **kwargs)

    def gen_painterpath(self, exterior, interiors):
        painter_path = qg.QPainterPath()
        painter_path.addPolygon(self.generateQPolygon(exterior))
        return painter_path

    def to_shapely(self):
        scaled_points = self.exteriorpoints(
            scaling=popupcad.csg_processing_scaling)
        try:
            return sg.LineString(scaled_points)
        except ValueError as e:
            # A degenerate (fewer than two point) path maps to an empty
            # LineString instead of propagating the error.
            if e.args[0] == 'LineStrings must have at least 2 coordinate tuples':
                return sg.LineString()
            raise

    def segments(self):
        return self.segments_open()

    def fill(self):
        """Return closed polygons built from this path's loops."""
        filled_polygons = []
        for loop in [self.get_exterior()] + self.get_interiors():
            copied_loop = [vertex.copy(identical=False) for vertex in loop]
            filled_polygons.append(
                GenericPoly(copied_loop, [], self.is_construction()))
        return filled_polygons

    def output_dxf(self, model_space, layer=None):
        dxfattribs = {} if layer is None else {'layer': layer}
        model_space.add_lwpolyline(
            self.exteriorpoints(), dxfattribs=dxfattribs)

    def addvertex_exterior(self, vertex, special=False):
        self.addvertex_exterior_special(vertex, special)
class GenericPoly(GenericShapeBase):
    """A closed polygon, possibly containing interior holes, with helpers
    for triangulation, mass properties and export."""

    def outputinteractive(self):
        from popupcad.graphics2d.interactive import InteractivePoly
        return InteractivePoly(self)

    def outputstatic(self, *args, **kwargs):
        from popupcad.graphics2d.static import StaticPoly
        return StaticPoly(self, *args, **kwargs)

    def gen_painterpath(self, exterior, interiors):
        # Outer boundary and each hole become separate closed subpaths so
        # Qt's fill rule renders the holes as cut-outs.
        path = qg.QPainterPath()
        for item in [exterior] + interiors:
            path.addPolygon(self.generateQPolygon(item))
            path.closeSubpath()
        return path

    def triangles3(self):
        """Triangulate the polygon (holes included) with pypoly2tri.

        Returns:
            list. Triangles as [[x, y], [x, y], [x, y]] vertex triples in
            unscaled coordinates.
        """
        from pypoly2tri.shapes import Point
        from pypoly2tri.cdt import CDT
        # if you have poly2tri installed
        # from p2t import Point
        # from p2t import CDT
        new = self.copy(identical=False)
        new._condition(round_vertices=False,
                       test_rounded_vertices=True,
                       remove_forward_redundancy=True,
                       remove_loop_reduncancy=True,
                       terminate_with_start=False,
                       decimal_places=popupcad.geometry_round_value)
        exterior = [Point(*point) for point in new.exteriorpoints(scaling=popupcad.triangulation_scaling)]
        interiors = [[Point(*point) for point in interior]
                     for interior in new.interiorpoints(scaling=popupcad.triangulation_scaling)]
        cdt = CDT(exterior)
        for interior in interiors:
            cdt.AddHole(interior)
        # pypoly2tri code
        cdt.Triangulate()
        tris = [tri.toList() for tri in cdt.GetTriangles()]
        # poly2tri code
        # triangles = cdt.triangulate()
        # tris = [[(tri.a.x, tri.a.y), (tri.b.x, tri.b.y), (tri.c.x, tri.c.y)]
        #         for tri in triangles]
        # Undo the integer triangulation scaling.
        tris = (numpy.array(tris) / popupcad.triangulation_scaling).tolist()
        return tris

    def to_shapely(self):
        exterior_p = self.exteriorpoints(scaling=popupcad.csg_processing_scaling)
        interiors_p = self.interiorpoints(scaling=popupcad.csg_processing_scaling)
        obj = sg.Polygon(exterior_p, interiors_p)
        return obj

    def addvertex_exterior(self, vertex, special=False):
        self.addvertex_exterior_special(vertex, special)

    def segments(self):
        return self.segments_closed()

    def mass_properties(self, density, z_lower, z_upper, length_scaling=1):
        """Compute properties of this polygon extruded from z_lower to
        z_upper.

        Args:
            density: material density (applied after SI conversion).
            z_lower: lower extrusion bound, in sketch units.
            z_upper: upper extrusion bound, in sketch units.
            length_scaling: multiplier applied to lengths before the SI
                conversion.

        Returns:
            Tuple (area, centroid, volume, mass, tris) where tris are the
            scaled triangles used in the computation.
        """
        z_lower = z_lower * length_scaling / popupcad.SI_length_scaling
        z_upper = z_upper * length_scaling / popupcad.SI_length_scaling
        tris = numpy.array(self.triangles3()) * length_scaling / popupcad.SI_length_scaling
        shape = list(tris.shape)
        shape[2] += 1
        z_center = (z_lower + z_upper) / 2
        # Homogeneous (x, y, 1) vertex rows: |det|/2 of each 3x3 matrix is
        # the triangle's area.
        tris2 = numpy.ones(shape)
        tris2[:, :, :2] = tris
        areas = abs(numpy.array([scipy.linalg.det(tri) for tri in tris2]) / 2)
        area = areas.sum()
        tris2[:, :, 2] = z_center
        centroids = tris2.sum(1) / 3
        # Area-weighted mean of the triangle centroids.
        centroid = (areas * centroids.T).sum(1) / areas.sum()
        thickness = z_upper - z_lower
        volume = area * thickness
        mass = volume * density
        return area, centroid, volume, mass, tris

    def inertia_tensor(self, about_point, density, z_lower, z_upper, tris):
        """Compute the inertia tensor of the extrusion about about_point by
        summing the tensors of tetrahedra built from the triangulation."""
        z_lower = z_lower / popupcad.SI_length_scaling
        z_upper = z_upper / popupcad.SI_length_scaling
        import popupcad.algorithms.triangle as triangle
        tris3 = [triangle.Triangle(*tri) for tri in tris]
        tets = [tet for tri in tris3 for tet in tri.extrude(z_lower, z_upper)]
        Is = numpy.array([tet.I(density, about_point) for tet in tets])
        I = Is.sum(0)
        return I

    def hollow(self):
        """Return open polylines tracing this polygon's loops (each loop is
        closed by repeating its first vertex)."""
        polylines = []
        for loop in [self.get_exterior()] + self.get_interiors():
            newloop = [vertex.copy(identical=False) for vertex in loop + loop[0:1]]
            polylines.append(GenericPolyline(newloop, [], self.is_construction()))
        return polylines

    def output_dxf(self, model_space, layer=None):
        exterior = self.exteriorpoints()
        dxfattribs = {'closed': True}
        if layer is not None:
            dxfattribs['layer'] = layer
        model_space.add_lwpolyline(exterior, dxfattribs=dxfattribs)
        for interior in self.interiorpoints():
            dxfattribs = {'closed': True}
            if layer is not None:
                dxfattribs['layer'] = layer
            model_space.add_lwpolyline(interior, dxfattribs=dxfattribs)

    # Gets the center
    def get_center(self):
        """Return the vertex-average center of the exterior, in SI units."""
        points = self.exteriorpoints()
        x_values = [point[0] / popupcad.SI_length_scaling for point in points]
        y_values = [point[1] / popupcad.SI_length_scaling for point in points]
        x = float(sum(x_values)) / len(x_values)
        y = float(sum(y_values)) / len(y_values)
        return (x, y)

    def exterior_points_from_center(self):
        """Return exterior points (SI units) relative to get_center()."""
        center = self.get_center()
        points = self.exteriorpoints()
        x_values = [point[0] / popupcad.SI_length_scaling - center[0] for point in points]
        y_values = [point[1] / popupcad.SI_length_scaling - center[1] for point in points]
        return list(zip(x_values, y_values))

    def extrudeVertices(self, extrusion_factor, z0=0):
        """Extrudes the vertices of a shape and returns the three
        dimensional values as a flat [x, y, z, x, y, z, ...] list."""
        a = self.triangles3()
        vertices = []
        # Bottom cap at z0.
        for coord in a:
            for dec in coord:
                vertices.append(dec[0])  # x-axis
                vertices.append(dec[1])  # y-axis
                vertices.append(z0)  # z-axis
        # Top cap, wound in reverse so it faces the opposite direction.
        for coord in a:
            for dec in reversed(coord):
                vertices.append(dec[0])  # x-axis
                vertices.append(dec[1])  # y-axis
                vertices.append(z0 + extrusion_factor)  # z-axis
        top_edges = self.exteriorpoints_3d(z0=z0)
        # BUG FIX: this previously called the non-existent
        # `exteriorpoint_3d` (missing 's'), raising AttributeError.
        bottom_edges = self.exteriorpoints_3d(z0=z0 + extrusion_factor)
        # Stitch the side wall as two triangles per exterior edge.
        sideTriangles = list(zip(top_edges, top_edges[1:] + top_edges[:1], bottom_edges))
        sideTriangles2 = list(zip(bottom_edges[1:] + bottom_edges[:1], bottom_edges, top_edges[1:] + top_edges[:1]))
        sideTriangles.extend(sideTriangles2)
        sideTriangles = [list(triangle) for triangle in sideTriangles]
        import itertools
        # Flatten triangles -> points -> coordinates into one flat list.
        sideTriangles = list(itertools.chain.from_iterable(sideTriangles))
        sideTriangles = [list(point) for point in sideTriangles]
        sideTriangles = list(itertools.chain.from_iterable(sideTriangles))
        vertices.extend(sideTriangles)
        return vertices
class GenericCircle(GenericShapeBase):
    """A circle defined by its center and one point on its perimeter."""

    @classmethod
    def condition_loop(cls, loop):
        # BUG FIX: the conditioned loop was computed but not returned, so
        # callers received None. Every sibling shape class (GenericLine,
        # GenericTwoPointRect, ...) returns this value.
        return cls._condition_loop(
            loop,
            remove_loop_reduncancy=False,
            remove_forward_redundancy=False)

    def outputinteractive(self):
        from popupcad.graphics2d.interactive import InteractiveCircle
        return InteractiveCircle(self)

    def outputstatic(self, *args, **kwargs):
        from popupcad.graphics2d.static import StaticCircle
        return StaticCircle(self, *args, **kwargs)

    def gen_painterpath(self, exterior, interiors):
        # exterior[0] is the center, exterior[1] a point on the perimeter.
        path = qg.QPainterPath()
        center = numpy.array(exterior[0])
        edge = numpy.array(exterior[1])
        v = edge - center
        r = v.dot(v)**.5
        point1 = center - r
        point2 = center + r
        point1 = qc.QPointF(*point1)
        point2 = qc.QPointF(*point2)
        rect = qc.QRectF(point1, point2)
        path.addEllipse(rect)
        return path

    def to_shapely(self):
        exterior_p = self.exteriorpoints(scaling=popupcad.csg_processing_scaling)
        exterior = numpy.array(exterior_p)
        center = exterior[0]
        v = exterior[1] - exterior[0]
        r = v.dot(v)**.5
        # Approximate the circle as the polygonal boundary of a buffered
        # point.
        obj = shapely.geometry.Point(*center).buffer(r)
        obj = sg.Polygon(obj.boundary)
        return obj

    def segments(self):
        return self.segments_closed()
class GenericTwoPointRect(GenericShapeBase):
    """An axis-aligned rectangle defined by two opposite corners."""

    @classmethod
    def condition_loop(cls, loop):
        return cls._condition_loop(
            loop,
            remove_loop_reduncancy=False,
            remove_forward_redundancy=False)

    def outputinteractive(self):
        from popupcad.graphics2d.interactive import InteractiveRect2Point
        return InteractiveRect2Point(self)

    def outputstatic(self, *args, **kwargs):
        from popupcad.graphics2d.static import StaticRect2Point
        return StaticRect2Point(self, *args, **kwargs)

    def gen_painterpath(self, exterior, interiors):
        painter_path = qg.QPainterPath()
        corner_points = [qc.QPointF(*point) for point in exterior]
        painter_path.addRect(qc.QRectF(*corner_points))
        return painter_path

    def to_shapely(self):
        # Expand the two stored corners into the full four-corner loop.
        exterior_p = self.exteriorpoints(
            scaling=popupcad.csg_processing_scaling)
        first, second = exterior_p[0], exterior_p[1]
        corners = [
            first,
            (first[0], second[1]),
            second,
            (second[0], first[1]),
        ]
        return sg.Polygon(corners)

    def segments(self):
        return self.segments_closed()
if __name__=='__main__':
    # Quick manual smoke test: build a polygon and exercise the mass and
    # inertia computations.
    a = GenericPoly.gen_from_point_lists([[0,0],[0,1],[1,2],[2,1],[2,-1],[1,-2],[0,-1]],[])
#` area,centroid,I= a.mass_props(1,-.1,.1)\
    z_lower = -.1
    z_upper = .1
    length_scaling = 1
    density = 1
    area,centroid,volume,mass,tris = a.mass_properties(density,z_lower ,z_upper,length_scaling)
    about_point = centroid
    I = a.inertia_tensor(about_point,density,z_lower,z_upper,tris)
    # The triangulated area should agree with the shape's own area
    # computation.
    area2 = a.trueArea()
    print(area,area2)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Miscellaneous JAX helper functions."""
import functools
from typing import Any, Callable, Type, TypeVar, Union
import dataclasses
import flax
import jax
import jax.numpy as jnp
import numpy as np
# Enable omnistaging so all JAX transformations trace eagerly.
# NOTE(review): this call only exists on older JAX versions (it was removed
# once omnistaging became the default) -- confirm against the pinned JAX
# version.
jax.config.enable_omnistaging()
# Type alias for functions that handle NDArrays
NDArray = Union[np.ndarray, jnp.DeviceArray]
# Generic type variable used by helpers in this module.
T = TypeVar("T")
@dataclasses.dataclass
class LeafPlaceholder:
  """Represents a dataclass tree leaf of a particular type.

  The main purpose for a LeafPlaceholder object is to be a jax pytree leaf
  that we can replace with some other concrete value of the appropriate type.

  Attributes:
    ty: The type annotation for the leaf.
  """
  ty: Union[Type[Any], str]

  # Support pickling: type objects cannot be pickled directly, so the
  # annotation is serialized as its repr string.
  def __getstate__(self):
    return self.ty if isinstance(self.ty, str) else repr(self.ty)

  def __setstate__(self, state):
    self.ty = state
def synthesize_dataclass(ty):
  """Synthesize an instance of a dataclass.

  Any fields of the dataclass that are also dataclasses will be recursively
  synthesized as well. Types with a "default constructor" ty() will be
  instantiated with that type, and other types (those for which ty() is a
  TypeError, such as typing.Any or jax_util.NDArray) will be instantiated
  with a LeafPlaceholder.

  Args:
    ty: Type to synthesize, usually a dataclass.

  Returns:
    Instance of the type, or a leaf placeholder.
  """
  if not dataclasses.is_dataclass(ty):
    # Non-dataclass: prefer the type's zero-argument constructor; fall back
    # to a placeholder for types that can't be instantiated that way.
    try:
      return ty()
    except TypeError:
      return LeafPlaceholder(ty)
  kwargs = {
      field.name: synthesize_dataclass(field.type)
      for field in dataclasses.fields(ty)
  }
  return ty(**kwargs)
def vmap_with_kwargs(fun,
                     positional_axes=0,
                     out_axes=0,
                     **kwargs_axes):
  """Wrapper around jax.vmap that supports specifying axes for kwargs.

  For instance, if we have

    def foo(w, x, y, z):
      ...

  then instead of writing `jax.vmap(foo, in_axes=(0, 1, (2, 3), None))` and
  calling with only positional arguments, you can instead write

    vmap_with_kwargs(foo, positional_axes=(0,), x_axis=1, y_axes=(2, 3))

  and call it with x, y, and z as keyword arguments.

  Args:
    fun: Function to vmap.
    positional_axes: Input axes for positional arguments; like `in_axes` for
      jax.vmap. If not provided, all positional arguments will be vmapped
      across their first dimension.
    out_axes: Output axes; see jax.vmap.
    **kwargs_axes: Input axes for keyword arguments, each with an "_axis" or
      "_axes" suffix. Keyword arguments without a spec are broadcast (axis
      None).

  Returns:
    Batched/vectorized version of `fun`; see jax.vmap. Positional and keyword
    arguments at call time must match the axis specifications given here.
  """
  kw_axes = {}
  for name, axes in kwargs_axes.items():
    if not name.endswith(("_axis", "_axes")):
      raise ValueError(
          f"Keyword argument {name} does not end in '_axis' or '_axes'")
    # Strip the 5-character suffix to recover the real kwarg name.
    kw_axes[name[:-5]] = axes

  @functools.wraps(fun)
  def call(pos_args, mapped_kwargs, broadcast_kwargs):
    return fun(*pos_args, **mapped_kwargs, **broadcast_kwargs)

  vmapped = jax.vmap(
      call, in_axes=(positional_axes, kw_axes, None), out_axes=out_axes)

  @functools.wraps(vmapped)
  def wrapper(*args, **kwargs):
    mapped = {k: v for k, v in kwargs.items() if k in kw_axes}
    broadcast = {k: v for k, v in kwargs.items() if k not in kw_axes}
    return vmapped(args, mapped, broadcast)

  return wrapper
def np_or_jnp(arr):
  """Return either numpy or jax.numpy based on the type of arr.

  Args:
    arr: Array, abstract value, or tracer to inspect.

  Returns:
    The `jax.numpy` module if `arr` is a JAX array/abstract value/tracer,
    otherwise the plain `numpy` module.
  """
  # See also https://numpy.org/neps/nep-0037-array-module.html
  # Compatibility fix: `jnp.DeviceArray` and `jax.core.UnshapedArray` were
  # removed in newer JAX releases; fall back to `jnp.ndarray` / skip the
  # missing class so this works on both old and new versions.
  jax_array_cls = getattr(jnp, "DeviceArray", jnp.ndarray)
  jax_types = (jax_array_cls, jax.core.Tracer)
  unshaped = getattr(jax.core, "UnshapedArray", None)
  if unshaped is not None:
    jax_types = jax_types + (unshaped,)
  if isinstance(arr, jax_types):
    return jnp
  else:
    return np
def pad_to(arr, size, axis = 0):
  """Pad one axis of an array to a specific size by adding zeros at the end.

  Args:
    arr: Array to pad.
    size: Requested size of the axis to pad.
    axis: Axis to pad.

  Returns:
    Version of arr padded with zeros along the requested axis.
  """
  xnp = np_or_jnp(arr)
  widths = []
  for dim in range(arr.ndim):
    # Only the requested axis grows; zeros are appended at the end.
    widths.append([0, size - arr.shape[axis]] if dim == axis else [0, 0])
  return xnp.pad(arr, widths, mode="constant")
def register_dataclass_pytree(cls):
  """Register a dataclass as a JAX pytree and a flax serializable object.

  This makes it so that wrapped dataclasses can be used as parameters inside
  a JAX/flax model, and handled by core jax/flax functions. We assume that
  every parameter of the dataclass is a JAX datatype that should be mapped
  over.

  Differences from flax.struct.dataclass:
  - Assumes dataclass wrapper has already been applied. This allows
    customizing the creation of the dataclass before registering the object.
  - Does not support `pytree_node` fields.

  Args:
    cls: Class to register as a pytree.

  Returns:
    The input argument (so that this can be used as a decorator).

  Raises:
    ValueError: If cls is not a dataclass.
  """
  if not dataclasses.is_dataclass(cls):
    raise ValueError(f"{cls} is not a dataclass. Perhaps you need to call "
                     "dataclasses.dataclass first?")
  def to_shallow_dict(instance):
    """Returns a shallow-dict view of instance, with keys in field order."""
    fields = dataclasses.fields(instance)
    return {field.name: getattr(instance, field.name) for field in fields}
  # Flatten to the field values (children) with no static aux data; unflatten
  # reconstructs positionally, relying on dict insertion order matching
  # dataclass field order.
  jax.tree_util.register_pytree_node(
      cls,
      lambda instance: (to_shallow_dict(instance).values(), None),
      lambda _, values: cls(*values),
  )
  def to_state_dict(instance):
    """Returns a flax state dict for this instance."""
    # Convert object to a shallow dict, then let flax do the rest.
    return flax.serialization.to_state_dict(to_shallow_dict(instance))
  def from_state_dict(representative, state_dict):
    """Returns an instance of the object with the given state dict."""
    # Tell flax to restore to a shallow dict, then construct the object.
    old_shallow = to_shallow_dict(representative)
    new_shallow = flax.serialization.from_state_dict(old_shallow, state_dict)
    return cls(**new_shallow)
  # Hook the same shallow-dict representation into flax (de)serialization.
  flax.serialization.register_serialization_state(cls, to_state_dict,
                                                  from_state_dict)
  return cls
@flax.deprecated.nn.module
def flax_tag(arr):
  """Wraps a value in a flax module, to inspect intermediate values.

  The function itself is the identity; wrapping it as a (deprecated-API flax)
  module makes the value visible as a named intermediate.
  """
  return arr
def force_physical_layout(operand):
  """Force the physical layout of `operand` to match its logical layout.

  The return value of this function is identical to the argument, but is
  guaranteed to have its physical layout match the order of dimensions in the
  shape. The last dimension will be the minormost dimension (the one that
  changes the fastest, and should be a multiple of 128 on TPU) and the first
  dimension will be the majormost dimension (the one that changes the
  slowest).

  Note that XLA may still insert copies before or after this operation, so it
  doesn't guarantee that this layout will persist. However, it should serve
  as a hint to encourage XLA to choose a good layout instead of a bad one,
  and can be used to prevent a bad but required choice from propagating to
  other values.

  Args:
    operand: Array to constrain.

  Returns:
    Copy of operand whose physical layout matches its shape.
  """
  # Bind the custom primitive registered at the bottom of this module.
  return force_physical_layout_p.bind(operand)
def _force_physical_layout_impl(operand):
  """Implementation for force_physical_layout_p.

  Semantically the identity function, written so that XLA cannot simplify it
  away (which would discard the layout hint).
  """
  # Flatten the operand.
  flat = jnp.reshape(operand, (-1,))
  # Do something XLA can't simplify, but is actually a no-op.
  # Since the false branch depends on the linearized order of the elements,
  # this means the reshapes must actually happen. On TPU, all reshapes are
  # implemented as bitcasts, which implies that the order of the dimensions is
  # in major-to-minor order (i.e. the physical layout matches the logical one).
  # The rng_uniform sample is compared against 2, so the identity branch is
  # the one actually taken at runtime.
  # NOTE(review): jax.lax.tie_in / rng_uniform are legacy APIs removed in
  # newer JAX releases -- confirm against the pinned JAX version.
  flat = jax.lax.cond(
      jax.lax.rng_uniform(jax.lax.tie_in(operand, 0), 1, ()) < 2, flat,
      lambda f: f, flat, lambda f: f[::-1])
  # Restore the operand.
  return jnp.reshape(flat, operand.shape)
# Primitive backing force_physical_layout(), plus its interpreter rules.
force_physical_layout_p = jax.core.Primitive("force_physical_layout")
force_physical_layout_p.def_impl(_force_physical_layout_impl)
# Shape and dtype are unchanged; just raise the operand to a shaped aval.
force_physical_layout_p.def_abstract_eval(
    lambda operand, **_: jax.abstract_arrays.raise_to_shaped(operand))
# Lower to XLA by tracing the reference implementation.
jax.interpreters.xla.translations[
    force_physical_layout_p] = jax.interpreters.xla.lower_fun(
        _force_physical_layout_impl, multiple_results=False)
# The op is the identity, hence linear: its transpose is itself.
jax.interpreters.ad.deflinear(force_physical_layout_p,
                              lambda ct: [force_physical_layout(ct)])
# Batching rule: apply to the batched operand, keep the batch dimension.
jax.interpreters.batching.primitive_batchers[force_physical_layout_p] = (
    lambda args, dims: (force_physical_layout(args[0]), dims[0]))
|
|
# -*- coding: utf-8 -*-
"""
sphinx.domains.c
~~~~~~~~~~~~~~~~
The C language domain.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import string
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import Field, TypedField
# RE to split at word boundaries
wsplit_re = re.compile(r'(\W+)')
# REs for C signatures
c_sig_re = re.compile(
r'''^([^(]*?) # return type
([\w:.]+) \s* # thing name (colon allowed for C++)
(?: \((.*)\) )? # optionally arguments
(\s+const)? $ # const specifier
''', re.VERBOSE)
c_funcptr_sig_re = re.compile(
r'''^([^(]+?) # return type
(\( [^()]+ \)) \s* # name in parentheses
\( (.*) \) # arguments
(\s+const)? $ # const specifier
''', re.VERBOSE)
c_funcptr_arg_sig_re = re.compile(
r'''^\s*([^(,]+?) # return type
\( ([^()]+) \) \s* # name in parentheses
\( (.*) \) # arguments
(\s+const)? # const specifier
\s*(?=$|,) # end with comma or end of string
''', re.VERBOSE)
c_funcptr_name_re = re.compile(r'^\(\s*\*\s*(.*?)\s*\)$')
class CObject(ObjectDescription):
    """
    Description of a C language object.

    Parses C declarations written in directive signatures (functions,
    members, macros, types, variables) and renders them as description
    nodes with cross-references for identifier-like type names.
    """
    # Field list entries (":param:", ":returns:", ":rtype:") recognized in
    # the directive body.
    doc_field_types = [
        TypedField('parameter', label=l_('Parameters'),
                   names=('param', 'parameter', 'arg', 'argument'),
                   typerolename='type', typenames=('type',)),
        Field('returnvalue', label=l_('Returns'), has_arg=False,
              names=('returns', 'return')),
        Field('returntype', label=l_('Return type'), has_arg=False,
              names=('rtype',)),
    ]
    # These C types aren't described anywhere, so don't try to create
    # a cross-reference to them
    stopwords = set((
        'const', 'void', 'char', 'wchar_t', 'int', 'short',
        'long', 'float', 'double', 'unsigned', 'signed', 'FILE',
        'clock_t', 'time_t', 'ptrdiff_t', 'size_t', 'ssize_t',
        'struct', '_Bool',
    ))
    def _parse_type(self, node, ctype):
        """Append `ctype` to `node`, wrapping identifier-like words in
        pending cross-reference nodes (stopword builtins stay plain text)."""
        # add cross-ref nodes for all words
        for part in [_f for _f in wsplit_re.split(ctype) if _f]:
            tnode = nodes.Text(part, part)
            if part[0] in string.ascii_letters+'_' and \
               part not in self.stopwords:
                pnode = addnodes.pending_xref(
                    '', refdomain='c', reftype='type', reftarget=part,
                    modname=None, classname=None)
                pnode += tnode
                node += pnode
            else:
                node += tnode
    def _parse_arglist(self, arglist):
        """Yield the comma-separated arguments of `arglist` one at a time,
        keeping function-pointer arguments (which contain commas) intact."""
        while True:
            m = c_funcptr_arg_sig_re.match(arglist)
            if m:
                yield m.group()
                # NOTE(review): sub() removes *all* occurrences of the
                # pattern, not only the matched prefix -- looks fragile with
                # several function-pointer args, but kept as-is.
                arglist = c_funcptr_arg_sig_re.sub('', arglist)
                if ',' in arglist:
                    _, arglist = arglist.split(',', 1)
                else:
                    break
            else:
                if ',' in arglist:
                    arg, arglist = arglist.split(',', 1)
                    yield arg
                else:
                    yield arglist
                    break
    def handle_signature(self, sig, signode):
        """Transform a C signature into RST nodes.

        Returns the full object name, used as the cross-reference target.
        """
        # first try the function pointer signature regex, it's more specific
        m = c_funcptr_sig_re.match(sig)
        if m is None:
            m = c_sig_re.match(sig)
        if m is None:
            raise ValueError('no match')
        rettype, name, arglist, const = m.groups()
        signode += addnodes.desc_type('', '')
        self._parse_type(signode[-1], rettype)
        try:
            classname, funcname = name.split('::', 1)
            classname += '::'
            signode += addnodes.desc_addname(classname, classname)
            signode += addnodes.desc_name(funcname, funcname)
            # name (the full name) is still both parts
        except ValueError:
            signode += addnodes.desc_name(name, name)
        # clean up parentheses from canonical name
        m = c_funcptr_name_re.match(name)
        if m:
            name = m.group(1)
        # c:member names get prefixed with the enclosing c:type, if any.
        typename = self.env.ref_context.get('c:type')
        if self.name == 'c:member' and typename:
            fullname = typename + '.' + name
        else:
            fullname = name
        if not arglist:
            if self.objtype == 'function':
                # for functions, add an empty parameter list
                signode += addnodes.desc_parameterlist()
            if const:
                signode += addnodes.desc_addname(const, const)
            return fullname
        paramlist = addnodes.desc_parameterlist()
        arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
        # this messes up function pointer types, but not too badly ;)
        for arg in self._parse_arglist(arglist):
            arg = arg.strip()
            param = addnodes.desc_parameter('', '', noemph=True)
            try:
                m = c_funcptr_arg_sig_re.match(arg)
                if m:
                    # Function-pointer argument: emphasize only the name.
                    self._parse_type(param, m.group(1) + '(')
                    param += nodes.emphasis(m.group(2), m.group(2))
                    self._parse_type(param, ')(' + m.group(3) + ')')
                    if m.group(4):
                        param += addnodes.desc_addname(m.group(4), m.group(4))
                else:
                    ctype, argname = arg.rsplit(' ', 1)
                    self._parse_type(param, ctype)
                    # separate by non-breaking space in the output
                    param += nodes.emphasis(' '+argname, u'\xa0'+argname)
            except ValueError:
                # no argument name given, only the type
                self._parse_type(param, arg)
            paramlist += param
        signode += paramlist
        if const:
            signode += addnodes.desc_addname(const, const)
        return fullname
    def get_index_text(self, name):
        """Return the general-index entry text for this object type."""
        if self.objtype == 'function':
            return _('%s (C function)') % name
        elif self.objtype == 'member':
            return _('%s (C member)') % name
        elif self.objtype == 'macro':
            return _('%s (C macro)') % name
        elif self.objtype == 'type':
            return _('%s (C type)') % name
        elif self.objtype == 'var':
            return _('%s (C variable)') % name
        else:
            return ''
    def add_target_and_index(self, name, sig, signode):
        """Register the object as a link target and add an index entry."""
        # for C API items we add a prefix since names are usually not qualified
        # by a module name and so easily clash with e.g. section titles
        targetname = 'c.' + name
        if targetname not in self.state.document.ids:
            signode['names'].append(targetname)
            signode['ids'].append(targetname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)
            inv = self.env.domaindata['c']['objects']
            if name in inv:
                self.state_machine.reporter.warning(
                    'duplicate C object description of %s, ' % name +
                    'other instance in ' + self.env.doc2path(inv[name][0]),
                    line=self.lineno)
            inv[name] = (self.env.docname, self.objtype)
        indextext = self.get_index_text(name)
        if indextext:
            self.indexnode['entries'].append(('single', indextext,
                                              targetname, ''))
    def before_content(self):
        """For c:type directives, expose the type name to nested members."""
        self.typename_set = False
        if self.name == 'c:type':
            if self.names:
                self.env.ref_context['c:type'] = self.names[0]
                self.typename_set = True
    def after_content(self):
        """Undo the ref_context change made in before_content()."""
        if self.typename_set:
            self.env.ref_context.pop('c:type', None)
class CXRefRole(XRefRole):
    """Cross-reference role for C objects."""

    def process_link(self, env, refnode, has_explicit_title, title, target):
        # A leading tilde is display markup only: strip it from the target,
        # and show just the last dotted component in the title.
        if has_explicit_title:
            return title, target
        target = target.lstrip('~')  # only has a meaning for the title
        if title.startswith('~'):
            title = title[1:]
            dot = title.rfind('.')
            if dot != -1:
                title = title[dot + 1:]
        return title, target
class CDomain(Domain):
    """C language domain."""
    name = 'c'
    label = 'C'
    # Object type name -> ObjType (with the xref roles that can target it).
    object_types = {
        'function': ObjType(l_('function'), 'func'),
        'member': ObjType(l_('member'), 'member'),
        'macro': ObjType(l_('macro'), 'macro'),
        'type': ObjType(l_('type'), 'type'),
        'var': ObjType(l_('variable'), 'data'),
    }
    # All directives share the CObject implementation; behavior differs only
    # via self.objtype.
    directives = {
        'function': CObject,
        'member': CObject,
        'macro': CObject,
        'type': CObject,
        'var': CObject,
    }
    roles = {
        'func': CXRefRole(fix_parens=True),
        'member': CXRefRole(),
        'macro': CXRefRole(),
        'data': CXRefRole(),
        'type': CXRefRole(),
    }
    initial_data = {
        'objects': {},  # fullname -> docname, objtype
    }
    def clear_doc(self, docname):
        """Drop all objects recorded for `docname` (called on re-read)."""
        for fullname, (fn, _l) in list(self.data['objects'].items()):
            if fn == docname:
                del self.data['objects'][fullname]
    def merge_domaindata(self, docnames, otherdata):
        """Merge objects from a parallel build for the given docnames."""
        # XXX check duplicates
        for fullname, (fn, objtype) in otherdata['objects'].items():
            if fn in docnames:
                self.data['objects'][fullname] = (fn, objtype)
    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        """Resolve a c:... cross-reference; return a refnode or None."""
        # strip pointer asterisk
        target = target.rstrip(' *')
        if target not in self.data['objects']:
            return None
        obj = self.data['objects'][target]
        return make_refnode(builder, fromdocname, obj[0], 'c.' + target,
                            contnode, target)
    def resolve_any_xref(self, env, fromdocname, builder, target,
                         node, contnode):
        """Resolve an :any: reference; return [(role, refnode)] or []."""
        # strip pointer asterisk
        target = target.rstrip(' *')
        if target not in self.data['objects']:
            return []
        obj = self.data['objects'][target]
        return [('c:' + self.role_for_objtype(obj[1]),
                 make_refnode(builder, fromdocname, obj[0], 'c.' + target,
                              contnode, target))]
    def get_objects(self):
        """Yield (name, dispname, type, docname, anchor, priority) tuples."""
        for refname, (docname, type) in list(self.data['objects'].items()):
            yield (refname, refname, type, docname, 'c.' + refname, 1)
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 07:53:12 2016
@author: c_dolar
"""
import datetime, csv, re
def extractAuthorInfo(infostr):
    """Extract a Speaker from an author information string.

    The string needs to be in the format
    "last_name, first_name (affiliation, country)".

    Args:
        infostr: raw author string from the EDAS export.

    Returns:
        A Speaker populated with last name, first name and organization.

    Raises:
        ValueError: if infostr does not match the expected format (the
            original crashed here with an opaque AttributeError on None).
    """
    # pattern is (last_name, first_name (affiliation, country))
    matches = re.search(r"^([^()]+),([^()]+)\((.+)\)$", infostr)
    if matches is None:
        raise ValueError(
            "Cannot parse author information: {!r}".format(infostr))
    last_name, first_name, organization = matches.groups()
    # Leftover debugging hook ("if 'Ltd. &' in ...: print('break')") removed.
    return Speaker(last_name=last_name, first_name=first_name.strip(),
                   organization=organization)
def nowString():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM'."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d %H:%M')
def toDateString(dt):
    """Format a datetime as 'YYYY-MM-DD HH:MM', or "NULL" if not possible.

    Args:
        dt: a datetime-like object with strftime(), or any placeholder value
            (e.g. the string "NULL") used for missing dates.

    Returns:
        The formatted timestamp, or "NULL" when dt cannot be formatted.
    """
    try:
        return dt.strftime('%Y-%m-%d %H:%M')
    # Narrowed from the original bare `except`: placeholders lack strftime
    # (AttributeError); TypeError covers non-callable oddities. Anything
    # else (e.g. KeyboardInterrupt) now propagates.
    except (AttributeError, TypeError):
        return "NULL"
def fromDateString(st):
    """Parse 'YYYY-MM-DD HH:MM' into a datetime, or None if unparsable."""
    parsed = None
    try:
        parsed = datetime.datetime.strptime(st, '%Y-%m-%d %H:%M')
    except ValueError:
        pass
    return parsed
class Event:
    """One schedule event; the string "NULL" marks absent DB values."""

    def __init__(self, id=None, start_at=None,
                 end_at=None, text="", name="",
                 place="NULL", version="NULL", level_id="NULL", type_id="NULL",
                 track_id="NULL", url="NULL", event_type="NULL", order="NULL",
                 deleted_at="NULL", created_at=None,
                 updated_at=None):
        """Create an event.

        Default-argument fix: the original evaluated nowString() once at
        class-definition time (so every default-constructed Event shared one
        import-time timestamp) and defaulted start_at/end_at to the
        *uncalled* function datetime.datetime.now. None sentinels compute
        fresh values per instance instead.
        """
        self.id = id
        self.start_at = datetime.datetime.now() if start_at is None else start_at
        self.end_at = datetime.datetime.now() if end_at is None else end_at
        self.text = text
        self.name = name
        self.place = place
        self.version = version
        self.level_id = level_id
        self.type_id = type_id
        self.track_id = track_id
        self.url = url
        self.event_type = event_type
        self.order = order
        self.deleted_at = deleted_at
        self.created_at = nowString() if created_at is None else created_at
        self.updated_at = nowString() if updated_at is None else updated_at

    def __repr__(self):
        # Real dunder: the original defined the non-dunder __rep__, so
        # repr() fell back to the useless default representation.
        return "Event: id {}, start_at {}, end_at {}, name {}, created_at {}, updated_at {}, deleted_at {}".format(self.id, self.start_at, self.end_at, self.name, self.created_at, self.updated_at, self.deleted_at)

    # Backward-compatible alias for existing callers of __rep__().
    __rep__ = __repr__

    def __str__(self):
        return self.__rep__()

    def mark_deleted(self):
        """Tombstone this row by stamping deleted_at with the current time."""
        self.deleted_at = nowString()

    def to_array(self):
        """Row in CSV column order (see event_from_array)."""
        # id, start at, end at, text, name, place, version, level_id, type_id,
        # track_id, url, event_type, order, deleted_at, created_at, updated_at
        return [self.id, self.start_at, self.end_at, self.text, self.name,
                self.place, self.version, self.level_id, self.type_id,
                self.track_id, self.url, self.event_type, self.order,
                self.deleted_at, self.created_at, self.updated_at]

    def update(self, event):
        """Copy all mutable fields from `event`; refresh updated_at."""
        self.start_at = event.start_at
        self.end_at = event.end_at
        self.text = event.text
        self.name = event.name
        self.place = event.place
        self.version = event.version
        self.level_id = event.level_id
        self.type_id = event.type_id
        self.track_id = event.track_id
        self.url = event.url
        self.event_type = event.event_type
        self.order = event.order
        self.deleted_at = event.deleted_at
        # created_at is intentionally preserved (creation time never changes).
        self.updated_at = nowString()
def event_from_array(array):
    """Build an Event from a 16-column CSV row (see Event.to_array)."""
    (raw_id, start_at, end_at, text, name, place, version, level_id,
     type_id, track_id, url, event_type, order, deleted_at,
     created_at, updated_at) = array
    return Event(id=int(raw_id), start_at=start_at, end_at=end_at,
                 text=text, name=name, place=place, version=version,
                 level_id=level_id, type_id=type_id, track_id=track_id,
                 url=url, event_type=event_type, order=order,
                 deleted_at=deleted_at, created_at=created_at,
                 updated_at=updated_at)
class EventTrack:
    """A conference track; the string "NULL" marks absent DB values."""

    def __init__(self, id=None, name="", order="NULL", deleted_at="NULL",
                 created_at=None, updated_at=None):
        # None sentinels: the original evaluated nowString() once at
        # class-definition time, freezing one import-time timestamp into
        # every default-constructed row.
        self.id = id
        self.name = name
        self.order = order
        self.deleted_at = deleted_at
        self.created_at = nowString() if created_at is None else created_at
        self.updated_at = nowString() if updated_at is None else updated_at

    def __repr__(self):
        # Real dunder (the original defined non-dunder __rep__).
        return "Track: id {}, name {}, created_at {}, updated_at {}, deleted_at {}".format(self.id, self.name, self.created_at, self.updated_at, self.deleted_at)

    # Backward-compatible alias for existing callers of __rep__().
    __rep__ = __repr__

    def __str__(self):
        return self.__rep__()

    def mark_deleted(self):
        """Tombstone this row by stamping deleted_at with the current time."""
        self.deleted_at = nowString()

    def to_array(self):
        """Row in CSV column order (see track_from_array)."""
        return [self.id, self.name, self.order,
                self.deleted_at, self.created_at, self.updated_at]

    def update(self, track):
        """Copy mutable fields from `track`; refresh updated_at."""
        self.name = track.name
        self.order = track.order
        self.deleted_at = track.deleted_at
        # created_at is intentionally preserved (creation time never changes).
        self.updated_at = nowString()
def track_from_array(array):
    """Build an EventTrack from a 6-column CSV row (see to_array)."""
    raw_id, name, order, deleted_at, created_at, updated_at = array
    return EventTrack(id=int(raw_id), name=name, order=order,
                      deleted_at=deleted_at, created_at=created_at,
                      updated_at=updated_at)
class EventSpeaker:
    """Join row linking an Event to a Speaker."""

    def __init__(self, id=None, event_id=None, speaker_id=None,
                 created_at=None, updated_at=None):
        # None sentinels: the original evaluated nowString() once at
        # class-definition time, freezing one import-time timestamp into
        # every default-constructed row.
        self.id = id
        self.event_id = event_id
        self.speaker_id = speaker_id
        self.created_at = nowString() if created_at is None else created_at
        self.updated_at = nowString() if updated_at is None else updated_at

    def __repr__(self):
        # Real dunder (the original defined non-dunder __rep__).
        return "EventSpeaker: id {}, event_id {}, speaker_id {}, created_at {}, updated_at {}".format(self.id, self.event_id, self.speaker_id, self.created_at, self.updated_at)

    # Backward-compatible alias for existing callers of __rep__().
    __rep__ = __repr__

    def __str__(self):
        return self.__rep__()

    def to_array(self):
        """Row in CSV column order (see event_speaker_from_array)."""
        return [self.id, self.event_id, self.speaker_id, self.created_at,
                self.updated_at]

    def update(self, eventSpeaker):
        """Copy link fields from `eventSpeaker`; refresh updated_at."""
        self.event_id = eventSpeaker.event_id
        self.speaker_id = eventSpeaker.speaker_id
        # created_at is intentionally preserved (creation time never changes).
        self.updated_at = nowString()

    def mark_deleted(self):
        # This table has no deleted_at column; a zero speaker_id is the
        # tombstone convention instead.
        self.speaker_id = 0
def event_speaker_from_array(array):
    """Build an EventSpeaker from a 5-column CSV row (see to_array)."""
    raw_id, event_id, speaker_id, created_at, updated_at = array
    return EventSpeaker(id=int(raw_id), event_id=int(event_id),
                        speaker_id=int(speaker_id),
                        created_at=created_at, updated_at=updated_at)
class Speaker:
    """One speaker/author row; the string "NULL" marks absent DB values."""

    def __init__(self, id=None, first_name="", last_name="", characteristic="",
                 job="", organization="", twitter_name="", website="",
                 avatar="", email="", order="NULL",
                 created_at=None, updated_at=None,
                 deleted_at="NULL"):
        # None sentinels: the original evaluated nowString() once at
        # class-definition time, freezing one import-time timestamp into
        # every default-constructed row.
        self.id = id
        self.first_name = first_name
        self.last_name = last_name
        self.characteristic = characteristic
        self.job = job
        self.organization = organization
        self.twitter_name = twitter_name
        self.website = website
        self.avatar = avatar
        self.email = email
        self.order = order
        self.created_at = nowString() if created_at is None else created_at
        self.updated_at = nowString() if updated_at is None else updated_at
        self.deleted_at = deleted_at

    def __repr__(self):
        # Real dunder (the original defined non-dunder __rep__).
        return "Speaker: id {}, first_name {}, last_name {}, created_at {}, updated_at {}, deleted_at {}".format(self.id, self.first_name, self.last_name, self.created_at, self.updated_at, self.deleted_at)

    # Backward-compatible alias for existing callers of __rep__().
    __rep__ = __repr__

    def __str__(self):
        return self.__rep__()

    def to_array(self):
        """Row in CSV column order (see speaker_from_array)."""
        return [self.id, self.first_name, self.last_name, self.characteristic,
                self.job, self.organization, self.twitter_name, self.website,
                self.avatar, self.email, self.order, self.created_at,
                self.updated_at, self.deleted_at]

    def update(self, speaker):
        """Copy all fields (including created_at, as the original did) from
        `speaker`; refresh updated_at."""
        self.first_name = speaker.first_name
        self.last_name = speaker.last_name
        self.characteristic = speaker.characteristic
        self.job = speaker.job
        self.organization = speaker.organization
        self.twitter_name = speaker.twitter_name
        self.website = speaker.website
        self.avatar = speaker.avatar
        self.email = speaker.email
        self.order = speaker.order
        self.created_at = speaker.created_at
        self.updated_at = nowString()
        self.deleted_at = speaker.deleted_at

    def mark_deleted(self):
        """Tombstone this row by stamping deleted_at with the current time."""
        self.deleted_at = nowString()
def speaker_from_array(array):
    """Build a Speaker from a 14-column CSV row (see Speaker.to_array)."""
    (raw_id, first_name, last_name, characteristic, job, organization,
     twitter_name, website, avatar, email, order,
     created_at, updated_at, deleted_at) = array
    return Speaker(id=int(raw_id), first_name=first_name, last_name=last_name,
                   characteristic=characteristic, job=job,
                   organization=organization, twitter_name=twitter_name,
                   website=website, avatar=avatar, email=email, order=order,
                   created_at=created_at, updated_at=updated_at,
                   deleted_at=deleted_at)
class ConnfaData:
    """In-memory copy of the Connfa CSV tables (tracks, speakers, events and
    event-speaker links) with insert-or-update merge logic.

    Rows are "marked deleted" rather than removed, so the CSV export keeps a
    tombstone for downstream consumers.
    """

    def __init__(self):
        self.tracks = []
        self.lastTrackId = 0
        self.updatedTracks = []
        self.events = []
        self.lastEventId = 0
        self.updatedEvents = []
        self.speakers = []
        self.lastSpeakerId = 0
        self.updatedSpeakers = []
        self.eventSpeakers = []
        self.lastEventSpeakerId = 0
        self.updatedEventSpeakers = []

    def __markNonUpdatedAsDeleted(self):
        """Mark every row not touched by this import run as deleted.

        Bug fix: the original tested `x in list is False`, which Python
        chains into `(x in list) and (list is False)` -- always False, so
        nothing was ever marked deleted.  It also called the nonexistent
        EventSpeaker.mark_invalid() instead of mark_deleted().
        """
        for track in self.tracks:
            if track not in self.updatedTracks:
                track.mark_deleted()
                print("Track {} will be marked as deleted: {}".format(track.name, track))
        for speaker in self.speakers:
            if speaker not in self.updatedSpeakers:
                speaker.mark_deleted()
                print("Speaker {} will be marked as deleted: {}".format(speaker.last_name, speaker))
        for event in self.events:
            if event not in self.updatedEvents:
                event.mark_deleted()
                print("Event {} will be marked as deleted: {}".format(event.name, event))
        for eventSpeaker in self.eventSpeakers:
            if eventSpeaker not in self.updatedEventSpeakers:
                eventSpeaker.mark_deleted()
                print("Event Speaker {} will be marked as deleted: {}".format(eventSpeaker.id, eventSpeaker))

    def _load_table(self, filename, factory):
        """Read `filename` with csv, build rows via `factory(row)`.

        Returns (rows, max_id) where max_id is the largest row id seen.
        """
        rows = []
        last_id = 0
        with open(filename) as f:
            for raw in csv.reader(f):
                obj = factory(raw)
                rows.append(obj)
                last_id = max(last_id, obj.id)
        return rows, last_id

    def loadData(self, speakersFilename="speakers_export.csv",
                 eventsFilename="events_export.csv",
                 eventspeakersFilename="event_speakers_export.csv",
                 tracksFilename="tracks_export.csv"):
        """Load all four tables from their CSV exports."""
        self.tracks, self.lastTrackId = self._load_table(
            tracksFilename, track_from_array)
        self.speakers, self.lastSpeakerId = self._load_table(
            speakersFilename, speaker_from_array)
        self.events, self.lastEventId = self._load_table(
            eventsFilename, event_from_array)
        self.eventSpeakers, self.lastEventSpeakerId = self._load_table(
            eventspeakersFilename, event_speaker_from_array)

    def _save_table(self, filename, rows):
        """Write one table's rows (via to_array()) to `filename`."""
        # NOTE(review): opening without newline='' can double line endings on
        # Windows per the csv docs; kept as-is to preserve current output.
        with open(filename, "w") as f:
            writer = csv.writer(f)
            for row in rows:
                writer.writerow(row.to_array())

    def saveData(self, speakersFilename="speakers_export.csv",
                 eventsFilename="events_export.csv",
                 eventspeakersFilename="event_speakers_export.csv",
                 tracksFilename="tracks_export.csv"):
        """Tombstone stale rows, then write all four tables back to CSV."""
        self.__markNonUpdatedAsDeleted()
        self._save_table(tracksFilename, self.tracks)
        self._save_table(speakersFilename, self.speakers)
        self._save_table(eventsFilename, self.events)
        self._save_table(eventspeakersFilename, self.eventSpeakers)

    def insertSpeaker(self, speaker):
        """Insert `speaker`, or update the existing row with the same name.

        Returns the canonical (stored) Speaker object.
        """
        matchSpeakers = self.getMatchingSpeakers(first_name=speaker.first_name, last_name=speaker.last_name)
        if len(matchSpeakers) == 0:
            self.lastSpeakerId = self.lastSpeakerId + 1
            speaker.id = self.lastSpeakerId
            self.speakers.append(speaker)
            print("Inserting speaker {}".format(speaker))
        else:
            matchSpeakers[0].update(speaker)
            speaker = matchSpeakers[0]
            if len(matchSpeakers) > 1:
                print("More than one match for speaker {}".format(speaker.__str__()))
        self.updatedSpeakers.append(speaker)
        return speaker

    def insertTrack(self, track):
        """Insert `track`, or update the existing row with the same name."""
        matchTracks = self.getMatchingTracks(track.name)
        if len(matchTracks) == 0:
            self.lastTrackId += 1
            track.id = self.lastTrackId
            self.tracks.append(track)
            print("Inserting track {}".format(track))
        else:
            matchTracks[0].update(track)
            track = matchTracks[0]
            if len(matchTracks) > 1:
                print("More than one match for track {}".format(track.__str__()))
        self.updatedTracks.append(track)
        return track

    def insertEvent(self, event):
        """Insert `event`, or update the existing row with the same name."""
        matchEvents = self.getMatchingEvents(event.name)
        if len(matchEvents) == 0:
            self.lastEventId += 1
            event.id = self.lastEventId
            self.events.append(event)
            print("Inserting event {}".format(event))
        else:
            matchEvents[0].update(event)
            event = matchEvents[0]
            if len(matchEvents) > 1:
                print("More than one match for event {}".format(event.__str__()))
        self.updatedEvents.append(event)
        return event

    def insertEventSpeaker(self, eventSpeaker):
        """Insert `eventSpeaker`, or update the matching link row."""
        matchEventSpeakers = self.getMatchingEventSpeakers(event_id=eventSpeaker.event_id,
                                                           speaker_id=eventSpeaker.speaker_id)
        if len(matchEventSpeakers) == 0:
            self.lastEventSpeakerId += 1
            eventSpeaker.id = self.lastEventSpeakerId
            self.eventSpeakers.append(eventSpeaker)
            print("Inserting event speaker {}".format(eventSpeaker))
        else:
            matchEventSpeakers[0].update(eventSpeaker)
            eventSpeaker = matchEventSpeakers[0]
            if len(matchEventSpeakers) > 1:
                print("More than one match for event spekaer {}".format(eventSpeaker.__str__()))
        self.updatedEventSpeakers.append(eventSpeaker)
        return eventSpeaker

    @staticmethod
    def _same_day(row_stamp, wanted_stamp):
        """True when both timestamps parse and fall on the same calendar day.

        Bug fix: the original compared the *unbound method* `.date` against
        `.date()` (always unequal) and crashed with AttributeError when a
        timestamp failed to parse (fromDateString returns None).
        """
        row_dt = fromDateString(row_stamp)
        wanted_dt = fromDateString(wanted_stamp)
        return (row_dt is not None and wanted_dt is not None
                and row_dt.date() == wanted_dt.date())

    def getMatchingSpeakers(self, first_name=None, last_name=None, updated_at=None):
        """Return speakers matching all the given (non-None) filters."""
        matchingSpeakers = []
        for speaker in self.speakers:
            if first_name is not None and speaker.first_name != first_name:
                continue
            if last_name is not None and speaker.last_name != last_name:
                continue
            if updated_at is not None and not self._same_day(speaker.updated_at, updated_at):
                continue
            matchingSpeakers.append(speaker)
        return matchingSpeakers

    def getMatchingTracks(self, name=None, updated_at=None):
        """Return tracks matching all the given (non-None) filters."""
        matchingTracks = []
        for track in self.tracks:
            if name is not None and name != track.name:
                continue
            if updated_at is not None and not self._same_day(track.updated_at, updated_at):
                continue
            matchingTracks.append(track)
        return matchingTracks

    def getMatchingEvents(self, title=None, updated_at=None):
        """Return events matching all the given (non-None) filters."""
        matchingEvents = []
        for event in self.events:
            if title is not None and event.name != title:
                continue
            if updated_at is not None and not self._same_day(event.updated_at, updated_at):
                continue
            matchingEvents.append(event)
        return matchingEvents

    def getMatchingEventSpeakers(self, event_id=None, speaker_id=None, updated_at=None):
        """Return event-speaker links matching all the given filters."""
        matchingEventSpeakers = []
        for eventSpeaker in self.eventSpeakers:
            if event_id is not None and eventSpeaker.event_id != event_id:
                continue
            if speaker_id is not None and eventSpeaker.speaker_id != speaker_id:
                continue
            if updated_at is not None and not self._same_day(eventSpeaker.updated_at, updated_at):
                continue
            matchingEventSpeakers.append(eventSpeaker)
        return matchingEventSpeakers
class EDASData:
    """Loader for EDAS conference exports (sessions and papers CSV files).

    After loadData(), self.sessionData maps session title -> row dict, where
    each row carries the CSV columns plus 'Papers' (list of paper row dicts)
    and 'ID' (0-based row number).
    """

    def __extractSessionData(self):
        """Read the sessions CSV into self.sessionData keyed by 'Title'."""
        with open(self.sessionsFileName) as f:
            reader = csv.reader(f)
            # Py3 fix: csv readers have no .next() method; use next().
            cols = next(reader)
            self.sessionData = {}
            for num, row in enumerate(reader):
                rowdata = dict(zip(cols, row))
                rowdata['Papers'] = []
                rowdata['ID'] = num
                self.sessionData[rowdata['Title']] = rowdata

    def __extractPaperData(self):
        """Read the papers CSV and attach each paper to its session."""
        with open(self.papersFileName, 'r') as f:
            reader = csv.reader(f)
            cols = next(reader)  # Py3 fix (was reader.next())
            self.speakers = {}
            for row in reader:
                rowdata = dict(zip(cols, row))
                try:
                    self.sessionData[rowdata['Session']]['Papers'].append(rowdata)
                except KeyError:
                    # Narrowed from a bare except: only an unknown session
                    # (or missing 'Session' column) is expected here.
                    print("No session data for session {}".format(rowdata.get('Session')))

    def loadData(self, sessionsFileName="2017icce-sessions.csv",
                 papersFileName="2017icce-papers.csv"):
        """Load both CSV exports; sessions first so papers can attach."""
        self.sessionsFileName = sessionsFileName
        self.papersFileName = papersFileName
        self.__extractSessionData()
        self.__extractPaperData()

    def exportData(self, connfaData):
        """
        Export the data.

        Inserts one EventTrack per session and one Event per paper (or a
        single Event for a session without papers) into connfaData, wiring
        up speakers parsed from the 'Author N' columns.
        """
        for key in self.sessionData.keys():
            session = self.sessionData[key]
            # id, name, order, deleted, created, updated
            track = EventTrack(id=session['ID'], name=session['Title'], order=session['ID'])
            track = connfaData.insertTrack(track)
            if len(session['Papers']) > 0:
                for paper in session['Papers']:
                    # assumes 'Session start time' parses with fromDateString;
                    # a None here would crash the timedelta addition below.
                    sessionstarttime = fromDateString(paper['Session start time'])
                    min_per_paper = float(session['Minutes per paper'])
                    order = int(paper['Order in session'])
                    if ("poster" in session['Title'].lower()):
                        # Posters all share the session's own time slot.
                        starttime = session['Start time']
                        endtime = session['End time']
                    else:
                        # Talks are scheduled back to back within the session.
                        starttime = toDateString(sessionstarttime + datetime.timedelta(minutes=(order-1)*min_per_paper))
                        endtime = toDateString(sessionstarttime + datetime.timedelta(minutes=order*min_per_paper))
                    event = Event(start_at=starttime, end_at=endtime,
                                  text=paper['Abstract'], name=paper['Title'], place=paper['Session room'],
                                  type_id='1', track_id=track.id, url=paper['URL'],
                                  event_type='session', order=paper['Order in session'])
                    event = connfaData.insertEvent(event)
                    # EDAS exports up to 8 "Author N" columns per paper.
                    for n in range(1, 9):
                        author = paper['Author {}'.format(n)]
                        if len(author) > 0:
                            speaker = extractAuthorInfo(author)
                            if n == 1:
                                speaker.characteristic = paper['First author bio']
                            speaker = connfaData.insertSpeaker(speaker)
                            eventSpeaker = EventSpeaker(event_id=event.id, speaker_id=speaker.id)
                            eventSpeaker = connfaData.insertEventSpeaker(eventSpeaker)
            else:
                # Session without papers (break, keynote, ...): one event
                # covering the whole session slot.
                event = Event(start_at=session['Start time'],
                              end_at=session['End time'], text='', name=session['Title'],
                              place=session['Room'], type_id='1', track_id=track.id, url='',
                              event_type='session')
                connfaData.insertEvent(event)
    # todo: set deleted_at to all events not updated today (or this hour)
if __name__ == "__main__":
    # Merge the EDAS CSV exports into the existing Connfa data set, then
    # write the merged tables out as new CSV files.
    data = ConnfaData()
    data.loadData()
    edas = EDASData()
    edas.loadData()
    edas.exportData(data)
    # NOTE(review): "events_export_ew.csv" looks like a typo for
    # "events_export_new.csv"; kept as-is since downstream tooling may
    # already expect this name.
    data.saveData(speakersFilename="speakers_export_new.csv",
                  eventsFilename="events_export_ew.csv",
                  eventspeakersFilename="event_speakers_export_new.csv",
                  tracksFilename="tracks_export_new.csv")
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from jacket.compute import cache_utils
from jacket import context
from jacket.compute import exception
from jacket.i18n import _
from jacket.i18n import _LI
from jacket.compute.network import model as network_model
from jacket.objects import compute as objects
from jacket.objects.compute import base as obj_base
LOG = logging.getLogger(__name__)
# NOTE(vish): cache mapping for one week
_CACHE_TIME = 7 * 24 * 60 * 60
_CACHE = None
def memoize(func):
    """Decorator that caches ``func(context, reqid)`` results by request id.

    Results are stored in the shared module-level cache client for
    ``_CACHE_TIME`` seconds; the context argument is not part of the key.
    """
    @functools.wraps(func)
    def memoizer(context, reqid):
        # Lazily build the shared cache client on first use.
        global _CACHE
        if not _CACHE:
            _CACHE = cache_utils.get_client(expiration_time=_CACHE_TIME)
        cache_key = str("%s:%s" % (func.__name__, reqid))
        cached = _CACHE.get(cache_key)
        if cached is None:
            cached = func(context, reqid)
            _CACHE.set(cache_key, cached)
        return cached
    return memoizer
def reset_cache():
    """Drop the module-level memoization cache; it is rebuilt on next use."""
    global _CACHE
    _CACHE = None
def image_type(image_type):
    """Normalise an image type name to its three letter EC2 form.

    aki, kernel => aki
    ari, ramdisk => ari
    anything else => ami
    """
    aliases = {'kernel': 'aki', 'ramdisk': 'ari'}
    if image_type in aliases:
        return aliases[image_type]
    return image_type if image_type in ('aki', 'ari') else 'ami'
def resource_type_from_id(context, resource_id):
    """Get resource type by ID

    Returns a string representation of the Amazon resource type, if known.
    Returns None on failure.
    :param context: context under which the method is called
    :param resource_id: resource_id to evaluate
    """
    # The prefix before the first '-' identifies the resource kind.
    marker = resource_id.split('-')[0]
    return {
        'i': 'instance',
        'r': 'reservation',
        'vol': 'volume',
        'snap': 'snapshot',
        'ami': 'image',
        'aki': 'image',
        'ari': 'image',
    }.get(marker)
@memoize
def id_to_glance_id(context, image_id):
    """Convert an internal (db) id to a glance id."""
    mapping = objects.S3ImageMapping.get_by_id(context, image_id)
    return mapping.uuid
@memoize
def glance_id_to_id(context, glance_id):
    """Convert a glance id to an internal (db) id, creating the mapping row
    on demand when none exists yet.
    """
    if not glance_id:
        return None
    try:
        mapping = objects.S3ImageMapping.get_by_uuid(context, glance_id)
    except exception.NotFound:
        mapping = objects.S3ImageMapping(context, uuid=glance_id)
        mapping.create()
    return mapping.id
def ec2_id_to_glance_id(context, ec2_id):
    """Map an ec2-style image id (e.g. ami-0000000a) to its glance uuid."""
    return id_to_glance_id(context, ec2_id_to_id(ec2_id))
def glance_id_to_ec2_id(context, glance_id, image_type='ami'):
    """Map a glance uuid to its ec2-style image id, or None for no mapping."""
    internal_id = glance_id_to_id(context, glance_id)
    if internal_id is None:
        return None
    return image_ec2_id(internal_id, image_type=image_type)
def ec2_id_to_id(ec2_id):
    """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)."""
    # Everything after the last '-' is the hex-encoded internal id.
    hex_part = ec2_id.split('-')[-1]
    try:
        return int(hex_part, 16)
    except ValueError:
        raise exception.InvalidEc2Id(ec2_id=ec2_id)
def image_ec2_id(image_id, image_type='ami'):
    """Returns image ec2_id using id and three letter type."""
    return id_to_ec2_id(image_id, template=image_type + '-%08x')
def get_ip_info_for_instance_from_nw_info(nw_info):
    """Build a dict of fixed v4/v6 and floating addresses from network info."""
    # Hydrate raw cache data into a NetworkInfo model if needed.
    if not isinstance(nw_info, network_model.NetworkInfo):
        nw_info = network_model.NetworkInfo.hydrate(nw_info)
    fixed = nw_info.fixed_ips()
    return {
        'fixed_ips': [ip['address'] for ip in fixed if ip['version'] == 4],
        'fixed_ip6s': [ip['address'] for ip in fixed if ip['version'] == 6],
        'floating_ips': [ip['address'] for ip in nw_info.floating_ips()],
    }
def get_ip_info_for_instance(context, instance):
    """Return a dictionary of IP information for an instance."""
    if isinstance(instance, obj_base.NovaObject):
        nw_info = instance.info_cache.network_info
    else:
        # FIXME(comstud): Temporary as we transition to objects.
        nw_info = (instance.info_cache or {}).get('network_info')
    # An empty/missing cache is normalised to an empty list before hydration.
    return get_ip_info_for_instance_from_nw_info(nw_info or [])
def id_to_ec2_id(instance_id, template='i-%08x'):
    """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])."""
    numeric_id = int(instance_id)
    return template % numeric_id
def id_to_ec2_inst_id(instance_id):
    """Get or create an ec2 instance ID (i-[base 16 number]) from uuid."""
    if instance_id is None:
        return None
    if uuidutils.is_uuid_like(instance_id):
        # Translate the uuid to its internal integer id first.
        admin_ctxt = context.get_admin_context()
        instance_id = get_int_id_from_instance_uuid(admin_ctxt, instance_id)
    return id_to_ec2_id(instance_id)
def ec2_inst_id_to_uuid(context, ec2_id):
""""Convert an instance id to uuid."""
int_id = ec2_id_to_id(ec2_id)
return get_instance_uuid_from_int_id(context, int_id)
@memoize
def get_instance_uuid_from_int_id(context, int_id):
    """Look up the instance uuid mapped to an internal integer id."""
    return objects.EC2InstanceMapping.get_by_id(context, int_id).uuid
def id_to_ec2_snap_id(snapshot_id):
    """Get or create an ec2 snapshot ID (snap-[base 16 number]) from uuid."""
    if uuidutils.is_uuid_like(snapshot_id):
        admin_ctxt = context.get_admin_context()
        snapshot_id = get_int_id_from_snapshot_uuid(admin_ctxt, snapshot_id)
    return id_to_ec2_id(snapshot_id, 'snap-%08x')
def id_to_ec2_vol_id(volume_id):
    """Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
    if uuidutils.is_uuid_like(volume_id):
        admin_ctxt = context.get_admin_context()
        volume_id = get_int_id_from_volume_uuid(admin_ctxt, volume_id)
    return id_to_ec2_id(volume_id, 'vol-%08x')
def ec2_vol_id_to_uuid(ec2_id):
    """Get the corresponding UUID for the given ec2-id."""
    # NOTE(jgriffith) first strip prefix to get just the numeric
    internal_id = ec2_id_to_id(ec2_id)
    return get_volume_uuid_from_int_id(context.get_admin_context(),
                                       internal_id)
_ms_time_regex = re.compile('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3,6}Z$')
def status_to_ec2_attach_status(volume):
    """Get the corresponding EC2 attachment state.

    According to EC2 API, the valid attachment status in response is:
    attaching | attached | detaching | detached
    """
    volume_status = volume.get('status')
    attach_status = volume.get('attach_status')
    # A transitional volume status takes precedence over the attach status.
    if volume_status in ('attaching', 'detaching'):
        return volume_status
    if attach_status in ('attached', 'detached'):
        return attach_status
    raise exception.Invalid(
        _("Unacceptable attach status:%s for ec2 API.") % attach_status)
def is_ec2_timestamp_expired(request, expires=None):
    """Checks the timestamp or expiry time included in an EC2 request
    and returns true if the request is expired
    """
    timestamp = request.get('Timestamp')
    expiry_time = request.get('Expires')
    def parse_strtime(strtime):
        # Pick the parse format based on whether fractional seconds are
        # present (aws-sdk-java sends millisecond precision).
        if _ms_time_regex.match(strtime):
            # NOTE(MotoKen): time format for aws-sdk-java contains millisecond
            time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
        else:
            time_format = "%Y-%m-%dT%H:%M:%SZ"
        return timeutils.parse_strtime(strtime, time_format)
    try:
        # The EC2 API forbids supplying both Timestamp and Expires at once.
        if timestamp and expiry_time:
            msg = _("Request must include either Timestamp or Expires,"
                    " but cannot contain both")
            LOG.error(msg)
            raise exception.InvalidRequest(msg)
        elif expiry_time:
            query_time = parse_strtime(expiry_time)
            # Expired as soon as the Expires time is in the past.
            return timeutils.is_older_than(query_time, -1)
        elif timestamp:
            query_time = parse_strtime(timestamp)
            # Check if the difference between the timestamp in the request
            # and the time on our servers is larger than 5 minutes, the
            # request is too old (or too new).
            if query_time and expires:
                return timeutils.is_older_than(query_time, expires) or \
                    timeutils.is_newer_than(query_time, expires)
            return False
        # NOTE(review): when neither field is present this falls through and
        # implicitly returns None (falsy, i.e. treated as not expired).
    except ValueError:
        # An unparsable timestamp is treated as expired rather than raising.
        LOG.info(_LI("Timestamp is invalid."))
        return True
@memoize
def get_int_id_from_instance_uuid(context, instance_uuid):
    """Get or create the internal integer id mapped to an instance uuid."""
    if instance_uuid is None:
        return None
    try:
        return objects.EC2InstanceMapping.get_by_uuid(context,
                                                      instance_uuid).id
    except exception.NotFound:
        mapping = objects.EC2InstanceMapping(context)
        mapping.uuid = instance_uuid
        mapping.create()
        return mapping.id
@memoize
def get_int_id_from_volume_uuid(context, volume_uuid):
    """Get or create the internal integer id mapped to a volume uuid."""
    if volume_uuid is None:
        return None
    try:
        return objects.EC2VolumeMapping.get_by_uuid(context, volume_uuid).id
    except exception.NotFound:
        mapping = objects.EC2VolumeMapping(context)
        mapping.uuid = volume_uuid
        mapping.create()
        return mapping.id
@memoize
def get_volume_uuid_from_int_id(context, int_id):
    """Look up the volume uuid mapped to an internal integer id."""
    return objects.EC2VolumeMapping.get_by_id(context, int_id).uuid
def ec2_snap_id_to_uuid(ec2_id):
    """Get the corresponding UUID for the given ec2-id."""
    # NOTE(jgriffith) first strip prefix to get just the numeric
    internal_id = ec2_id_to_id(ec2_id)
    return get_snapshot_uuid_from_int_id(context.get_admin_context(),
                                         internal_id)
@memoize
def get_int_id_from_snapshot_uuid(context, snapshot_uuid):
    """Get or create the internal integer id mapped to a snapshot uuid."""
    if snapshot_uuid is None:
        return None
    try:
        return objects.EC2SnapshotMapping.get_by_uuid(context,
                                                      snapshot_uuid).id
    except exception.NotFound:
        mapping = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid)
        mapping.create()
        return mapping.id
@memoize
def get_snapshot_uuid_from_int_id(context, int_id):
    """Look up the snapshot uuid mapped to an internal integer id."""
    return objects.EC2SnapshotMapping.get_by_id(context, int_id).uuid
# Marks each camelCase word boundary: an upper-case letter following a
# lower-case one, or an upper-case letter not followed by another upper-case.
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def camelcase_to_underscore(str):
    """Convert a CamelCase name to its underscore_separated equivalent."""
    underscored = _c2u.sub(r'_\1', str)
    return underscored.lower().strip('_')
def _try_convert(value):
"""Return a non-string from a string or unicode, if possible.
============= =====================================================
When value is returns
============= =====================================================
zero-length ''
'None' None
'True' True case insensitive
'False' False case insensitive
'0', '-0' 0
0xN, -0xN int from hex (positive) (N is any number)
0bN, -0bN int from binary (positive) (N is any number)
* try conversion to int, float, complex, fallback value
"""
def _negative_zero(value):
epsilon = 1e-7
return 0 if abs(value) < epsilon else value
if len(value) == 0:
return ''
if value == 'None':
return None
lowered_value = value.lower()
if lowered_value == 'true':
return True
if lowered_value == 'false':
return False
for prefix, base in [('0x', 16), ('0b', 2), ('0', 8), ('', 10)]:
try:
if lowered_value.startswith((prefix, "-" + prefix)):
return int(lowered_value, base)
except ValueError:
pass
try:
return _negative_zero(float(value))
except ValueError:
return value
def dict_from_dotted_str(items):
    """parse multi dot-separated argument into dict.

    EBS boot uses multi dot-separated arguments like
    BlockDeviceMapping.1.DeviceName=snap-id
    Convert the above into
    {'block_device_mapping': {'1': {'device_name': snap-id}}}
    """
    args = {}
    for key, value in items:
        parts = key.split(".")
        key = str(camelcase_to_underscore(parts[0]))
        if isinstance(value, six.string_types):
            # NOTE(vish): Automatically convert strings back
            #             into their respective values
            value = _try_convert(value)
        if len(parts) > 1:
            # Walk the dotted path, creating intermediate dicts as needed;
            # 'd' is re-pointed at each deeper level as we descend.
            d = args.get(key, {})
            args[key] = d
            for k in parts[1:-1]:
                k = camelcase_to_underscore(k)
                v = d.get(k, {})
                d[k] = v
                d = v
            # The final path component holds the converted value.
            d[camelcase_to_underscore(parts[-1])] = value
        else:
            args[key] = value
    return args
def search_opts_from_filters(filters):
    """Flatten EC2 Describe* filters into a search_opts dict.

    Only the first value ('1') of each filter is used; filters whose first
    value is empty are dropped, and '-' in filter names becomes '_'.
    """
    if not filters:
        return {}
    opts = {}
    for filt in filters:
        first_value = filt['value']['1']
        if first_value:
            opts[filt['name'].replace('-', '_')] = first_value
    return opts
def regex_from_ec2_regex(ec2_re):
    """Converts an EC2-style regex to a python regex.

    Approach is based on python fnmatch.
    """
    chars = iter(ec2_re)
    parts = []
    for ch in chars:
        if ch == '*':
            parts.append('.*')
        elif ch == '?':
            parts.append('.')
        elif ch == '\\':
            # A backslash escapes the wildcard that follows it; anything
            # else keeps a literal backslash in front.
            follower = next(chars, '')
            if follower in ('*', '?'):
                parts.append('[%s]' % follower)
            else:
                parts.append('\\\\' + follower)
        else:
            parts.append(re.escape(ch))
    return '\A%s\Z(?s)' % ''.join(parts)
|
|
"""
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import uuid
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,
DateTimeInput, EmailInput, HiddenInput, MultipleHiddenInput,
NullBooleanSelect, NumberInput, Select, SelectMultiple,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,
URLInput,
)
from django.utils import formats, six
from django.utils.dateparse import parse_duration
from django.utils.deprecation import (
RemovedInDjango110Warning, RenameMethodsBase,
)
from django.utils.duration import duration_string
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField', 'DurationField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField',
)
class RenameFieldMethods(RenameMethodsBase):
    # Metaclass hook: transparently renames the deprecated ``_has_changed``
    # method to ``has_changed`` on field classes, warning with
    # RemovedInDjango110Warning when the old name is used.
    renamed_methods = (
        ('_has_changed', 'has_changed', RemovedInDjango110Warning),
    )
class Field(six.with_metaclass(RenameFieldMethods, object)):
    """Base class for all form fields: widget setup, cleaning and validation."""
    widget = TextInput # Default widget to use when rendering this type of Field.
    hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
    default_validators = [] # Default set of validators
    # Add an 'invalid' entry to default_error_message if you want a specific
    # field error message not raised by the field validators.
    default_error_messages = {
        'required': _('This field is required.'),
    }
    empty_values = list(validators.EMPTY_VALUES)
    # Tracks each time a Field instance is created. Used to retain order.
    creation_counter = 0
    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text='', error_messages=None, show_hidden_initial=False,
                 validators=[], localize=False, label_suffix=None):
        # required -- Boolean that specifies whether the field is required.
        #             True by default.
        # widget -- A Widget class, or instance of a Widget class, that should
        #           be used for this Field when displaying it. Each Field has a
        #           default Widget that it'll use if you don't specify this. In
        #           most cases, the default widget is TextInput.
        # label -- A verbose name for this field, for use in displaying this
        #          field in a form. By default, Django will use a "pretty"
        #          version of the form field name, if the Field is part of a
        #          Form.
        # initial -- A value to use in this Field's initial display. This value
        #            is *not* used as a fallback if data isn't given.
        # help_text -- An optional string to use as "help text" for this Field.
        # error_messages -- An optional dictionary to override the default
        #                   messages that the field will raise.
        # show_hidden_initial -- Boolean that specifies if it is needed to render a
        #                        hidden widget with initial value after widget.
        # validators -- List of additional validators to use
        # localize -- Boolean that specifies if the field should be localized.
        # label_suffix -- Suffix to be added to the label. Overrides
        #                 form's label_suffix.
        self.required, self.label, self.initial = required, label, initial
        self.show_hidden_initial = show_hidden_initial
        self.help_text = help_text
        self.label_suffix = label_suffix
        widget = widget or self.widget
        if isinstance(widget, type):
            widget = widget()
        # Trigger the localization machinery if needed.
        self.localize = localize
        if self.localize:
            widget.is_localized = True
        # Let the widget know whether it should display as required.
        widget.is_required = self.required
        # Hook into self.widget_attrs() for any Field-specific HTML attributes.
        extra_attrs = self.widget_attrs(widget)
        if extra_attrs:
            widget.attrs.update(extra_attrs)
        self.widget = widget
        # Increase the creation counter, and save our local copy.
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1
        # Collect error messages from the whole MRO so subclasses inherit and
        # can override individual entries; per-instance overrides win last.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
        self.validators = self.default_validators + validators
        super(Field, self).__init__()
    def prepare_value(self, value):
        """Hook for converting a Python value for display; identity here."""
        return value
    def to_python(self, value):
        """Hook for coercing raw data to a Python value; identity here."""
        return value
    def validate(self, value):
        """Raise 'required' if the field is required and the value is empty."""
        if value in self.empty_values and self.required:
            raise ValidationError(self.error_messages['required'], code='required')
    def run_validators(self, value):
        """Run all validators, collecting every error before raising."""
        if value in self.empty_values:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except ValidationError as e:
                # Prefer this field's message for a known error code.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    e.message = self.error_messages[e.code]
                errors.extend(e.error_list)
        if errors:
            raise ValidationError(errors)
    def clean(self, value):
        """
        Validates the given value and returns its "cleaned" value as an
        appropriate Python object.

        Raises ValidationError for any errors.
        """
        value = self.to_python(value)
        self.validate(value)
        self.run_validators(value)
        return value
    def bound_data(self, data, initial):
        """
        Return the value that should be shown for this field on render of a
        bound form, given the submitted POST data for the field and the initial
        data, if any.

        For most fields, this will simply be data; FileFields need to handle it
        a bit differently.
        """
        return data
    def widget_attrs(self, widget):
        """
        Given a Widget instance (*not* a Widget class), returns a dictionary of
        any HTML attributes that should be added to the Widget, based on this
        Field.
        """
        return {}
    def has_changed(self, initial, data):
        """
        Return True if data differs from initial.
        """
        # For purposes of seeing whether something has changed, None is
        # the same as an empty string, if the data or initial value we get
        # is None, replace it w/ ''.
        initial_value = initial if initial is not None else ''
        try:
            data = self.to_python(data)
            if hasattr(self, '_coerce'):
                data = self._coerce(data)
                initial_value = self._coerce(initial_value)
        except ValidationError:
            return True
        data_value = data if data is not None else ''
        return initial_value != data_value
    def __deepcopy__(self, memo):
        """Copy the field, deep-copying the widget and cloning validators."""
        result = copy.copy(self)
        memo[id(self)] = result
        result.widget = copy.deepcopy(self.widget, memo)
        result.validators = self.validators[:]
        return result
class CharField(Field):
    """A text field; optionally enforces min/max length via validators."""

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        self.max_length = max_length
        self.min_length = min_length
        super(CharField, self).__init__(*args, **kwargs)
        if min_length is not None:
            self.validators.append(
                validators.MinLengthValidator(int(min_length)))
        if max_length is not None:
            self.validators.append(
                validators.MaxLengthValidator(int(max_length)))

    def to_python(self, value):
        """Return the value coerced to text; '' for empty values."""
        if value in self.empty_values:
            return ''
        return smart_text(value)

    def widget_attrs(self, widget):
        attrs = super(CharField, self).widget_attrs(widget)
        if self.max_length is not None:
            # The HTML attribute is maxlength, not max_length.
            attrs['maxlength'] = str(self.max_length)
        return attrs
class IntegerField(Field):
    """A whole-number field rendered with a NumberInput by default."""
    widget = NumberInput
    default_error_messages = {
        'invalid': _('Enter a whole number.'),
    }
    # Strips a trailing '.000'-style decimal part before int() conversion.
    re_decimal = re.compile(r'\.0*\s*$')

    def __init__(self, max_value=None, min_value=None, *args, **kwargs):
        self.max_value = max_value
        self.min_value = min_value
        if kwargs.get('localize') and self.widget == NumberInput:
            # Localized number input is not well supported on most browsers
            kwargs.setdefault('widget', super(IntegerField, self).widget)
        super(IntegerField, self).__init__(*args, **kwargs)
        if max_value is not None:
            self.validators.append(validators.MaxValueValidator(max_value))
        if min_value is not None:
            self.validators.append(validators.MinValueValidator(min_value))

    def to_python(self, value):
        """Coerce to int, or None for empty values; raise 'invalid' otherwise."""
        value = super(IntegerField, self).to_python(value)
        if value in self.empty_values:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            return int(self.re_decimal.sub('', str(value)))
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'], code='invalid')

    def widget_attrs(self, widget):
        attrs = super(IntegerField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput):
            # Mirror the bounds as HTML min/max attributes.
            if self.min_value is not None:
                attrs['min'] = self.min_value
            if self.max_value is not None:
                attrs['max'] = self.max_value
        return attrs
class FloatField(IntegerField):
    """A floating-point number field."""
    default_error_messages = {
        'invalid': _('Enter a number.'),
    }

    def to_python(self, value):
        """Coerce to float, or None for empty values; raise 'invalid' otherwise."""
        # Deliberately skip IntegerField.to_python (which would truncate) and
        # call the grandparent Field.to_python instead.
        value = super(IntegerField, self).to_python(value)
        if value in self.empty_values:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            return float(value)
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'], code='invalid')

    def validate(self, value):
        super(FloatField, self).validate(value)
        # Check for NaN (which is the only thing not equal to itself) and +/- infinity
        if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return value

    def widget_attrs(self, widget):
        attrs = super(FloatField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
            # Let the browser accept any fractional step.
            attrs.setdefault('step', 'any')
        return attrs
class DecimalField(IntegerField):
    """A Decimal field with optional precision limits (max_digits/decimal_places)."""
    default_error_messages = {
        'invalid': _('Enter a number.'),
        'max_digits': ungettext_lazy(
            'Ensure that there are no more than %(max)s digit in total.',
            'Ensure that there are no more than %(max)s digits in total.',
            'max'),
        'max_decimal_places': ungettext_lazy(
            'Ensure that there are no more than %(max)s decimal place.',
            'Ensure that there are no more than %(max)s decimal places.',
            'max'),
        'max_whole_digits': ungettext_lazy(
            'Ensure that there are no more than %(max)s digit before the decimal point.',
            'Ensure that there are no more than %(max)s digits before the decimal point.',
            'max'),
    }
    def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
        # max_digits / decimal_places bound the precision checked in validate().
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
    def to_python(self, value):
        """
        Validates that the input is a decimal number. Returns a Decimal
        instance. Returns None for empty values. Ensures that there are no more
        than max_digits in the number, and no more than decimal_places digits
        after the decimal point.
        """
        if value in self.empty_values:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        value = smart_text(value).strip()
        try:
            value = Decimal(value)
        except DecimalException:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return value
    def validate(self, value):
        """Reject NaN/Inf and enforce the max_digits / decimal_places limits."""
        super(DecimalField, self).validate(value)
        if value in self.empty_values:
            return
        # Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
        # since it is never equal to itself. However, NaN is the only value that
        # isn't equal to itself, so we can use this to identify NaN
        if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        sign, digittuple, exponent = value.as_tuple()
        decimals = abs(exponent)
        # digittuple doesn't include any leading zeros.
        digits = len(digittuple)
        if decimals > digits:
            # We have leading zeros up to or past the decimal point. Count
            # everything past the decimal point as a digit. We do not count
            # 0 before the decimal point as a digit since that would mean
            # we would not allow max_digits = decimal_places.
            digits = decimals
        whole_digits = digits - decimals
        if self.max_digits is not None and digits > self.max_digits:
            raise ValidationError(
                self.error_messages['max_digits'],
                code='max_digits',
                params={'max': self.max_digits},
            )
        if self.decimal_places is not None and decimals > self.decimal_places:
            raise ValidationError(
                self.error_messages['max_decimal_places'],
                code='max_decimal_places',
                params={'max': self.decimal_places},
            )
        if (self.max_digits is not None and self.decimal_places is not None
                and whole_digits > (self.max_digits - self.decimal_places)):
            raise ValidationError(
                self.error_messages['max_whole_digits'],
                code='max_whole_digits',
                params={'max': (self.max_digits - self.decimal_places)},
            )
        return value
    def widget_attrs(self, widget):
        """Derive a numeric 'step' attribute for NumberInput from decimal_places."""
        attrs = super(DecimalField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
            if self.decimal_places is not None:
                # Use exponential notation for small values since they might
                # be parsed as 0 otherwise. ref #20765
                step = str(Decimal('1') / 10 ** self.decimal_places).lower()
            else:
                step = 'any'
            attrs.setdefault('step', step)
        return attrs
class BaseTemporalField(Field):
    """Shared base for date/time fields that parse text via input_formats."""

    def __init__(self, input_formats=None, *args, **kwargs):
        super(BaseTemporalField, self).__init__(*args, **kwargs)
        if input_formats is not None:
            self.input_formats = input_formats

    def to_python(self, value):
        """Try each input format in turn; raise 'invalid' if none parses."""
        # Try to coerce the value to unicode.
        unicode_value = force_text(value, strings_only=True)
        if isinstance(unicode_value, six.text_type):
            value = unicode_value.strip()
        if isinstance(value, six.text_type):
            for fmt in self.input_formats:
                try:
                    return self.strptime(value, fmt)
                except (ValueError, TypeError):
                    continue
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        # NOTE(review): non-text input falls through and implicitly returns
        # None here; subclasses handle date/time instances before calling us.

    def strptime(self, value, format):
        raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
    """A field producing datetime.date values."""
    widget = DateInput
    input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid date.'),
    }

    def to_python(self, value):
        """Return a datetime.date, None for empty values, or parse text."""
        if value in self.empty_values:
            return None
        # Check datetime first: it is a subclass of date.
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        return super(DateField, self).to_python(value)

    def strptime(self, value, format):
        parsed = datetime.datetime.strptime(force_str(value), format)
        return parsed.date()
class TimeField(BaseTemporalField):
    """A field producing datetime.time values."""
    widget = TimeInput
    input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid time.')
    }

    def to_python(self, value):
        """Return a datetime.time, None for empty values, or parse text."""
        if value in self.empty_values:
            return None
        if isinstance(value, datetime.time):
            return value
        return super(TimeField, self).to_python(value)

    def strptime(self, value, format):
        parsed = datetime.datetime.strptime(force_str(value), format)
        return parsed.time()
class DateTimeField(BaseTemporalField):
    """A field producing timezone-adjusted datetime.datetime values."""
    widget = DateTimeInput
    input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid date/time.'),
    }

    def prepare_value(self, value):
        # Shift datetimes into the current timezone for display.
        if isinstance(value, datetime.datetime):
            value = to_current_timezone(value)
        return value

    def to_python(self, value):
        """Return a datetime in the current timezone, or None for empty values."""
        if value in self.empty_values:
            return None
        # Check datetime first: it is a subclass of date.
        if isinstance(value, datetime.datetime):
            return from_current_timezone(value)
        if isinstance(value, datetime.date):
            # A bare date becomes midnight of that day.
            midnight = datetime.datetime(value.year, value.month, value.day)
            return from_current_timezone(midnight)
        parsed = super(DateTimeField, self).to_python(value)
        return from_current_timezone(parsed)

    def strptime(self, value, format):
        return datetime.datetime.strptime(force_str(value), format)
class DurationField(Field):
    """A field producing datetime.timedelta values."""
    default_error_messages = {
        'invalid': _('Enter a valid duration.'),
    }

    def prepare_value(self, value):
        # Render timedeltas in Django's duration string format.
        if isinstance(value, datetime.timedelta):
            return duration_string(value)
        return value

    def to_python(self, value):
        """Return a datetime.timedelta, or None for empty values."""
        if value in self.empty_values:
            return None
        if isinstance(value, datetime.timedelta):
            return value
        parsed = parse_duration(value)
        if parsed is None:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return parsed
class RegexField(CharField):
    def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
        """
        regex can be either a string or a compiled regular expression object.
        error_message is an optional error message to use, if
        'Enter a valid value' is too generic for you.
        """
        # error_message is just kept for backwards compatibility:
        if error_message is not None:
            warnings.warn(
                "The 'error_message' argument is deprecated. Use "
                "Field.error_messages['invalid'] instead.",
                RemovedInDjango110Warning, stacklevel=2
            )
            error_messages = kwargs.get('error_messages') or {}
            error_messages['invalid'] = error_message
            kwargs['error_messages'] = error_messages
        super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
        self._set_regex(regex)
    def _get_regex(self):
        return self._regex
    def _set_regex(self, regex):
        # Accept either a pattern string or a precompiled pattern object.
        if isinstance(regex, six.string_types):
            regex = re.compile(regex, re.UNICODE)
        self._regex = regex
        # Remove any previously installed RegexValidator so assigning to
        # .regex replaces (rather than stacks) the validation rule.
        if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
            self.validators.remove(self._regex_validator)
        self._regex_validator = validators.RegexValidator(regex=regex)
        self.validators.append(self._regex_validator)
    regex = property(_get_regex, _set_regex)
class EmailField(CharField):
    """A CharField validated as an email address."""
    widget = EmailInput
    default_validators = [validators.validate_email]

    def clean(self, value):
        # Strip surrounding whitespace before the standard CharField cleaning.
        stripped = self.to_python(value).strip()
        return super(EmailField, self).clean(stripped)
class FileField(Field):
    """A field for uploaded files; validates filename length and emptiness."""
    widget = ClearableFileInput
    default_error_messages = {
        'invalid': _("No file was submitted. Check the encoding type on the form."),
        'missing': _("No file was submitted."),
        'empty': _("The submitted file is empty."),
        'max_length': ungettext_lazy(
            'Ensure this filename has at most %(max)d character (it has %(length)d).',
            'Ensure this filename has at most %(max)d characters (it has %(length)d).',
            'max'),
        'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
    }
    def __init__(self, *args, **kwargs):
        # max_length bounds the filename length; allow_empty_file permits
        # zero-byte uploads.
        self.max_length = kwargs.pop('max_length', None)
        self.allow_empty_file = kwargs.pop('allow_empty_file', False)
        super(FileField, self).__init__(*args, **kwargs)
    def to_python(self, data):
        """Validate the uploaded file object and return it; None if empty."""
        if data in self.empty_values:
            return None
        # UploadedFile objects should have name and size attributes.
        try:
            file_name = data.name
            file_size = data.size
        except AttributeError:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if self.max_length is not None and len(file_name) > self.max_length:
            params = {'max': self.max_length, 'length': len(file_name)}
            raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
        if not file_name:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if not self.allow_empty_file and not file_size:
            raise ValidationError(self.error_messages['empty'], code='empty')
        return data
    def clean(self, data, initial=None):
        """Clean the upload, honouring the widget's 'clear' checkbox semantics."""
        # If the widget got contradictory inputs, we raise a validation error
        if data is FILE_INPUT_CONTRADICTION:
            raise ValidationError(self.error_messages['contradiction'], code='contradiction')
        # False means the field value should be cleared; further validation is
        # not needed.
        if data is False:
            if not self.required:
                return False
            # If the field is required, clearing is not possible (the widget
            # shouldn't return False data in that case anyway). False is not
            # in self.empty_value; if a False value makes it this far
            # it should be validated from here on out as None (so it will be
            # caught by the required check).
            data = None
        if not data and initial:
            return initial
        return super(FileField, self).clean(data)
    def bound_data(self, data, initial):
        # Re-show the initial file when nothing new (or a contradiction)
        # was submitted.
        if data in (None, FILE_INPUT_CONTRADICTION):
            return initial
        return data
    def has_changed(self, initial, data):
        # Any submitted upload counts as a change; no upload means unchanged.
        if data is None:
            return False
        return True
class ImageField(FileField):
    """FileField that additionally verifies the upload is a valid image."""
    default_error_messages = {
        'invalid_image': _(
            "Upload a valid image. The file you uploaded was either not an "
            "image or a corrupted image."
        ),
    }
    def to_python(self, data):
        """
        Checks that the file-upload field data contains a valid image (GIF, JPG,
        PNG, possibly others -- whatever the Python Imaging Library supports).
        """
        f = super(ImageField, self).to_python(data)
        if f is None:
            return None
        from PIL import Image
        # We need to get a file object for Pillow. We might have a path or we might
        # have to read the data into memory.
        if hasattr(data, 'temporary_file_path'):
            file = data.temporary_file_path()
        else:
            if hasattr(data, 'read'):
                file = BytesIO(data.read())
            else:
                file = BytesIO(data['content'])
        try:
            # load() could spot a truncated JPEG, but it loads the entire
            # image in memory, which is a DoS vector. See #3848 and #18520.
            image = Image.open(file)
            # verify() must be called immediately after the constructor.
            image.verify()
            # Annotating so subclasses can reuse it for their own validation
            f.image = image
            # Pillow doesn't detect the MIME type of all formats. In those
            # cases, content_type will be None.
            f.content_type = Image.MIME.get(image.format)
        except Exception:
            # Pillow doesn't recognize it as an image.
            six.reraise(ValidationError, ValidationError(
                self.error_messages['invalid_image'],
                code='invalid_image',
            ), sys.exc_info()[2])
        if hasattr(f, 'seek') and callable(f.seek):
            # verify() may have consumed the stream; rewind so later reads
            # (e.g. saving the upload) start from the beginning.
            f.seek(0)
        return f
class URLField(CharField):
    """CharField that normalizes and validates URLs."""
    widget = URLInput
    default_error_messages = {
        'invalid': _('Enter a valid URL.'),
    }
    default_validators = [validators.URLValidator()]
    def to_python(self, value):
        """Normalize the URL: default the scheme to http:// and repair a URL
        whose domain was parsed into the path segment."""
        def split_url(url):
            """
            Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain).
            """
            try:
                return list(urlsplit(url))
            except ValueError:
                # urlparse.urlsplit can raise a ValueError with some
                # misformatted URLs.
                raise ValidationError(self.error_messages['invalid'], code='invalid')
        value = super(URLField, self).to_python(value)
        if value:
            url_fields = split_url(value)
            if not url_fields[0]:
                # If no URL scheme given, assume http://
                url_fields[0] = 'http'
            if not url_fields[1]:
                # Assume that if no domain is provided, that the path segment
                # contains the domain.
                url_fields[1] = url_fields[2]
                url_fields[2] = ''
                # Rebuild the url_fields list, since the domain segment may now
                # contain the path too.
                url_fields = split_url(urlunsplit(url_fields))
            value = urlunsplit(url_fields)
        return value
    def clean(self, value):
        # Strip surrounding whitespace before the URLValidator runs.
        value = self.to_python(value).strip()
        return super(URLField, self).clean(value)
class BooleanField(Field):
    """Field rendered as a single checkbox."""
    widget = CheckboxInput
    def to_python(self, value):
        """Return a Python boolean object."""
        # A hidden field submits the string 'False' for False and a
        # RadioSelect submits '0'; both must coerce to False.  All other
        # values are handled correctly by bool() itself, since
        # bool("True") == bool('1') == True.
        if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
            coerced = False
        else:
            coerced = bool(value)
        return super(BooleanField, self).to_python(coerced)
    def validate(self, value):
        """A required boolean field must be checked (i.e. True)."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
    def has_changed(self, initial, data):
        # show_hidden_initial may have transformed False to 'False'; data or
        # initial may also be None or '', which mean the same thing as False.
        if initial == 'False':
            initial = False
        return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
    """
    A field whose valid values are None, True and False. Invalid values are
    cleaned to None.
    """
    widget = NullBooleanSelect
    def to_python(self, value):
        """Map the widget's serialized values back onto True/False/None.

        Hidden fields submit 'True'/'False', JavaScript serializations of
        forms typically produce 'true'/'false', and a RadioField submits
        '1'/'0'.  Unlike BooleanField, True is checked explicitly because
        bool() is not used here.
        """
        if value in (False, 'False', 'false', '0'):
            return False
        if value in (True, 'True', 'true', '1'):
            return True
        return None
    def validate(self, value):
        # All three of None/True/False are acceptable; nothing to check.
        pass
    def has_changed(self, initial, data):
        # None (unknown) and False (No) are distinct values here, so only
        # normalize non-None values through bool() before comparing.
        if initial is not None:
            initial = bool(initial)
        if data is not None:
            data = bool(data)
        return initial != data
class CallableChoiceIterator(object):
    """Iterable wrapper that re-invokes ``choices_func`` on every iteration,
    so callable choices are evaluated lazily and always fresh."""
    def __init__(self, choices_func):
        self.choices_func = choices_func
    def __iter__(self):
        return iter(self.choices_func())
class ChoiceField(Field):
    """Field rendered as a <select>; ``choices`` may be an iterable of
    2-tuples (or grouped 3-level tuples) or a callable returning one."""
    widget = Select
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
    }
    def __init__(self, choices=(), required=True, widget=None, label=None,
                 initial=None, help_text='', *args, **kwargs):
        super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
                                          initial=initial, help_text=help_text, *args, **kwargs)
        self.choices = choices
    def __deepcopy__(self, memo):
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Choices must be deep-copied too so a copied field can mutate them
        # without affecting the original.
        result._choices = copy.deepcopy(self._choices, memo)
        return result
    def _get_choices(self):
        return self._choices
    def _set_choices(self, value):
        # Setting choices also sets the choices on the widget.
        # choices can be any iterable, but we call list() on it because
        # it will be consumed more than once.
        if callable(value):
            value = CallableChoiceIterator(value)
        else:
            value = list(value)
        self._choices = self.widget.choices = value
    choices = property(_get_choices, _set_choices)
    def to_python(self, value):
        "Returns a Unicode object."
        if value in self.empty_values:
            return ''
        return smart_text(value)
    def validate(self, value):
        """
        Validates that the input is in self.choices.
        """
        super(ChoiceField, self).validate(value)
        if value and not self.valid_value(value):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
    def valid_value(self, value):
        "Check to see if the provided value is a valid choice"
        # Compare both by raw equality and by text form, since submitted
        # data arrives as strings while choice keys may be other types.
        text_value = force_text(value)
        for k, v in self.choices:
            if isinstance(v, (list, tuple)):
                # This is an optgroup, so look inside the group for options
                for k2, v2 in v:
                    if value == k2 or text_value == force_text(k2):
                        return True
            else:
                if value == k or text_value == force_text(k):
                    return True
        return False
class TypedChoiceField(ChoiceField):
    """ChoiceField that coerces the chosen value to an arbitrary type."""
    def __init__(self, *args, **kwargs):
        # ``coerce`` converts the cleaned choice; ``empty_value`` is what an
        # empty selection cleans to.
        self.coerce = kwargs.pop('coerce', lambda val: val)
        self.empty_value = kwargs.pop('empty_value', '')
        super(TypedChoiceField, self).__init__(*args, **kwargs)
    def _coerce(self, value):
        """
        Validate that the value can be coerced to the right type (if not empty).
        """
        if value == self.empty_value or value in self.empty_values:
            return self.empty_value
        try:
            return self.coerce(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
    def clean(self, value):
        return self._coerce(super(TypedChoiceField, self).clean(value))
class MultipleChoiceField(ChoiceField):
    """ChoiceField accepting a list of selections."""
    hidden_widget = MultipleHiddenInput
    widget = SelectMultiple
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
        'invalid_list': _('Enter a list of values.'),
    }
    def to_python(self, value):
        """Coerce the submitted value to a list of text values."""
        if not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
        return [smart_text(item) for item in value]
    def validate(self, value):
        """Check that a list was submitted and every entry is a valid choice."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        for item in value:
            if not self.valid_value(item):
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': item},
                )
    def has_changed(self, initial, data):
        """Compare initial and submitted data as unordered sets of text."""
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        return set(force_text(v) for v in data) != set(force_text(v) for v in initial)
class TypedMultipleChoiceField(MultipleChoiceField):
    """MultipleChoiceField whose values are coerced to an arbitrary type."""
    def __init__(self, *args, **kwargs):
        self.coerce = kwargs.pop('coerce', lambda val: val)
        self.empty_value = kwargs.pop('empty_value', [])
        super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
    def _coerce(self, value):
        """
        Validates that the values are in self.choices and can be coerced to the
        right type.
        """
        if value == self.empty_value or value in self.empty_values:
            return self.empty_value
        coerced = []
        for choice in value:
            try:
                coerced.append(self.coerce(choice))
            except (ValueError, TypeError, ValidationError):
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': choice},
                )
        return coerced
    def clean(self, value):
        return self._coerce(super(TypedMultipleChoiceField, self).clean(value))
    def validate(self, value):
        # An empty (but allowed) value skips per-choice validation entirely.
        if value != self.empty_value:
            super(TypedMultipleChoiceField, self).validate(value)
        elif self.required:
            raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
    """
    A Field whose clean() method calls multiple Field clean() methods.
    """
    def __init__(self, fields=(), *args, **kwargs):
        super(ComboField, self).__init__(*args, **kwargs)
        # Required-ness is enforced once by the ComboField itself, so the
        # wrapped fields must not enforce it a second time.
        for field in fields:
            field.required = False
        self.fields = fields
    def clean(self, value):
        """Run the value through every wrapped field's clean(), in order."""
        super(ComboField, self).clean(value)
        for field in self.fields:
            value = field.clean(value)
        return value
class MultiValueField(Field):
    """
    A Field that aggregates the logic of multiple Fields.
    Its clean() method takes a "decompressed" list of values, which are then
    cleaned into a single value according to self.fields. Each value in
    this list is cleaned by the corresponding field -- the first value is
    cleaned by the first field, the second value is cleaned by the second
    field, etc. Once all fields are cleaned, the list of clean values is
    "compressed" into a single value.
    Subclasses should not have to implement clean(). Instead, they must
    implement compress(), which takes a list of valid values and returns a
    "compressed" version of those values -- a single value.
    You'll probably want to use this with MultiWidget.
    """
    default_error_messages = {
        'invalid': _('Enter a list of values.'),
        'incomplete': _('Enter a complete value.'),
    }
    def __init__(self, fields=(), *args, **kwargs):
        # require_all_fields=True yields one 'required' error for the whole
        # field; False lets each sub-field report 'incomplete' individually.
        self.require_all_fields = kwargs.pop('require_all_fields', True)
        super(MultiValueField, self).__init__(*args, **kwargs)
        for f in fields:
            # Propagate our 'incomplete' message to sub-fields that don't
            # define their own.
            f.error_messages.setdefault('incomplete',
                                        self.error_messages['incomplete'])
            if self.require_all_fields:
                # Set 'required' to False on the individual fields, because the
                # required validation will be handled by MultiValueField, not
                # by those individual fields.
                f.required = False
        self.fields = fields
    def __deepcopy__(self, memo):
        result = super(MultiValueField, self).__deepcopy__(memo)
        # Sub-fields are mutable (error_messages, choices); copy them too.
        result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
        return result
    def validate(self, value):
        # Per-sub-field validation happens inside clean(); the compressed
        # value gets no extra checks by default.
        pass
    def clean(self, value):
        """
        Validates every value in the given list. A value is validated against
        the corresponding Field in self.fields.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), clean() would call
        DateField.clean(value[0]) and TimeField.clean(value[1]).
        """
        clean_data = []
        errors = []
        if not value or isinstance(value, (list, tuple)):
            # Entirely empty input: either fail 'required' or compress([]).
            if not value or not [v for v in value if v not in self.empty_values]:
                if self.required:
                    raise ValidationError(self.error_messages['required'], code='required')
                else:
                    return self.compress([])
        else:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        for i, field in enumerate(self.fields):
            try:
                field_value = value[i]
            except IndexError:
                # Shorter-than-expected input: treat missing items as empty.
                field_value = None
            if field_value in self.empty_values:
                if self.require_all_fields:
                    # Raise a 'required' error if the MultiValueField is
                    # required and any field is empty.
                    if self.required:
                        raise ValidationError(self.error_messages['required'], code='required')
                elif field.required:
                    # Otherwise, add an 'incomplete' error to the list of
                    # collected errors and skip field cleaning, if a required
                    # field is empty.
                    if field.error_messages['incomplete'] not in errors:
                        errors.append(field.error_messages['incomplete'])
                    continue
            try:
                clean_data.append(field.clean(field_value))
            except ValidationError as e:
                # Collect all validation errors in a single list, which we'll
                # raise at the end of clean(), rather than raising a single
                # exception for the first error we encounter. Skip duplicates.
                errors.extend(m for m in e.error_list if m not in errors)
        if errors:
            raise ValidationError(errors)
        out = self.compress(clean_data)
        self.validate(out)
        self.run_validators(out)
        return out
    def compress(self, data_list):
        """
        Returns a single value for the given list of values. The values can be
        assumed to be valid.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), this might return a datetime
        object created by combining the date and time in data_list.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def has_changed(self, initial, data):
        if initial is None:
            initial = ['' for x in range(0, len(data))]
        else:
            if not isinstance(initial, list):
                initial = self.widget.decompress(initial)
        # NOTE(review): the loop below rebinds the ``initial`` and ``data``
        # parameters as its loop variables, and zip() silently truncates to
        # the shortest sequence. Behavior kept as-is.
        for field, initial, data in zip(self.fields, initial, data):
            try:
                initial = field.to_python(initial)
            except ValidationError:
                return True
            if field.has_changed(initial, data):
                return True
        return False
class FilePathField(ChoiceField):
    """ChoiceField whose choices are files and/or folders beneath ``path``,
    optionally filtered by the ``match`` regex and walked recursively."""
    def __init__(self, path, match=None, recursive=False, allow_files=True,
                 allow_folders=False, required=True, widget=None, label=None,
                 initial=None, help_text='', *args, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        self.allow_files, self.allow_folders = allow_files, allow_folders
        super(FilePathField, self).__init__(choices=(), required=required,
            widget=widget, label=label, initial=initial, help_text=help_text,
            *args, **kwargs)
        if self.required:
            self.choices = []
        else:
            # Optional fields get an explicit "no selection" entry.
            self.choices = [("", "---------")]
        if self.match is not None:
            self.match_re = re.compile(self.match)
        if recursive:
            for root, dirs, files in sorted(os.walk(self.path)):
                if self.allow_files:
                    for f in files:
                        if self.match is None or self.match_re.search(f):
                            f = os.path.join(root, f)
                            # Display label is the path relative to ``path``.
                            self.choices.append((f, f.replace(path, "", 1)))
                if self.allow_folders:
                    for f in dirs:
                        if f == '__pycache__':
                            continue
                        if self.match is None or self.match_re.search(f):
                            f = os.path.join(root, f)
                            self.choices.append((f, f.replace(path, "", 1)))
        else:
            try:
                for f in sorted(os.listdir(self.path)):
                    if f == '__pycache__':
                        continue
                    full_file = os.path.join(self.path, f)
                    if (((self.allow_files and os.path.isfile(full_file)) or
                            (self.allow_folders and os.path.isdir(full_file))) and
                            (self.match is None or self.match_re.search(f))):
                        self.choices.append((full_file, f))
            except OSError:
                # An unreadable or missing directory simply yields no choices.
                pass
        self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
    """MultiValueField combining a DateField and a TimeField into a
    single timezone-aware datetime value."""
    widget = SplitDateTimeWidget
    hidden_widget = SplitHiddenDateTimeWidget
    default_error_messages = {
        'invalid_date': _('Enter a valid date.'),
        'invalid_time': _('Enter a valid time.'),
    }
    def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
        errors = self.default_error_messages.copy()
        if 'error_messages' in kwargs:
            # Caller-supplied messages override the defaults used by the
            # two sub-fields below.
            errors.update(kwargs['error_messages'])
        localize = kwargs.get('localize', False)
        fields = (
            DateField(input_formats=input_date_formats,
                      error_messages={'invalid': errors['invalid_date']},
                      localize=localize),
            TimeField(input_formats=input_time_formats,
                      error_messages={'invalid': errors['invalid_time']},
                      localize=localize),
        )
        super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
    def compress(self, data_list):
        """Combine the cleaned [date, time] pair into one aware datetime."""
        if data_list:
            # Raise a validation error if time or date is empty
            # (possible if SplitDateTimeField has required=False).
            if data_list[0] in self.empty_values:
                raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
            if data_list[1] in self.empty_values:
                raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
            result = datetime.datetime.combine(*data_list)
            return from_current_timezone(result)
        return None
class GenericIPAddressField(CharField):
    """CharField accepting IPv4 and/or IPv6 addresses, per ``protocol``."""
    def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
        # unpack_ipv4 controls whether ::ffff:a.b.c.d is unpacked to a.b.c.d.
        self.unpack_ipv4 = unpack_ipv4
        self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
        super(GenericIPAddressField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        if value in self.empty_values:
            return ''
        stripped = value.strip()
        # Only IPv6 addresses contain colons; normalize those explicitly.
        if stripped and ':' in stripped:
            return clean_ipv6_address(stripped, self.unpack_ipv4)
        return stripped
class SlugField(CharField):
    """CharField validated as a slug (letters, digits, underscores, hyphens)."""
    default_validators = [validators.validate_slug]
    def clean(self, value):
        # Strip surrounding whitespace before the slug validator runs.
        stripped = self.to_python(value).strip()
        return super(SlugField, self).clean(stripped)
class UUIDField(CharField):
    """CharField that cleans its input to a ``uuid.UUID`` instance."""
    default_error_messages = {
        'invalid': _('Enter a valid UUID.'),
    }
    def prepare_value(self, value):
        """Render UUID instances as their 32-character hex form."""
        return value.hex if isinstance(value, uuid.UUID) else value
    def to_python(self, value):
        """Coerce the input to a uuid.UUID, or None for empty values."""
        value = super(UUIDField, self).to_python(value)
        if value in self.empty_values:
            return None
        if isinstance(value, uuid.UUID):
            return value
        try:
            return uuid.UUID(value)
        except ValueError:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
|
|
# sqlalchemy/ext/baked.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Baked query extension.
Provides a creational pattern for the :class:`.query.Query` object which
allows the fully constructed object, Core select statement, and string
compiled result to be fully cached.
"""
from ..orm.query import Query
from ..orm import strategies, attributes, properties, \
strategy_options, util as orm_util, interfaces
from .. import log as sqla_log
from ..sql import util as sql_util
from ..orm import exc as orm_exc
from .. import exc as sa_exc
from .. import util
import copy
import logging
log = logging.getLogger(__name__)
class BakedQuery(object):
    """A builder object for :class:`.query.Query` objects."""
    __slots__ = 'steps', '_bakery', '_cache_key', '_spoiled'
    def __init__(self, bakery, initial_fn, args=()):
        # The cache key accumulates the code objects of every creational
        # function (plus caller-supplied discriminator args).
        self._cache_key = ()
        self._update_cache_key(initial_fn, args)
        self.steps = [initial_fn]
        self._spoiled = False
        self._bakery = bakery
    @classmethod
    def bakery(cls, size=200):
        """Construct a new bakery.

        Returns a callable that builds :class:`.BakedQuery` instances all
        sharing a single LRU cache of at most ``size`` entries.
        """
        _bakery = util.LRUCache(size)
        def call(initial_fn, *args):
            return cls(_bakery, initial_fn, args)
        return call
    def _clone(self):
        # Shallow copy: the steps list is copied, the bakery is shared.
        b1 = BakedQuery.__new__(BakedQuery)
        b1._cache_key = self._cache_key
        b1.steps = list(self.steps)
        b1._bakery = self._bakery
        b1._spoiled = self._spoiled
        return b1
    def _update_cache_key(self, fn, args=()):
        # fn.__code__ identifies the function by its compiled code object,
        # so two lambdas at the same source location share a key.
        self._cache_key += (fn.__code__,) + args
    def __iadd__(self, other):
        # bq += fn  or  bq += (fn, arg1, ...)
        if isinstance(other, tuple):
            self.add_criteria(*other)
        else:
            self.add_criteria(other)
        return self
    def __add__(self, other):
        # bq + fn returns a new BakedQuery, leaving this one untouched.
        if isinstance(other, tuple):
            return self.with_criteria(*other)
        else:
            return self.with_criteria(other)
    def add_criteria(self, fn, *args):
        """Add a criteria function to this :class:`.BakedQuery`.
        This is equivalent to using the ``+=`` operator to
        modify a :class:`.BakedQuery` in-place.
        """
        self._update_cache_key(fn, args)
        self.steps.append(fn)
        return self
    def with_criteria(self, fn, *args):
        """Add a criteria function to a :class:`.BakedQuery` cloned from this one.
        This is equivalent to using the ``+`` operator to
        produce a new :class:`.BakedQuery` with modifications.
        """
        return self._clone().add_criteria(fn, *args)
    def for_session(self, session):
        """Return a :class:`.Result` object for this :class:`.BakedQuery`.
        This is equivalent to calling the :class:`.BakedQuery` as a
        Python callable, e.g. ``result = my_baked_query(session)``.
        """
        return Result(self, session)
    def __call__(self, session):
        return self.for_session(session)
    def spoil(self, full=False):
        """Cancel any query caching that will occur on this BakedQuery object.
        The BakedQuery can continue to be used normally, however additional
        creational functions will not be cached; they will be called
        on every invocation.
        This is to support the case where a particular step in constructing
        a baked query disqualifies the query from being cacheable, such
        as a variant that relies upon some uncacheable value.
        :param full: if False, only functions added to this
        :class:`.BakedQuery` object subsequent to the spoil step will be
        non-cached; the state of the :class:`.BakedQuery` up until
        this point will be pulled from the cache.   If True, then the
        entire :class:`.Query` object is built from scratch each
        time, with all creational functions being called on each
        invocation.
        """
        if not full:
            # Replace the steps so far with a single step that pulls the
            # partially-built Query from the cache; later steps run fresh.
            _spoil_point = self._clone()
            _spoil_point._cache_key += ('_query_only', )
            self.steps = [_spoil_point._retrieve_baked_query]
        self._spoiled = True
        return self
    def _retrieve_baked_query(self, session):
        # Cache the constructed (session-less) Query object itself, as
        # opposed to the compiled context cached by _bake().
        query = self._bakery.get(self._cache_key, None)
        if query is None:
            query = self._as_query(session)
            self._bakery[self._cache_key] = query.with_session(None)
        return query.with_session(session)
    def _bake(self, session):
        query = self._as_query(session)
        context = query._compile_context()
        self._bake_subquery_loaders(session, context)
        # Detach from the session so the cached context is reusable.
        context.session = None
        context.query = query = context.query.with_session(None)
        query._execution_options = query._execution_options.union(
            {"compiled_cache": self._bakery}
        )
        # we'll be holding onto the query for some of its state,
        # so delete some compilation-use-only attributes that can take up
        # space
        for attr in (
                '_correlate', '_from_obj', '_mapper_adapter_map',
                '_joinpath', '_joinpoint'):
            query.__dict__.pop(attr, None)
        self._bakery[self._cache_key] = context
        return context
    def _as_query(self, session):
        # Build the Query by running the initial step, then threading the
        # result through every subsequent criteria function.
        query = self.steps[0](session)
        for step in self.steps[1:]:
            query = step(query)
        return query
    def _bake_subquery_loaders(self, session, context):
        """convert subquery eager loaders in the cache into baked queries.
        For subquery eager loading to work, all we need here is that the
        Query point to the correct session when it is run.  However, since
        we are "baking" anyway, we may as well also turn the query into
        a "baked" query so that we save on performance too.
        """
        context.attributes['baked_queries'] = baked_queries = []
        for k, v in list(context.attributes.items()):
            if isinstance(v, Query):
                if 'subquery' in k:
                    bk = BakedQuery(self._bakery, lambda *args: v)
                    bk._cache_key = self._cache_key + k
                    bk._bake(session)
                    baked_queries.append((k, bk._cache_key, v))
                del context.attributes[k]
    def _unbake_subquery_loaders(self, session, context, params):
        """Retrieve subquery eager loaders stored by _bake_subquery_loaders
        and turn them back into Result objects that will iterate just
        like a Query object.
        """
        for k, cache_key, query in context.attributes["baked_queries"]:
            bk = BakedQuery(self._bakery, lambda sess: query.with_session(sess))
            bk._cache_key = cache_key
            context.attributes[k] = bk.for_session(session).params(**params)
class Result(object):
    """Invokes a :class:`.BakedQuery` against a :class:`.Session`.
    The :class:`.Result` object is where the actual :class:`.query.Query`
    object gets created, or retrieved from the cache,
    against a target :class:`.Session`, and is then invoked for results.
    """
    __slots__ = 'bq', 'session', '_params'
    def __init__(self, bq, session):
        self.bq = bq
        self.session = session
        self._params = {}
    def params(self, *args, **kw):
        """Specify parameters to be replaced into the string SQL statement."""
        if len(args) == 1:
            kw.update(args[0])
        elif len(args) > 0:
            raise sa_exc.ArgumentError(
                "params() takes zero or one positional argument, "
                "which is a dictionary.")
        self._params.update(kw)
        return self
    def _as_query(self):
        # Fully construct the Query, bypassing the cache (used by __str__
        # and by spoiled queries).
        return self.bq._as_query(self.session).params(self._params)
    def __str__(self):
        return str(self._as_query())
    def __iter__(self):
        bq = self.bq
        if bq._spoiled:
            # A fully-spoiled query is rebuilt from scratch on every run.
            return iter(self._as_query())
        baked_context = bq._bakery.get(bq._cache_key, None)
        if baked_context is None:
            baked_context = bq._bake(self.session)
        # Work on a copy of the cached context so per-invocation state
        # (session, attributes) does not pollute the cache.
        context = copy.copy(baked_context)
        context.session = self.session
        context.attributes = context.attributes.copy()
        bq._unbake_subquery_loaders(self.session, context, self._params)
        context.statement.use_labels = True
        if context.autoflush and not context.populate_existing:
            self.session._autoflush()
        return context.query.params(self._params).\
            with_session(self.session)._execute_and_instances(context)
    def first(self):
        """Return the first row.
        Equivalent to :meth:`.Query.first`.
        """
        # slice(0, 1) becomes part of the cache key, so the LIMITed form
        # is cached separately from the unlimited one.
        bq = self.bq.with_criteria(lambda q: q.slice(0, 1))
        ret = list(bq.for_session(self.session).params(self._params))
        if len(ret) > 0:
            return ret[0]
        else:
            return None
    def one(self):
        """Return exactly one result or raise an exception.
        Equivalent to :meth:`.Query.one`.
        """
        ret = list(self)
        l = len(ret)
        if l == 1:
            return ret[0]
        elif l == 0:
            raise orm_exc.NoResultFound("No row was found for one()")
        else:
            raise orm_exc.MultipleResultsFound(
                "Multiple rows were found for one()")
    def all(self):
        """Return all rows.
        Equivalent to :meth:`.Query.all`.
        """
        return list(self)
    def get(self, ident):
        """Retrieve an object based on identity.
        Equivalent to :meth:`.Query.get`.
        """
        query = self.bq.steps[0](self.session)
        return query._get_impl(ident, self._load_on_ident)
    def _load_on_ident(self, query, key):
        """Load the given identity key from the database."""
        ident = key[1]
        mapper = query._mapper_zero()
        _get_clause, _get_params = mapper._get_clause
        def setup(query):
            _lcl_get_clause = _get_clause
            q = query._clone()
            q._get_condition()
            q._order_by = None
            # None present in ident - turn those comparisons
            # into "IS NULL"
            if None in ident:
                nones = set([
                    _get_params[col].key for col, value in
                    zip(mapper.primary_key, ident) if value is None
                ])
                _lcl_get_clause = sql_util.adapt_criterion_to_null(
                    _lcl_get_clause, nones)
            _lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False)
            q._criterion = _lcl_get_clause
            return q
        # cache the query against a key that includes
        # which positions in the primary key are NULL
        # (remember, we can map to an OUTER JOIN)
        bq = self.bq
        bq = bq.with_criteria(setup, tuple(elem is None for elem in ident))
        params = dict([
            (_get_params[primary_key].key, id_val)
            for id_val, primary_key in zip(ident, mapper.primary_key)
        ])
        result = list(bq.for_session(self.session).params(**params))
        l = len(result)
        if l > 1:
            raise orm_exc.MultipleResultsFound()
        elif l:
            return result[0]
        else:
            return None
def bake_lazy_loaders():
    """Enable the use of baked queries for all lazyloaders systemwide.
    This operation should be safe for all lazy loaders, and will reduce
    Python overhead for these operations.
    """
    # Clear both strategy-key registries first, so the strategy_for()
    # calls below register BakedLazyLoader as the sole handler for each
    # lazy= value.
    strategies.LazyLoader._strategy_keys[:] = []
    BakedLazyLoader._strategy_keys[:] = []
    properties.RelationshipProperty.strategy_for(
        lazy="select")(BakedLazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy=True)(BakedLazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy="baked_select")(BakedLazyLoader)
def unbake_lazy_loaders():
    """Disable the use of baked queries for all lazyloaders systemwide.
    This operation reverts the changes produced by :func:`.bake_lazy_loaders`.
    """
    # Reset both registries, then re-register the plain LazyLoader for the
    # standard lazy= values; "baked_select" stays routed to BakedLazyLoader.
    strategies.LazyLoader._strategy_keys[:] = []
    BakedLazyLoader._strategy_keys[:] = []
    properties.RelationshipProperty.strategy_for(
        lazy="select")(strategies.LazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy=True)(strategies.LazyLoader)
    properties.RelationshipProperty.strategy_for(
        lazy="baked_select")(BakedLazyLoader)
    # Sanity check that the plain LazyLoader was actually re-registered.
    assert strategies.LazyLoader._strategy_keys
@sqla_log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="baked_select")
class BakedLazyLoader(strategies.LazyLoader):
    """LazyLoader variant that issues its SELECT via a BakedQuery so the
    constructed/compiled query is cached across loads."""
    def _emit_lazyload(self, session, state, ident_key, passive):
        q = BakedQuery(
            self.mapper._compiled_cache,
            lambda session: session.query(self.mapper))
        q.add_criteria(
            lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False),
            self.parent_property)
        if not self.parent_property.bake_queries:
            # Relationship opted out of baking entirely.
            q.spoil(full=True)
        if self.parent_property.secondary is not None:
            q.add_criteria(
                lambda q:
                q.select_from(self.mapper, self.parent_property.secondary))
        pending = not state.key
        # don't autoflush on pending
        if pending or passive & attributes.NO_AUTOFLUSH:
            q.add_criteria(lambda q: q.autoflush(False))
        if state.load_path:
            # Load path varies per-object, so steps after this point
            # cannot be cached.
            q.spoil()
            q.add_criteria(
                lambda q:
                q._with_current_path(state.load_path[self.parent_property]))
        if state.load_options:
            q.spoil()
            q.add_criteria(
                lambda q: q._conditional_options(*state.load_options))
        if self.use_get:
            # Simple FK->PK load: delegate to the identity-map-aware path.
            return q(session)._load_on_ident(
                session.query(self.mapper), ident_key)
        if self.parent_property.order_by:
            q.add_criteria(
                lambda q:
                q.order_by(*util.to_list(self.parent_property.order_by)))
        for rev in self.parent_property._reverse_property:
            # reverse props that are MANYTOONE are loading *this*
            # object from get(), so don't need to eager out to those.
            if rev.direction is interfaces.MANYTOONE and \
                    rev._use_get and \
                    not isinstance(rev.strategy, strategies.LazyLoader):
                q.add_criteria(
                    lambda q:
                    q.options(
                        strategy_options.Load(
                            rev.parent).baked_lazyload(rev.key)))
        lazy_clause, params = self._generate_lazy_clause(state, passive)
        if pending:
            # A pending parent with any NULL in its (not yet flushed) key
            # cannot have related rows; skip the query.
            if orm_util._none_set.intersection(params.values()):
                return None
        q.add_criteria(lambda q: q.filter(lazy_clause))
        result = q(session).params(**params).all()
        if self.uselist:
            return result
        else:
            l = len(result)
            if l:
                if l > 1:
                    util.warn(
                        "Multiple rows returned with "
                        "uselist=False for lazily-loaded attribute '%s' "
                        % self.parent_property)
                return result[0]
            else:
                return None
@strategy_options.loader_option()
def baked_lazyload(loadopt, attr):
    """Indicate that the given attribute should be loaded using "lazy"
    loading with a "baked" query used in the load.
    """
    return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"})
@baked_lazyload._add_unbound_fn
def baked_lazyload(*keys):
    # Unbound form: baked_lazyload("rel1", "rel2") without a Load object.
    return strategy_options._UnboundLoad._from_keys(
        strategy_options._UnboundLoad.baked_lazyload, keys, False, {})
@baked_lazyload._add_unbound_all_fn
def baked_lazyload_all(*keys):
    # Unbound "all" form: applies the option along the entire key chain.
    return strategy_options._UnboundLoad._from_keys(
        strategy_options._UnboundLoad.baked_lazyload, keys, True, {})
# Re-export the unbound variants as the public module-level functions;
# the Load-bound versions remain reachable as methods on Load objects.
baked_lazyload = baked_lazyload._unbound_fn
baked_lazyload_all = baked_lazyload_all._unbound_all_fn
# Public entry point for creating a bakery callable.
bakery = BakedQuery.bakery
|
|
# -----------------------------------------------------------------------------
# Copyright © 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import os
import ee
import math
import numpy
import scipy
import scipy.special
import scipy.optimize
import histogram
import matplotlib
#matplotlib.use('tkagg')
import matplotlib.pyplot as plt
from cmt.mapclient_qt import addToMap
#------------------------------------------------------------------------
''' sar_martinis radar algorithm (find threshold by histogram splits on selected subregions)
Algorithm from paper:
"Towards operational near real-time flood detection using a split-based
automatic thresholding procedure on high resolution TerraSAR-X data"
by S. Martinis, A. Twele, and S. Voigt, Nat. Hazards Earth Syst. Sci., 9, 303-314, 2009
This algorithm seems extremely sensitive to multiple threshold and
scale parameters. So far it has not worked well on any data set.
'''
# Two-color EE visualization palettes (black -> color) used by addToMap
# debug overlays below.
RED_PALETTE   = '000000, FF0000'
BLUE_PALETTE  = '000000, 0000FF'
TEAL_PALETTE  = '000000, 00FFFF'
LBLUE_PALETTE = '000000, ADD8E6'
GREEN_PALETTE = '000000, 00FF00'
GRAY_PALETTE  = '000000, FFFFFF'
def getBoundingBox(bounds):
    '''Returns (minLon, minLat, maxLon, maxLat) from domain bounds'''
    # Fold over the exterior ring, keeping running extremes.  Sentinel
    # start values match any real-world lon/lat on the first comparison.
    minLon, maxLon = 999999, -999999
    minLat, maxLat = 999, -999
    for coord in bounds['coordinates'][0]:
        lon, lat = coord[0], coord[1]
        minLon = min(minLon, lon)
        maxLon = max(maxLon, lon)
        minLat = min(minLat, lat)
        maxLat = max(maxLat, lat)
    return (minLon, minLat, maxLon, maxLat)
def divideUpBounds(bounds, boxSizeMeters, maxBoxesPerSide):
    '''Divides up a single boundary into a grid based on a grid size in meters.

    Returns (boxList, boxSizeMeters) where boxList is a list of
    ee.Geometry.Rectangle tiles covering the bounds and boxSizeMeters is the
    (possibly adjusted) tile size actually used.'''
    # Get the four corners of the box and side widths in meters
    (minLon, minLat, maxLon, maxLat) = getBoundingBox(bounds)
    bottomLeft  = ee.Geometry.Point(minLon, minLat)
    topLeft     = ee.Geometry.Point(minLon, maxLat)
    bottomRight = ee.Geometry.Point(maxLon, minLat)
    height = float(bottomLeft.distance(topLeft).getInfo())
    width  = float(bottomLeft.distance(bottomRight).getInfo())
    # Determine the number of boxes, capped so the grid never exceeds
    # maxBoxesPerSide x maxBoxesPerSide tiles.
    numBoxesX = int(math.ceil(width / boxSizeMeters))
    numBoxesY = int(math.ceil(height / boxSizeMeters))
    if numBoxesX > maxBoxesPerSide:
        numBoxesX = maxBoxesPerSide
    if numBoxesY > maxBoxesPerSide:
        numBoxesY = maxBoxesPerSide
    # Recompute the (approximate) box size implied by the capped counts.
    boxSizeMeters = ((width/numBoxesX) + (height/numBoxesY)) / 2
    print('Using ' + str(numBoxesX*numBoxesY) + ' boxes of size ' + str(boxSizeMeters))
    # Now compute the box boundaries in degrees
    boxWidthLon = (maxLon - minLon) / numBoxesX
    boxHeightLat = (maxLat - minLat) / numBoxesY
    # BUGFIX: advance the running x/y coordinates AFTER emitting each box.
    # The previous version incremented before constructing the Rectangle,
    # which shifted the whole grid by one tile: the first row/column at
    # (minLon, minLat) was never covered and the grid overshot the max edge.
    boxList = []
    y = minLat
    for r in range(0, numBoxesY):
        x = minLon
        for c in range(0, numBoxesX):
            boxBounds = ee.Geometry.Rectangle(x, y, x+boxWidthLon, y+boxHeightLat)
            boxList.append(boxBounds)
            x = x + boxWidthLon
        y = y + boxHeightLat
    return boxList, boxSizeMeters
def getBoundsCenter(bounds):
    '''Returns the (lat, lon) center point of a boundary.

    Computes the arithmetic mean of the exterior-ring vertices.'''
    coordList = bounds['coordinates'][0]
    numCoords = len(coordList)
    # Accumulate as floats so the average is not truncated by Python 2
    # integer division when the coordinates happen to be ints.
    meanLat = sum(float(c[1]) for c in coordList) / numCoords
    meanLon = sum(float(c[0]) for c in coordList) / numCoords
    return (meanLat, meanLon)
#
#def __show_histogram(histogram, binCenters):
# '''Create a plot of a histogram'''
# plt.bar(binCenters, histogram)
#
# plt.show()
#def __show_histogram(histogram, params=None):
# '''Create a plot of a histogram'''
# #values = histogram['histogram']
# #start = histogram['bucketMin']
# #width = histogram['bucketWidth']
# ind = numpy.arange(start=start, stop=start + width * len(values), step=width)[:-1]
# plt.bar(ind, height=values[:-1], width=width, color='b')
# #if params != None:
# # m = domains.MINIMUM_VALUES[instrument]
# # if instrument == domains.UAVSAR:
# # m = math.log10(m)
# # mid = int((params[0] - start) / width)
# # cumulative = sum(values[:mid]) + values[mid] / 2
# # scale = cumulative / __cdf(params, m, params[0])
# # plt.bar(ind, map(lambda x : scale * (__cdf(params, m, x + width / 2) - __cdf(params, m, x - width
#
def applyCutlerLinearLogScale(grayImage, roi):
    '''Translates the input SAR image into a hybrid linear-log scale as described in
       "Robust automated thresholding of SAR imagery for open-water detection"
       by Patrick J Cutler and Frederick W Koehler'''
    # Pixels below the TOP_SECTION_PERCENTILE value are mapped linearly to
    # [0, TOP_SECTION_START-1]; pixels above are mapped logarithmically to
    # [TOP_SECTION_START, 255].
    TOP_SECTION_PERCENTILE = 99
    TOP_SECTION_START = 221
    topRange = 256 - TOP_SECTION_START
    # Compute a histogram of the entire area
    # - Do this at a lower resolution to reduce computation time
    PERCENTILE_SCALE = 50 # Resolution in meters to compute the percentile at
    percentiles = grayImage.reduceRegion(ee.Reducer.percentile([0, TOP_SECTION_PERCENTILE, 100], ['min', 'split', 'max']),
                                         roi, PERCENTILE_SCALE).getInfo()
    # Extracting the results is annoying because EE prepends the channel name
    minVal = next(val for key, val in percentiles.items() if 'min' in key)
    splitVal = next(val for key, val in percentiles.items() if 'split' in key)
    maxVal = next(val for key, val in percentiles.items() if 'max' in key)
    lowRange = splitVal - minVal
    # NOTE(review): assumes splitVal and maxVal are > 0 (log10 would fail
    # otherwise) — presumably true for SAR backscatter magnitudes; confirm.
    logMin = math.log10(splitVal)
    logMax = math.log10(maxVal)
    logRange = logMax - logMin
    #addToMap(grayImage.select(['vh']), {}, 'vh', False)
    # Intensities from 0 to 98th percent are mapped to 0 - 220 on a linear scale
    # Intensities from 99 to 100th percent are mapped to 221 - 255 on a log scale
    lowMask = grayImage.lt(splitVal )
    highMask = grayImage.gte(splitVal)
    #addToMap(lowMask, {'min': 0, 'max': 1, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'low range', False)
    #addToMap(highMask, {'min': 0, 'max': 1, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'high range', False)
    # Each portion is zeroed outside its mask, so the two can be summed.
    linearPortion = grayImage.subtract(minVal).divide(lowRange).multiply(TOP_SECTION_START-1).multiply(lowMask )#.uint8()
    logPortion = grayImage.log10().subtract(logMin).divide(logRange).multiply(topRange).add(TOP_SECTION_START).multiply(highMask)
    #addToMap(linearPortion, {'min': 0, 'max': 255, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'linear', False)
    #addToMap(logPortion, {'min': 0, 'max': 255, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'log', False)
    scaledImage = linearPortion.add(logPortion)
    return scaledImage
def sar_martinis_cr(domain):
    '''Just calls sar_martinis with the CR option instead of the default CV option'''
    # CR = std / gray-value-range filtering; see the cr_method branch below.
    return sar_martinis(domain, True)
def sar_martinis(domain, cr_method=False):
    '''Compute a global threshold via histogram splitting on selected subregions.

    Steps (following Martinis et al. 2009):
      1. tile the domain into a grid,
      2. keep tiles whose local statistics (CV and R, or CR) suggest a
         mixed water/land population,
      3. split each kept tile's histogram (Kittler-Illingworth),
      4. take the median of the local thresholds as the global threshold,
      5. classify pixels <= threshold as water.

    Returns a single-band ('b1') binary water classification image.
    '''
    sensor = domain.get_radar()
    radarImage = sensor.image
    # Many papers recommend a median type filter to remove speckle noise.
    # 1: Divide up the image into a grid of tiles, X
    # Divide up the region into a grid of subregions
    MAX_BOXES_PER_SIDE = 12 # Cap the number of boxes at 144
    DESIRED_BOX_SIZE_METERS = 3000
    boxList, boxSizeMeters = divideUpBounds(domain.bounds, DESIRED_BOX_SIZE_METERS, MAX_BOXES_PER_SIDE)
    # Extract the center point from each box
    centersList = map(getBoundsCenter, boxList)
    # SENTINEL = 12m/pixel
    KERNEL_SIZE = 13 # Each box will be covered by a 13x13 pixel kernel
    metersPerPixel = boxSizeMeters / KERNEL_SIZE
    print 'Using metersPerPixel: ' + str(metersPerPixel)
    avgKernel = ee.Kernel.square(KERNEL_SIZE, 'pixels', True); # <-- EE fails if this is in meters!
    # Select the radar layer we want to work in
    if 'water_detect_radar_channel' in domain.algorithm_params:
        channelName = domain.algorithm_params['water_detect_radar_channel']
    else: # Just use the first radar channel
        channelName = sensor.band_names[0]
    # Rescale the input data so the statistics are not dominated by very bright pixels
    GRAY_MAX = 255
    grayLayer = applyCutlerLinearLogScale(radarImage.select([channelName]), domain.bounds)
    #addToMap(grayLayer, {'min': 0, 'max': GRAY_MAX, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'grayLayer', False)
    # Compute the global mean, then make a constant image out of it.
    globalMean = grayLayer.reduceRegion(ee.Reducer.mean(), domain.bounds, metersPerPixel)
    globalMeanImage = ee.Image.constant(globalMean.getInfo()[channelName])
    print 'global mean = ' + str(globalMean.getInfo()[channelName])
    # Compute mean and standard deviation across the entire image
    # (std via the E[x^2] - E[x]^2 identity, all with kernel convolutions).
    meanImage = grayLayer.convolve(avgKernel)
    graysSquared = grayLayer.pow(ee.Image(2))
    meansSquared = meanImage.pow(ee.Image(2))
    meanOfSquaredImage = graysSquared.convolve(avgKernel)
    meansDiff = meanOfSquaredImage.subtract(meansSquared)
    stdImage = meansDiff.sqrt()
    # Debug plots
    #addToMap(meanImage, {'min': 3000, 'max': 70000, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'Mean', False)
    #addToMap(stdImage, {'min': 3000, 'max': 200000, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'StdDev', False)
    #addToMap(meanImage, {'min': 0, 'max': GRAY_MAX, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'Mean', False)
    #addToMap(stdImage, {'min': 0, 'max': 40, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'StdDev', False)
    # Compute these two statistics across the entire image
    # NOTE(review): CV here is mean/std, the inverse of the usual
    # coefficient-of-variation definition (std/mean) — confirm intended.
    CV = meanImage.divide(stdImage).reproject( "EPSG:4326", None, metersPerPixel)
    R = meanImage.divide(globalMeanImage).reproject("EPSG:4326", None, metersPerPixel)
    # 2: Prune to a reduced set of tiles X'
    # Parameters which control which sub-regions will have their histograms analyzed
    # - These are strongly influenced by the smoothing kernel size!!!
    MIN_CV = 0.7
    MAX_CV = 1.3
    MAX_R = 1.1
    MIN_R = 0.5
    # Debug plots
    addToMap(CV, {'min': 0, 'max': 4.0, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'CV', False)
    addToMap(R, {'min': 0, 'max': 4.0, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'R', False)
    if cr_method:
        MIN_CR = 0.10
        # sar_griefeneder recommends replacing CV with CR = (std / gray value range), min value 0.05
        imageMin = grayLayer.reduceRegion(ee.Reducer.min(), domain.bounds, metersPerPixel).getInfo()[channelName]
        imageMax = grayLayer.reduceRegion(ee.Reducer.max(), domain.bounds, metersPerPixel).getInfo()[channelName]
        grayRange = imageMax - imageMin
        CR = stdImage.divide(grayRange)
        #addToMap(CR, {'min': 0, 'max': 0.3, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'CR', False)
    # Filter out pixels based on computed statistics
    t1 = CV.gte(MIN_CV)
    t2 = CV.lte(MAX_CV)
    t3 = R.gte(MIN_R)
    t4 = R.lte(MAX_R)
    if cr_method:
        temp = CR.gte(MIN_CR).And(t3).And(t4)
    else:
        temp = t1.And(t2).And(t3).And(t4)
    X_prime = temp.reproject("EPSG:4326", None, metersPerPixel)
    addToMap(X_prime.mask(X_prime), {'min': 0, 'max': 1, 'opacity': 1.0, 'palette': TEAL_PALETTE}, 'X_prime', False)
    # 3: Prune again to a final set of tiles X''
    # Further pruning happens here but for now we are skipping it and using
    # everything that got by the filter. This would speed local computation.
    # - This is equivalent to using a large number for N'' in the original paper
    # (which does not suggest a value for N'')
    X_doublePrime = X_prime
    # 4: For each tile, compute the optimal threshold
    # Assemble all local gray values at each point ?
    localPixelLists = grayLayer.neighborhoodToBands(avgKernel)
    maskWrapper = ee.ImageCollection([X_doublePrime]);
    collection = ee.ImageCollection([localPixelLists]);
    # Extract the point data at from each sub-region!
    localThresholdList = []
    usedPointList = []
    rejectedPointList = []
    for loc in centersList:
        try:
            # centersList entries are (lat, lon); ee.Geometry.Point takes (lon, lat).
            thisLoc = ee.Geometry.Point(loc[1], loc[0])
            # If the mask for this location is invalid, skip this location
            maskValue = maskWrapper.getRegion(thisLoc, metersPerPixel);
            maskValue = maskValue.getInfo()[1][4] # TODO: Not the best way to grab the value!
            if not maskValue:
                rejectedPointList.append(thisLoc)
                continue
            # Otherwise pull down all the pixel values surrounding this center point
            pointData = collection.getRegion(thisLoc, metersPerPixel)
            pixelVals = pointData.getInfo()[1][4:] # TODO: Not the best way to grab the value!
            # TODO: Can EE handle making a histogram around this region or do we need to do this ourselves?
            #pointData = localPixelLists.reduceRegion(thisRegion, ee.Reducer.histogram(), SAMPLING_SCALE);
            #print pointData.getInfo()
            #print pixelVals
            #__show_histogram(pixelVals)
            #plt.bar(range(len(pixelVals)), pixelVals)
            # Compute a histogram from the pixels (TODO: Do this with EE!)
            NUM_BINS = 256
            hist, binEdges = numpy.histogram(pixelVals, NUM_BINS)
            binCenters = numpy.divide(numpy.add(binEdges[:NUM_BINS], binEdges[1:]), 2.0)
            # Compute a split on the histogram
            splitVal = histogram.splitHistogramKittlerIllingworth(hist, binCenters)
            print "Computed local threshold = " + str(splitVal)
            localThresholdList.append(splitVal)
            usedPointList.append(thisLoc)
            #plt.bar(binCenters, hist)
            #plt.show()
        except Exception,e:
            # Best-effort: a failed tile is logged and skipped rather than
            # aborting the whole threshold search.
            print 'Failed to compute a location:'
            print str(e)
    numUsedPoints = len(usedPointList)
    numUnusedPoints = len(rejectedPointList)
    # Debug overlays showing which tile centers were used/rejected.
    if (numUsedPoints > 0):
        usedPointListEE = ee.FeatureCollection(ee.Feature(usedPointList[0]))
        for i in range(1,numUsedPoints):
            temp = ee.FeatureCollection(ee.Feature(usedPointList[i]))
            usedPointListEE = usedPointListEE.merge(temp)
        usedPointsDraw = usedPointListEE.draw('00FF00', 8)
        addToMap(usedPointsDraw, {}, 'Used PTs', False)
    if (numUnusedPoints > 0):
        unusedPointListEE = ee.FeatureCollection(ee.Feature(rejectedPointList[0]))
        for i in range(1,numUnusedPoints):
            temp = ee.FeatureCollection(ee.Feature(rejectedPointList[i]))
            unusedPointListEE = unusedPointListEE.merge(temp)
        unusedPointsDraw = unusedPointListEE.draw('FF0000', 8)
        addToMap(unusedPointsDraw, {}, 'Unused PTs', False)
    # 5: Use the individual thresholds to compute a global threshold
    computedThreshold = numpy.median(localThresholdList) # Nothing fancy going on here!
    print 'Computed global threshold = ' + str(computedThreshold)
    finalWaterClass = grayLayer.lte(computedThreshold)
    #addToMap(finalWaterClass.mask(finalWaterClass), {'min': 0, 'max': 1, 'opacity': 0.6, 'palette': RED_PALETTE}, 'mirtinis class', False)
    # Rename the channel to what the evaluation function requires
    finalWaterClass = finalWaterClass.select([channelName], ['b1'])
    return finalWaterClass
|
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import argparse
import fnmatch
import logging
import os
import sys
import telemetry_mini
import six
# Command-line flags passed to the browser under test so it can be driven
# over DevTools without first-run interference.
BROWSER_FLAGS = [
    '--enable-remote-debugging',
    '--disable-fre',
    '--no-default-browser-check',
    '--no-first-run',
]
# Chrome tracing configuration: trace only the listed categories.
TRACE_CONFIG = {
    'excludedCategories': ['*'],
    'includedCategories': ['rails', 'toplevel', 'startup', 'blink.user_timing'],
    'memoryDumpConfig': {'triggers': []}
}
# Maps --browser choices to the telemetry_mini app class to drive.
BROWSERS = {
    'android-chrome': telemetry_mini.ChromeApp,
    'android-chromium': telemetry_mini.ChromiumApp,
    'android-system-chrome': telemetry_mini.SystemChromeApp,
}
class TwitterApp(telemetry_mini.AndroidApp):
  """Handle for the Twitter Android app used by the stories below."""
  PACKAGE_NAME = 'com.twitter.android'
class InstagramApp(telemetry_mini.AndroidApp):
  """Handle for the Instagram Android app used by the stories below."""
  PACKAGE_NAME = 'com.instagram.android'
class HangoutsApp(telemetry_mini.AndroidApp):
  """Handle for the Google Hangouts Android app used by the stories below."""
  PACKAGE_NAME = 'com.google.android.talk'
class TwitterFlipkartStory(telemetry_mini.UserStory):
  """Load Chrome Custom Tab from another application.
  The flow of the story is:
  - Start Twitter app to view the @flipkart profile.
  - Tap on a link to open Flipkart in a Chrome Custom Tab.
  - Return to Twitter app.
  """
  NAME = 'twitter_flipkart'
  # UI-automator selector for the flipkart.com link on the profile page.
  FLIPKART_TWITTER_LINK = [
      ('package', 'com.twitter.android'),
      ('class', 'android.widget.TextView'),
      ('text', 'flipkart.com')
  ]
  def __init__(self, *args, **kwargs):
    super(TwitterFlipkartStory, self).__init__(*args, **kwargs)
    self.watcher = ProcessWatcher(self.device)
    self.twitter = TwitterApp(self.device)
  def RunPrepareSteps(self):
    # Start from a known state: Twitter not running.
    self.twitter.ForceStop()
  def RunStorySteps(self):
    # Activity will launch Twitter app on Flipkart profile.
    self.actions.StartActivity('https://twitter.com/flipkart')
    self.watcher.StartWatching(self.twitter)
    # Tapping on Flipkart link on Twitter app will launch Chrome.
    self.actions.TapUiElement(self.FLIPKART_TWITTER_LINK)
    self.watcher.StartWatching(self.browser)
    self.browser.WaitForCurrentPageReady()
    self.actions.SwipeUp(repeat=3)
    # Return to Twitter app.
    self.actions.GoBack()
    # Fail if any watched process died or restarted during the story.
    self.watcher.AssertAllAlive()
  def RunCleanupSteps(self):
    self.twitter.ForceStop()
class FlipkartInstagramStory(telemetry_mini.UserStory):
  """Interaction between Chrome, PWAs and a WebView-based app.
  The flow of the story is:
  - Launch the Flipkart PWA.
  - Go back home and launch the Instagram app.
  - Use the app switcher to return to Flipkart.
  - Go back home and launch Cricbuzz from a shortcut.
  """
  NAME = 'flipkart_instagram'
  def __init__(self, *args, **kwargs):
    super(FlipkartInstagramStory, self).__init__(*args, **kwargs)
    self.watcher = ProcessWatcher(self.device)
    self.instagram = InstagramApp(self.device)
  def RunPrepareSteps(self):
    # Start from a known state: Instagram stopped, recents cleared so the
    # app-switcher step below finds only what this story launched.
    self.instagram.ForceStop()
    self.actions.ClearRecentApps()
  def RunStorySteps(self):
    # Tap on home screen shortcut to open Flipkart PWA.
    self.actions.TapHomeScreenShortcut('Flipkart Lite')
    self.watcher.StartWatching(self.browser)
    self.browser.WaitForCurrentPageReady()
    self.actions.SwipeUp(repeat=2)
    # Go back home, then launch Instagram app.
    self.actions.GoHome()
    self.actions.TapHomeScreenShortcut('Instagram')
    self.watcher.StartWatching(self.instagram)
    self.actions.SwipeUp(repeat=5)
    # Go to app switcher and return to Flipkart PWA.
    self.actions.GoAppSwitcher()
    self.actions.TapAppSwitcherTitle('Flipkart Lite')
    self.actions.SwipeDown()
    # Go back home, then open Cricbuzz shortcut.
    self.actions.GoHome()
    self.actions.TapHomeScreenShortcut('Cricbuzz')
    self.browser.WaitForCurrentPageReady()
    self.actions.SwipeUp()
    # Fail if any watched process died or restarted during the story.
    self.watcher.AssertAllAlive()
  def RunCleanupSteps(self):
    self.instagram.ForceStop()
class HangoutsIndiaTimesStory(telemetry_mini.UserStory):
  """Interaction between Chrome and a non-WebView-based app.
  TODO: Not sure if Hangouts is a non-WebView app. Consider using another app
  if needed.
  The flow of the story is:
  - Launch the Hangouts app.
  - Open a conversation with a link to an IndiaTimes article.
  - Click on the link to launch Chrome.
  - Go back to the conversation.
  """
  NAME = 'hangouts_indiatimes'
  # TODO: Maybe use more specific targets, e.g. check url is in message.
  # UI-automator selectors: first conversation in the list, then the
  # second message inside it (assumed to hold the IndiaTimes link).
  FIRST_CONVERSATION = [
      ('resource-id', 'com.google.android.talk:id/conversationContent'),
      ('index', '0'),
  ]
  SECOND_MESSAGE = [
      ('resource-id', 'com.google.android.talk:id/message_root'),
      ('index', '1'),
  ]
  def __init__(self, *args, **kwargs):
    super(HangoutsIndiaTimesStory, self).__init__(*args, **kwargs)
    self.watcher = ProcessWatcher(self.device)
    self.hangouts = HangoutsApp(self.device)
  def RunPrepareSteps(self):
    # Start from a known state: Hangouts not running.
    self.hangouts.ForceStop()
  def RunStorySteps(self):
    # Tap on home screen shortcut to open Hangouts app.
    self.actions.TapHomeScreenShortcut('Hangouts')
    self.watcher.StartWatching(self.hangouts)
    # Find conversation with link to IndiaTimes and tap to launch Chrome.
    self.actions.TapUiElement(self.FIRST_CONVERSATION)
    self.actions.TapUiElement(self.SECOND_MESSAGE)
    self.watcher.StartWatching(self.browser)
    self.browser.WaitForCurrentPageReady()
    self.actions.SwipeUp(repeat=4)
    # Go back to Hangouts, then back Home.
    self.actions.GoBack()
    self.actions.Idle(2)
    self.actions.GoHome()
    # Fail if any watched process died or restarted during the story.
    self.watcher.AssertAllAlive()
  def RunCleanupSteps(self):
    self.hangouts.ForceStop()
# All user stories selectable via --story-filter.
STORIES = (
    TwitterFlipkartStory,
    FlipkartInstagramStory,
    HangoutsIndiaTimesStory,
)
class ProcessWatcher(object):
  """Tracks PIDs of processes of interest to detect crashes/restarts.

  Register processes with StartWatching(); later, AssertAllAlive() verifies
  every registered process is still running under its original PID.
  """
  def __init__(self, device):
    self.device = device
    # Maps process name -> PID recorded when watching started.
    self._process_pid = {}
  def StartWatching(self, process_name):
    """Register a process or android app to keep track of its PID."""
    if isinstance(process_name, telemetry_mini.AndroidApp):
      process_name = process_name.PACKAGE_NAME
    # The process may not have fully started yet; retry until a PID appears.
    @telemetry_mini.RetryOn(returns_falsy=True)
    def GetPids():
      # Returns an empty list if the process name is not found.
      return self.device.ProcessStatus()[process_name]
    assert process_name not in self._process_pid
    pids = GetPids()
    assert pids, 'PID for %s not found' % process_name
    assert len(pids) == 1, 'Single PID for %s expected, but found: %s' % (
        process_name, pids)
    logging.info('Started watching %s (PID=%d)', process_name, pids[0])
    self._process_pid[process_name] = pids[0]
  def AssertAllAlive(self):
    """Check that all watched processes remain alive and were not restarted."""
    status = self.device.ProcessStatus()
    all_alive = True
    # Check every process before asserting, so the log shows all failures.
    for process_name, old_pid in sorted(six.iteritems(self._process_pid)):
      new_pids = status[process_name]
      if not new_pids:
        all_alive = False
        logging.error('Process %s died (PID=%d).', process_name, old_pid)
      elif new_pids != [old_pid]:
        all_alive = False
        logging.error(
            'Process %s restarted (PID=%d -> %s).', process_name,
            old_pid, new_pids)
      else:
        logging.info('Process %s still alive (PID=%d)', process_name, old_pid)
    assert all_alive, 'Some watched processes died or got restarted'
def EnsureSingleBrowser(device, browser_name, force_install=False):
  """Ensure a single Chrome browser is installed and available on the device.
  Having more than one Chrome browser available may produce results which are
  confusing or unreliable (e.g. unclear which browser will respond by default
  to intents triggered by other apps).
  This function ensures only the selected browser is available, installing it
  if necessary, and uninstalling/disabling others.

  Args:
    device: device wrapper used to query and modify installed packages.
    browser_name: key into BROWSERS selecting the browser app class.
    force_install: if True, (re)install the APK even when already available.

  Returns:
    The browser app instance for the selected browser.
  """
  browser = BROWSERS[browser_name](device)
  available_browsers = set(device.ListPackages('chrome', only_enabled=True))
  # Install or enable if needed.
  if force_install or browser.PACKAGE_NAME not in available_browsers:
    browser.Install()
  # Uninstall or disable other browser apps.
  for other_browser in six.itervalues(BROWSERS):
    if (other_browser.PACKAGE_NAME != browser.PACKAGE_NAME and
        other_browser.PACKAGE_NAME in available_browsers):
      other_browser(device).Uninstall()
  # Finally check that only the selected browser is actually available.
  available_browsers = device.ListPackages('chrome', only_enabled=True)
  assert browser.PACKAGE_NAME in available_browsers, (
      'Unable to make %s available' % browser.PACKAGE_NAME)
  available_browsers.remove(browser.PACKAGE_NAME)
  # Fixed typo in the assertion message ("intefere" -> "interfere").
  assert not available_browsers, (
      'Other browsers may interfere with the test: %s' % available_browsers)
  return browser
def main():
  """Parse arguments, select stories/browser, and run the benchmark.

  Returns a process exit code: 0 on success, 1 when no stories match the
  filter, 2 when the output directory is invalid.
  """
  browser_names = sorted(BROWSERS)
  default_browser = 'android-chrome'
  parser = argparse.ArgumentParser()
  parser.add_argument('--serial',
                      help='device serial on which to run user stories'
                      ' (defaults to first device found)')
  parser.add_argument('--adb-bin', default='adb', metavar='PATH',
                      help='path to adb binary to use (default: %(default)s)')
  parser.add_argument('--browser', default=default_browser, metavar='NAME',
                      choices=browser_names,
                      help='one of: %s' % ', '.join(
                          '%s (default)' % b if b == default_browser else b
                          for b in browser_names))
  parser.add_argument('--story-filter', metavar='PATTERN', default='*',
                      help='run the matching stories only (allows Unix'
                      ' shell-style wildcards)')
  parser.add_argument('--repeat', metavar='NUM', type=int, default=1,
                      help='repeat the story set a number of times'
                      ' (default: %(default)d)')
  parser.add_argument('--output-dir', metavar='PATH',
                      help='path to directory for placing output trace files'
                      ' (defaults to current directory)')
  parser.add_argument('--force-install', action='store_true',
                      help='install APK even if browser is already available')
  parser.add_argument('--apks-dir', metavar='PATH',
                      help='path where to find APKs to install')
  parser.add_argument('--port', type=int, default=1234,
                      help='port for connection with device'
                      ' (default: %(default)s)')
  parser.add_argument('-v', '--verbose', action='store_true')
  args = parser.parse_args()
  logging.basicConfig()
  if args.verbose:
    logging.getLogger().setLevel(logging.INFO)
  # Select the stories to run based on the wildcard filter.
  stories = [s for s in STORIES if fnmatch.fnmatch(s.NAME, args.story_filter)]
  if not stories:
    logging.error('No matching stories')
    return 1
  if args.output_dir is None:
    args.output_dir = os.getcwd()
  else:
    args.output_dir = os.path.realpath(args.output_dir)
  if not os.path.isdir(args.output_dir):
    # Fixed typo in the error message ("exit" -> "exist").
    logging.error('Output directory does not exist')
    return 2
  if args.apks_dir is None:
    # Default to the standard Chromium output location for built APKs.
    args.apks_dir = os.path.realpath(os.path.join(
        os.path.dirname(__file__), '..', '..', '..', '..',
        'out', 'Release', 'apks'))
  telemetry_mini.AndroidApp.APKS_DIR = args.apks_dir
  telemetry_mini.AdbMini.ADB_BIN = args.adb_bin
  if args.serial is None:
    device = next(telemetry_mini.AdbMini.GetDevices())
    logging.warning(
        'Connected to first device found: --serial %s', device.serial)
  else:
    device = telemetry_mini.AdbMini(args.serial)
  # Some operations may require a rooted device.
  device.RunCommand('root')
  device.RunCommand('wait-for-device')
  browser = EnsureSingleBrowser(device, args.browser, args.force_install)
  browser.SetBrowserFlags(BROWSER_FLAGS)
  browser.SetTraceConfig(TRACE_CONFIG)
  browser.SetDevToolsLocalPort(args.port)
  telemetry_mini.RunStories(browser, stories, args.repeat, args.output_dir)
  return 0
# Script entry point; propagate main()'s exit code to the shell.
if __name__ == '__main__':
  sys.exit(main())
|
|
import py, struct, sys
from rpython.jit.codewriter.assembler import Assembler, AssemblerError
from rpython.jit.codewriter.flatten import SSARepr, Label, TLabel, Register
from rpython.jit.codewriter.flatten import ListOfKind, IndirectCallTargets
from rpython.jit.codewriter.jitcode import MissingLiveness
from rpython.jit.codewriter import heaptracker, longlong
from rpython.jit.metainterp.history import AbstractDescr
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rlib.rarithmetic import r_int, r_uint
def test_assemble_simple():
    """A two-instruction jitcode: check encoding, opcode table, and register counts."""
    ssarepr = SSARepr("test")
    i0, i1, i2 = Register('int', 0), Register('int', 1), Register('int', 2)
    ssarepr.insns = [
        ('int_add', i0, i1, '->', i2),
        ('int_return', i2),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    # Opcode byte followed by register numbers; opcodes assigned in order of
    # first use (int_add=0, int_return=1).
    assert jitcode.code == ("\x00\x00\x01\x02"
                            "\x01\x02")
    assert assembler.insns == {'int_add/ii>i': 0,
                               'int_return/i': 1}
    assert jitcode.num_regs_i() == 3
    assert jitcode.num_regs_r() == 0
    assert jitcode.num_regs_f() == 0
def test_assemble_consts():
    """Small int constants are encoded inline; larger ones go to constants_i."""
    ssarepr = SSARepr("test")
    ssarepr.insns = [
        ('int_return', Register('int', 13)),
        ('int_return', Constant(18, lltype.Signed)),
        ('int_return', Constant(-4, lltype.Signed)),
        ('int_return', Constant(128, lltype.Signed)),
        ('int_return', Constant(-129, lltype.Signed)),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    assert jitcode.code == ("\x00\x0D"
                            "\x01\x12"    # use int_return/c for one-byte consts
                            "\x01\xFC"
                            "\x00\xFF"    # use int_return/i for larger consts
                            "\x00\xFE")
    assert assembler.insns == {'int_return/i': 0,
                               'int_return/c': 1}
    # Out-of-range constants are spilled into the constants table and
    # referenced from the end of the register space (0xFF, 0xFE).
    assert jitcode.constants_i == [128, -129]
def test_assemble_float_consts():
    """Float constants are always placed in the constants_f table."""
    ssarepr = SSARepr("test")
    ssarepr.insns = [
        ('float_return', Register('float', 13)),
        ('float_return', Constant(18.0, lltype.Float)),
        ('float_return', Constant(-4.0, lltype.Float)),
        ('float_return', Constant(128.1, lltype.Float)),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    assert jitcode.code == ("\x00\x0D"
                            "\x00\xFF"
                            "\x00\xFE"
                            "\x00\xFD")
    assert assembler.insns == {'float_return/f': 0}
    # Constants are stored via longlong float storage conversion.
    assert jitcode.constants_f == [longlong.getfloatstorage(18.0),
                                   longlong.getfloatstorage(-4.0),
                                   longlong.getfloatstorage(128.1)]
def test_assemble_llong_consts():
    """On 32-bit hosts, (u)longlong constants are stored in constants_f."""
    if sys.maxint > 2147483647:
        py.test.skip("only for 32-bit platforms")
    from rpython.rlib.rarithmetic import r_longlong, r_ulonglong
    ssarepr = SSARepr("test")
    ssarepr.insns = [
        ('float_return', Constant(r_longlong(-18000000000000000),
                                  lltype.SignedLongLong)),
        ('float_return', Constant(r_ulonglong(9900000000000000000),
                                  lltype.UnsignedLongLong)),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    assert jitcode.code == ("\x00\xFF"
                            "\x00\xFE")
    assert assembler.insns == {'float_return/f': 0}
    # The unsigned value is reinterpreted as a signed 64-bit value.
    assert jitcode.constants_f == [r_longlong(-18000000000000000),
                                   r_longlong(-8546744073709551616)]
def test_assemble_cast_consts():
    """Char/UniChar/function-pointer/GC-pointer constants get cast appropriately."""
    ssarepr = SSARepr("test")
    S = lltype.GcStruct('S')
    s = lltype.malloc(S)
    F = lltype.FuncType([], lltype.Signed)
    f = lltype.functionptr(F, 'f')
    ssarepr.insns = [
        ('int_return', Constant('X', lltype.Char)),
        ('int_return', Constant(unichr(0x1234), lltype.UniChar)),
        ('int_return', Constant(f, lltype.Ptr(F))),
        ('ref_return', Constant(s, lltype.Ptr(S))),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    assert jitcode.code == ("\x00\x58"
                            "\x01\xFF"
                            "\x01\xFE"
                            "\x02\xFF")
    assert assembler.insns == {'int_return/c': 0,
                               'int_return/i': 1,
                               'ref_return/r': 2}
    # Function pointers are stored as integers; GC pointers as GCREFs.
    f_int = heaptracker.adr2int(llmemory.cast_ptr_to_adr(f))
    assert jitcode.constants_i == [0x1234, f_int]
    s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s)
    assert jitcode.constants_r == [s_gcref]
def test_assemble_loop():
    """Labels and TLabels resolve to byte offsets in the assembled code."""
    ssarepr = SSARepr("test")
    i0, i1 = Register('int', 0x16), Register('int', 0x17)
    ssarepr.insns = [
        (Label('L1'),),
        ('goto_if_not_int_gt', i0, Constant(4, lltype.Signed), TLabel('L2')),
        ('int_add', i1, i0, '->', i1),
        ('int_sub', i0, Constant(1, lltype.Signed), '->', i0),
        ('goto', TLabel('L1')),
        (Label('L2'),),
        ('int_return', i1),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    # Label targets are 16-bit little-endian offsets (L2 -> 0x0010, L1 -> 0x0000).
    assert jitcode.code == ("\x00\x16\x04\x10\x00"
                            "\x01\x17\x16\x17"
                            "\x02\x16\x01\x16"
                            "\x03\x00\x00"
                            "\x04\x17")
    assert assembler.insns == {'goto_if_not_int_gt/icL': 0,
                               'int_add/ii>i': 1,
                               'int_sub/ic>i': 2,
                               'goto/L': 3,
                               'int_return/i': 4}
def test_assemble_list():
    """ListOfKind args encode as a length byte followed by register/const bytes."""
    ssarepr = SSARepr("test")
    i0, i1 = Register('int', 0x16), Register('int', 0x17)
    ssarepr.insns = [
        ('foobar', ListOfKind('int', [i0, i1, Constant(42, lltype.Signed)]),
                   ListOfKind('ref', [])),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    # 3 int entries (two regs + spilled const at 0xFF), then an empty ref list.
    assert jitcode.code == "\x00\x03\x16\x17\xFF\x00"
    assert assembler.insns == {'foobar/IR': 0}
    assert jitcode.constants_i == [42]
def test_assemble_list_semibug():
    """Constants forced into the table for lists must not shadow inline encoding."""
    # the semibug is that after forcing 42 into the dict of constants,
    # it would be reused for all future 42's, even ones that can be
    # encoded directly.
    ssarepr = SSARepr("test")
    ssarepr.insns = [
        ('foobar', ListOfKind('int', [Constant(42, lltype.Signed)])),
        ('foobar', ListOfKind('int', [Constant(42, lltype.Signed)])),
        ('baz', Constant(42, lltype.Signed)),
        ('bok', Constant(41, lltype.Signed)),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    # 'baz' still uses the one-byte /c form (\x2A == 42) even though 42
    # was already placed in the constants table for the lists.
    assert jitcode.code == ("\x00\x01\xFF"
                            "\x00\x01\xFF"
                            "\x01\x2A"
                            "\x02\xFE")
    assert assembler.insns == {'foobar/I': 0,
                               'baz/c': 1,   # in USE_C_FORM
                               'bok/i': 2}   # not in USE_C_FORM
    assert jitcode.constants_i == [42, 41]
def test_assemble_descr():
    """Descrs encode as 16-bit indices; the descr list preserves insertion order."""
    class FooDescr(AbstractDescr):
        pass
    descrs = [FooDescr() for i in range(300)]
    ssarepr = SSARepr("test")
    # Feed descrs in reverse so index order follows first-use order.
    ssarepr.insns = [('foobar', d) for d in descrs[::-1]]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    # 300 > 255, proving descr indices are 2 bytes (little-endian).
    assert jitcode.code == ''.join(["\x00" + struct.pack("<H", i)
                                    for i in range(300)])
    assert assembler.insns == {'foobar/d': 0}
    assert assembler.descrs == descrs[::-1]
def test_assemble_indirect_call():
    """IndirectCallTargets from all insns are collected into one de-duplicated set."""
    lst1 = ["somejitcode1", "somejitcode2"]
    lst2 = ["somejitcode1", "somejitcode3"]
    ssarepr = SSARepr("test")
    ssarepr.insns = [('foobar', IndirectCallTargets(lst1)),
                     ('foobar', IndirectCallTargets(lst2))]
    assembler = Assembler()
    assembler.assemble(ssarepr)
    assert assembler.indirectcalltargets == set(lst1).union(lst2)
def test_num_regs():
    """num_regs_* report highest-used register index + 1, per register kind."""
    assembler = Assembler()
    ssarepr = SSARepr("test")
    ssarepr.insns = []
    jitcode = assembler.assemble(ssarepr)
    # Empty jitcode: no registers of any kind.
    assert jitcode.num_regs_i() == 0
    assert jitcode.num_regs_r() == 0
    assert jitcode.num_regs_f() == 0
    ssarepr = SSARepr("test")
    ssarepr.insns = [('foobar', Register('int', 51),
                                Register('ref', 27),
                                Register('int', 12))]
    jitcode = assembler.assemble(ssarepr)
    assert jitcode.num_regs_i() == 52
    assert jitcode.num_regs_r() == 28
    assert jitcode.num_regs_f() == 0
def test_liveness():
    """-live- pseudo-ops record live variables keyed by bytecode offset."""
    ssarepr = SSARepr("test")
    i0, i1, i2 = Register('int', 0), Register('int', 1), Register('int', 2)
    ssarepr.insns = [
        ('int_add', i0, Constant(10, lltype.Signed), '->', i1),
        ('-live-', i0, i1),
        ('-live-', i1, i2),
        ('int_add', i0, Constant(3, lltype.Signed), '->', i2),
        ('-live-', i2),
    ]
    assembler = Assembler()
    jitcode = assembler.assemble(ssarepr)
    # -live- ops emit no bytecode; consecutive ones merge at the same offset.
    assert jitcode.code == ("\x00\x00\x0A\x01"   # ends at 4
                            "\x00\x00\x03\x02")  # ends at 8
    assert assembler.insns == {'int_add/ic>i': 0}
    # Liveness is only recorded at offsets 4 and 8; all others must raise.
    for i in range(8):
        if i != 4:
            py.test.raises(MissingLiveness, jitcode._live_vars, i)
    assert jitcode._live_vars(4) == '%i0 %i1 %i2'
    assert jitcode._live_vars(8) == '%i2'
def test_assemble_error_string_constant():
    """A Void-typed string constant cannot be assembled and raises AssemblerError."""
    ssarepr = SSARepr("test")
    c = Constant('foobar', lltype.Void)
    ssarepr.insns = [
        ('duh', c),
    ]
    assembler = Assembler()
    py.test.raises(AssemblerError, assembler.assemble, ssarepr)
def test_assemble_r_int():
    # r_int is a strange type, which the jit should replace with int;
    # r_uint is likewise normalized to a plain int constant.
    reg0, reg1, reg2 = [Register('int', k) for k in range(3)]
    ssarepr = SSARepr("test")
    ssarepr.insns = [
        ('uint_add', reg0, Constant(r_uint(42424242), lltype.Unsigned),
         '->', reg1),
        ('int_add', reg0, Constant(r_int(42424243), lltype.Signed),
         '->', reg2),
    ]
    jitcode = Assembler().assemble(ssarepr)
    assert jitcode.constants_i == [42424242, 42424243]
    # Both constants must end up as genuine ints, not r_int/r_uint.
    assert [type(c) for c in jitcode.constants_i] == [int, int]
|
|
"""The tests for the nx584 sensor platform."""
import requests
import unittest
from unittest import mock
from nx584 import client as nx584_client
from homeassistant.components.binary_sensor import nx584
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class StopMe(Exception):
    """Sentinel exception used to break out of the watcher's infinite loops."""
class TestNX584SensorSetup(unittest.TestCase):
    """Test the NX584 sensor platform."""
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Patch the nx584 client class for the duration of each test case;
        # the patcher is stopped again in tearDown.
        self._mock_client = mock.patch.object(nx584_client, 'Client')
        self._mock_client.start()
        self.fake_zones = [
            {'name': 'front', 'number': 1},
            {'name': 'back', 'number': 2},
            {'name': 'inside', 'number': 3},
        ]
        client = nx584_client.Client.return_value
        client.list_zones.return_value = self.fake_zones
        # NOTE(review): '1.1' presumably satisfies the platform's minimum
        # version check — see test_setup_version_too_old, where '1.0' fails.
        client.get_version.return_value = '1.1'
    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()
        self._mock_client.stop()
    @mock.patch('homeassistant.components.binary_sensor.nx584.NX584Watcher')
    @mock.patch('homeassistant.components.binary_sensor.nx584.NX584ZoneSensor')
    def test_setup_defaults(self, mock_nx, mock_watcher):
        """Test the setup with no configuration."""
        add_entities = mock.MagicMock()
        config = {
            'host': nx584.DEFAULT_HOST,
            'port': nx584.DEFAULT_PORT,
            'exclude_zones': [],
            'zone_types': {},
        }
        self.assertTrue(nx584.setup_platform(self.hass, config, add_entities))
        # With no zone_types configured, every zone becomes an 'opening'
        # sensor by default.
        mock_nx.assert_has_calls(
            [mock.call(zone, 'opening') for zone in self.fake_zones])
        self.assertTrue(add_entities.called)
        self.assertEqual(nx584_client.Client.call_count, 1)
        self.assertEqual(
            nx584_client.Client.call_args, mock.call('http://localhost:5007')
        )
    @mock.patch('homeassistant.components.binary_sensor.nx584.NX584Watcher')
    @mock.patch('homeassistant.components.binary_sensor.nx584.NX584ZoneSensor')
    def test_setup_full_config(self, mock_nx, mock_watcher):
        """Test the setup with full configuration."""
        config = {
            'host': 'foo',
            'port': 123,
            'exclude_zones': [2],
            'zone_types': {3: 'motion'},
        }
        add_entities = mock.MagicMock()
        self.assertTrue(nx584.setup_platform(self.hass, config, add_entities))
        # Zone 2 is excluded; zone 3 is given its configured 'motion' type.
        mock_nx.assert_has_calls([
            mock.call(self.fake_zones[0], 'opening'),
            mock.call(self.fake_zones[2], 'motion'),
        ])
        self.assertTrue(add_entities.called)
        self.assertEqual(nx584_client.Client.call_count, 1)
        self.assertEqual(
            nx584_client.Client.call_args, mock.call('http://foo:123')
        )
        self.assertTrue(mock_watcher.called)
    def _test_assert_graceful_fail(self, config):
        """Assert that the platform fails setup without raising."""
        self.assertFalse(setup_component(
            self.hass, 'binary_sensor.nx584', config))
    def test_setup_bad_config(self):
        """Test the setup with bad configuration."""
        bad_configs = [
            {'exclude_zones': ['a']},
            {'zone_types': {'a': 'b'}},
            {'zone_types': {1: 'notatype'}},
            {'zone_types': {'notazone': 'motion'}},
        ]
        for config in bad_configs:
            self._test_assert_graceful_fail(config)
    def test_setup_connect_failed(self):
        """Test the setup with connection failure."""
        nx584_client.Client.return_value.list_zones.side_effect = \
            requests.exceptions.ConnectionError
        self._test_assert_graceful_fail({})
    def test_setup_no_partitions(self):
        """Test the setup when the panel reports no partitions."""
        nx584_client.Client.return_value.list_zones.side_effect = \
            IndexError
        self._test_assert_graceful_fail({})
    def test_setup_version_too_old(self):
        """Test if version is too old."""
        nx584_client.Client.return_value.get_version.return_value = '1.0'
        self._test_assert_graceful_fail({})
    def test_setup_no_zones(self):
        """Test the setup with no zones."""
        nx584_client.Client.return_value.list_zones.return_value = []
        add_entities = mock.MagicMock()
        self.assertTrue(nx584.setup_platform(self.hass, {}, add_entities))
        # No zones configured on the panel means nothing gets added.
        self.assertFalse(add_entities.called)
class TestNX584ZoneSensor(unittest.TestCase):
    """Test for the NX584 zone sensor."""
    def test_sensor_normal(self):
        """The sensor mirrors the live zone dict and never polls."""
        zone_data = {'number': 1, 'name': 'foo', 'state': True}
        sensor = nx584.NX584ZoneSensor(zone_data, 'motion')
        # Name comes straight from the zone dict; polling is disabled.
        self.assertEqual('foo', sensor.name)
        self.assertFalse(sensor.should_poll)
        self.assertTrue(sensor.is_on)
        # Mutating the shared zone dict must be reflected by is_on.
        zone_data['state'] = False
        self.assertFalse(sensor.is_on)
class TestNX584Watcher(unittest.TestCase):
    """Test the NX584 watcher."""
    @mock.patch.object(nx584.NX584ZoneSensor, 'schedule_update_ha_state')
    def test_process_zone_event(self, mock_update):
        """Test the processing of zone events."""
        zone1 = {'number': 1, 'name': 'foo', 'state': True}
        zone2 = {'number': 2, 'name': 'bar', 'state': True}
        zones = {
            1: nx584.NX584ZoneSensor(zone1, 'motion'),
            2: nx584.NX584ZoneSensor(zone2, 'motion'),
        }
        watcher = nx584.NX584Watcher(None, zones)
        watcher._process_zone_event({'zone': 1, 'zone_state': False})
        # Only zone 1 was addressed: its dict is mutated in place and
        # exactly one HA state update is scheduled.
        self.assertFalse(zone1['state'])
        self.assertEqual(1, mock_update.call_count)
    @mock.patch.object(nx584.NX584ZoneSensor, 'schedule_update_ha_state')
    def test_process_zone_event_missing_zone(self, mock_update):
        """Test the processing of zone events with missing zones."""
        # Events for zones the watcher does not know are silently ignored.
        watcher = nx584.NX584Watcher(None, {})
        watcher._process_zone_event({'zone': 1, 'zone_state': False})
        self.assertFalse(mock_update.called)
    def test_run_with_zone_events(self):
        """Test the zone events."""
        empty_me = [1, 2]
        def fake_get_events():
            """Return nothing twice, then some events."""
            if empty_me:
                empty_me.pop()
            else:
                return fake_events
        client = mock.MagicMock()
        fake_events = [
            {'zone': 1, 'zone_state': True, 'type': 'zone_status'},
            {'zone': 2, 'foo': False},
        ]
        client.get_events.side_effect = fake_get_events
        watcher = nx584.NX584Watcher(client, {})
        @mock.patch.object(watcher, '_process_zone_event')
        def run(fake_process):
            """Run a fake process."""
            # Raising StopMe from the first processed event aborts _run's
            # otherwise infinite polling loop.
            fake_process.side_effect = StopMe
            self.assertRaises(StopMe, watcher._run)
            self.assertEqual(fake_process.call_count, 1)
            self.assertEqual(fake_process.call_args, mock.call(fake_events[0]))
        run()
        # Two empty polls plus the poll that finally returned events.
        self.assertEqual(3, client.get_events.call_count)
    @mock.patch('time.sleep')
    def test_run_retries_failures(self, mock_sleep):
        """Test the retries with failures."""
        empty_me = [1, 2]
        def fake_run():
            """Fake runner."""
            # Fail with a connection error twice, then stop the loop.
            if empty_me:
                empty_me.pop()
                raise requests.exceptions.ConnectionError()
            else:
                raise StopMe()
        watcher = nx584.NX584Watcher(None, {})
        with mock.patch.object(watcher, '_run') as mock_inner:
            mock_inner.side_effect = fake_run
            self.assertRaises(StopMe, watcher.run)
            self.assertEqual(3, mock_inner.call_count)
        # A 10-second back-off is slept after each connection failure.
        mock_sleep.assert_has_calls([mock.call(10), mock.call(10)])
|
|
# graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# graphicsCrawlerDisplay.py
# -------------------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and Pieter
# Abbeel in Spring 2013.
# For more info, see http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
import Tkinter
import qlearningAgents
import time
import threading
import sys
import crawler
#import pendulum
import math
from math import pi as PI
robotType = 'crawler'  # which robot to simulate: 'crawler' or 'pendulum'
class Application:
    """Tkinter GUI that trains a Q-learning agent on a simulated robot.

    The window exposes +/- buttons for the step delay, epsilon, discount
    and learning rate.  A background thread (see run) steps the learning
    environment continuously while the Tk main loop handles the widgets
    and the canvas animation driven from the module-level run() function.
    """
    def sigmoid(self, x):
        """Squash an unbounded counter into (0, 1).

        NOTE: base-2 logistic (2**-x), not the usual e-based sigmoid —
        kept as-is because the button counters are tuned for it.
        """
        return 1.0 / (1.0 + 2.0 ** (-x))
    def incrementSpeed(self, inc):
        """Scale the per-step delay by `inc` and refresh its label."""
        self.tickTime *= inc
        self.speed_label['text'] = 'Step Delay: %.5f' % (self.tickTime)
    def incrementEpsilon(self, inc):
        """Shift the epsilon counter and push the squashed value to the learner."""
        self.ep += inc
        self.epsilon = self.sigmoid(self.ep)
        self.learner.setEpsilon(self.epsilon)
        self.epsilon_label['text'] = 'Epsilon: %.3f' % (self.epsilon)
    def incrementGamma(self, inc):
        """Shift the discount counter and push the squashed value to the learner."""
        self.ga += inc
        self.gamma = self.sigmoid(self.ga)
        self.learner.setDiscount(self.gamma)
        self.gamma_label['text'] = 'Discount: %.3f' % (self.gamma)
    def incrementAlpha(self, inc):
        """Shift the learning-rate counter and push the squashed value to the learner."""
        self.al += inc
        self.alpha = self.sigmoid(self.al)
        self.learner.setLearningRate(self.alpha)
        self.alpha_label['text'] = 'Learning Rate: %.3f' % (self.alpha)
    def __initGUI(self, win):
        """Build all widgets: parameter buttons, labels and the canvas."""
        self.win = win
        win.grid()
        # Shared increments used by all the +/- buttons.
        self.dec = -.5
        self.inc = .5
        self.tickTime = 0.1
        self.setupSpeedButtonAndLabel(win)
        self.setupEpsilonButtonAndLabel(win)
        self.setUpGammaButtonAndLabel(win)
        self.setupAlphaButtonAndLabel(win)
        # BUG FIX: the canvas was parented on the global `root` instead of
        # the `win` parameter actually passed in.
        self.canvas = Tkinter.Canvas(win, height=200, width=1000)
        self.canvas.grid(row=2, columnspan=10)
    def setupAlphaButtonAndLabel(self, win):
        """Create the -/+ buttons and label controlling the learning rate."""
        self.alpha_minus = Tkinter.Button(win,
                        text="-", command=(lambda: self.incrementAlpha(self.dec)))
        self.alpha_minus.grid(row=1, column=3, padx=10)
        self.alpha = self.sigmoid(self.al)
        self.alpha_label = Tkinter.Label(win,
                        text='Learning Rate: %.3f' % (self.alpha))
        self.alpha_label.grid(row=1, column=4)
        self.alpha_plus = Tkinter.Button(win,
                        text="+", command=(lambda: self.incrementAlpha(self.inc)))
        self.alpha_plus.grid(row=1, column=5, padx=10)
    def setUpGammaButtonAndLabel(self, win):
        """Create the -/+ buttons and label controlling the discount."""
        self.gamma_minus = Tkinter.Button(win,
                        text="-", command=(lambda: self.incrementGamma(self.dec)))
        self.gamma_minus.grid(row=1, column=0, padx=10)
        self.gamma = self.sigmoid(self.ga)
        self.gamma_label = Tkinter.Label(win,
                        text='Discount: %.3f' % (self.gamma))
        self.gamma_label.grid(row=1, column=1)
        self.gamma_plus = Tkinter.Button(win,
                        text="+", command=(lambda: self.incrementGamma(self.inc)))
        self.gamma_plus.grid(row=1, column=2, padx=10)
    def setupEpsilonButtonAndLabel(self, win):
        """Create the -/+ buttons and label controlling epsilon."""
        self.epsilon_minus = Tkinter.Button(win,
                        text="-", command=(lambda: self.incrementEpsilon(self.dec)))
        self.epsilon_minus.grid(row=0, column=3)
        self.epsilon = self.sigmoid(self.ep)
        self.epsilon_label = Tkinter.Label(win,
                        text='Epsilon: %.3f' % (self.epsilon))
        self.epsilon_label.grid(row=0, column=4)
        self.epsilon_plus = Tkinter.Button(win,
                        text="+", command=(lambda: self.incrementEpsilon(self.inc)))
        self.epsilon_plus.grid(row=0, column=5)
    def setupSpeedButtonAndLabel(self, win):
        """Create the -/+ buttons and label controlling the step delay."""
        self.speed_minus = Tkinter.Button(win,
                        text="-", command=(lambda: self.incrementSpeed(.5)))
        self.speed_minus.grid(row=0, column=0)
        self.speed_label = Tkinter.Label(win,
                        text='Step Delay: %.5f' % (self.tickTime))
        self.speed_label.grid(row=0, column=1)
        self.speed_plus = Tkinter.Button(win,
                        text="+", command=(lambda: self.incrementSpeed(2)))
        self.speed_plus.grid(row=0, column=2)
    def skip5kSteps(self):
        """Request that the next 5000 steps run without the UI delay."""
        self.stepsToSkip = 5000
    def __init__(self, win):
        # Counters feeding sigmoid() for epsilon / gamma / alpha.
        self.ep = 0
        self.ga = 2
        self.al = 2
        self.stepCount = 0
        ## Init Gui
        self.__initGUI(win)
        # Init environment for the selected robot type.
        if robotType == 'crawler':
            self.robot = crawler.CrawlingRobot(self.canvas)
            self.robotEnvironment = crawler.CrawlingRobotEnvironment(self.robot)
        elif robotType == 'pendulum':
            self.robot = pendulum.PendulumRobot(self.canvas)
            self.robotEnvironment = \
                pendulum.PendulumRobotEnvironment(self.robot)
        else:
            # BUG FIX: raising a plain string is a TypeError on Python >= 2.6.
            raise ValueError('Unknown robotType: %s' % robotType)
        # Init Agent.  (An unused simulationFn lambda that referenced the
        # missing `simulation` module was removed here.)
        actionFn = lambda state: \
            self.robotEnvironment.getPossibleActions(state)
        self.learner = qlearningAgents.QLearningAgent(actionFn=actionFn)
        self.learner.setEpsilon(self.epsilon)
        self.learner.setLearningRate(self.alpha)
        self.learner.setDiscount(self.gamma)
        # Start the background learning thread; the Tk main loop is entered
        # separately via start().
        self.running = True
        self.stopped = False
        self.stepsToSkip = 0
        self.thread = threading.Thread(target=self.run)
        self.thread.start()
    def exit(self):
        """Stop the learner thread, tear down the window and exit."""
        self.running = False
        # Give the learner thread up to ~0.5s to acknowledge the stop flag.
        for _ in range(5):
            if not self.stopped:
                time.sleep(0.1)
        try:
            self.win.destroy()
        except Exception:
            pass  # the window may already be gone
        sys.exit(0)
    def step(self):
        """Advance the simulation by one (state, action, reward) transition."""
        self.stepCount += 1
        state = self.robotEnvironment.getCurrentState()
        actions = self.robotEnvironment.getPossibleActions(state)
        if not actions:
            # Terminal state: restart the episode from scratch.
            self.robotEnvironment.reset()
            state = self.robotEnvironment.getCurrentState()
            actions = self.robotEnvironment.getPossibleActions(state)
            print('Reset!')
        action = self.learner.getAction(state)
        if action is None:
            # BUG FIX: raising a plain string is a TypeError on Python >= 2.6.
            raise RuntimeError('None action returned: Code Not Complete')
        nextState, reward = self.robotEnvironment.doAction(action)
        self.learner.observeTransition(state, action, nextState, reward)
    def animatePolicy(self):
        """Paint the greedy policy over (angle, velocity) space.

        Only meaningful for the pendulum robot; blue/red/white cells mark
        kickLeft/kickRight/doNothing, unseen states are left unpainted.
        """
        if robotType != 'pendulum':
            raise RuntimeError('Only pendulum can animatePolicy')
        totWidth = self.canvas.winfo_reqwidth()
        totHeight = self.canvas.winfo_reqheight()
        length = 0.48 * min(totWidth, totHeight)
        x, y = totWidth - length - 30, length + 10
        angleMin, angleMax = self.robot.getMinAndMaxAngle()
        velMin, velMax = self.robot.getMinAndMaxAngleVelocity()
        if not hasattr(self, 'animatePolicyBox'):
            # Draw the bounding box and legend exactly once.
            self.canvas.create_line(x, y, x + length, y)
            self.canvas.create_line(x + length, y, x + length, y - length)
            self.canvas.create_line(x + length, y - length, x, y - length)
            self.canvas.create_line(x, y - length, x, y)
            self.animatePolicyBox = 1
            self.canvas.create_text(x + length / 2, y + 10, text='angle')
            self.canvas.create_text(x - 30, y - length / 2, text='velocity')
            self.canvas.create_text(x - 60, y - length / 4, text='Blue = kickLeft')
            self.canvas.create_text(x - 60, y - length / 4 + 20, text='Red = kickRight')
            self.canvas.create_text(x - 60, y - length / 4 + 40, text='White = doNothing')
        angleDelta = (angleMax - angleMin) / 100
        velDelta = (velMax - velMin) / 100
        for i in range(100):
            angle = angleMin + i * angleDelta
            for j in range(100):
                vel = velMin + j * velDelta
                state = self.robotEnvironment.getState(angle, vel)
                # Find the greedy action (renamed from `max`/`argMax` to
                # avoid shadowing the builtin max()).
                bestQ, bestAction = None, None
                if not self.learner.seenState(state):
                    bestAction = 'unseen'
                else:
                    for action in ('kickLeft', 'kickRight', 'doNothing'):
                        qVal = self.learner.getQValue(state, action)
                        if bestQ is None or qVal > bestQ:
                            bestQ, bestAction = qVal, action
                if bestAction != 'unseen':
                    color = {'kickLeft': 'blue',
                             'kickRight': 'red',
                             'doNothing': 'white'}[bestAction]
                    dx = length / 100.0
                    dy = length / 100.0
                    x0, y0 = x + i * dx, y - j * dy
                    self.canvas.create_rectangle(x0, y0, x0 + dx, y0 + dy,
                                                 fill=color)
    def run(self):
        """Learner-thread body: step forever, honoring delay/skip settings."""
        self.stepCount = 0
        self.learner.startEpisode()
        while True:
            minSleep = .01
            tm = max(minSleep, self.tickTime)
            time.sleep(tm)
            # NOTE: this overwrites any pending skip5kSteps() request each
            # tick — preserved from the original implementation.
            self.stepsToSkip = int(tm / self.tickTime) - 1
            if not self.running:
                self.stopped = True
                return
            for _ in range(self.stepsToSkip):
                self.step()
            self.stepsToSkip = 0
            self.step()
        # (an unreachable learner.stopEpisode() call after the infinite
        # loop was removed)
    def start(self):
        """Enter the Tk main loop (blocks until the window closes)."""
        self.win.mainloop()
def run():
    """Create the Tk root window, start the Application and the redraw loop."""
    global root
    root = Tkinter.Tk()
    root.title('Crawler GUI')
    root.resizable(0, 0)
    app = Application(root)
    def update_gui():
        # Redraw the robot, then re-schedule ourselves on the Tk timer.
        app.robot.draw(app.stepCount, app.tickTime)
        root.after(10, update_gui)
    update_gui()
    # Closing the window must also stop the learner thread.
    root.protocol('WM_DELETE_WINDOW', app.exit)
    try:
        app.start()
    except:
        # Best effort: whatever escapes the main loop, tear the app down.
        app.exit()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import webob.exc
from oslo.config import cfg
from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions
from neutron.openstack.common import log as logging
from neutron.openstack.common.notifier import api as notifier_api
from neutron import policy
from neutron import quota
LOG = logging.getLogger(__name__)
# Maps neutron/netaddr exception classes onto the webob HTTP error classes
# the WSGI layer should translate them into.
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
             exceptions.Conflict: webob.exc.HTTPConflict,
             exceptions.InUse: webob.exc.HTTPConflict,
             exceptions.BadRequest: webob.exc.HTTPBadRequest,
             exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
             exceptions.NotAuthorized: webob.exc.HTTPForbidden,
             netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
             }
class Controller(object):
    """Generic REST controller mapping CRUD verbs onto core-plugin calls."""
    # Symbolic action names; combined with the (optional parent and)
    # resource name these yield plugin method names such as
    # 'create_network' or 'get_router_ports'.
    LIST = 'list'
    SHOW = 'show'
    CREATE = 'create'
    UPDATE = 'update'
    DELETE = 'delete'
    def __init__(self, plugin, collection, resource, attr_info,
                 allow_bulk=False, member_actions=None, parent=None,
                 allow_pagination=False, allow_sorting=False):
        """Build a controller for one resource/collection pair.

        :param plugin: core plugin all CRUD calls are delegated to
        :param collection: plural resource name (dashes allowed)
        :param resource: singular resource name (dashes allowed)
        :param attr_info: attribute map describing the resource's fields
        :param allow_bulk: whether bulk create requests are accepted
        :param member_actions: extra member-level actions exposed on the
            resource (dispatched dynamically via __getattr__)
        :param parent: optional dict with the parent's 'member_name'
        :param allow_pagination: enable paginated listing
        :param allow_sorting: enable sorted listing
        """
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        # URL names use dashes; python/plugin identifiers use underscores.
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        # Attributes the policy engine needs even when the caller did not
        # request them explicitly (stripped again before the response).
        self._policy_attrs = [name for (name, info) in self._attr_info.items()
                              if info.get('required_by_policy')]
        self._publisher_id = notifier_api.publisher_id('network')
        self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination need native sorting support
            if not self._native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depend on native sorting")
                )
            if not self._allow_sorting:
                LOG.info(_("Allow sorting is enabled because native "
                           "pagination requires native sorting"))
                self._allow_sorting = True
        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        # Plugin handler names, e.g. 'get_networks' / 'create_network' or,
        # with a parent resource, 'get_router_ports'.
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in self._attr_info.iteritems():
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
native_pagination_attr_name = ("_%s__native_pagination_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_pagination_attr_name, False)
def _is_native_sorting_supported(self):
native_sorting_attr_name = ("_%s__native_sorting_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_sorting_attr_name, False)
    def _is_visible(self, context, attr_name, data):
        """Return True if attr_name may be shown to this request context.

        Combines the static 'is_visible' flag from the attribute map with
        an optional per-attribute policy check ('enforce_policy').
        """
        action = "%s:%s" % (self._plugin_handlers[self.SHOW], attr_name)
        # Optimistically init authz_check to True
        authz_check = True
        try:
            attr = (attributes.RESOURCE_ATTRIBUTE_MAP
                    [self._collection].get(attr_name))
            if attr and attr.get('enforce_policy'):
                authz_check = policy.check_if_exists(
                    context, action, data)
        except KeyError:
            # The extension was not configured for adding its resources
            # to the global resource attribute map. Policy check should
            # not be performed
            LOG.debug(_("The resource %(resource)s was not found in the "
                        "RESOURCE_ATTRIBUTE_MAP; unable to perform authZ "
                        "check for attribute %(attr)s"),
                      {'resource': self._collection,
                       'attr': attr_name})
        except exceptions.PolicyRuleNotFound:
            # No policy rule defined: treat the attribute as unrestricted.
            LOG.debug(_("Policy rule:%(action)s not found. Assuming no "
                        "authZ check is defined for %(attr)s"),
                      {'action': action,
                       'attr': attr_name})
        attr_val = self._attr_info.get(attr_name)
        return attr_val and attr_val['is_visible'] and authz_check
def _view(self, context, data, fields_to_strip=None):
# make sure fields_to_strip is iterable
if not fields_to_strip:
fields_to_strip = []
return dict(item for item in data.iteritems()
if (self._is_visible(context, item[0], data) and
item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
    def __getattr__(self, name):
        """Dynamically expose configured member actions as request handlers."""
        if name in self._member_actions:
            def _handle_action(request, id, **kwargs):
                arg_list = [request.context, id]
                # Fetch the resource and verify if the user can access it
                try:
                    resource = self._item(request, id, True)
                except exceptions.PolicyNotAuthorized:
                    # 404 rather than 403, to avoid leaking existence.
                    msg = _('The resource could not be found.')
                    raise webob.exc.HTTPNotFound(msg)
                body = kwargs.pop('body', None)
                # Explicit comparison with None to distinguish from {}
                if body is not None:
                    arg_list.append(body)
                # TODO(salvatore-orlando): bp/make-authz-ortogonal
                # The body of the action request should be included
                # in the info passed to the policy engine
                # It is ok to raise a 403 because accessibility to the
                # object was checked earlier in this method
                policy.enforce(request.context, name, resource)
                return getattr(self._plugin, name)(*arg_list, **kwargs)
            return _handle_action
        else:
            raise AttributeError
def _get_pagination_helper(self, request):
if self._allow_pagination and self._native_pagination:
return api_common.PaginationNativeHelper(request,
self._primary_key)
elif self._allow_pagination:
return api_common.PaginationEmulatedHelper(request,
self._primary_key)
return api_common.NoPaginationHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
if self._allow_sorting and self._native_sorting:
return api_common.SortingNativeHelper(request, self._attr_info)
elif self._allow_sorting:
return api_common.SortingEmulatedHelper(request, self._attr_info)
return api_common.NoSortingHelper(request, self._attr_info)
    def _items(self, request, do_authz=False, parent_id=None):
        """Retrieves and formats a list of elements of the requested entity."""
        # NOTE(salvatore-orlando): The following ensures that fields which
        # are needed for authZ policy validation are not stripped away by the
        # plugin before returning.
        original_fields, fields_to_add = self._do_field_list(
            api_common.list_args(request, 'fields'))
        filters = api_common.get_filters(request, self._attr_info,
                                         ['fields', 'sort_key', 'sort_dir',
                                          'limit', 'marker', 'page_reverse'])
        kwargs = {'filters': filters,
                  'fields': original_fields}
        # Sorting/pagination helpers inject their own arguments (and any
        # extra fields they need) into the plugin call.
        sorting_helper = self._get_sorting_helper(request)
        pagination_helper = self._get_pagination_helper(request)
        sorting_helper.update_args(kwargs)
        sorting_helper.update_fields(original_fields, fields_to_add)
        pagination_helper.update_args(kwargs)
        pagination_helper.update_fields(original_fields, fields_to_add)
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
        obj_list = obj_getter(request.context, **kwargs)
        obj_list = sorting_helper.sort(obj_list)
        obj_list = pagination_helper.paginate(obj_list)
        # Check authz
        if do_authz:
            # FIXME(salvatore-orlando): obj_getter might return references to
            # other resources. Must check authZ on them too.
            # Omit items from list that should not be visible
            obj_list = [obj for obj in obj_list
                        if policy.check(request.context,
                                        self._plugin_handlers[self.SHOW],
                                        obj,
                                        plugin=self._plugin)]
        # Strip the policy-only fields from the response and attach the
        # pagination links, if any.
        collection = {self._collection:
                      [self._view(request.context, obj,
                                  fields_to_strip=fields_to_add)
                       for obj in obj_list]}
        pagination_links = pagination_helper.get_links(obj_list)
        if pagination_links:
            collection[self._collection + "_links"] = pagination_links
        return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context, action, obj)
return obj
def _send_dhcp_notification(self, context, data, methodname):
if cfg.CONF.dhcp_agent_notification:
self._dhcp_agent_notifier.notify(context, data, methodname)
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
return self._items(request, True, parent_id)
    def show(self, request, id, **kwargs):
        """Returns detailed information about the requested entity."""
        try:
            # NOTE(salvatore-orlando): The following ensures that fields
            # which are needed for authZ policy validation are not stripped
            # away by the plugin before returning.
            field_list, added_fields = self._do_field_list(
                api_common.list_args(request, "fields"))
            parent_id = kwargs.get(self._parent_id_name)
            # Fetch with authZ enforced, then strip the policy-only fields
            # from the returned view.
            return {self._resource:
                    self._view(request.context,
                               self._item(request,
                                          id,
                                          do_authz=True,
                                          field_list=field_list,
                                          parent_id=parent_id),
                               fields_to_strip=added_fields)}
        except exceptions.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
objs = []
try:
for item in body[self._collection]:
kwargs = {self._resource: item}
if parent_id:
kwargs[self._parent_id_name] = parent_id
objs.append(self._view(request.context,
obj_creator(request.context,
**kwargs)))
return objs
# Note(salvatore-orlando): broad catch as in theory a plugin
# could raise any kind of exception
except Exception as ex:
for obj in objs:
obj_deleter = getattr(self._plugin,
self._plugin_handlers[self.DELETE])
try:
kwargs = ({self._parent_id_name: parent_id} if parent_id
else {})
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the exception
LOG.exception(_("Unable to undo add for "
"%(resource)s %(id)s"),
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
# plugin raised might have been created or not in the db.
# We need a way for ensuring that if it has been created,
# it is then deleted
raise ex
    def create(self, request, body=None, **kwargs):
        """Creates a new instance of the requested entity."""
        parent_id = kwargs.get(self._parent_id_name)
        # Emit the '.create.start' notification before validation.
        notifier_api.notify(request.context,
                            self._publisher_id,
                            self._resource + '.create.start',
                            notifier_api.CONF.default_notification_level,
                            body)
        body = Controller.prepare_request_body(request.context, body, True,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.CREATE]
        # Check authz
        if self._collection in body:
            # Have to account for bulk create
            items = body[self._collection]
            deltas = {}
            bulk = True
        else:
            items = [body]
            bulk = False
        for item in items:
            self._validate_network_tenant_ownership(request,
                                                    item[self._resource])
            policy.enforce(request.context,
                           action,
                           item[self._resource])
            try:
                tenant_id = item[self._resource]['tenant_id']
                count = quota.QUOTAS.count(request.context, self._resource,
                                           self._plugin, self._collection,
                                           tenant_id)
                # For bulk requests the quota delta accumulates per tenant.
                if bulk:
                    delta = deltas.get(tenant_id, 0) + 1
                    deltas[tenant_id] = delta
                else:
                    delta = 1
                kwargs = {self._resource: count + delta}
            except exceptions.QuotaResourceUnknown as e:
                # We don't want to quota this resource
                LOG.debug(e)
            else:
                quota.QUOTAS.limit_check(request.context,
                                         item[self._resource]['tenant_id'],
                                         **kwargs)
        def notify(create_result):
            # Emit '.create.end' and fan the result out to the DHCP agents
            # before handing it back to the caller.
            notifier_method = self._resource + '.create.end'
            notifier_api.notify(request.context,
                                self._publisher_id,
                                notifier_method,
                                notifier_api.CONF.default_notification_level,
                                create_result)
            self._send_dhcp_notification(request.context,
                                         create_result,
                                         notifier_method)
            return create_result
        kwargs = {self._parent_id_name: parent_id} if parent_id else {}
        if self._collection in body and self._native_bulk:
            # plugin does atomic bulk create operations
            obj_creator = getattr(self._plugin, "%s_bulk" % action)
            objs = obj_creator(request.context, body, **kwargs)
            return notify({self._collection: [self._view(request.context, obj)
                                              for obj in objs]})
        else:
            obj_creator = getattr(self._plugin, action)
            if self._collection in body:
                # Emulate atomic bulk behavior
                objs = self._emulate_bulk_create(obj_creator, request,
                                                 body, parent_id)
                return notify({self._collection: objs})
            else:
                kwargs.update({self._resource: body})
                obj = obj_creator(request.context, **kwargs)
                return notify({self._resource: self._view(request.context,
                                                          obj)})
    def delete(self, request, id, **kwargs):
        """Deletes the specified entity."""
        notifier_api.notify(request.context,
                            self._publisher_id,
                            self._resource + '.delete.start',
                            notifier_api.CONF.default_notification_level,
                            {self._resource + '_id': id})
        action = self._plugin_handlers[self.DELETE]
        # Check authz
        parent_id = kwargs.get(self._parent_id_name)
        obj = self._item(request, id, parent_id=parent_id)
        try:
            policy.enforce(request.context,
                           action,
                           obj)
        except exceptions.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, id, **kwargs)
        notifier_method = self._resource + '.delete.end'
        notifier_api.notify(request.context,
                            self._publisher_id,
                            notifier_method,
                            notifier_api.CONF.default_notification_level,
                            {self._resource + '_id': id})
        # No response body for DELETE; the view of the removed object is
        # computed only for the DHCP agent notification.
        result = {self._resource: self._view(request.context, obj)}
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
    def update(self, request, id, body=None, **kwargs):
        """Updates the specified entity's attributes."""
        parent_id = kwargs.get(self._parent_id_name)
        try:
            payload = body.copy()
        except AttributeError:
            # body was not a dict-like object at all.
            msg = _("Invalid format: %s") % request.body
            raise exceptions.BadRequest(resource='body', msg=msg)
        payload['id'] = id
        notifier_api.notify(request.context,
                            self._publisher_id,
                            self._resource + '.update.start',
                            notifier_api.CONF.default_notification_level,
                            payload)
        body = Controller.prepare_request_body(request.context, body, False,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [name for (name, value) in self._attr_info.iteritems()
                      if (value.get('required_by_policy') or
                          value.get('primary_key') or
                          'default' not in value)]
        orig_obj = self._item(request, id, field_list=field_list,
                              parent_id=parent_id)
        orig_obj.update(body[self._resource])
        try:
            policy.enforce(request.context,
                           action,
                           orig_obj)
        except exceptions.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        result = {self._resource: self._view(request.context, obj)}
        # Emit '.update.end' and notify DHCP agents with the final view.
        notifier_method = self._resource + '.update.end'
        notifier_api.notify(request.context,
                            self._publisher_id,
                            notifier_method,
                            notifier_api.CONF.default_notification_level,
                            result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
        return result
@staticmethod
def _populate_tenant_id(context, res_dict, is_create):
if (('tenant_id' in res_dict and
res_dict['tenant_id'] != context.tenant_id and
not context.is_admin)):
msg = _("Specifying 'tenant_id' other than authenticated "
"tenant in request requires admin privileges")
raise webob.exc.HTTPBadRequest(msg)
if is_create and 'tenant_id' not in res_dict:
if context.tenant_id:
res_dict['tenant_id'] = context.tenant_id
else:
msg = _("Running without keystone AuthN requires "
" that tenant_id is specified")
raise webob.exc.HTTPBadRequest(msg)
    @staticmethod
    def prepare_request_body(context, body, is_create, resource, attr_info,
                             allow_bulk=False):
        """Verifies required attributes are in request body.

        Also checking that an attribute is only specified if it is allowed
        for the given operation (create/update).
        Attribute with default values are considered to be optional.
        body argument must be the deserialized body.

        Returns the (mutated) body; raises HTTPBadRequest on any violation.
        """
        collection = resource + "s"
        if not body:
            raise webob.exc.HTTPBadRequest(_("Resource body required"))
        # Recursive helper: prepares a single item of a bulk request,
        # wrapping it as {resource: item} if it is not already wrapped.
        prep_req_body = lambda x: Controller.prepare_request_body(
            context,
            x if resource in x else {resource: x},
            is_create,
            resource,
            attr_info,
            allow_bulk)
        if collection in body:
            if not allow_bulk:
                raise webob.exc.HTTPBadRequest(_("Bulk operation "
                                                 "not supported"))
            bulk_body = [prep_req_body(item) for item in body[collection]]
            if not bulk_body:
                raise webob.exc.HTTPBadRequest(_("Resources required"))
            return {collection: bulk_body}
        res_dict = body.get(resource)
        if res_dict is None:
            msg = _("Unable to find '%s' in request body") % resource
            raise webob.exc.HTTPBadRequest(msg)
        Controller._populate_tenant_id(context, res_dict, is_create)
        Controller._verify_attributes(res_dict, attr_info)
        # NOTE: iteritems() below — this module targets Python 2.
        if is_create:  # POST
            for attr, attr_vals in attr_info.iteritems():
                if attr_vals['allow_post']:
                    # Required attributes are exactly those without a default.
                    if ('default' not in attr_vals and
                        attr not in res_dict):
                        msg = _("Failed to parse request. Required "
                                "attribute '%s' not specified") % attr
                        raise webob.exc.HTTPBadRequest(msg)
                    res_dict[attr] = res_dict.get(attr,
                                                  attr_vals.get('default'))
                else:
                    if attr in res_dict:
                        msg = _("Attribute '%s' not allowed in POST") % attr
                        raise webob.exc.HTTPBadRequest(msg)
        else:  # PUT
            for attr, attr_vals in attr_info.iteritems():
                if attr in res_dict and not attr_vals['allow_put']:
                    msg = _("Cannot update read-only attribute %s") % attr
                    raise webob.exc.HTTPBadRequest(msg)
        for attr, attr_vals in attr_info.iteritems():
            if (attr not in res_dict or
                res_dict[attr] is attributes.ATTR_NOT_SPECIFIED):
                continue
            # Convert values if necessary
            if 'convert_to' in attr_vals:
                res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
            # Check that configured values are correct
            if 'validate' not in attr_vals:
                continue
            for rule in attr_vals['validate']:
                res = attributes.validators[rule](res_dict[attr],
                                                  attr_vals['validate'][rule])
                if res:
                    msg_dict = dict(attr=attr, reason=res)
                    msg = _("Invalid input for %(attr)s. "
                            "Reason: %(reason)s.") % msg_dict
                    raise webob.exc.HTTPBadRequest(msg)
        return body
@staticmethod
def _verify_attributes(res_dict, attr_info):
extra_keys = set(res_dict.keys()) - set(attr_info.keys())
if extra_keys:
msg = _("Unrecognized attribute(s) '%s'") % ', '.join(extra_keys)
raise webob.exc.HTTPBadRequest(msg)
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine
if (request.context.is_admin or
self._resource not in ('port', 'subnet')):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
msg = _("Tenant %(tenant_id)s not allowed to "
"create %(resource)s on this network")
raise webob.exc.HTTPForbidden(msg % {
"tenant_id": resource_item['tenant_id'],
"resource": self._resource,
})
def create_resource(collection, resource, plugin, params, allow_bulk=False,
                    member_actions=None, parent=None, allow_pagination=False,
                    allow_sorting=False):
    """Build a wsgi Resource wrapping a Controller for this collection."""
    ctrl = Controller(plugin, collection, resource, params, allow_bulk,
                      member_actions=member_actions, parent=parent,
                      allow_pagination=allow_pagination,
                      allow_sorting=allow_sorting)
    return wsgi_resource.Resource(ctrl, FAULT_MAP)
|
|
import functools, hashlib
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.learners import mixture_learner
from selection.learning.utils import naive_partial_model_inference, pivot_plot
from selection.learning.core import gbm_fit_sk, infer_general_target
#### A parametric model will need something like this
class gaussian_OLS_learner(mixture_learner):
    """Mixture learner for the OLS model restricted to the selected columns.

    The "target" is the selected-model MLE: (beta_hat, dispersion), a
    vector of length s+1 where the last entry is the dispersion estimate.
    """
    def __init__(self,
                 algorithm,
                 observed_selection,
                 X_select,
                 observed_MLE,
                 observed_Y):
        (self.algorithm,
         self.observed_outcome,
         self.X_select,
         self.observed_MLE,
         self.observed_Y) = (algorithm,
                             observed_selection,
                             X_select,
                             observed_MLE,
                             observed_Y)
        self.observed_target = observed_MLE
        # Last coordinate of the MLE is the dispersion estimate.
        _dispersion = observed_MLE[-1]
        gram_matrix = X_select.T.dot(X_select)
        # Cholesky factor of dispersion * (X'X)^{-1}; not used in this class
        # body — presumably consumed by the mixture_learner parent. TODO confirm.
        self._chol = (np.linalg.cholesky(np.linalg.inv(gram_matrix)) *
                      np.sqrt(_dispersion))
        n, p = X_select.shape
        self._Xinv = np.linalg.pinv(X_select)
        # Covariance of beta_hat under the selected model.
        self._beta_cov = _dispersion * self._Xinv.dot(self._Xinv.T)
        # Residual of Y after projecting onto the selected columns (unused here).
        self._resid = observed_Y - X_select.dot(self._Xinv.dot(observed_Y))
    def learning_proposal(self):
        """
        Return perturbed data and perturbed MLE.

        Perturbs up to 3 randomly chosen coefficients of beta_hat (scaled by
        their standard errors and a random scale from self.scales, which is
        assumed to be provided by mixture_learner — TODO confirm), draws fresh
        residuals orthogonal to the column span, and rebuilds (MLE, Y).
        """
        n, s = self.X_select.shape
        beta_hat = self.observed_MLE[:-1]
        dispersion = self.observed_MLE[-1]
        perturbed_beta = beta_hat.copy()
        nidx = np.random.choice(np.arange(s), min(3, s), replace=False)
        for idx in nidx:
            scale = np.random.choice(self.scales, 1)
            perturbed_beta[idx] += (scale * np.random.standard_normal() *
                                    np.sqrt(self._beta_cov[idx, idx]))
        # Fresh noise, projected to be orthogonal to the selected columns so
        # the OLS fit on X_select is exactly perturbed_beta.
        resid = np.random.standard_normal(n) * np.sqrt(dispersion)
        resid -= self.X_select.dot(self._Xinv.dot(resid))
        perturbed_Y = self.X_select.dot(perturbed_beta) + resid
        perturbed_MLE = np.zeros(s+1)
        perturbed_MLE[:s] = perturbed_beta
        perturbed_MLE[-1] = np.sum(resid**2) / (n - s)
        return perturbed_MLE, perturbed_Y
####
def simulate(n=500, p=30, s=5, signal=(0.5, 1), sigma=2, alpha=0.1, B=2000):
    """One replication: lasso selection, then selective inference on the
    selected-model MLE (coefficients + dispersion).

    Returns a pandas DataFrame of per-coordinate results, or None when the
    lasso selects no variables. NOTE: relies on `pd`, which is only imported
    in the __main__ block below (module-global binding) — a direct import of
    this module and call to simulate() would raise NameError.
    """
    # description of statistical problem
    X, y, truth, _, _, sigmaX = gaussian_instance(n=n,
                                                  p=p,
                                                  s=s,
                                                  equicorrelated=False,
                                                  rho=0.5,
                                                  sigma=sigma,
                                                  signal=signal,
                                                  random_signs=True,
                                                  scale=False)
    def algorithm(lam, X, y):
        # Lasso via regreg: quadratic loss with Q = X'X, linear term -X'y,
        # l1 penalty at lagrange lam; returns the set of nonzero coordinates.
        n, p = X.shape
        success = np.zeros(p)
        loss = rr.quadratic_loss((p,), Q=X.T.dot(X))
        pen = rr.l1norm(p, lagrange=lam)
        S = -X.T.dot(y)
        loss.quadratic = rr.identity_quadratic(0, 0, S, 0)
        problem = rr.simple_problem(loss, pen)
        soln = problem.solve(max_its=100, tol=1.e-10)
        success += soln != 0
        return set(np.nonzero(success)[0])
    # run selection algorithm
    lam = 4. * np.sqrt(n)
    selection_algorithm = functools.partial(algorithm, lam, X)
    # Stable per-instance id so repeated runs can be grouped downstream.
    instance_hash = hashlib.md5()
    instance_hash.update(X.tobytes())
    instance_hash.update(y.tobytes())
    instance_hash.update(truth.tobytes())
    instance_id = instance_hash.hexdigest()
    observed_tuple = selection_algorithm(y)
    (pivots,
     covered,
     lengths,
     pvalues,
     lower,
     upper) = [], [], [], [], [], []
    targets = []  # NOTE(review): never populated or read below
    if len(observed_tuple) > 0:
        # Note: rebinds parameter `s` to the size of the selected set.
        s = len(observed_tuple)
        X_select = X[:, list(observed_tuple)]
        Xpi = np.linalg.pinv(X_select)
        # Target: projected true coefficients plus the true noise variance.
        final_target = np.zeros(s+1)
        final_target[:s] = Xpi.dot(X.dot(truth))
        final_target[-1] = sigma**2
        observed_target = Xpi.dot(y)
        resid = y - X_select.dot(observed_target)
        dispersion = np.linalg.norm(resid)**2 / (n-s)
        target_cov = np.zeros((s+1, s+1))
        # Chained basic slicing yields a view, so this assignment sticks.
        target_cov[:s][:,:s] = Xpi.dot(Xpi.T) * dispersion
        target_cov[s, s] = 2 * dispersion**2 / (n - s)
        MLE = np.zeros(s+1)
        MLE[:s] = observed_target
        MLE[-1] = dispersion
        learner = gaussian_OLS_learner(selection_algorithm,
                                       observed_tuple,
                                       X_select,
                                       MLE,
                                       y)
        print(observed_tuple)
        results = infer_general_target(observed_tuple,
                                       MLE,
                                       target_cov,
                                       learner,
                                       hypothesis=final_target,
                                       fit_probability=gbm_fit_sk,
                                       fit_args={'n_estimators':5000},
                                       alpha=alpha,
                                       B=B)
        for result, true_target in zip(results, final_target):
            (pivot,
             interval,
             pvalue,
             _) = result
            pvalues.append(pvalue)
            pivots.append(pivot)
            # Boolean product == logical AND for the two strict inequalities.
            covered.append((interval[0] < true_target) * (interval[1] > true_target))
            lengths.append(interval[1] - interval[0])
            lower.append(interval[0])
            upper.append(interval[1])
    if len(observed_tuple) > 0:
        df = pd.DataFrame({'pivot':pivots,
                           'pvalue':pvalues,
                           'coverage':covered,
                           'length':lengths,
                           'upper':upper,
                           'lower':lower,
                           'target':final_target,
                           'variable':list(observed_tuple) + [-1], # -1 for dispersion,
                           'id':[instance_id]*len(pivots),
                           })
        naive = True # report naive intervals as well?
        if naive:
            naive_df = naive_partial_model_inference(X,
                                                     y,
                                                     dispersion,
                                                     truth,
                                                     observed_tuple,
                                                     alpha=alpha)
            df = pd.merge(df, naive_df, on='variable')
        return df
if __name__ == "__main__":
    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    import pandas as pd
    for i in range(2000):
        df = simulate(B=10000)
        csvfile = 'lasso_selected.csv'
        outbase = csvfile[:-4]
        # simulate() returns None when the lasso selects nothing; the
        # previous code unconditionally called df.to_csv and crashed then.
        if df is None:
            continue
        if i > 0:
            try: # concatenate to disk
                df = pd.concat([df, pd.read_csv(csvfile)])
            except FileNotFoundError:
                pass
        df.to_csv(csvfile, index=False)
        if len(df['pivot']) > 0:
            pivot_ax, length_ax = pivot_plot(df, outbase)
|
|
# coding=utf-8
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
import os
import re
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
from sklearn.cluster import KMeans, MiniBatchKMeans
_GPUS = None
FLAGS = flags.FLAGS
flags.DEFINE_bool('log_device_placement', False, 'For debugging purpose.')
class EasyDict(dict):
  """Dictionary whose entries can also be read and written as attributes."""

  def __init__(self, *args, **kwargs):
    dict.__init__(self, *args, **kwargs)
    # Alias the attribute dict to the mapping itself so `d.key` and
    # `d['key']` refer to the same storage.
    self.__dict__ = self
def get_config():
  """Build a tf.ConfigProto reflecting GPU count and debug flags."""
  proto = tf.ConfigProto()
  # Soft placement is only needed when ops may land on multiple GPUs.
  if len(get_available_gpus()) > 1:
    proto.allow_soft_placement = True
  if FLAGS.log_device_placement:
    proto.log_device_placement = True
  proto.gpu_options.allow_growth = True
  return proto
def setup_tf():
  """Silence TensorFlow's Python logger and its C++ backend."""
  tf.logging.set_verbosity(tf.logging.ERROR)
  os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def smart_shape(x):
  """Shape of `x` as a list, static where known, dynamic otherwise.

  Generalized from the original, which hard-coded `range(4)` and therefore
  only worked for rank-4 tensors; rank-4 behavior is unchanged.

  Args:
    x: a tf.Tensor with known rank.

  Returns:
    list whose i-th entry is the static dimension when defined, else the
    i-th entry of tf.shape(x).
  """
  static = x.shape
  dynamic = tf.shape(x)
  return [static[i] if static[i].value is not None else dynamic[i]
          for i in range(len(static))]
def ilog2(x):
  """Integer ceiling of log2(x)."""
  # ceil(y) == -floor(-y); result is exact for powers of two.
  return int(-np.floor(-np.log2(x)))
def find_latest_checkpoint(folder):
  """Replacement for tf.train.latest_checkpoint.

  Scans `folder` for `model.ckpt-<step>.meta` files and returns the
  checkpoint prefix with the highest step, avoiding the "checkpoint" index
  file (which sometimes contains absolute paths and is hard to share).

  Args:
    folder: string, path to the checkpoint directory.

  Returns:
    string, file name of the latest checkpoint.
  """
  step_re = re.compile(r'.*model\.ckpt-(?P<step>\d+)\.meta')
  meta_files = tf.gfile.Glob(os.path.join(folder, 'model.ckpt-*.meta'))
  indexed = [(int(step_re.match(f).group('step')), f) for f in meta_files]
  latest_meta = max(indexed)[1]
  return latest_meta[:-5]  # strip the trailing ".meta"
def get_latest_global_step(folder):
  """Loads the global step from the latest checkpoint in directory.

  Args:
    folder: string, path to the checkpoint directory.

  Returns:
    int, the global step of the latest checkpoint or 0 if none was found.
  """
  try:
    checkpoint_reader = tf.train.NewCheckpointReader(
        find_latest_checkpoint(folder))
    return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
  # Deliberate best-effort: any failure (no checkpoint files, unreadable
  # checkpoint, missing global_step tensor) is treated as "no progress yet".
  except:  # pylint: disable=bare-except
    return 0
def get_latest_global_step_in_subdir(folder):
  """Loads the global step from the latest checkpoint in sub-directories.

  Args:
    folder: string, parent of the checkpoint directories.

  Returns:
    int, the global step of the latest checkpoint or 0 if none was found.
  """
  best = 0
  for entry in tf.gfile.Glob(os.path.join(folder, '*')):
    if tf.gfile.Stat(entry).IsDirectory():
      best = max(best, get_latest_global_step(entry))
  return best
def getter_ema(ema, getter, name, *args, **kwargs):
  """Exponential moving average getter for variable scopes.

  Args:
    ema: ExponentialMovingAverage object, where to get variable moving
      averages.
    getter: default variable scope getter.
    name: variable name.
    *args: extra args passed to default getter.
    **kwargs: extra args passed to default getter.

  Returns:
    If found the moving average variable, otherwise the default variable.
  """
  var = getter(name, *args, **kwargs)
  ema_var = ema.average(var)
  # ema.average returns None for variables without a tracked average.
  # Compare against None explicitly: evaluating a tf variable in a boolean
  # context is ambiguous and can raise in graph mode.
  return ema_var if ema_var is not None else var
def model_vars(scope=None):
  """Return the trainable variables, optionally filtered by scope."""
  return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
def gpu(x):
  """Map an index to a '/gpu:N' device string, round-robin over GPUs."""
  num_devices = max(1, len(get_available_gpus()))
  return '/gpu:%d' % (x % num_devices)
def get_available_gpus():
  """Return the (cached) names of the GPU devices visible to TensorFlow."""
  global _GPUS
  if _GPUS is None:
    cfg = tf.ConfigProto()
    # allow_growth so the device probe does not grab all GPU memory.
    cfg.gpu_options.allow_growth = True
    devices = device_lib.list_local_devices(session_config=cfg)
    _GPUS = tuple(d.name for d in devices if d.device_type == 'GPU')
  return _GPUS
# Adapted from:
# https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_multi_gpu_train.py
def average_gradients(tower_grads):
  """Calculate the average gradient for each shared variable across towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. For each
      tower, a list of its gradients.

  Returns:
    List of pairs of (gradient, variable) where the gradient has been
    averaged across all towers.
  """
  if len(tower_grads) <= 1:
    return tower_grads[0]
  # zip(*tower_grads) groups the (grad, var) pairs of one variable
  # across all towers; the variable object is shared, so take tower 0's.
  return [(tf.reduce_mean([g for g, _ in per_var], 0), per_var[0][1])
          for per_var in zip(*tower_grads)]
def para_list(fn, *args):
  """Run on multiple GPUs in parallel and return list of results."""
  ngpu = len(get_available_gpus())
  if ngpu <= 1:
    return zip(*[fn(*args)])
  # Split every argument along axis 0, one shard per GPU.
  shards = [tf.split(arg, ngpu) for arg in args]
  results = []
  for dev, shard in enumerate(zip(*shards)):
    setter = tf.train.replica_device_setter(
        worker_device='/gpu:%d' % dev, ps_device='/cpu:0', ps_tasks=1)
    with tf.name_scope('tower%d' % dev), tf.device(setter):
      results.append(fn(*shard))
  return zip(*results)
def para_mean(fn, *args):
  """Run on multiple GPUs in parallel and return means."""
  ngpu = len(get_available_gpus())
  if ngpu <= 1:
    return fn(*args)
  # Split every argument along axis 0, one shard per GPU.
  shards = [tf.split(arg, ngpu) for arg in args]
  results = []
  for dev, shard in enumerate(zip(*shards)):
    setter = tf.train.replica_device_setter(
        worker_device='/gpu:%d' % dev, ps_device='/cpu:0', ps_tasks=1)
    with tf.name_scope('tower%d' % dev), tf.device(setter):
      results.append(fn(*shard))
  if isinstance(results[0], (tuple, list)):
    return [tf.reduce_mean(r, 0) for r in zip(*results)]
  return tf.reduce_mean(results, 0)
def para_cat(fn, *args):
  """Run on multiple GPUs in parallel and return concatenated outputs."""
  ngpu = len(get_available_gpus())
  if ngpu <= 1:
    return fn(*args)
  # Split every argument along axis 0, one shard per GPU.
  shards = [tf.split(arg, ngpu) for arg in args]
  results = []
  for dev, shard in enumerate(zip(*shards)):
    setter = tf.train.replica_device_setter(
        worker_device='/gpu:%d' % dev, ps_device='/cpu:0', ps_tasks=1)
    with tf.name_scope('tower%d' % dev), tf.device(setter):
      results.append(fn(*shard))
  if isinstance(results[0], (tuple, list)):
    return [tf.concat(r, axis=0) for r in zip(*results)]
  return tf.concat(results, axis=0)
def get_low_confidence_from_each_clusters(data, n_clusters, grow_size,
                                          confidences):
  """Cluster data into n_clusters clusters and pick low confidence samples
  from each cluster such that the total number of samples picked is
  grow_size.

  Samples are allocated to clusters proportionally to cluster size, then
  within each cluster the lowest-confidence samples are chosen.

  Args:
    data: array of samples; reshaped to (num_samples, -1) before clustering.
    n_clusters: int, number of k-means clusters.
    grow_size: int, total number of sample indices to return.
    confidences: per-sample confidence scores, indexable by sample index.

  Returns:
    1-D numpy array of selected sample indices (length grow_size, assuming
    enough samples exist — TODO confirm for very small clusters).
  """
  data = data.reshape(data.shape[0], -1)  # reshape in case not a vector
  kmeans = MiniBatchKMeans(n_clusters=n_clusters,
                           batch_size=n_clusters*4,
                           reassignment_ratio=0.1,
                           random_state=0, max_iter=1000,
                           ).fit(data)
  pred = kmeans.labels_
  sizes = np.histogram(pred, bins=range(n_clusters+1))[0]  # size of clusters
  # Proportional allocation, rounded to the nearest integer per cluster.
  grow_size_clusters = np.rint((grow_size * (sizes / np.sum(sizes)))).astype(int)
  # To make sure we get grow_size samples in total
  # If the current number of samples is smaller than grow_size, add randomly
  # (only to clusters that still have unallocated members).
  while np.sum(grow_size_clusters) < grow_size:
    idx = np.random.choice(np.where(sizes > grow_size_clusters)[0], 1)[0]
    grow_size_clusters[idx] += 1
  # If the current number of samples is larger than grow_size, remove from the
  # largest cluster
  while np.sum(grow_size_clusters) > grow_size:
    idx = np.argmax(grow_size_clusters)
    grow_size_clusters[idx] -= 1
  selected = []
  for c in range(n_clusters):
    idx_c = np.where(pred == c)[0]
    # Lowest-confidence members of cluster c, up to its allocation.
    idx = idx_c[confidences[idx_c].argsort()[:grow_size_clusters[c]]]
    selected.append(idx)
  return np.concatenate(selected).reshape(-1)
def idx_to_fixlen(indices, length):
  """Right-pad `indices` with -1 sentinels to (at least) `length` entries.

  Note: the pad value is -1, not 0 — `fixlen_to_idx` recovers the prefix up
  to the last non-negative entry. The previous docstring ("pad zeros") was
  wrong. If `indices` is already longer than `length`, it is returned
  unchanged (never truncated).

  Args:
    indices: 1-D integer numpy array of indices.
    length: int, desired minimum output length.

  Returns:
    1-D numpy array: `indices` followed by -1 padding.
  """
  pad = np.full(max(0, length - indices.size), -1, dtype=np.int32)
  return np.concatenate([indices, pad])
def fixlen_to_idx(indices):
  """Strip trailing -1 padding: keep everything up to (and including) the
  last non-negative entry of `indices`."""
  last_valid = np.flatnonzero(indices >= 0)[-1]
  return indices[:last_valid + 1]
def get_class_dist(labels, nclass):
  """Empirical distribution of `labels` over `nclass` classes."""
  counts = np.zeros(nclass)
  for label in labels:
    counts[label] += 1
  return counts / counts.sum()
|
|
import struct
from elftools.elf import elffile, sections
import archinfo
from .absobj import Symbol, Relocation, Segment, Section
from .metaelf import MetaELF
from .errors import CLEError, CLEInvalidBinaryError
import logging
l = logging.getLogger('cle.elf')
__all__ = ('ELFSymbol', 'ELF')
class ELFSymbol(Symbol):
    """Symbol populated from a pyelftools symbol entry."""
    def __init__(self, owner, symb):
        entry = symb.entry
        # The arch object maps arch-specific symbol types to generic ones.
        sym_type = owner.arch.translate_symbol_type(entry.st_info.type)
        super(ELFSymbol, self).__init__(owner,
                                        symb.name,
                                        entry.st_value,
                                        entry.st_size,
                                        entry.st_info.bind,
                                        sym_type,
                                        entry.st_shndx)
class ELFRelocation(Relocation):
    """Relocation populated from a pyelftools relocation entry."""
    def __init__(self, readelf_reloc, owner, symbol):
        entry = readelf_reloc.entry
        # Only RELA-style relocations carry an explicit addend.
        if readelf_reloc.is_RELA():
            addend = entry.r_addend
        else:
            addend = None
        super(ELFRelocation, self).__init__(owner,
                                            symbol,
                                            entry.r_offset,
                                            entry.r_info_type,
                                            addend)
class ELFSegment(Segment):
    """Segment exposing the ELF p_flags permission bits."""
    def __init__(self, readelf_seg):
        hdr = readelf_seg.header
        self.flags = hdr.p_flags
        super(ELFSegment, self).__init__(hdr.p_offset,
                                         hdr.p_vaddr,
                                         hdr.p_filesz,
                                         hdr.p_memsz)
    @property
    def is_readable(self):
        return bool(self.flags & 4)    # PF_R
    @property
    def is_writable(self):
        return bool(self.flags & 2)    # PF_W
    @property
    def is_executable(self):
        return bool(self.flags & 1)    # PF_X
class ELFSection(Section):
    """Section populated from a pyelftools section header."""
    def __init__(self, readelf_sec):
        hdr = readelf_sec.header
        super(ELFSection, self).__init__(readelf_sec.name,
                                         hdr.sh_offset,
                                         hdr.sh_addr,
                                         hdr.sh_size,
                                         hdr.sh_type,
                                         hdr.sh_entsize,
                                         hdr.sh_flags,
                                         hdr.sh_link,
                                         hdr.sh_info,
                                         hdr.sh_addralign)
class ELF(MetaELF):
    '''
    The main loader class for statically loading elves. Uses the pyreadelf library where useful.

    NOTE: this module targets Python 2 (`long`, list-returning `map`).
    '''
    def __init__(self, binary, **kwargs):
        super(ELF, self).__init__(binary, **kwargs)
        self.reader = elffile.ELFFile(open(self.binary, 'rb'))
        # Get an appropriate archinfo.Arch for this binary, unless the user specified one
        if self.arch is None:
            # e_flags bit 0x200 / 0x400 distinguish ARM soft-float (EL) from
            # hard-float (HF) ABIs; EI_DATA gives the endianness.
            if self.reader.header.e_machine == 'EM_ARM' and \
                    self.reader.header.e_flags & 0x200:
                self.set_arch(archinfo.ArchARMEL('Iend_LE' if 'LSB' in self.reader.header.e_ident.EI_DATA else 'Iend_BE'))
            elif self.reader.header.e_machine == 'EM_ARM' and \
                    self.reader.header.e_flags & 0x400:
                self.set_arch(archinfo.ArchARMHF('Iend_LE' if 'LSB' in self.reader.header.e_ident.EI_DATA else 'Iend_BE'))
            else:
                self.set_arch(archinfo.arch_from_id(self.reader.header.e_machine,
                                                    self.reader.header.e_ident.EI_DATA,
                                                    self.reader.header.e_ident.EI_CLASS))
        # Populated while parsing the PT_DYNAMIC segment:
        self.strtab = None
        self.dynsym = None
        self.hashtable = None
        self._dynamic = {}
        self.deps = []
        self.rela_type = None
        # Initializer/finalizer state, filled lazily by _extract_init_fini():
        self._inits_extracted = False
        self._preinit_arr = []
        self._init_func = None
        self._init_arr = []
        self._fini_func = None
        self._fini_arr = []
        # Symbol and relocation bookkeeping:
        self._symbol_cache = {}
        self.symbols_by_addr = {}
        self.imports = {}
        self.resolved_imports = []
        self.relocs = []
        self.jmprel = {}
        self._entry = self.reader.header.e_entry
        # ET_DYN objects (shared libs, PIEs) are position-independent.
        self.pic = self.reader.header.e_type == 'ET_DYN'
        self.tls_used = False
        self.tls_module_id = None
        self.tls_block_offset = None
        self.tls_block_size = None
        self.tls_tdata_start = None
        self.tls_tdata_size = None
        self.__register_segments()
        self.__register_sections()
        # call the methods defined by MetaELF
        self._ppc64_abiv1_entry_fix()
        self._load_plt()
    def __getstate__(self):
        # Drop the unpicklable pyelftools objects; __setstate__ rebuilds them.
        self.reader = None
        self.strtab = None
        self.dynsym = None
        self.hashtable = None
        return self.__dict__
    def __setstate__(self, data):
        # Rebuild reader, string/symbol tables and hash table from
        # the surviving self._dynamic values (mirrors __register_dyn).
        self.__dict__.update(data)
        self.reader = elffile.ELFFile(open(self.binary, 'rb'))
        if self._dynamic and 'DT_STRTAB' in self._dynamic:
            fakestrtabheader = {
                'sh_offset': self._dynamic['DT_STRTAB']
            }
            self.strtab = elffile.StringTableSection(fakestrtabheader, 'strtab_cle', self.memory)
            if 'DT_SYMTAB' in self._dynamic and 'DT_SYMENT' in self._dynamic:
                fakesymtabheader = {
                    'sh_offset': self._dynamic['DT_SYMTAB'],
                    'sh_entsize': self._dynamic['DT_SYMENT'],
                    'sh_size': 0
                } # bogus size: no iteration allowed
                self.dynsym = elffile.SymbolTableSection(fakesymtabheader, 'symtab_cle', self.memory, self.reader, self.strtab)
                if 'DT_GNU_HASH' in self._dynamic:
                    self.hashtable = GNUHashTable(self.dynsym, self.memory, self._dynamic['DT_GNU_HASH'], self.arch)
                elif 'DT_HASH' in self._dynamic:
                    self.hashtable = ELFHashTable(self.dynsym, self.memory, self._dynamic['DT_HASH'], self.arch)
    def get_symbol(self, symid):
        """
        Gets a Symbol object for the specified symbol
        @param symid: either an index into .dynsym or the name of a symbol.

        Results are memoized by name in self._symbol_cache. Name lookups
        return None when there is no hash table or no match.
        """
        # `long` check: Python 2 only.
        if isinstance(symid, (int, long)):
            re_sym = self.dynsym.get_symbol(symid)
            if re_sym.name in self._symbol_cache:
                return self._symbol_cache[re_sym.name]
            symbol = ELFSymbol(self, re_sym)
            self._symbol_cache[re_sym.name] = symbol
            return symbol
        elif isinstance(symid, str):
            if symid in self._symbol_cache:
                return self._symbol_cache[symid]
            if self.hashtable is None:
                return None
            re_sym = self.hashtable.get(symid)
            if re_sym is None:
                return None
            symbol = ELFSymbol(self, re_sym)
            self._symbol_cache[symid] = symbol
            return symbol
        elif isinstance(symid, sections.Symbol):
            # Already a pyelftools symbol object — wrap it directly.
            if symid.name in self._symbol_cache:
                return self._symbol_cache[symid.name]
            symbol = ELFSymbol(self, symid)
            self._symbol_cache[symid.name] = symbol
            return symbol
        else:
            raise CLEError("Bad symbol identifier: %s" % symid)
    def _extract_init_fini(self):
        # Extract the initializers and finalizers
        # (addresses read from the DT_*INIT*/DT_*FINI* dynamic entries;
        # Python 2 `map` returns a list here).
        if 'DT_PREINIT_ARRAY' in self._dynamic and 'DT_PREINIT_ARRAYSZ' in self._dynamic:
            arr_start = self._dynamic['DT_PREINIT_ARRAY']
            arr_end = arr_start + self._dynamic['DT_PREINIT_ARRAYSZ']
            arr_entsize = self.arch.bytes
            self._preinit_arr = map(self.memory.read_addr_at, range(arr_start, arr_end, arr_entsize))
        if 'DT_INIT' in self._dynamic:
            self._init_func = self._dynamic['DT_INIT']
        if 'DT_INIT_ARRAY' in self._dynamic and 'DT_INIT_ARRAYSZ' in self._dynamic:
            arr_start = self._dynamic['DT_INIT_ARRAY']
            arr_end = arr_start + self._dynamic['DT_INIT_ARRAYSZ']
            arr_entsize = self.arch.bytes
            self._init_arr = map(self.memory.read_addr_at, range(arr_start, arr_end, arr_entsize))
        if 'DT_FINI' in self._dynamic:
            self._fini_func = self._dynamic['DT_FINI']
        if 'DT_FINI_ARRAY' in self._dynamic and 'DT_FINI_ARRAYSZ' in self._dynamic:
            arr_start = self._dynamic['DT_FINI_ARRAY']
            arr_end = arr_start + self._dynamic['DT_FINI_ARRAYSZ']
            arr_entsize = self.arch.bytes
            self._fini_arr = map(self.memory.read_addr_at, range(arr_start, arr_end, arr_entsize))
        self._inits_extracted = True
    def get_initializers(self):
        """Return the addresses of the initializers this object expects
        the loader to run (main binary vs shared object semantics differ)."""
        if not self._inits_extracted: self._extract_init_fini()
        out = []
        if self.is_main_bin:
            # Preinitializers are ignored in shared objects.
            out.extend(self._preinit_arr)
        else:
            # The init func and the init array in the dynamic section are only run by the dynamic loader in shared objects.
            # In the main binary they are run by libc_csu_init.
            if self._init_func is not None:
                out.append(self._init_func + self.rebase_addr)
            out.extend(self._init_arr)
        return out
    def get_finalizers(self):
        """Return the rebased addresses of DT_FINI and the DT_FINI_ARRAY."""
        if not self._inits_extracted: self._extract_init_fini()
        out = []
        if self._fini_func is not None:
            out.append(self._fini_func + self.rebase_addr)
        out.extend(map(self._rebase_addr, self._fini_arr))
        return out
    def __register_segments(self):
        # Dispatch each program header to its handler.
        for seg_readelf in self.reader.iter_segments():
            if seg_readelf.header.p_type == 'PT_LOAD':
                self._load_segment(seg_readelf)
            elif seg_readelf.header.p_type == 'PT_DYNAMIC':
                self.__register_dyn(seg_readelf)
            elif seg_readelf.header.p_type == 'PT_TLS':
                self.__register_tls(seg_readelf)
    def _rebase_addr(self, addr):
        # Translate a link-time address to its loaded location.
        return addr + self.rebase_addr
    def _load_segment(self, seg):
        '''
        Loads a segment based on a LOAD directive in the program header table
        '''
        self.segments.append(ELFSegment(seg))
        seg_data = seg.data()
        # Zero-fill the BSS portion (memsz beyond filesz).
        if seg.header.p_memsz > seg.header.p_filesz:
            seg_data += '\0' * (seg.header.p_memsz - seg.header.p_filesz)
        self.memory.add_backer(seg.header.p_vaddr, seg_data)
    def __register_dyn(self, seg_readelf):
        '''
        Parse the dynamic section for dynamically linked objects
        '''
        for tag in seg_readelf.iter_tags():
            # Create a dictionary, self._dynamic, mapping DT_* strings to their values
            tagstr = self.arch.translate_dynamic_tag(tag.entry.d_tag)
            self._dynamic[tagstr] = tag.entry.d_val
            # For tags that may appear more than once, handle them here
            if tagstr == 'DT_NEEDED':
                self.deps.append(tag.entry.d_val)
        # None of the following things make sense without a string table
        if 'DT_STRTAB' in self._dynamic:
            # To handle binaries without section headers, we need to hack around pyreadelf's assumptions
            # make our own string table
            fakestrtabheader = {
                'sh_offset': self._dynamic['DT_STRTAB']
            }
            self.strtab = elffile.StringTableSection(fakestrtabheader, 'strtab_cle', self.memory)
            # get the list of strings that are the required shared libraries
            self.deps = map(self.strtab.get_string, self.deps)
            # get the string for the "shared object name" that this binary provides
            if 'DT_SONAME' in self._dynamic:
                self.provides = self.strtab.get_string(self._dynamic['DT_SONAME'])
            # None of the following structures can be used without a symbol table
            if 'DT_SYMTAB' in self._dynamic and 'DT_SYMENT' in self._dynamic:
                # Construct our own symbol table to hack around pyreadelf assuming section headers are around
                fakesymtabheader = {
                    'sh_offset': self._dynamic['DT_SYMTAB'],
                    'sh_entsize': self._dynamic['DT_SYMENT'],
                    'sh_size': 0
                } # bogus size: no iteration allowed
                self.dynsym = elffile.SymbolTableSection(fakesymtabheader, 'symtab_cle', self.memory, self.reader, self.strtab)
                # set up the hash table, prefering the gnu hash section to the old hash section
                # the hash table lets you get any symbol given its name
                if 'DT_GNU_HASH' in self._dynamic:
                    self.hashtable = GNUHashTable(self.dynsym, self.memory, self._dynamic['DT_GNU_HASH'], self.arch)
                elif 'DT_HASH' in self._dynamic:
                    self.hashtable = ELFHashTable(self.dynsym, self.memory, self._dynamic['DT_HASH'], self.arch)
                else:
                    l.warning("No hash table available in %s", self.binary)
                # mips' relocations are absolutely screwed up, handle some of them here.
                self.__relocate_mips()
                # perform a lot of checks to figure out what kind of relocation tables are around
                # (DT_PLTREL values: 7 == DT_RELA, 17 == DT_REL)
                self.rela_type = None
                if 'DT_PLTREL' in self._dynamic:
                    if self._dynamic['DT_PLTREL'] == 7:
                        self.rela_type = 'RELA'
                        relentsz = self.reader.structs.Elf_Rela.sizeof()
                    elif self._dynamic['DT_PLTREL'] == 17:
                        self.rela_type = 'REL'
                        relentsz = self.reader.structs.Elf_Rel.sizeof()
                    else:
                        raise CLEInvalidBinaryError('DT_PLTREL is not REL or RELA?')
                else:
                    if 'DT_RELA' in self._dynamic:
                        self.rela_type = 'RELA'
                        relentsz = self.reader.structs.Elf_Rela.sizeof()
                    elif 'DT_REL' in self._dynamic:
                        self.rela_type = 'REL'
                        relentsz = self.reader.structs.Elf_Rel.sizeof()
                    else:
                        # No relocation tables at all: nothing left to parse.
                        return
                # try to parse relocations out of a table of type DT_REL{,A}
                if 'DT_' + self.rela_type in self._dynamic:
                    reloffset = self._dynamic['DT_' + self.rela_type]
                    relsz = self._dynamic['DT_' + self.rela_type + 'SZ']
                    fakerelheader = {
                        'sh_offset': reloffset,
                        'sh_type': 'SHT_' + self.rela_type,
                        'sh_entsize': relentsz,
                        'sh_size': relsz
                    }
                    readelf_relocsec = elffile.RelocationSection(fakerelheader, 'reloc_cle', self.memory, self.reader)
                    self.__register_relocs(readelf_relocsec)
                # try to parse relocations out of a table of type DT_JMPREL
                if 'DT_JMPREL' in self._dynamic:
                    jmpreloffset = self._dynamic['DT_JMPREL']
                    jmprelsz = self._dynamic['DT_PLTRELSZ']
                    fakejmprelheader = {
                        'sh_offset': jmpreloffset,
                        'sh_type': 'SHT_' + self.rela_type,
                        'sh_entsize': relentsz,
                        'sh_size': jmprelsz
                    }
                    readelf_jmprelsec = elffile.RelocationSection(fakejmprelheader, 'jmprel_cle', self.memory, self.reader)
                    # jmprel maps symbol name -> PLT relocation.
                    self.jmprel = {reloc.symbol.name: reloc for reloc in self.__register_relocs(readelf_jmprelsec)}
    def __register_relocs(self, section):
        """Wrap each relocation in the section as an ELFRelocation; returns
        the new relocations and also appends them to self.relocs."""
        relocs = []
        for readelf_reloc in section.iter_relocations():
            # MIPS64 is just plain old fucked up
            # https://www.sourceware.org/ml/libc-alpha/2003-03/msg00153.html
            if self.arch.name == 'MIPS64':
                # Little endian additionally needs one of its fields reversed... WHY
                if self.arch.memory_endness == 'Iend_LE':
                    readelf_reloc.entry.r_info_sym = readelf_reloc.entry.r_info & 0xFFFFFFFF
                    readelf_reloc.entry.r_info = struct.unpack('>Q', struct.pack('<Q', readelf_reloc.entry.r_info))[0]
                # MIPS64 packs up to three relocation types plus an extra
                # symbol field into r_info; emit one ELFRelocation per type.
                type_1 = readelf_reloc.entry.r_info & 0xFF
                type_2 = readelf_reloc.entry.r_info >> 8 & 0xFF
                type_3 = readelf_reloc.entry.r_info >> 16 & 0xFF
                extra_sym = readelf_reloc.entry.r_info >> 24 & 0xFF
                if extra_sym != 0:
                    l.error('r_info_extra_sym is nonzero??? PLEASE SEND HELP')
                symbol = self.get_symbol(readelf_reloc.entry.r_info_sym)
                if type_1 != 0:
                    readelf_reloc.entry.r_info_type = type_1
                    reloc = ELFRelocation(readelf_reloc, self, symbol)
                    relocs.append(reloc)
                    self.relocs.append(reloc)
                if type_2 != 0:
                    readelf_reloc.entry.r_info_type = type_2
                    reloc = ELFRelocation(readelf_reloc, self, symbol)
                    relocs.append(reloc)
                    self.relocs.append(reloc)
                if type_3 != 0:
                    readelf_reloc.entry.r_info_type = type_3
                    reloc = ELFRelocation(readelf_reloc, self, symbol)
                    relocs.append(reloc)
                    self.relocs.append(reloc)
            else:
                symbol = self.get_symbol(readelf_reloc.entry.r_info_sym)
                reloc = ELFRelocation(readelf_reloc, self, symbol)
                relocs.append(reloc)
                self.relocs.append(reloc)
        return relocs
    def __register_tls(self, seg_readelf):
        # Record the thread-local storage template from the PT_TLS header.
        self.tls_used = True
        self.tls_block_size = seg_readelf.header.p_memsz
        self.tls_tdata_size = seg_readelf.header.p_filesz
        self.tls_tdata_start = seg_readelf.header.p_vaddr
    def __register_sections(self):
        for sec_readelf in self.reader.iter_sections():
            section = ELFSection(sec_readelf)
            self.sections.append(section)
            self.sections_map[section.name] = section
            # Static symbol tables (.symtab/.dynsym) contribute symbols too.
            if isinstance(sec_readelf, elffile.SymbolTableSection):
                self.__register_section_symbols(sec_readelf)
    def __register_section_symbols(self, sec_re):
        # Populate the symbol cache; unnamed symbols are skipped.
        for sym_re in sec_re.iter_symbols():
            if sym_re.name == '':
                continue
            self.get_symbol(sym_re)
    def __relocate_mips(self):
        """Synthesize GOT relocations for MIPS, whose ABI does not emit
        ordinary relocation entries for the GOT. Returns False when the
        binary is not MIPS (no DT_MIPS_BASE_ADDRESS tag)."""
        if 'DT_MIPS_BASE_ADDRESS' not in self._dynamic:
            return False
        got_local_num = self._dynamic['DT_MIPS_LOCAL_GOTNO'] # number of local GOT entries
        # a.k.a the index of the first global GOT entry
        symtab_got_idx = self._dynamic['DT_MIPS_GOTSYM']   # index of first symbol w/ GOT entry
        symbol_count = self._dynamic['DT_MIPS_SYMTABNO']
        gotaddr = self._dynamic['DT_PLTGOT']
        wordsize = self.arch.bytes
        for i in range(got_local_num):
            reloc = Relocation(self, None, gotaddr + i*wordsize, 'mips_local')
            self.relocs.append(reloc)
        for i in range(symbol_count - symtab_got_idx):
            symbol = self.get_symbol(i + symtab_got_idx)
            reloc = Relocation(self, symbol, gotaddr + (i + got_local_num)*wordsize, 'mips_global')
            self.relocs.append(reloc)
            self.jmprel[symbol.name] = reloc
        return True
class ELFHashTable(object):
    """
    Functions to do lookup from a HASH section of an ELF file.

    Information: http://docs.oracle.com/cd/E23824_01/html/819-0690/chapter6-48031.html
    """
    def __init__(self, symtab, stream, offset, arch):
        """
        @param symtab The symbol table to perform lookups from (as a pyelftools SymbolTableSection)
        @param stream A file-like object to read from the ELF's memory
        @param offset The offset in the object where the table starts
        @param arch The ArchInfo object for the ELF file
        """
        self.symtab = symtab
        endian = '<' if arch.memory_endness == 'Iend_LE' else '>'
        stream.seek(offset)
        # Layout: nbuckets, nchains, then the two uint32 arrays.
        self.nbuckets, self.nchains = struct.unpack(endian + '2I', stream.read(8))
        self.buckets = struct.unpack('%s%dI' % (endian, self.nbuckets),
                                     stream.read(4 * self.nbuckets))
        self.chains = struct.unpack('%s%dI' % (endian, self.nchains),
                                    stream.read(4 * self.nchains))
    def get(self, k):
        """
        Perform a lookup. Returns a pyelftools Symbol object, or None if there is no match.

        @param k The string to look up
        """
        bucket = self.elf_hash(k) % self.nbuckets
        idx = self.buckets[bucket]
        # Walk the chain; index 0 (STN_UNDEF) terminates it.
        while idx != 0:
            candidate = self.symtab.get_symbol(idx)
            if candidate.name == k:
                return candidate
            idx = self.chains[idx]
        return None
    # from http://www.partow.net/programming/hashfunctions/
    @staticmethod
    def elf_hash(key):
        """Standard System V ELF hash of the string `key`."""
        h = 0
        for ch in key:
            h = (h << 4) + ord(ch)
            top = h & 0xF0000000
            if top:
                h ^= top >> 24
            h &= ~top
        return h
class GNUHashTable(object):
    """
    Functions to do lookup from a GNU_HASH section of an ELF file.
    Information: https://blogs.oracle.com/ali/entry/gnu_hash_elf_sections
    """
    def __init__(self, symtab, stream, offset, arch):
        """
        @param symtab The symbol table to perform lookups from (as a pyelftools SymbolTableSection)
        @param stream A file-like object to read from the ELF's memory
        @param offset The offset in the object where the table starts
        @param arch The ArchInfo object for the ELF file
        """
        self.symtab = symtab
        fmt = '<' if arch.memory_endness == 'Iend_LE' else '>'
        self.c = arch.bits          # ELF class word size in bits (32 or 64)
        fmtsz = 'I' if self.c == 32 else 'Q'
        stream.seek(offset)
        self.nbuckets, self.symndx, self.maskwords, self.shift2 = \
            struct.unpack(fmt + 'IIII', stream.read(16))
        # BUGFIX: use floor division.  The original '/' yields a float under
        # Python 3, and stream.read() raises TypeError on a float size.
        self.bloom = struct.unpack(fmt + fmtsz*self.maskwords, stream.read(self.c*self.maskwords//8))
        self.buckets = struct.unpack(fmt + 'I'*self.nbuckets, stream.read(4*self.nbuckets))
    def _matches_bloom(self, H1):
        """Bloom-filter test for hash H1; False means the symbol is definitely absent."""
        C = self.c
        H2 = H1 >> self.shift2
        # BUGFIX: '//' keeps N an int; '/' made it a float and broke indexing
        # into self.bloom under Python 3.
        N = ((H1 // C) & (self.maskwords - 1))
        BITMASK = (1 << (H1 % C)) | (1 << (H2 % C))
        return (self.bloom[N] & BITMASK) == BITMASK
    def get(self, k):
        """
        Perform a lookup. Returns a pyelftools Symbol object, or None if there is no match.
        @param k The string to look up
        """
        h = self.gnu_hash(k)
        if not self._matches_bloom(h):
            return None
        n = self.buckets[h % self.nbuckets]
        if n == 0:
            return None
        try:
            sym = self.symtab.get_symbol(n)
            # Symbols sharing a bucket are stored contiguously; scan forward
            # until the bucket changes or the table runs out (KeyError).
            while True:
                if sym.name == k:
                    return sym
                n += 1
                sym = self.symtab.get_symbol(n)
                if (self.gnu_hash(sym.name) % self.nbuckets) != (h % self.nbuckets):
                    break
        except KeyError:
            pass
        return None
    @staticmethod
    def gnu_hash(key):
        """GNU (djb2-style) hash of *key*, truncated to 32 bits."""
        h = 5381
        for c in key:
            h = h * 33 + ord(c)
        return h & 0xFFFFFFFF
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import sys
from .traversal import Traversal
from .traversal import TraversalStrategies
from .strategies import VertexProgramStrategy
from .traversal import Bytecode
from ..driver.remote_connection import RemoteStrategy
from .. import statics
from ..statics import long
class GraphTraversalSource(object):
    """Spawning point for Gremlin traversals over *graph*.

    The withXXX() methods return a configured copy of the source; V(), E(),
    addV(), addE(), inject() and io() spawn a new GraphTraversal.
    """
    def __init__(self, graph, traversal_strategies, bytecode=None):
        self.graph = graph
        self.traversal_strategies = traversal_strategies
        self.bytecode = Bytecode() if bytecode is None else bytecode
        self.graph_traversal = GraphTraversal
    def __repr__(self):
        return "graphtraversalsource[%s]" % str(self.graph)
    def get_graph_traversal_source(self):
        """Return a copy of this source with cloned strategies and bytecode."""
        return self.__class__(self.graph, TraversalStrategies(self.traversal_strategies), Bytecode(self.bytecode))
    def get_graph_traversal(self):
        """Return a fresh traversal seeded with a copy of this source's bytecode."""
        return self.graph_traversal(self.graph, self.traversal_strategies, Bytecode(self.bytecode))
    def _with_source_step(self, name, *args):
        # Shared body of the configuration (withXXX) steps.
        source = self.get_graph_traversal_source()
        source.bytecode.add_source(name, *args)
        return source
    def _spawn_traversal_step(self, name, *args):
        # Shared body of the traversal-spawning steps.
        traversal = self.get_graph_traversal()
        traversal.bytecode.add_step(name, *args)
        return traversal
    def withBulk(self, *args):
        return self._with_source_step("withBulk", *args)
    def withPath(self, *args):
        return self._with_source_step("withPath", *args)
    def withSack(self, *args):
        return self._with_source_step("withSack", *args)
    def withSideEffect(self, *args):
        return self._with_source_step("withSideEffect", *args)
    def withStrategies(self, *args):
        return self._with_source_step("withStrategies", *args)
    def with_(self, *args):
        return self._with_source_step("with", *args)
    def withoutStrategies(self, *args):
        return self._with_source_step("withoutStrategies", *args)
    def withRemote(self, remote_connection):
        """Return a copy of this source whose traversals execute remotely."""
        source = self.get_graph_traversal_source()
        source.traversal_strategies.add_strategies([RemoteStrategy(remote_connection)])
        return source
    def withComputer(self, graph_computer=None, workers=None, result=None, persist=None, vertices=None,
                     edges=None, configuration=None):
        """Shortcut for withStrategies(VertexProgramStrategy(...))."""
        return self.withStrategies(VertexProgramStrategy(graph_computer, workers, result, persist, vertices,
                                                         edges, configuration))
    def E(self, *args):
        return self._spawn_traversal_step("E", *args)
    def V(self, *args):
        return self._spawn_traversal_step("V", *args)
    def addE(self, *args):
        return self._spawn_traversal_step("addE", *args)
    def addV(self, *args):
        return self._spawn_traversal_step("addV", *args)
    def inject(self, *args):
        return self._spawn_traversal_step("inject", *args)
    def io(self, *args):
        return self._spawn_traversal_step("io", *args)
class GraphTraversal(Traversal):
    """Gremlin graph traversal DSL.

    Every step method appends an instruction to this traversal's bytecode
    and returns ``self`` so that steps can be fluently chained.
    """
    def __init__(self, graph, traversal_strategies, bytecode):
        super(GraphTraversal, self).__init__(graph, traversal_strategies, bytecode)
    def __getitem__(self, index):
        """Translate g.V()[2] into range() and g.V()[0:3] into limit()/range()."""
        if isinstance(index, int):
            return self.range(long(index), long(index + 1))
        elif isinstance(index, slice):
            low = long(0) if index.start is None else long(index.start)
            high = long(sys.maxsize) if index.stop is None else long(index.stop)
            if low == long(0):
                return self.limit(high)
            else:
                return self.range(low,high)
        else:
            raise TypeError("Index must be int or slice")
    def __getattr__(self, key):
        """Treat unknown attribute access as a values() step ("sugar").

        BUGFIX: refuse names starting with a double underscore.  Python
        probes objects for magic methods (__copy__, __deepcopy__,
        __getstate__, ...) via getattr; silently turning those probes into
        values() steps corrupted the bytecode and could recurse infinitely.
        """
        if key.startswith('__'):
            raise AttributeError(
                'Python magic methods or keys starting with double underscore cannot be used for Gremlin sugar - prefer values(' + key + ')')
        return self.values(key)
    def V(self, *args):
        self.bytecode.add_step("V", *args)
        return self
    def addE(self, *args):
        self.bytecode.add_step("addE", *args)
        return self
    def addV(self, *args):
        self.bytecode.add_step("addV", *args)
        return self
    def aggregate(self, *args):
        self.bytecode.add_step("aggregate", *args)
        return self
    def and_(self, *args):
        self.bytecode.add_step("and", *args)
        return self
    def as_(self, *args):
        self.bytecode.add_step("as", *args)
        return self
    def barrier(self, *args):
        self.bytecode.add_step("barrier", *args)
        return self
    def both(self, *args):
        self.bytecode.add_step("both", *args)
        return self
    def bothE(self, *args):
        self.bytecode.add_step("bothE", *args)
        return self
    def bothV(self, *args):
        self.bytecode.add_step("bothV", *args)
        return self
    def branch(self, *args):
        self.bytecode.add_step("branch", *args)
        return self
    def by(self, *args):
        self.bytecode.add_step("by", *args)
        return self
    def cap(self, *args):
        self.bytecode.add_step("cap", *args)
        return self
    def choose(self, *args):
        self.bytecode.add_step("choose", *args)
        return self
    def coalesce(self, *args):
        self.bytecode.add_step("coalesce", *args)
        return self
    def coin(self, *args):
        self.bytecode.add_step("coin", *args)
        return self
    def connectedComponent(self, *args):
        self.bytecode.add_step("connectedComponent", *args)
        return self
    def constant(self, *args):
        self.bytecode.add_step("constant", *args)
        return self
    def count(self, *args):
        self.bytecode.add_step("count", *args)
        return self
    def cyclicPath(self, *args):
        self.bytecode.add_step("cyclicPath", *args)
        return self
    def dedup(self, *args):
        self.bytecode.add_step("dedup", *args)
        return self
    def drop(self, *args):
        self.bytecode.add_step("drop", *args)
        return self
    def emit(self, *args):
        self.bytecode.add_step("emit", *args)
        return self
    def filter_(self, *args):
        self.bytecode.add_step("filter", *args)
        return self
    def flatMap(self, *args):
        self.bytecode.add_step("flatMap", *args)
        return self
    def fold(self, *args):
        self.bytecode.add_step("fold", *args)
        return self
    def from_(self, *args):
        self.bytecode.add_step("from", *args)
        return self
    def group(self, *args):
        self.bytecode.add_step("group", *args)
        return self
    def groupCount(self, *args):
        self.bytecode.add_step("groupCount", *args)
        return self
    def has(self, *args):
        self.bytecode.add_step("has", *args)
        return self
    def hasId(self, *args):
        self.bytecode.add_step("hasId", *args)
        return self
    def hasKey(self, *args):
        self.bytecode.add_step("hasKey", *args)
        return self
    def hasLabel(self, *args):
        self.bytecode.add_step("hasLabel", *args)
        return self
    def hasNot(self, *args):
        self.bytecode.add_step("hasNot", *args)
        return self
    def hasValue(self, *args):
        self.bytecode.add_step("hasValue", *args)
        return self
    def id_(self, *args):
        self.bytecode.add_step("id", *args)
        return self
    def identity(self, *args):
        self.bytecode.add_step("identity", *args)
        return self
    def inE(self, *args):
        self.bytecode.add_step("inE", *args)
        return self
    def inV(self, *args):
        self.bytecode.add_step("inV", *args)
        return self
    def in_(self, *args):
        self.bytecode.add_step("in", *args)
        return self
    def index(self, *args):
        self.bytecode.add_step("index", *args)
        return self
    def inject(self, *args):
        self.bytecode.add_step("inject", *args)
        return self
    def is_(self, *args):
        self.bytecode.add_step("is", *args)
        return self
    def key(self, *args):
        self.bytecode.add_step("key", *args)
        return self
    def label(self, *args):
        self.bytecode.add_step("label", *args)
        return self
    def limit(self, *args):
        self.bytecode.add_step("limit", *args)
        return self
    def local(self, *args):
        self.bytecode.add_step("local", *args)
        return self
    def loops(self, *args):
        self.bytecode.add_step("loops", *args)
        return self
    def map(self, *args):
        self.bytecode.add_step("map", *args)
        return self
    def match(self, *args):
        self.bytecode.add_step("match", *args)
        return self
    def math(self, *args):
        self.bytecode.add_step("math", *args)
        return self
    def max_(self, *args):
        self.bytecode.add_step("max", *args)
        return self
    def mean(self, *args):
        self.bytecode.add_step("mean", *args)
        return self
    def min_(self, *args):
        self.bytecode.add_step("min", *args)
        return self
    def not_(self, *args):
        self.bytecode.add_step("not", *args)
        return self
    def option(self, *args):
        self.bytecode.add_step("option", *args)
        return self
    def optional(self, *args):
        self.bytecode.add_step("optional", *args)
        return self
    def or_(self, *args):
        self.bytecode.add_step("or", *args)
        return self
    def order(self, *args):
        self.bytecode.add_step("order", *args)
        return self
    def otherV(self, *args):
        self.bytecode.add_step("otherV", *args)
        return self
    def out(self, *args):
        self.bytecode.add_step("out", *args)
        return self
    def outE(self, *args):
        self.bytecode.add_step("outE", *args)
        return self
    def outV(self, *args):
        self.bytecode.add_step("outV", *args)
        return self
    def pageRank(self, *args):
        self.bytecode.add_step("pageRank", *args)
        return self
    def path(self, *args):
        self.bytecode.add_step("path", *args)
        return self
    def peerPressure(self, *args):
        self.bytecode.add_step("peerPressure", *args)
        return self
    def profile(self, *args):
        self.bytecode.add_step("profile", *args)
        return self
    def program(self, *args):
        self.bytecode.add_step("program", *args)
        return self
    def project(self, *args):
        self.bytecode.add_step("project", *args)
        return self
    def properties(self, *args):
        self.bytecode.add_step("properties", *args)
        return self
    def property(self, *args):
        self.bytecode.add_step("property", *args)
        return self
    def propertyMap(self, *args):
        self.bytecode.add_step("propertyMap", *args)
        return self
    def range_(self, *args):
        self.bytecode.add_step("range", *args)
        return self
    def read(self, *args):
        self.bytecode.add_step("read", *args)
        return self
    def repeat(self, *args):
        self.bytecode.add_step("repeat", *args)
        return self
    def sack(self, *args):
        self.bytecode.add_step("sack", *args)
        return self
    def sample(self, *args):
        self.bytecode.add_step("sample", *args)
        return self
    def select(self, *args):
        self.bytecode.add_step("select", *args)
        return self
    def shortestPath(self, *args):
        self.bytecode.add_step("shortestPath", *args)
        return self
    def sideEffect(self, *args):
        self.bytecode.add_step("sideEffect", *args)
        return self
    def simplePath(self, *args):
        self.bytecode.add_step("simplePath", *args)
        return self
    def skip(self, *args):
        self.bytecode.add_step("skip", *args)
        return self
    def store(self, *args):
        self.bytecode.add_step("store", *args)
        return self
    def subgraph(self, *args):
        self.bytecode.add_step("subgraph", *args)
        return self
    def sum_(self, *args):
        self.bytecode.add_step("sum", *args)
        return self
    def tail(self, *args):
        self.bytecode.add_step("tail", *args)
        return self
    def timeLimit(self, *args):
        self.bytecode.add_step("timeLimit", *args)
        return self
    def times(self, *args):
        self.bytecode.add_step("times", *args)
        return self
    def to(self, *args):
        self.bytecode.add_step("to", *args)
        return self
    def toE(self, *args):
        self.bytecode.add_step("toE", *args)
        return self
    def toV(self, *args):
        self.bytecode.add_step("toV", *args)
        return self
    def tree(self, *args):
        self.bytecode.add_step("tree", *args)
        return self
    def unfold(self, *args):
        self.bytecode.add_step("unfold", *args)
        return self
    def union(self, *args):
        self.bytecode.add_step("union", *args)
        return self
    def until(self, *args):
        self.bytecode.add_step("until", *args)
        return self
    def value(self, *args):
        self.bytecode.add_step("value", *args)
        return self
    def valueMap(self, *args):
        self.bytecode.add_step("valueMap", *args)
        return self
    def values(self, *args):
        self.bytecode.add_step("values", *args)
        return self
    def where(self, *args):
        self.bytecode.add_step("where", *args)
        return self
    def with_(self, *args):
        self.bytecode.add_step("with", *args)
        return self
    def write(self, *args):
        self.bytecode.add_step("write", *args)
        return self
    # Deprecated - prefer the underscore suffixed versions e.g filter_()
    def filter(self, *args):
        self.bytecode.add_step("filter", *args)
        return self
    def id(self, *args):
        self.bytecode.add_step("id", *args)
        return self
    def max(self, *args):
        self.bytecode.add_step("max", *args)
        return self
    def min(self, *args):
        self.bytecode.add_step("min", *args)
        return self
    def range(self, *args):
        self.bytecode.add_step("range", *args)
        return self
    def sum(self, *args):
        self.bytecode.add_step("sum", *args)
        return self
class __(object):
    """Anonymous-traversal spawner.

    Each classmethod starts a fresh GraphTraversal (detached from any graph
    and strategies) with that step applied, for use inside other steps,
    e.g. ``repeat(__.out())``.  This mechanically mirrors GraphTraversal's
    step methods.
    """
    graph_traversal = GraphTraversal
    @classmethod
    def start(cls):
        return GraphTraversal(None, None, Bytecode())
    @classmethod
    def __(cls, *args):
        return __.inject(*args)
    @classmethod
    def V(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).V(*args)
    @classmethod
    def addE(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).addE(*args)
    @classmethod
    def addV(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).addV(*args)
    @classmethod
    def aggregate(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).aggregate(*args)
    @classmethod
    def and_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).and_(*args)
    @classmethod
    def as_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).as_(*args)
    @classmethod
    def barrier(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).barrier(*args)
    @classmethod
    def both(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).both(*args)
    @classmethod
    def bothE(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).bothE(*args)
    @classmethod
    def bothV(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).bothV(*args)
    @classmethod
    def branch(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).branch(*args)
    @classmethod
    def cap(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).cap(*args)
    @classmethod
    def choose(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).choose(*args)
    @classmethod
    def coalesce(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).coalesce(*args)
    @classmethod
    def coin(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).coin(*args)
    @classmethod
    def constant(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).constant(*args)
    @classmethod
    def count(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).count(*args)
    @classmethod
    def cyclicPath(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).cyclicPath(*args)
    @classmethod
    def dedup(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).dedup(*args)
    @classmethod
    def drop(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).drop(*args)
    @classmethod
    def emit(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).emit(*args)
    @classmethod
    def filter_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).filter_(*args)
    @classmethod
    def flatMap(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).flatMap(*args)
    @classmethod
    def fold(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).fold(*args)
    @classmethod
    def group(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).group(*args)
    @classmethod
    def groupCount(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).groupCount(*args)
    @classmethod
    def has(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).has(*args)
    @classmethod
    def hasId(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).hasId(*args)
    @classmethod
    def hasKey(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).hasKey(*args)
    @classmethod
    def hasLabel(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).hasLabel(*args)
    @classmethod
    def hasNot(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).hasNot(*args)
    @classmethod
    def hasValue(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).hasValue(*args)
    @classmethod
    def id_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).id_(*args)
    @classmethod
    def identity(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).identity(*args)
    @classmethod
    def inE(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).inE(*args)
    @classmethod
    def inV(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).inV(*args)
    @classmethod
    def in_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).in_(*args)
    @classmethod
    def index(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).index(*args)
    @classmethod
    def inject(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).inject(*args)
    @classmethod
    def is_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).is_(*args)
    @classmethod
    def key(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).key(*args)
    @classmethod
    def label(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).label(*args)
    @classmethod
    def limit(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).limit(*args)
    @classmethod
    def local(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).local(*args)
    @classmethod
    def loops(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).loops(*args)
    @classmethod
    def map(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).map(*args)
    @classmethod
    def match(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).match(*args)
    @classmethod
    def math(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).math(*args)
    @classmethod
    def max_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).max_(*args)
    @classmethod
    def mean(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).mean(*args)
    @classmethod
    def min_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).min_(*args)
    @classmethod
    def not_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).not_(*args)
    @classmethod
    def optional(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).optional(*args)
    @classmethod
    def or_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).or_(*args)
    @classmethod
    def order(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).order(*args)
    @classmethod
    def otherV(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).otherV(*args)
    @classmethod
    def out(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).out(*args)
    @classmethod
    def outE(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).outE(*args)
    @classmethod
    def outV(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).outV(*args)
    @classmethod
    def path(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).path(*args)
    @classmethod
    def project(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).project(*args)
    @classmethod
    def properties(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).properties(*args)
    @classmethod
    def property(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).property(*args)
    @classmethod
    def propertyMap(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).propertyMap(*args)
    @classmethod
    def range_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).range_(*args)
    @classmethod
    def repeat(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).repeat(*args)
    @classmethod
    def sack(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).sack(*args)
    @classmethod
    def sample(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).sample(*args)
    @classmethod
    def select(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).select(*args)
    @classmethod
    def sideEffect(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).sideEffect(*args)
    @classmethod
    def simplePath(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).simplePath(*args)
    @classmethod
    def skip(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).skip(*args)
    @classmethod
    def store(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).store(*args)
    @classmethod
    def subgraph(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).subgraph(*args)
    @classmethod
    def sum_(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).sum_(*args)
    @classmethod
    def tail(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).tail(*args)
    @classmethod
    def timeLimit(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).timeLimit(*args)
    @classmethod
    def times(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).times(*args)
    @classmethod
    def to(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).to(*args)
    @classmethod
    def toE(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).toE(*args)
    @classmethod
    def toV(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).toV(*args)
    @classmethod
    def tree(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).tree(*args)
    @classmethod
    def unfold(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).unfold(*args)
    @classmethod
    def union(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).union(*args)
    @classmethod
    def until(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).until(*args)
    @classmethod
    def value(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).value(*args)
    @classmethod
    def valueMap(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).valueMap(*args)
    @classmethod
    def values(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).values(*args)
    @classmethod
    def where(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).where(*args)
    # Deprecated - prefer the underscore suffixed versions e.g filter_()
    @classmethod
    def filter(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).filter_(*args)
    @classmethod
    def id(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).id_(*args)
    @classmethod
    def max(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).max_(*args)
    @classmethod
    def min(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).min_(*args)
    @classmethod
    def range(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).range_(*args)
    @classmethod
    def sum(cls, *args):
        return cls.graph_traversal(None, None, Bytecode()).sum_(*args)
# Module-level aliases of the anonymous-traversal spawners on ``__``.
# Registered with gremlin_python.statics below so user code can write
# bare V(), out(), has(...) after statics.load_statics(globals()).
def V(*args):
    return __.V(*args)
def addE(*args):
    return __.addE(*args)
def addV(*args):
    return __.addV(*args)
def aggregate(*args):
    return __.aggregate(*args)
def and_(*args):
    return __.and_(*args)
def as_(*args):
    return __.as_(*args)
def barrier(*args):
    return __.barrier(*args)
def both(*args):
    return __.both(*args)
def bothE(*args):
    return __.bothE(*args)
def bothV(*args):
    return __.bothV(*args)
def branch(*args):
    return __.branch(*args)
def cap(*args):
    return __.cap(*args)
def choose(*args):
    return __.choose(*args)
def coalesce(*args):
    return __.coalesce(*args)
def coin(*args):
    return __.coin(*args)
def constant(*args):
    return __.constant(*args)
def count(*args):
    return __.count(*args)
def cyclicPath(*args):
    return __.cyclicPath(*args)
def dedup(*args):
    return __.dedup(*args)
def drop(*args):
    return __.drop(*args)
def emit(*args):
    return __.emit(*args)
def filter_(*args):
    return __.filter_(*args)
def flatMap(*args):
    return __.flatMap(*args)
def fold(*args):
    return __.fold(*args)
def group(*args):
    return __.group(*args)
def groupCount(*args):
    return __.groupCount(*args)
def has(*args):
    return __.has(*args)
def hasId(*args):
    return __.hasId(*args)
def hasKey(*args):
    return __.hasKey(*args)
def hasLabel(*args):
    return __.hasLabel(*args)
def hasNot(*args):
    return __.hasNot(*args)
def hasValue(*args):
    return __.hasValue(*args)
def id_(*args):
    return __.id_(*args)
def identity(*args):
    return __.identity(*args)
def inE(*args):
    return __.inE(*args)
def inV(*args):
    return __.inV(*args)
def in_(*args):
    return __.in_(*args)
def index(*args):
    return __.index(*args)
def inject(*args):
    return __.inject(*args)
def is_(*args):
    return __.is_(*args)
def key(*args):
    return __.key(*args)
def label(*args):
    return __.label(*args)
def limit(*args):
    return __.limit(*args)
def local(*args):
    return __.local(*args)
def loops(*args):
    return __.loops(*args)
def map(*args):
    return __.map(*args)
def match(*args):
    return __.match(*args)
def math(*args):
    return __.math(*args)
def max_(*args):
    return __.max_(*args)
def mean(*args):
    return __.mean(*args)
def min_(*args):
    return __.min_(*args)
def not_(*args):
    return __.not_(*args)
def optional(*args):
    return __.optional(*args)
def or_(*args):
    return __.or_(*args)
def order(*args):
    return __.order(*args)
def otherV(*args):
    return __.otherV(*args)
def out(*args):
    return __.out(*args)
def outE(*args):
    return __.outE(*args)
def outV(*args):
    return __.outV(*args)
def path(*args):
    return __.path(*args)
def project(*args):
    return __.project(*args)
def properties(*args):
    return __.properties(*args)
def property(*args):
    return __.property(*args)
def propertyMap(*args):
    return __.propertyMap(*args)
def range_(*args):
    return __.range_(*args)
def repeat(*args):
    return __.repeat(*args)
def sack(*args):
    return __.sack(*args)
def sample(*args):
    return __.sample(*args)
def select(*args):
    return __.select(*args)
def sideEffect(*args):
    return __.sideEffect(*args)
def simplePath(*args):
    return __.simplePath(*args)
def skip(*args):
    return __.skip(*args)
def store(*args):
    return __.store(*args)
def subgraph(*args):
    return __.subgraph(*args)
def sum_(*args):
    return __.sum_(*args)
def tail(*args):
    return __.tail(*args)
def timeLimit(*args):
    return __.timeLimit(*args)
def times(*args):
    return __.times(*args)
def to(*args):
    return __.to(*args)
def toE(*args):
    return __.toE(*args)
def toV(*args):
    return __.toV(*args)
def tree(*args):
    return __.tree(*args)
def unfold(*args):
    return __.unfold(*args)
def union(*args):
    return __.union(*args)
def until(*args):
    return __.until(*args)
def value(*args):
    return __.value(*args)
def valueMap(*args):
    return __.valueMap(*args)
def values(*args):
    return __.values(*args)
def where(*args):
    return __.where(*args)
# Deprecated - prefer the underscore suffixed versions e.g filter_()
def filter(*args):
    return __.filter_(*args)
def id(*args):
    return __.id_(*args)
def max(*args):
    return __.max_(*args)
def min(*args):
    return __.min_(*args)
def range(*args):
    return __.range_(*args)
def sum(*args):
    return __.sum_(*args)
# Register each spawner with gremlin_python.statics so that
# statics.load_statics(globals()) can expose them as module globals.
# NOTE: several names deliberately shadow Python builtins (map, property,
# filter, id, max, min, range, sum) to match the Gremlin DSL.
statics.add_static('V', V)
statics.add_static('addE', addE)
statics.add_static('addV', addV)
statics.add_static('aggregate', aggregate)
statics.add_static('and_', and_)
statics.add_static('as_', as_)
statics.add_static('barrier', barrier)
statics.add_static('both', both)
statics.add_static('bothE', bothE)
statics.add_static('bothV', bothV)
statics.add_static('branch', branch)
statics.add_static('cap', cap)
statics.add_static('choose', choose)
statics.add_static('coalesce', coalesce)
statics.add_static('coin', coin)
statics.add_static('constant', constant)
statics.add_static('count', count)
statics.add_static('cyclicPath', cyclicPath)
statics.add_static('dedup', dedup)
statics.add_static('drop', drop)
statics.add_static('emit', emit)
statics.add_static('filter_', filter_)
statics.add_static('flatMap', flatMap)
statics.add_static('fold', fold)
statics.add_static('group', group)
statics.add_static('groupCount', groupCount)
statics.add_static('has', has)
statics.add_static('hasId', hasId)
statics.add_static('hasKey', hasKey)
statics.add_static('hasLabel', hasLabel)
statics.add_static('hasNot', hasNot)
statics.add_static('hasValue', hasValue)
statics.add_static('id_', id_)
statics.add_static('identity', identity)
statics.add_static('inE', inE)
statics.add_static('inV', inV)
statics.add_static('in_', in_)
statics.add_static('index', index)
statics.add_static('inject', inject)
statics.add_static('is_', is_)
statics.add_static('key', key)
statics.add_static('label', label)
statics.add_static('limit', limit)
statics.add_static('local', local)
statics.add_static('loops', loops)
statics.add_static('map', map)
statics.add_static('match', match)
statics.add_static('math', math)
statics.add_static('max_', max_)
statics.add_static('mean', mean)
statics.add_static('min_', min_)
statics.add_static('not_', not_)
statics.add_static('optional', optional)
statics.add_static('or_', or_)
statics.add_static('order', order)
statics.add_static('otherV', otherV)
statics.add_static('out', out)
statics.add_static('outE', outE)
statics.add_static('outV', outV)
statics.add_static('path', path)
statics.add_static('project', project)
statics.add_static('properties', properties)
statics.add_static('property', property)
statics.add_static('propertyMap', propertyMap)
statics.add_static('range_', range_)
statics.add_static('repeat', repeat)
statics.add_static('sack', sack)
statics.add_static('sample', sample)
statics.add_static('select', select)
statics.add_static('sideEffect', sideEffect)
statics.add_static('simplePath', simplePath)
statics.add_static('skip', skip)
statics.add_static('store', store)
statics.add_static('subgraph', subgraph)
statics.add_static('sum_', sum_)
statics.add_static('tail', tail)
statics.add_static('timeLimit', timeLimit)
statics.add_static('times', times)
statics.add_static('to', to)
statics.add_static('toE', toE)
statics.add_static('toV', toV)
statics.add_static('tree', tree)
statics.add_static('unfold', unfold)
statics.add_static('union', union)
statics.add_static('until', until)
statics.add_static('value', value)
statics.add_static('valueMap', valueMap)
statics.add_static('values', values)
statics.add_static('where', where)
# Deprecated - prefer the underscore suffixed versions e.g filter_()
statics.add_static('filter', filter)
statics.add_static('id', id)
statics.add_static('max', max)
statics.add_static('min', min)
statics.add_static('range', range)
statics.add_static('sum', sum)
|
|
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2017 Guillaume Papin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
import argparse
import difflib
import fnmatch
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
# File extensions formatted by default (C/C++/CUDA family).
DEFAULT_EXTENSIONS = "c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu"
class ExitStatus:
    """Process exit codes: SUCCESS = clean, DIFF = reformatting needed, TROUBLE = error."""
    SUCCESS = 0
    DIFF = 1
    TROUBLE = 2
def list_files(files, recursive=False, extensions=None, exclude=None):
    """Collect the paths to process.

    Explicitly named files are always kept as-is; directories are only
    expanded when *recursive* is set, in which case their contents are
    filtered by *extensions* and the glob-like *exclude* patterns.
    """
    extensions = [] if extensions is None else extensions
    exclude = [] if exclude is None else exclude

    collected = []
    for entry in files:
        if not (recursive and os.path.isdir(entry)):
            collected.append(entry)
            continue
        for dirpath, dnames, fnames in os.walk(entry):
            fpaths = [os.path.join(dirpath, fname) for fname in fnames]
            for pattern in exclude:
                # Trim dnames in place so os.walk() skips excluded
                # subtrees instead of listing them needlessly.
                dnames[:] = [d for d in dnames
                             if not fnmatch.fnmatch(os.path.join(dirpath, d), pattern)]
                fpaths = [p for p in fpaths if not fnmatch.fnmatch(p, pattern)]
            collected.extend(
                p for p in fpaths if os.path.splitext(p)[1][1:] in extensions)
    return collected
def make_diff(file, original, reformatted):
    """Return a unified diff (3 context lines) between *original* and *reformatted*.

    Both inputs are lists of lines; the result is empty when they match.
    """
    from_label = f"{file}\t(original)"
    to_label = f"{file}\t(reformatted)"
    diff = difflib.unified_diff(original, reformatted,
                                fromfile=from_label, tofile=to_label, n=3)
    return list(diff)
class DiffError(Exception):
    """Raised when a file cannot be read or clang-format fails.

    ``errs`` carries any stderr lines so the caller can forward them.
    """

    def __init__(self, message, errs=None):
        super().__init__(message)
        if errs is None:
            errs = []
        self.errs = errs
class UnexpectedError(Exception):
    """Wraps an unforeseen exception together with its formatted traceback."""

    def __init__(self, message, exc=None):
        super().__init__(message)
        self.exc = exc
        # Snapshot the traceback of the exception currently being handled.
        self.formatted_traceback = traceback.format_exc()
def run_clang_format_diff_wrapper(args, file):
    """Run the per-file diff, normalizing unforeseen failures.

    DiffError propagates untouched; anything else is converted into an
    UnexpectedError that carries the file name and original exception.
    """
    try:
        return run_clang_format_diff(args, file)
    except DiffError:
        raise
    except Exception as e:
        raise UnexpectedError(f"{file}: {e.__class__.__name__}: {e}", e)
def run_clang_format_diff(args, file):
    """Run clang-format on *file* and return ``(diff_lines, stderr_lines)``.

    ``diff_lines`` is a unified diff between the file on disk and
    clang-format's reformatted output (empty when already formatted).

    Raises DiffError when the file cannot be read, clang-format cannot be
    started, or it exits non-zero.
    """
    try:
        with open(file, encoding="utf-8") as f:
            original = f.readlines()
    except OSError as exc:
        raise DiffError(str(exc))
    invocation = [args.clang_format_executable, file]
    # Decode process output as utf-8, based on the assumptions (which may be
    # incorrect) that:
    # - clang-format returns the bytes read from the files as-is, and the
    #   files are already assumed to be utf-8;
    # - internationalized diagnostics would be utf-8 as well:
    #   http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
    try:
        proc = subprocess.Popen(
            invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding="utf-8"
        )
    except OSError as exc:
        raise DiffError(f"Command '{subprocess.list2cmdline(invocation)}' failed to start: {exc}")
    # BUGFIX: communicate() drains stdout and stderr concurrently, so a child
    # that fills the stderr pipe can no longer deadlock us (the previous
    # sequential readlines() on stdout then stderr could).  communicate()
    # also waits for the child, setting proc.returncode.
    stdout, stderr = proc.communicate()
    outs = stdout.splitlines(keepends=True)
    errs = stderr.splitlines(keepends=True)
    if proc.returncode:
        raise DiffError(
            "Command '{}' returned non-zero exit status {}".format(
                subprocess.list2cmdline(invocation), proc.returncode
            ),
            errs,
        )
    return make_diff(file, original, outs), errs
def bold_red(s):
    """Wrap *s* in ANSI bold + red escape sequences."""
    return f"\x1b[1m\x1b[31m{s}\x1b[0m"
def colorize(diff_lines):
    """Yield *diff_lines* with ANSI colors applied per unified-diff line kind."""
    def wrap(code, s):
        # SGR escape: bold=1, red=31, green=32, cyan=36.
        return "\x1b[" + code + "m" + s + "\x1b[0m"

    for line in diff_lines:
        if line[:4] in ("--- ", "+++ "):
            yield wrap("1", line)       # file headers: bold
        elif line.startswith("@@ "):
            yield wrap("36", line)      # hunk headers: cyan
        elif line.startswith("+"):
            yield wrap("32", line)      # additions: green
        elif line.startswith("-"):
            yield wrap("31", line)      # removals: red
        else:
            yield line                  # context lines: unchanged
def print_diff(diff_lines, use_color):
    """Write diff lines to stdout, optionally colorized."""
    sys.stdout.writelines(colorize(diff_lines) if use_color else diff_lines)
def print_trouble(prog, message, use_colors):
    """Report *message* on stderr as '<prog>: error: <message>', red when colored."""
    prefix = bold_red("error:") if use_colors else "error:"
    print(f"{prog}: {prefix} {message}", file=sys.stderr)
def main():
    """Entry point: lint files with clang-format and print a unified diff.

    Returns an ExitStatus value: SUCCESS when no file needs reformatting,
    DIFF when at least one does, TROUBLE on any error.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--clang-format-executable",
        metavar="EXECUTABLE",
        help="path to the clang-format executable",
        default="clang-format",
    )
    parser.add_argument(
        "--extensions",
        help=f"comma separated list of file extensions (default: {DEFAULT_EXTENSIONS})",
        default=DEFAULT_EXTENSIONS,
    )
    parser.add_argument("-r", "--recursive", action="store_true", help="run recursively over directories")
    parser.add_argument("files", metavar="file", nargs="+")
    parser.add_argument("-q", "--quiet", action="store_true")
    parser.add_argument(
        "-j",
        metavar="N",
        type=int,
        default=0,
        help="run N clang-format jobs in parallel (default number of cpus + 1)",
    )
    parser.add_argument(
        "--color", default="auto", choices=["auto", "always", "never"], help="show colored diff (default: auto)"
    )
    parser.add_argument(
        "-e",
        "--exclude",
        metavar="PATTERN",
        action="append",
        default=[],
        help="exclude paths matching the given glob-like pattern(s) from recursive search",
    )
    args = parser.parse_args()

    # use default signal handling, like diff return SIGINT value on ^C
    # https://bugs.python.org/issue14229#msg156446
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    try:
        signal.SIGPIPE
    except AttributeError:
        # compatibility, SIGPIPE does not exist on Windows
        pass
    else:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    colored_stdout = False
    colored_stderr = False
    if args.color == "always":
        colored_stdout = True
        colored_stderr = True
    elif args.color == "auto":
        colored_stdout = sys.stdout.isatty()
        colored_stderr = sys.stderr.isatty()

    # Fail early if the clang-format executable is missing or broken.
    version_invocation = [args.clang_format_executable, "--version"]
    try:
        subprocess.check_call(version_invocation, stdout=DEVNULL)
    except subprocess.CalledProcessError as e:
        print_trouble(parser.prog, str(e), use_colors=colored_stderr)
        return ExitStatus.TROUBLE
    except OSError as e:
        print_trouble(
            parser.prog,
            f"Command '{subprocess.list2cmdline(version_invocation)}' failed to start: {e}",
            use_colors=colored_stderr,
        )
        return ExitStatus.TROUBLE

    retcode = ExitStatus.SUCCESS
    files = list_files(
        args.files, recursive=args.recursive, exclude=args.exclude, extensions=args.extensions.split(",")
    )
    if not files:
        return

    njobs = args.j
    if njobs == 0:
        njobs = multiprocessing.cpu_count() + 1
    njobs = min(len(files), njobs)
    if njobs == 1:
        # execute directly instead of in a pool,
        # less overhead, simpler stacktraces
        it = (run_clang_format_diff_wrapper(args, file) for file in files)
        pool = None
    else:
        pool = multiprocessing.Pool(njobs)
        it = pool.imap_unordered(partial(run_clang_format_diff_wrapper, args), files)
    while True:
        try:
            outs, errs = next(it)
        except StopIteration:
            break
        except DiffError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            retcode = ExitStatus.TROUBLE
            sys.stderr.writelines(e.errs)
        except UnexpectedError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            sys.stderr.write(e.formatted_traceback)
            retcode = ExitStatus.TROUBLE
            # stop at the first unexpected error,
            # something could be very wrong,
            # don't process all files unnecessarily
            if pool:
                pool.terminate()
            break
        else:
            sys.stderr.writelines(errs)
            if outs == []:
                continue
            if not args.quiet:
                print_diff(outs, use_color=colored_stdout)
            if retcode == ExitStatus.SUCCESS:
                retcode = ExitStatus.DIFF
    if pool:
        # BUGFIX: previously the pool was never closed/joined on the normal
        # path, leaking worker processes.  close() is a no-op after
        # terminate(), so this is safe on the error path too.
        pool.close()
        pool.join()
    return retcode
if __name__ == "__main__":
sys.exit(main())
|
|
from datetime import datetime
from abc import ABCMeta, abstractmethod
import argparse
import os.path
import numpy as np
import gdal
import netCDF4
import yaml
from eodatasets import serialise
from ingester.utils import _get_nbands_lats_lons_from_gdalds
EPOCH = datetime(1970, 1, 1, 0, 0, 0)
class BaseNetCDF(object):
    """
    Base class for creating a NetCDF file based upon GeoTIFF data.
    Sub-classes will create the NetCDF in different structures.
    """
    __metaclass__ = ABCMeta
    def __init__(self, netcdf_path, mode='r', chunk_x=400, chunk_y=400, chunk_time=1):
        # chunk_* control the on-disk chunking of variables created later.
        self.nco = netCDF4.Dataset(netcdf_path, mode)
        self.netcdf_path = netcdf_path
        self.chunk_x = chunk_x
        self.chunk_y = chunk_y
        self.chunk_time = chunk_time
        self.tile_spec = TileSpec()
    def close(self):
        # Close the underlying netCDF4 dataset; the object is unusable afterwards.
        self.nco.close()
    def _create_standard_dimensions(self, lats, lons):
        """
        Creates latitude, longitude and time dimension
        Time is unlimited
        Latitude and longitude are given the values in lats,lons
        """
        self.nco.createDimension('longitude', len(lons))
        self.nco.createDimension('latitude', len(lats))
        self.nco.createDimension('time', None)
        timeo = self.nco.createVariable('time', 'f4', 'time')
        timeo.units = 'seconds since 1970-01-01 00:00:00'
        timeo.standard_name = 'time'
        timeo.long_name = 'Time, unix time-stamp'
        timeo.calendar = 'standard'
        timeo.axis = "T"
        lon = self.nco.createVariable('longitude', 'f4', 'longitude')
        lon.units = 'degrees_east'
        lon.standard_name = 'longitude'
        lon.long_name = 'longitude'
        lon.axis = "X"
        lat = self.nco.createVariable('latitude', 'f4', 'latitude')
        lat.units = 'degrees_north'
        lat.standard_name = 'latitude'
        lat.long_name = 'latitude'
        lat.axis = "Y"
        lon[:] = lons
        lat[:] = lats
    def _set_wgs84_crs(self):
        """Create and return the 'crs' grid-mapping variable (WGS84 lon/lat)."""
        crso = self.nco.createVariable('crs', 'i4')
        crso.long_name = "Lon/Lat Coords in WGS84"
        crso.grid_mapping_name = "latitude_longitude"
        crso.longitude_of_prime_meridian = 0.0
        crso.semi_major_axis = 6378137.0
        crso.inverse_flattening = 298.257223563
        return crso
    def _set_global_attributes(self):
        """Write CF/compliance global attributes derived from the tile spec."""
        self.nco.spatial_coverage = "1.000000 degrees grid"
        self.nco.geospatial_lat_min = self.tile_spec.get_lat_min()
        self.nco.geospatial_lat_max = self.tile_spec.get_lat_max()
        self.nco.geospatial_lat_units = "degrees_north"
        self.nco.geospatial_lat_resolution = "0.00025"
        self.nco.geospatial_lon_min = self.tile_spec.get_lon_min()
        self.nco.geospatial_lon_max = self.tile_spec.get_lon_max()
        self.nco.geospatial_lon_units = "degrees_east"
        self.nco.geospatial_lon_resolution = "0.00025"
        creation_date = datetime.utcnow().strftime("%Y%m%d")
        self.nco.history = "NetCDF-CF file created %s." % creation_date
        # Attributes for NCI Compliance
        self.nco.title = "Experimental Data files From the Australian Geoscience Data Cube - DO NOT USE"
        self.nco.summary = "These files are experimental, short lived, and the format will change."
        self.nco.source = "This data is a reprojection and retile of the Landsat L1T surface reflectance " \
                          "scene data available from /g/data/rs0/scenes/"
        self.nco.product_version = "0.0.0"
        self.nco.date_created = datetime.today().isoformat()
        self.nco.Conventions = 'CF-1.6'
        self.nco.license = "Creative Commons Attribution 4.0 International CC BY 4.0"
    def _add_time(self, start_date):
        """Append *start_date* (at midnight, as seconds since epoch) to the time axis."""
        # Convert to datetime at midnight
        start_datetime = datetime.combine(start_date, datetime.min.time())
        # Convert to seconds since epoch (1970-01-01)
        start_datetime_delta = start_datetime - EPOCH
        times = self.nco.variables['time']
        # Save as next coordinate in file
        times[len(times)] = start_datetime_delta.total_seconds()
    @classmethod
    def create_from_tile_spec(cls, file_path, tile_spec):
        """Create a new NetCDF file at *file_path* structured per *tile_spec*."""
        netcdf = cls(file_path, mode='w')
        netcdf.tile_spec = tile_spec
        netcdf._set_wgs84_crs()
        netcdf._set_global_attributes()
        netcdf._create_variables()
        return netcdf
    @classmethod
    def open_with_tile_spec(cls, file_path, tile_spec):
        """Open an existing NetCDF file at *file_path* for appending."""
        netcdf = cls(file_path, mode='a')
        netcdf.tile_spec = tile_spec
        return netcdf
    @abstractmethod
    def _create_variables(self):
        """
        Create the structure of the NetCDF file, ie, which variables with which dimensions
        """
        pass
    @abstractmethod
    def _write_data_to_netcdf(self, dataset, eodataset):
        """
        Read in all the data from the geotiff `dataset` and write it as a new time
        slice to the NetCDF file
        :param dataset: open GDAL dataset
        :return:
        """
        pass
    def append_gdal_tile(self, geotiff, eodataset):
        """
        Read a geotiff file and append it to the open NetCDF file
        :param geotiff:string path to a geotiff file
        :return:
        """
        gdal_dataset = gdal.Open(geotiff)
        self._add_time(eodataset.acquisition.aos)
        self._write_data_to_netcdf(gdal_dataset, eodataset)
        del gdal_dataset
class MultiVariableNetCDF(BaseNetCDF):
    """
    Create individual datasets for each `band` of data
    This closely matches the existing GeoTiff tile file structure
    """
    def _create_variables(self):
        # Standard lat/lon/time dims, one variable per band, plus a
        # variable-length string variable for per-slice source metadata.
        self._create_standard_dimensions(self.tile_spec.lats, self.tile_spec.lons)
        self._create_bands(self.tile_spec.bands)
        # Create Variable Length Variable to store extra metadata
        extra_meta = self.nco.createVariable('extra_metadata', str, 'time')
        extra_meta.long_name = 'Extra source metadata'
    def _create_bands(self, bands):
        # NOTE(review): the loop variable `band` is immediately rebound to the
        # created netCDF variable; the tile-spec band dict itself is unused here.
        for i, band in enumerate(bands, 1):
            band = self.nco.createVariable('band' + str(i), 'i2', ('time', 'latitude', 'longitude'),
                                           zlib=True, chunksizes=[self.chunk_time, self.chunk_y, self.chunk_x],
                                           fill_value=-999)
            band.grid_mapping = 'crs'
            band.set_auto_maskandscale(False)
            band.units = '1'
            srcfilename = self.nco.createVariable('srcfilename_band' + str(i), str, 'time')
            srcfilename.long_name = 'Source filename from data import'
    def _get_netcdf_bands(self, bands):
        # Look up the previously created 'bandN' variables, in spec order.
        netcdfbands = []
        for i, _ in enumerate(bands, 1):
            band = self.nco.variables['band' + str(i)]
            netcdfbands.append(band)
        return netcdfbands
    def _write_data_to_netcdf(self, gdal_dataset, eodataset):
        netcdfbands = self._get_netcdf_bands(self.tile_spec.bands)
        gdal_bands = [gdal_dataset.GetRasterBand(idx + 1) for idx in range(gdal_dataset.RasterCount)]
        metadata_bands = sorted(eodataset.image.bands.values(), key=lambda band: band.number)
        # Write into the most recently appended time slice (see _add_time()).
        time_index = len(self.nco.variables['time']) - 1
        for in_band, out_band, metadata in zip(gdal_bands, netcdfbands, metadata_bands):
            out_band.long_name = metadata.number
            out_band.missing_value = -999
            out_band[time_index, :, :] = in_band.ReadAsArray()
        extra_meta = self.nco.variables['extra_metadata']
        # FIXME Yucky, we don't really want to be using yaml and private methods here
        extra_meta[time_index] = yaml.dump(eodataset, Dumper=serialise._create_relative_dumper('/'))
class SingleVariableNetCDF(BaseNetCDF):
    """
    Store all data values in a single dataset with an extra dimension for `band`
    """
    def _create_variables(self):
        lats = self.tile_spec.lats
        lons = self.tile_spec.lons
        self._create_standard_dimensions(lats, lons)
        self._create_band_dimension()
        self._create_data_variable()
    def _create_band_dimension(self):
        # Fixed-size 'band' dimension plus a string variable for band names.
        nbands = len(self.tile_spec.bands)
        self.nco.createDimension('band', nbands)
        band = self.nco.createVariable('band_name', str, 'band')
        band.long_name = "Surface reflectance band name/number"
    def _create_data_variable(self):
        # One compressed 4-D variable holds every band and time slice.
        chunk_band = 1
        observations = self.nco.createVariable('observation', 'i2', ('band', 'time', 'latitude', 'longitude'),
                                               zlib=True,
                                               chunksizes=[chunk_band, self.chunk_time, self.chunk_y, self.chunk_x],
                                               fill_value=-999)
        observations.long_name = "Surface reflectance factor"
        observations.units = '1'
        observations.grid_mapping = 'crs'
        observations.set_auto_maskandscale(False)
        observations.coordinates = 'band_name'
    def _write_data_to_netcdf(self, gdal_dataset, eodataset):
        nbands, lats, lons = _get_nbands_lats_lons_from_gdalds(gdal_dataset)
        # Write into the most recently appended time slice (see _add_time()).
        time_index = len(self.nco.dimensions['time']) - 1
        band_var = self.nco.variables['band_name']
        ds_bands = sorted(eodataset.image.bands.values(), key=lambda band: band.number)
        observation = self.nco.variables['observation']
        for band_idx in range(nbands):
            in_band = gdal_dataset.GetRasterBand(band_idx + 1)
            metadata = ds_bands[band_idx]
            band_var[band_idx] = metadata.number
            observation[band_idx, time_index, :, :] = in_band.ReadAsArray()
class TileSpec(object):
    """Describes a tile: per-band metadata, coordinate arrays and resolutions.

    The class-level mutable defaults previously declared here were removed:
    they were shared between instances and always shadowed by __init__.
    """

    def __init__(self, bands=None, lats=None, lons=None, lat_resultion=None,
                 lon_resolution=None, lat_resolution=None):
        """
        :param bands: list of per-band description dicts
        :param lats: latitude coordinate values
        :param lons: longitude coordinate values
        :param lat_resultion: historical misspelling of ``lat_resolution``,
            kept for backward compatibility with existing keyword callers
        :param lon_resolution: longitude step size
        :param lat_resolution: preferred spelling; wins when both are given
        """
        self.bands = [] if bands is None else bands
        self.lats = [] if lats is None else lats
        self.lons = [] if lons is None else lons
        self.lat_resolution = lat_resultion if lat_resolution is None else lat_resolution
        self.lon_resolution = lon_resolution

    def get_lat_min(self):
        """Smallest latitude value (raises ValueError when lats is empty)."""
        return min(self.lats)

    def get_lat_max(self):
        """Largest latitude value."""
        return max(self.lats)

    def get_lon_min(self):
        """Smallest longitude value."""
        return min(self.lons)

    def get_lon_max(self):
        """Largest longitude value."""
        return max(self.lons)
class Messenger:
    """Trivial attribute bag: ``Messenger(a=1).a == 1``."""

    def __init__(self, **kwargs):
        # Expose every keyword argument as an instance attribute.
        self.__dict__.update(kwargs)
def get_input_spec_from_file(filename):
    """Open *filename* with GDAL and derive a TileSpec from it."""
    return tile_spec_from_gdal_dataset(gdal.Open(filename))
def input_spec_from_eodataset(eodataset):
    """Derive an input spec from an eodataset. Unimplemented stub (returns None)."""
    pass
def tile_spec_from_gdal_dataset(gdal_dataset):
    """
    Return a specification of a GDAL dataset, used for creating a new NetCDF
    file to hold the same data.

    The coordinate arrays are reconstructed from the dataset's affine
    geotransform: origin plus pixel-size times index, for each axis.

    :param gdal_dataset: an open gdal dataset
    :return: TileSpec describing the dataset's bands and coordinates
    """
    geotransform = gdal_dataset.GetGeoTransform()
    # geotransform = (lon origin, lon step, _, lat origin, _, lat step)
    lons = geotransform[0] + np.arange(gdal_dataset.RasterXSize) * geotransform[1]
    lats = geotransform[3] + np.arange(gdal_dataset.RasterYSize) * geotransform[5]

    bands = []
    for idx in range(gdal_dataset.RasterCount):
        raster_band = gdal_dataset.GetRasterBand(idx + 1)
        # eg. filename: 'source.tif', name: 'Photosynthetic Vegetation'
        band_metadata = raster_band.GetMetadata()
        bands.append(dict(
            name=band_metadata.get('name'),
            dtype=gdal.GetDataTypeName(raster_band.DataType),
            no_data=raster_band.GetNoDataValue(),
        ))
    return TileSpec(bands=bands, lats=lats, lons=lons,
                    lat_resultion=geotransform[5], lon_resolution=geotransform[1])
def append_to_netcdf(gdal_tile, netcdf_path, eodataset, netcdf_class=MultiVariableNetCDF):
    """
    Append a raster slice to a new or existing NetCDF file
    :param gdal_tile: pathname to raster slice, readable by gdal
    :param netcdf_path: pathname of the NetCDF file to create or extend
    :param eodataset: source metadata for the slice
    :param netcdf_class: NetCDF layout strategy (multi- or single-variable)
    :return:
    """
    tile_spec = get_input_spec_from_file(gdal_tile)
    # Open for append when the target exists, otherwise create it fresh.
    opener = (netcdf_class.open_with_tile_spec if os.path.isfile(netcdf_path)
              else netcdf_class.create_from_tile_spec)
    ncfile = opener(netcdf_path, tile_spec)
    ncfile.append_gdal_tile(gdal_tile, eodataset)
    ncfile.close()
def main():
    """CLI entry point: create an empty NetCDF from a GeoTIFF, or append to one."""
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--create", action='store_true', help="Create a new, empty, NetCDF file")
    group.add_argument("--append", action='store_true', help="Append the geotiff to a new portion of the NetCDF")
    parser.add_argument("-b", "--band_as_dimension", action="store_true",
                        help="Store bands as a dimension instead of as new dataset")
    parser.add_argument("geotiff", help="Input GeoTIFF filename")
    parser.add_argument("netcdf", help="NetCDF file to create or write to")
    args = parser.parse_args()
    if args.band_as_dimension:
        netcdf_class = SingleVariableNetCDF
    else:
        netcdf_class = MultiVariableNetCDF
    if args.create:
        tile_spec = get_input_spec_from_file(args.geotiff)
        # BUGFIX: create_from_tile_spec is a classmethod that opens the file
        # itself; the old code instantiated the class first and then called
        # it with the tile spec in the file-path slot (a TypeError at best).
        dcnc = netcdf_class.create_from_tile_spec(args.netcdf, tile_spec)
        dcnc.close()
    elif args.append:
        dcnc = netcdf_class(args.netcdf, mode='a')
        # FIXME: append_gdal_tile() requires an eodataset argument that this
        # CLI has no way to construct yet, so --append cannot currently work.
        dcnc.append_gdal_tile(args.geotiff)
        dcnc.close()
    else:
        # Unreachable: the mutually-exclusive group above is required.
        print('Unknown action')
|
|
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
    """Tests for django.contrib.sites: manager, caching and get_current_site()."""
    multi_db = True
    def setUp(self):
        # Ensure a Site row exists with the pk that SITE_ID points at.
        self.site = Site(
            id=settings.SITE_ID,
            domain="example.com",
            name="example.com",
        )
        self.site.save()
    def tearDown(self):
        # Don't let the per-process SITE_CACHE leak between tests.
        Site.objects.clear_cache()
    def test_site_manager(self):
        # Make sure that get_current() does not return a deleted Site object.
        s = Site.objects.get_current()
        self.assertIsInstance(s, Site)
        s.delete()
        with self.assertRaises(ObjectDoesNotExist):
            Site.objects.get_current()
    def test_site_cache(self):
        # After updating a Site object (e.g. via the admin), we shouldn't return a
        # bogus value from the SITE_CACHE.
        site = Site.objects.get_current()
        self.assertEqual("example.com", site.name)
        s2 = Site.objects.get(id=settings.SITE_ID)
        s2.name = "Example site"
        s2.save()
        site = Site.objects.get_current()
        self.assertEqual("Example site", site.name)
    def test_delete_all_sites_clears_cache(self):
        # When all site objects are deleted the cache should also
        # be cleared and get_current() should raise a DoesNotExist.
        self.assertIsInstance(Site.objects.get_current(), Site)
        Site.objects.all().delete()
        with self.assertRaises(Site.DoesNotExist):
            Site.objects.get_current()
    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_get_current_site(self):
        # Test that the correct Site object is returned
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        site = get_current_site(request)
        self.assertIsInstance(site, Site)
        self.assertEqual(site.id, settings.SITE_ID)
        # Test that an exception is raised if the sites framework is installed
        # but there is no matching Site
        site.delete()
        with self.assertRaises(ObjectDoesNotExist):
            get_current_site(request)
        # A RequestSite is returned if the sites framework is not installed
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            site = get_current_site(request)
            self.assertIsInstance(site, RequestSite)
            self.assertEqual(site.name, "example.com")
    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
    def test_get_current_site_no_site_id(self):
        # Without SITE_ID, the site is looked up by the request's host.
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        del settings.SITE_ID
        site = get_current_site(request)
        self.assertEqual(site.name, "example.com")
    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com', 'example.net'])
    def test_get_current_site_no_site_id_and_handle_port_fallback(self):
        request = HttpRequest()
        s1 = self.site
        s2 = Site.objects.create(domain='example.com:80', name='example.com:80')
        # Host header without port
        request.META = {'HTTP_HOST': 'example.com'}
        site = get_current_site(request)
        self.assertEqual(site, s1)
        # Host header with port - match, no fallback without port
        request.META = {'HTTP_HOST': 'example.com:80'}
        site = get_current_site(request)
        self.assertEqual(site, s2)
        # Host header with port - no match, fallback without port
        request.META = {'HTTP_HOST': 'example.com:81'}
        site = get_current_site(request)
        self.assertEqual(site, s1)
        # Host header with non-matching domain
        request.META = {'HTTP_HOST': 'example.net'}
        with self.assertRaises(ObjectDoesNotExist):
            get_current_site(request)
        # Ensure domain for RequestSite always matches host header
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            request.META = {'HTTP_HOST': 'example.com'}
            site = get_current_site(request)
            self.assertEqual(site.name, 'example.com')
            request.META = {'HTTP_HOST': 'example.com:80'}
            site = get_current_site(request)
            self.assertEqual(site.name, 'example.com:80')
    def test_domain_name_with_whitespaces(self):
        # Regression for #17320
        # Domain names are not allowed contain whitespace characters
        site = Site(name="test name", domain="test test")
        with self.assertRaises(ValidationError):
            site.full_clean()
        site.domain = "test\ttest"
        with self.assertRaises(ValidationError):
            site.full_clean()
        site.domain = "test\ntest"
        with self.assertRaises(ValidationError):
            site.full_clean()
    def test_clear_site_cache(self):
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        self.assertEqual(models.SITE_CACHE, {})
        get_current_site(request)
        expected_cache = {self.site.id: self.site}
        self.assertEqual(models.SITE_CACHE, expected_cache)
        with self.settings(SITE_ID=''):
            get_current_site(request)
        # The cache is keyed by both id and domain (domain lookup path above).
        expected_cache.update({self.site.domain: self.site})
        self.assertEqual(models.SITE_CACHE, expected_cache)
        clear_site_cache(Site, instance=self.site, using='default')
        self.assertEqual(models.SITE_CACHE, {})
    @override_settings(SITE_ID='')
    def test_clear_site_cache_domain(self):
        site = Site.objects.create(name='example2.com', domain='example2.com')
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example2.com",
            "SERVER_PORT": "80",
        }
        get_current_site(request)  # prime the models.SITE_CACHE
        expected_cache = {site.domain: site}
        self.assertEqual(models.SITE_CACHE, expected_cache)
        # Site exists in 'default' database so using='other' shouldn't clear.
        clear_site_cache(Site, instance=site, using='other')
        self.assertEqual(models.SITE_CACHE, expected_cache)
        # using='default' should clear.
        clear_site_cache(Site, instance=site, using='default')
        self.assertEqual(models.SITE_CACHE, {})
    def test_unique_domain(self):
        site = Site(domain=self.site.domain)
        msg = 'Site with this Domain name already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            site.validate_unique()
    def test_site_natural_key(self):
        # A Site's natural key is its domain.
        self.assertEqual(Site.objects.get_by_natural_key(self.site.domain), self.site)
        self.assertEqual(self.site.natural_key(), (self.site.domain,))
class JustOtherRouter(object):
    """Database router that permits migrations only on the 'other' alias."""

    def allow_migrate(self, db, app_label, **hints):
        allowed = (db == 'other')
        return allowed
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
    """Tests for the post-migrate create_default_site() handler."""
    multi_db = True
    def setUp(self):
        self.app_config = apps.get_app_config('sites')
        # Delete the site created as part of the default migration process.
        Site.objects.all().delete()
    def test_basic(self):
        """
        #15346, #15573 - create_default_site() creates an example site only if
        none exist.
        """
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertIn("Creating example.com", stdout.getvalue())
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertEqual("", stdout.getvalue())
    @override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
    def test_multi_db_with_router(self):
        """
        #16353, #16828 - The default site creation should respect db routing.
        """
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertFalse(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())
    def test_multi_db(self):
        # Without a router, the default site is created on each database.
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertTrue(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())
    def test_save_another(self):
        """
        #17415 - Another site can be created right after the default one.
        On some backends the sequence needs to be reset after saving with an
        explicit ID. Test that there isn't a sequence collisions by saving
        another site. This test is only meaningful with databases that use
        sequences for automatic primary keys such as PostgreSQL and Oracle.
        """
        create_default_site(self.app_config, verbosity=0)
        Site(domain='example2.com', name='example2.com').save()
    def test_signal(self):
        """
        #23641 - Sending the ``post_migrate`` signal triggers creation of the
        default site.
        """
        post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
        self.assertTrue(Site.objects.exists())
    @override_settings(SITE_ID=35696)
    def test_custom_site_id(self):
        """
        #23945 - The configured ``SITE_ID`` should be respected.
        """
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 35696)
    @override_settings()  # Restore original ``SITE_ID`` afterwards.
    def test_no_site_id(self):
        """
        #24488 - The pk should default to 1 if no ``SITE_ID`` is configured.
        """
        del settings.SITE_ID
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 1)
class MiddlewareTest(TestCase):
    """Tests for CurrentSiteMiddleware."""
    def test_request(self):
        """ Makes sure that the request has correct `site` attribute. """
        middleware = CurrentSiteMiddleware()
        request = HttpRequest()
        middleware.process_request(request)
        self.assertEqual(request.site.id, settings.SITE_ID)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.format.policy_templates.writers.plist_strings_writer'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
from grit.format.policy_templates.writers import writer_unittest_common
class PListStringsWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests for PListStringsWriter.'''
def testEmpty(self):
# Test PListStringsWriter in case of empty polices.
grd = self.PrepareTest('''
{
'policy_definitions': [],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': '$1 preferen"ces',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_chromium': '1', 'mac_bundle_id': 'com.example.Test'},
'plist_strings',
'en')
expected_output = (
'Chromium.pfm_title = "Chromium";\n'
'Chromium.pfm_description = "Chromium preferen\\"ces";')
self.assertEquals(output.strip(), expected_output.strip())
def testEmptyVersion(self):
# Test PListStringsWriter in case of empty polices.
grd = self.PrepareTest('''
{
'policy_definitions': [],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': '$1 preferen"ces',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_chromium': '1',
'mac_bundle_id': 'com.example.Test',
'version': '39.0.0.0'},
'plist_strings',
'en')
expected_output = (
'/* chromium version: 39.0.0.0 */\n'
'Chromium.pfm_title = "Chromium";\n'
'Chromium.pfm_description = "Chromium preferen\\"ces";')
self.assertEquals(output.strip(), expected_output.strip())
def testMainPolicy(self):
# Tests a policy group with a single policy of type 'main'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'MainGroup',
'type': 'group',
'caption': 'Caption of main.',
'desc': 'Description of main.',
'policies': [{
'name': 'MainPolicy',
'type': 'main',
'supported_on': ['chrome.mac:8-'],
'caption': 'Caption of main policy.',
'desc': 'Description of main policy.',
}],
},
],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': 'Preferences of $1',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_google_chrome' : '1', 'mac_bundle_id': 'com.example.Test'},
'plist_strings',
'en')
expected_output = (
'Google_Chrome.pfm_title = "Google Chrome";\n'
'Google_Chrome.pfm_description = "Preferences of Google Chrome";\n'
'MainPolicy.pfm_title = "Caption of main policy.";\n'
'MainPolicy.pfm_description = "Description of main policy.";')
self.assertEquals(output.strip(), expected_output.strip())
def testStringPolicy(self):
# Tests a policy group with a single policy of type 'string'. Also test
# inheriting group description to policy description.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'StringGroup',
'type': 'group',
'caption': 'Caption of group.',
'desc': """Description of group.
With a newline.""",
'policies': [{
'name': 'StringPolicy',
'type': 'string',
'caption': 'Caption of policy.',
'desc': """Description of policy.
With a newline.""",
'supported_on': ['chrome.mac:8-'],
}],
},
],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': 'Preferences of $1',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
'plist_strings',
'en')
expected_output = (
'Chromium.pfm_title = "Chromium";\n'
'Chromium.pfm_description = "Preferences of Chromium";\n'
'StringPolicy.pfm_title = "Caption of policy.";\n'
'StringPolicy.pfm_description = '
'"Description of policy.\\nWith a newline.";')
self.assertEquals(output.strip(), expected_output.strip())
def testStringListPolicy(self):
# Tests a policy group with a single policy of type 'list'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'ListGroup',
'type': 'group',
'caption': '',
'desc': '',
'policies': [{
'name': 'ListPolicy',
'type': 'list',
'caption': 'Caption of policy.',
'desc': """Description of policy.
With a newline.""",
'schema': {
'type': 'array',
'items': { 'type': 'string' },
},
'supported_on': ['chrome.mac:8-'],
}],
},
],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': 'Preferences of $1',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
'plist_strings',
'en')
expected_output = (
'Chromium.pfm_title = "Chromium";\n'
'Chromium.pfm_description = "Preferences of Chromium";\n'
'ListPolicy.pfm_title = "Caption of policy.";\n'
'ListPolicy.pfm_description = '
'"Description of policy.\\nWith a newline.";')
self.assertEquals(output.strip(), expected_output.strip())
def testStringEnumListPolicy(self):
# Tests a policy group with a single policy of type 'string-enum-list'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'EnumGroup',
'type': 'group',
'caption': '',
'desc': '',
'policies': [{
'name': 'EnumPolicy',
'type': 'string-enum-list',
'caption': 'Caption of policy.',
'desc': """Description of policy.
With a newline.""",
'schema': {
'type': 'array',
'items': { 'type': 'string' },
},
'items': [
{
'name': 'ProxyServerDisabled',
'value': 'one',
'caption': 'Option1'
},
{
'name': 'ProxyServerAutoDetect',
'value': 'two',
'caption': 'Option2'
},
],
'supported_on': ['chrome.mac:8-'],
}],
},
],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': 'Preferences of $1',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_chromium' : '1', 'mac_bundle_id': 'com.example.Test'},
'plist_strings',
'en')
expected_output = (
'Chromium.pfm_title = "Chromium";\n'
'Chromium.pfm_description = "Preferences of Chromium";\n'
'EnumPolicy.pfm_title = "Caption of policy.";\n'
'EnumPolicy.pfm_description = '
'"one - Option1\\ntwo - Option2\\n'
'Description of policy.\\nWith a newline.";')
self.assertEquals(output.strip(), expected_output.strip())
def testIntEnumPolicy(self):
# Tests a policy group with a single policy of type 'int-enum'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'EnumGroup',
'type': 'group',
'desc': '',
'caption': '',
'policies': [{
'name': 'EnumPolicy',
'type': 'int-enum',
'desc': 'Description of policy.',
'caption': 'Caption of policy.',
'items': [
{
'name': 'ProxyServerDisabled',
'value': 0,
'caption': 'Option1'
},
{
'name': 'ProxyServerAutoDetect',
'value': 1,
'caption': 'Option2'
},
],
'supported_on': ['chrome.mac:8-'],
}],
},
],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': '$1 preferences',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
'plist_strings',
'en')
expected_output = (
'Google_Chrome.pfm_title = "Google Chrome";\n'
'Google_Chrome.pfm_description = "Google Chrome preferences";\n'
'EnumPolicy.pfm_title = "Caption of policy.";\n'
'EnumPolicy.pfm_description = '
'"0 - Option1\\n1 - Option2\\nDescription of policy.";\n')
self.assertEquals(output.strip(), expected_output.strip())
def testStringEnumPolicy(self):
# Tests a policy group with a single policy of type 'string-enum'.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'EnumGroup',
'type': 'group',
'desc': '',
'caption': '',
'policies': [{
'name': 'EnumPolicy',
'type': 'string-enum',
'desc': 'Description of policy.',
'caption': 'Caption of policy.',
'items': [
{
'name': 'ProxyServerDisabled',
'value': 'one',
'caption': 'Option1'
},
{
'name': 'ProxyServerAutoDetect',
'value': 'two',
'caption': 'Option2'
},
],
'supported_on': ['chrome.mac:8-'],
}],
},
],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': '$1 preferences',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
'plist_strings',
'en')
expected_output = (
'Google_Chrome.pfm_title = "Google Chrome";\n'
'Google_Chrome.pfm_description = "Google Chrome preferences";\n'
'EnumPolicy.pfm_title = "Caption of policy.";\n'
'EnumPolicy.pfm_description = '
'"one - Option1\\ntwo - Option2\\nDescription of policy.";\n')
self.assertEquals(output.strip(), expected_output.strip())
def testNonSupportedPolicy(self):
# Tests a policy that is not supported on Mac, so its strings shouldn't
# be included in the plist string table.
grd = self.PrepareTest('''
{
'policy_definitions': [
{
'name': 'NonMacGroup',
'type': 'group',
'caption': '',
'desc': '',
'policies': [{
'name': 'NonMacPolicy',
'type': 'string',
'caption': '',
'desc': '',
'supported_on': ['chrome_os:8-'],
}],
},
],
'placeholders': [],
'messages': {
'mac_chrome_preferences': {
'text': '$1 preferences',
'desc': 'blah'
}
}
}''')
output = self.GetOutput(
grd,
'fr',
{'_google_chrome': '1', 'mac_bundle_id': 'com.example.Test2'},
'plist_strings',
'en')
expected_output = (
'Google_Chrome.pfm_title = "Google Chrome";\n'
'Google_Chrome.pfm_description = "Google Chrome preferences";')
self.assertEquals(output.strip(), expected_output.strip())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
|
import testing as tm
import pytest
import xgboost as xgb
import numpy as np
from hypothesis import given, strategies, settings, note
# Hypothesis search space of generic training hyper-parameters, shared by
# the exact/approx/hist tests below.  All tests assert a non-increasing
# training loss under these settings.
exact_parameter_strategy = strategies.fixed_dictionaries({
    'nthread': strategies.integers(1, 4),
    'max_depth': strategies.integers(1, 11),
    'min_child_weight': strategies.floats(0.5, 2.0),
    'alpha': strategies.floats(0.0, 2.0),
    'lambda': strategies.floats(1e-5, 2.0),
    'eta': strategies.floats(0.01, 0.5),
    'gamma': strategies.floats(0.0, 2.0),
    'seed': strategies.integers(0, 10),
    # We cannot enable subsampling as the training loss can increase
    # 'subsample': strategies.floats(0.5, 1.0),
    'colsample_bytree': strategies.floats(0.5, 1.0),
    'colsample_bylevel': strategies.floats(0.5, 1.0),
})
# Extra hyper-parameters specific to the histogram-based methods.  The
# filter appears intended to discard invalid combinations (both depth and
# leaves unbounded, or unbounded depth without 'lossguide'); note max_depth
# is drawn from [1, 11] here, so the guards are defensive -- confirm.
hist_parameter_strategy = strategies.fixed_dictionaries({
    'max_depth': strategies.integers(1, 11),
    'max_leaves': strategies.integers(0, 1024),
    'max_bin': strategies.integers(2, 512),
    'grow_policy': strategies.sampled_from(['lossguide', 'depthwise']),
}).filter(lambda x: (x['max_depth'] > 0 or x['max_leaves'] > 0) and (
    x['max_depth'] > 0 or x['grow_policy'] == 'lossguide'))
def train_result(param, dmat, num_rounds):
    """Train a booster on *dmat* and return its evaluation history dict."""
    history = {}
    xgb.train(
        param,
        dmat,
        num_rounds,
        [(dmat, 'train')],
        verbose_eval=False,
        evals_result=history,
    )
    return history
class TestTreeMethod:
    """Property-based and regression tests for the CPU tree methods
    ('exact', 'approx', 'hist')."""

    @given(exact_parameter_strategy, strategies.integers(1, 20),
           tm.dataset_strategy)
    @settings(deadline=None)
    def test_exact(self, param, num_rounds, dataset):
        """Training loss must be non-increasing with the exact method."""
        param['tree_method'] = 'exact'
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)
        assert tm.non_increasing(result['train'][dataset.metric])

    @given(
        exact_parameter_strategy,
        hist_parameter_strategy,
        strategies.integers(1, 20),
        tm.dataset_strategy,
    )
    @settings(deadline=None)
    def test_approx(self, param, hist_param, num_rounds, dataset):
        """Training loss must be non-increasing with the approx method."""
        param["tree_method"] = "approx"
        param = dataset.set_params(param)
        param.update(hist_param)
        result = train_result(param, dataset.get_dmat(), num_rounds)
        note(result)
        assert tm.non_increasing(result["train"][dataset.metric])

    @pytest.mark.skipif(**tm.no_sklearn())
    def test_pruner(self):
        """The 'prune' updater changes a grown model once, then is idempotent."""
        # BUG FIX: `import sklearn` alone does not make `sklearn.datasets`
        # available; the submodule must be imported explicitly.
        from sklearn import datasets
        params = {'tree_method': 'exact'}
        cancer = datasets.load_breast_cancer()
        X = cancer['data']
        y = cancer['target']

        dtrain = xgb.DMatrix(X, y)
        booster = xgb.train(params, dtrain=dtrain, num_boost_round=10)
        grown = str(booster.get_dump())

        params = {'updater': 'prune', 'process_type': 'update', 'gamma': '0.2'}
        booster = xgb.train(params, dtrain=dtrain, num_boost_round=10,
                            xgb_model=booster)
        after_prune = str(booster.get_dump())
        assert grown != after_prune

        booster = xgb.train(params, dtrain=dtrain, num_boost_round=10,
                            xgb_model=booster)
        second_prune = str(booster.get_dump())
        # Second prune should not change the tree
        assert after_prune == second_prune

    @given(exact_parameter_strategy, hist_parameter_strategy, strategies.integers(1, 20),
           tm.dataset_strategy)
    @settings(deadline=None)
    def test_hist(self, param, hist_param, num_rounds, dataset):
        """Training loss must be non-increasing with the hist method."""
        param['tree_method'] = 'hist'
        param = dataset.set_params(param)
        param.update(hist_param)
        result = train_result(param, dataset.get_dmat(), num_rounds)
        note(result)
        assert tm.non_increasing(result['train'][dataset.metric])

    def test_hist_categorical(self):
        """hist must be same as exact on all-categorical data."""
        dpath = 'demo/data/'
        ag_dtrain = xgb.DMatrix(dpath + 'agaricus.txt.train')
        ag_dtest = xgb.DMatrix(dpath + 'agaricus.txt.test')
        ag_param = {'max_depth': 2,
                    'tree_method': 'hist',
                    'eta': 1,
                    'verbosity': 0,
                    'objective': 'binary:logistic',
                    'eval_metric': 'auc'}
        hist_res = {}
        exact_res = {}

        xgb.train(ag_param, ag_dtrain, 10,
                  [(ag_dtrain, 'train'), (ag_dtest, 'test')],
                  evals_result=hist_res)
        ag_param["tree_method"] = "exact"
        xgb.train(ag_param, ag_dtrain, 10,
                  [(ag_dtrain, 'train'), (ag_dtest, 'test')],
                  evals_result=exact_res)
        assert hist_res['train']['auc'] == exact_res['train']['auc']
        assert hist_res['test']['auc'] == exact_res['test']['auc']

    @pytest.mark.skipif(**tm.no_sklearn())
    def test_hist_degenerate_case(self):
        # Test a degenerate case where the quantile sketcher won't return any
        # quantile points for a particular feature (the second feature in
        # this example). Source: https://github.com/dmlc/xgboost/issues/2943
        nan = np.nan
        param = {'missing': nan, 'tree_method': 'hist'}
        model = xgb.XGBRegressor(**param)
        X = np.array([[6.18827160e+05, 1.73000000e+02], [6.37345679e+05, nan],
                      [6.38888889e+05, nan], [6.28086420e+05, nan]])
        y = [1000000., 0., 0., 500000.]
        w = [0, 0, 1, 0]
        model.fit(X, y, sample_weight=w)

    def run_invalid_category(self, tree_method: str) -> None:
        """Categorical codes outside the supported range must be rejected."""
        rng = np.random.default_rng()
        # too large
        X = rng.integers(low=0, high=4, size=1000).reshape(100, 10)
        y = rng.normal(loc=0, scale=1, size=100)
        X[13, 7] = np.iinfo(np.int32).max + 1

        # Check is performed during sketching.
        Xy = xgb.DMatrix(X, y, feature_types=["c"] * 10)
        with pytest.raises(ValueError):
            xgb.train({"tree_method": tree_method}, Xy)

        X[13, 7] = 16777216
        Xy = xgb.DMatrix(X, y, feature_types=["c"] * 10)
        with pytest.raises(ValueError):
            xgb.train({"tree_method": tree_method}, Xy)

        # mixed positive and negative values
        X = rng.normal(loc=0, scale=1, size=1000).reshape(100, 10)
        y = rng.normal(loc=0, scale=1, size=100)

        Xy = xgb.DMatrix(X, y, feature_types=["c"] * 10)
        with pytest.raises(ValueError):
            xgb.train({"tree_method": tree_method}, Xy)

        if tree_method == "gpu_hist":
            import cupy as cp
            X, y = cp.array(X), cp.array(y)
            with pytest.raises(ValueError):
                Xy = xgb.DeviceQuantileDMatrix(X, y, feature_types=["c"] * 10)

    def test_invalid_category(self) -> None:
        self.run_invalid_category("approx")

    def run_categorical_basic(self, rows, cols, rounds, cats, tree_method):
        """Compare native categorical splits against one-hot encoded input."""
        onehot, label = tm.make_categorical(rows, cols, cats, True)
        cat, _ = tm.make_categorical(rows, cols, cats, False)

        by_etl_results = {}
        by_builtin_results = {}

        predictor = "gpu_predictor" if tree_method == "gpu_hist" else None
        # Use one-hot exclusively
        parameters = {
            "tree_method": tree_method, "predictor": predictor, "max_cat_to_onehot": 9999
        }

        m = xgb.DMatrix(onehot, label, enable_categorical=False)
        xgb.train(
            parameters,
            m,
            num_boost_round=rounds,
            evals=[(m, "Train")],
            evals_result=by_etl_results,
        )

        m = xgb.DMatrix(cat, label, enable_categorical=True)
        xgb.train(
            parameters,
            m,
            num_boost_round=rounds,
            evals=[(m, "Train")],
            evals_result=by_builtin_results,
        )

        # There are guidelines on how to specify tolerance based on considering output as
        # random variables. But in here the tree construction is extremely sensitive to
        # floating point errors. An 1e-5 error in a histogram bin can lead to an entirely
        # different tree. So even though the test is quite lenient, hypothesis can still
        # pick up falsifying examples from time to time.
        np.testing.assert_allclose(
            np.array(by_etl_results["Train"]["rmse"]),
            np.array(by_builtin_results["Train"]["rmse"]),
            rtol=1e-3,
        )
        assert tm.non_increasing(by_builtin_results["Train"]["rmse"])

        by_grouping: xgb.callback.TrainingCallback.EvalsLog = {}
        parameters["max_cat_to_onehot"] = 1
        parameters["reg_lambda"] = 0
        m = xgb.DMatrix(cat, label, enable_categorical=True)
        xgb.train(
            parameters,
            m,
            num_boost_round=rounds,
            evals=[(m, "Train")],
            evals_result=by_grouping,
        )
        rmse_oh = by_builtin_results["Train"]["rmse"]
        rmse_group = by_grouping["Train"]["rmse"]
        # always better or equal to onehot when there's no regularization.
        for a, b in zip(rmse_oh, rmse_group):
            assert a >= b

        parameters["reg_lambda"] = 1.0
        by_grouping = {}
        xgb.train(
            parameters,
            m,
            num_boost_round=32,
            evals=[(m, "Train")],
            evals_result=by_grouping,
        )
        assert tm.non_increasing(by_grouping["Train"]["rmse"]), by_grouping

    @given(strategies.integers(10, 400), strategies.integers(3, 8),
           strategies.integers(1, 2), strategies.integers(4, 7))
    @settings(deadline=None)
    @pytest.mark.skipif(**tm.no_pandas())
    def test_categorical(self, rows, cols, rounds, cats):
        self.run_categorical_basic(rows, cols, rounds, cats, "approx")
        self.run_categorical_basic(rows, cols, rounds, cats, "hist")
|
|
#! /usr/bin/env python
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, os
import logging
from ginga import AstroImage, colors
import ginga.toolkit as ginga_toolkit
from ginga.canvas.CanvasObject import get_canvas_types
from ginga.util.toolbox import ModeIndicator
from ginga.misc import log
class FitsViewer(object):
    """Example FITS viewer with two CanvasView widgets sharing one canvas.

    Objects drawn in one viewer also appear in the other, because both
    viewers render the same DrawingCanvas.
    """

    def __init__(self, logger):
        self.logger = logger
        self.drawcolors = colors.get_colors()
        self.dc = get_canvas_types()

        from ginga.gw import Widgets, Viewers

        self.app = Widgets.Application(logger=logger)
        #self.app.add_callback('shutdown', self.quit)
        self.top = self.app.make_window("Ginga example2")
        self.top.add_callback('close', self.closed)

        vbox = Widgets.VBox()
        vbox.set_border_width(2)
        vbox.set_spacing(1)

        hbox = Widgets.HBox()
        hbox.set_border_width(2)
        hbox.set_spacing(4)

        # First viewer
        v1 = Viewers.CanvasView(logger)
        v1.enable_autocuts('on')
        v1.set_autocut_params('zscale')
        v1.enable_autozoom('on')
        v1.set_zoom_algorithm('rate')
        v1.set_zoomrate(1.4)
        v1.show_pan_mark(True)
        v1.set_callback('drag-drop', self.drop_file)
        v1.set_callback('none-move', self.motion)
        v1.set_bg(0.2, 0.2, 0.2)
        v1.ui_setActive(True)
        v1.set_name('tweedledee')
        self.viewer1 = v1
        self._mi1 = ModeIndicator(v1)

        bd = v1.get_bindings()
        bd.enable_all(True)

        # shared canvas between the two viewers
        canvas = self.dc.DrawingCanvas()
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.set_drawtype('rectangle', color='lightblue')
        self.canvas = canvas
        # Tell viewer1 to use this canvas
        v1.set_canvas(canvas)

        self.drawtypes = canvas.get_drawtypes()
        self.drawtypes.sort()

        v1.set_desired_size(300, 300)
        iw = Viewers.GingaViewerWidget(viewer=v1)
        hbox.add_widget(iw, stretch=1)

        # Add a second viewer viewing the same canvas
        v2 = Viewers.CanvasView(logger)
        v2.enable_autocuts('on')
        v2.set_autocut_params('zscale')
        v2.enable_autozoom('on')
        v2.set_zoom_algorithm('rate')
        v2.set_zoomrate(1.4)
        v2.show_pan_mark(True)
        v2.set_callback('drag-drop', self.drop_file)
        v2.set_callback('none-move', self.motion)
        v2.set_bg(0.2, 0.2, 0.2)
        v2.ui_setActive(True)
        # BUG FIX: this previously called v1.set_name('tweedledum'),
        # clobbering the first viewer's name and leaving v2 unnamed.
        v2.set_name('tweedledum')
        self.viewer2 = v2
        self._mi2 = ModeIndicator(v2)

        # Tell viewer2 to use this same canvas
        v2.set_canvas(canvas)

        bd = v2.get_bindings()
        bd.enable_all(True)

        v2.set_desired_size(300, 300)
        iw = Viewers.GingaViewerWidget(viewer=v2)
        hbox.add_widget(iw, stretch=1)

        vbox.add_widget(hbox, stretch=1)

        self.readout = Widgets.Label("")
        vbox.add_widget(self.readout, stretch=0)

        # Control strip at the bottom of the window
        hbox = Widgets.HBox()
        hbox.set_border_width(2)

        wdrawtype = Widgets.ComboBox()
        for name in self.drawtypes:
            wdrawtype.append_text(name)
        index = self.drawtypes.index('rectangle')
        wdrawtype.set_index(index)
        wdrawtype.add_callback('activated', lambda w, idx: self.set_drawparams())
        self.wdrawtype = wdrawtype

        wdrawcolor = Widgets.ComboBox()
        for name in self.drawcolors:
            wdrawcolor.append_text(name)
        index = self.drawcolors.index('lightblue')
        wdrawcolor.set_index(index)
        wdrawcolor.add_callback('activated', lambda w, idx: self.set_drawparams())
        self.wdrawcolor = wdrawcolor

        wfill = Widgets.CheckBox("Fill")
        wfill.add_callback('activated', lambda w, tf: self.set_drawparams())
        self.wfill = wfill

        walpha = Widgets.SpinBox(dtype=float)
        walpha.set_limits(0.0, 1.0, incr_value=0.1)
        walpha.set_value(1.0)
        walpha.set_decimals(2)
        walpha.add_callback('value-changed', lambda w, val: self.set_drawparams())
        self.walpha = walpha

        wclear = Widgets.Button("Clear Canvas")
        wclear.add_callback('activated', lambda w: self.clear_canvas())
        wopen = Widgets.Button("Open File")
        wopen.add_callback('activated', lambda w: self.open_file())
        wquit = Widgets.Button("Quit")
        wquit.add_callback('activated', lambda w: self.quit())

        hbox.add_widget(Widgets.Label(''), stretch=1)
        for w in (wopen, wdrawtype, wdrawcolor, wfill,
                  Widgets.Label('Alpha:'), walpha, wclear, wquit):
            hbox.add_widget(w, stretch=0)

        vbox.add_widget(hbox, stretch=0)

        self.top.set_widget(vbox)

    def set_drawparams(self):
        """Apply the current draw type/color/fill/alpha widget values to
        the shared canvas."""
        index = self.wdrawtype.get_index()
        kind = self.drawtypes[index]
        index = self.wdrawcolor.get_index()
        fill = self.wfill.get_state()
        alpha = self.walpha.get_value()
        coord = 'data'

        params = { 'color': self.drawcolors[index],
                   'alpha': alpha,
                   'coord': coord,
                   }
        # Only closed shapes support fill parameters.
        if kind in ('circle', 'rectangle', 'polygon', 'triangle',
                    'righttriangle', 'ellipse', 'square', 'box'):
            params['fill'] = fill
            params['fillalpha'] = alpha

        self.canvas.set_drawtype(kind, **params)

    def clear_canvas(self):
        """Remove all drawn objects from the shared canvas."""
        self.canvas.deleteAllObjects()

    def load_file(self, viewer, filepath):
        """Load a FITS file into *viewer* and set the window title."""
        image = AstroImage.AstroImage(logger=self.logger)
        image.load_file(filepath)
        viewer.set_image(image)
        self.top.set_title(filepath)

    def open_file(self):
        """Prompt for a FITS file and load it into viewer 1."""
        # BUG FIX: Widgets was only imported into __init__'s local scope,
        # so referencing it here raised NameError; import it locally.
        from ginga.gw import Widgets
        res = Widgets.FileDialog.getOpenFileName(self, "Open FITS file",
                                                 ".", "FITS files (*.fits)")
        if isinstance(res, tuple):
            fileName = res[0]
        else:
            fileName = str(res)
        if len(fileName) != 0:
            self.load_file(self.viewer1, fileName)

    def drop_file(self, viewer, paths):
        """Callback: a file was drag-and-dropped onto a viewer."""
        fileName = paths[0]
        self.load_file(viewer, fileName)

    def motion(self, viewer, button, data_x, data_y):
        """Callback: update the readout with the pixel value and WCS
        coordinates under the cursor."""
        # Get the value under the data coordinates
        try:
            # We report the value across the pixel, even though the coords
            # change halfway across the pixel
            value = viewer.get_data(int(data_x+0.5), int(data_y+0.5))
        except Exception:
            value = None

        fits_x, fits_y = data_x + 1, data_y + 1

        # Calculate WCS RA
        try:
            # NOTE: image function operates on DATA space coords
            image = viewer.get_image()
            if image is None:
                # No image loaded
                return
            ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
                                               format='str', coords='fits')
        except Exception as e:
            # Logger.warn() is a deprecated alias; use warning().
            self.logger.warning("Bad coordinate conversion: %s" % (
                str(e)))
            ra_txt = 'BAD WCS'
            dec_txt = 'BAD WCS'

        text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
            ra_txt, dec_txt, fits_x, fits_y, value)
        self.readout.set_text(text)

    def closed(self, w):
        """Callback: the top-level window was closed."""
        self.logger.info("Top window closed.")
        self.top = None
        sys.exit()

    def quit(self, *args):
        """Shut down the application cleanly."""
        self.logger.info("Attempting to shut down the application...")
        if self.top is not None:  # idiomatic form of 'not ... is None'
            self.top.close()
        sys.exit()

    def mainloop(self):
        """Minimal event loop; blocks until the process exits."""
        while True:
            self.app.process_events()
def main(options, args):
    """Build the viewer, optionally load a file from *args*, and run it."""
    logger = log.get_logger("example2", options=options)

    if options.toolkit is None:
        logger.error("Please choose a GUI toolkit with -t option")
        # BUG FIX: previously execution fell through and crashed inside
        # ginga_toolkit.use(None); exit with an error status instead.
        sys.exit(1)

    # decide our toolkit, then import
    ginga_toolkit.use(options.toolkit)

    viewer = FitsViewer(logger)
    viewer.top.resize(700, 540)

    if len(args) > 0:
        viewer.load_file(viewer.viewer1, args[0])

    viewer.top.show()
    viewer.top.raise_()

    try:
        viewer.mainloop()
    except KeyboardInterrupt:
        print("Terminating viewer...")
        if viewer.top is not None:
            viewer.top.close()
# Script entry point: parse options, then run under pdb, the profiler, or
# normally depending on flags.
if __name__ == "__main__":
    # Parse command line options with nifty optparse module
    from optparse import OptionParser
    usage = "usage: %prog [options] cmd [args]"
    # NOTE(review): '%%prog' renders as a literal '%prog' in the version
    # string; a plain '%prog' substitution was probably intended -- confirm.
    optprs = OptionParser(usage=usage, version=('%%prog'))
    optprs.add_option("--debug", dest="debug", default=False, action="store_true",
                      help="Enter the pdb debugger on main()")
    optprs.add_option("--log", dest="logfile", metavar="FILE",
                      help="Write logging output to FILE")
    optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
                      type='int', default=logging.INFO,
                      help="Set logging level to LEVEL")
    optprs.add_option("--stderr", dest="logstderr", default=False,
                      action="store_true",
                      help="Copy logging also to stderr")
    optprs.add_option("-t", "--toolkit", dest="toolkit", metavar="NAME",
                      default='qt',
                      help="Choose GUI toolkit (gtk|qt)")
    optprs.add_option("--profile", dest="profile", action="store_true",
                      default=False,
                      help="Run the profiler on main()")
    (options, args) = optprs.parse_args(sys.argv[1:])
    # Are we debugging this?
    if options.debug:
        import pdb
        pdb.run('main(options, args)')
    # Are we profiling this?
    elif options.profile:
        # NOTE(review): the stdlib recommends cProfile over the pure-Python
        # 'profile' module -- confirm before changing.
        import profile
        print(("%s profile:" % sys.argv[0]))
        profile.run('main(options, args)')
    else:
        main(options, args)
# END
|
|
#------------------------------------------------------------------------------
# setlx2py: setlx_parser.py
#
# Parser class: parser for the setlx language
#
# Copyright (C) 2013, Jan-Christoph Klie
# License: Apache v2
#------------------------------------------------------------------------------
from ply import yacc
from setlx2py.setlx_lexer import Lexer
from setlx2py.setlx_ast import *
from setlx2py.setlx_semcheck import *
class Parser():
tokens = Lexer.tokens
    def __init__(self, yacc_optimize=True):
        # Build the lexer first; yacc reads the token list from the class
        # attribute 'tokens' above.
        # Lexer
        self.lexer = Lexer()
        self.lexer.build()
        # Parser
        # yacc introspects this instance's p_* methods; their docstrings are
        # the grammar productions.  'file_input' is the start symbol.
        self.parser = yacc.yacc(module=self,
                                start='file_input',
                                optimize=yacc_optimize)
    def parse(self, text):
        # Reset lexer state so the same Parser instance can be reused for
        # several inputs.
        self.lexer.reset()
        return self.parser.parse(input=text, lexer=self.lexer)
    def p_error(self, t):
        # Called by ply on a syntax error; t is None at unexpected EOF.
        if t is None:
            raise SyntaxError("unexpected token", self.lexer, None)
        else:
            msg = "unexpected token: '{}' - Line: {} - Pos: {}" \
                  .format(t.value, t.lineno, t.lexpos)
            raise SyntaxError(msg, self.lexer, t.value, t.lineno, t.lexpos)
    ##
    ## Precedence and associativity of operators
    ##
    # No precedence table is declared; ambiguity is resolved purely by the
    # layered grammar (expression -> implication -> ... -> power).
    precedence = (
    )
    ##
    ## Grammar productions
    ## Implementation of the BNF defined in Pure.g of setlx interpreter
    ##
    # NOTE: in ply, the docstring of each p_* method IS the grammar
    # production -- the docstrings below are grammar, not documentation.
    def p_file_input_1(self, p):
        """ file_input : statement_list """
        # Wrap the accumulated statement list in the AST root node.
        p[0] = FileAST([]) if p[1] is None else FileAST(p[1])
    def p_file_input_2(self, p):
        """ file_input : expression """
        p[0] = p[1]
    def p_epsilon(self, p):
        """ epsilon : """
        # Empty production used to make other rules optional.
        p[0] = None
    ##
    ## Statements
    ##
    def p_statement_list_1(self, p):
        """ statement_list : statement """
        p[0] = p[1]
    def p_statement_list_2(self, p):
        """ statement_list : statement_list statement """
        # 'statement' yields a one-element list, so extend() keeps the
        # statement list flat.
        if p[2] is not None:
            p[1].extend(p[2])
        p[0] = p[1]
    def p_statement(self, p):
        """ statement : simple_statement SEMICOLON
                      | compound_statement
        """
        p[0] = [p[1]]
    ####
    ##
    ## Simple statements
    ##
    ####
    def p_simple_statement(self, p):
        """ simple_statement : expression_statement
                             | assert_statement
                             | assignment_statement
                             | augmented_assign_statement
                             | backtrack_statement
                             | break_statement
                             | continue_statement
                             | exit_statement
                             | return_statement
        """
        p[0] = p[1]
    def p_expression_statement(self, p):
        """ expression_statement : expression """
        p[0] = p[1]
    def p_backtrack_statement(self, p):
        """ backtrack_statement : BACKTRACK """
        p[0] = Backtrack()
    def p_break_statement(self, p):
        """ break_statement : BREAK """
        p[0] = Break()
    def p_continue_statement(self, p):
        """ continue_statement : CONTINUE """
        p[0] = Continue()
    def p_exit_statement(self, p):
        """ exit_statement : EXIT """
        p[0] = Exit()
    def p_return_statement_1(self, p):
        """ return_statement : RETURN """
        # Bare 'return' returns no value.
        p[0] = Return(None)
    def p_return_statement_2(self, p):
        """ return_statement : RETURN expression """
        p[0] = Return(p[2], p[2].coord)
    ##
    ## Expressions
    ##
    # Operator precedence is encoded structurally: each rule layer below
    # binds tighter than the one above (equivalence < implication <
    # disjunction < conjunction < comparison < sum < product < reduce <
    # unary < power < primary).
    def p_expression_list_1(self, p):
        """ expression_list : expression """
        p[0] = p[1]
    def p_expression_list_2(self, p):
        """ expression_list : expression_list COMMA expression """
        # Promote a single expression to an ExprList on the first comma,
        # then keep appending.
        if not isinstance(p[1], ExprList):
            p[1] = ExprList([p[1]], p[1].coord)
        p[1].exprs.append(p[3])
        p[0] = p[1]
    def p_expression_1(self, p):
        """ expression : implication
                       | lambda_definition
        """
        p[0] = p[1]
    def p_expression_2(self, p):
        """ expression : implication EQUIVALENT implication
                       | implication ANTIVALENT implication
        """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Implication
    def p_implication_1(self, p):
        """ implication : disjunction """
        p[0] = p[1]
    def p_implication_2(self, p):
        """ implication : disjunction IMPLICATES disjunction """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Disjunction
    def p_disjunction_1(self, p):
        """ disjunction : conjunction """
        p[0] = p[1]
    def p_disjunction_2(self, p):
        """ disjunction : disjunction OR conjunction """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Conjunction
    def p_conjunction_1(self, p):
        """ conjunction : comparison """
        p[0] = p[1]
    def p_conjunction_2(self, p):
        """ conjunction : conjunction AND comparison """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Comparison
    def p_comparison_1(self, p):
        """ comparison : sum """
        p[0] = p[1]
    def p_comparison_2(self, p):
        """ comparison : sum EQ sum
                       | sum NEQ sum
                       | sum LT sum
                       | sum LE sum
                       | sum GT sum
                       | sum GE sum
                       | sum IN sum
                       | sum NOTIN sum
        """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Sum
    def p_sum_1(self, p):
        """ sum : product """
        p[0] = p[1]
    def p_sum_2(self, p):
        """ sum : sum PLUS product
                | sum MINUS product
        """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Product
    def p_product_1(self, p):
        """ product : reduce """
        p[0] = p[1]
    def p_product_2(self, p):
        """ product : product TIMES reduce
                    | product DIVIDE reduce
                    | product IDIVIDE reduce
                    | product MOD reduce
                    | product CARTESIAN reduce
        """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Reduce
    def p_reduce_1(self, p):
        """ reduce : unary_expression """
        p[0] = p[1]
    def p_reduce_2(self, p):
        """ reduce : reduce SUM unary_expression
                   | reduce PRODUCT unary_expression
        """
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    # Unary expression
    def p_unary_expression_1(self, p):
        """ unary_expression : power """
        p[0] = p[1]
    def p_unary_expression_2(self, p):
        """ unary_expression : SUM unary_expression
                             | PRODUCT unary_expression
                             | HASH unary_expression
                             | MINUS unary_expression
                             | AT unary_expression
        """
        p[0] = UnaryOp(p[1], p[2], p[2].coord)
    def p_unary_expression_3(self, p):
        """ unary_expression : BANG unary_expression """
        # Prefix '!' is logical negation (postfix '!' is factorial, see
        # p_primary_2).
        p[0] = UnaryOp('not', p[2], p[2].coord)
    def p_unary_expression_4(self, p):
        """ unary_expression : quantor
                             | term
        """
        p[0] = p[1]
    def p_power_1(self, p):
        """ power : primary """
        p[0] = p[1]
    def p_power_2(self, p):
        """ power : primary POW power """
        # Right-recursive: exponentiation associates to the right.
        p[0] = BinaryOp(p[2], p[1], p[3], p[1].coord)
    ## Primary
    def p_primary_1(self, p):
        """ primary : atom
                    | attributeref
                    | subscription
                    | slicing
                    | procedure
                    | call
        """
        p[0] = p[1]
    def p_primary_2(self, p):
        """ primary : primary BANG """
        # Postfix '!' is factorial.
        p[0] = UnaryOp('fac', p[1], p[1].coord)
    # Atom
    def p_atom(self, p):
        """ atom : identifier
                 | literal
                 | enclosure
        """
        p[0] = p[1]
    def p_variable(self, p):
        """ identifier : IDENTIFIER
                       | UNUSED
        """
        p[0] = Identifier(p[1])
    # Attribute Ref
    def p_attributeref(self, p):
        """ attributeref : primary DOT identifier """
        p[0] = AttributeRef(p[1], p[3], p[1].coord)
    # Subscription
    def p_subscription(self, p):
        """ subscription : primary LBRACKET expression RBRACKET """
        p[0] = Subscription(p[1], p[3], p[1].coord)
    # Slicing
    def p_slicing(self, p):
        """ slicing : primary LBRACKET lower_bound RANGE upper_bound RBRACKET """
        # Either bound may be omitted (epsilon -> None).
        p[0] = Slice(p[1], p[3], p[5])
    def p_lower_bound(self, p):
        """ lower_bound : expression
                        | epsilon
        """
        p[0] = p[1]
    def p_upper_bound(self, p):
        """ upper_bound : expression
                        | epsilon
        """
        p[0] = p[1]
    ##
    ## Literals
    ##
    def p_literal(self, p):
        """ literal : stringliteral
                    | integer
                    | floatnumber
                    | boolean
        """
        p[0] = p[1]
    # String constants
    def p_string_literal_1(self, p):
        """ stringliteral : STRING """
        p[0] = Constant('string', str(p[1]))
    def p_string_literal_2(self, p):
        """ stringliteral : LITERAL """
        p[0] = Constant('literal', str(p[1]))
    def p_string_literal_3(self, p):
        """ stringliteral : INTERPOLATION """
        # Interpolated string; the embedded expression list is filled in
        # later (empty here).
        lit = Constant('literal', str(p[1]))
        lst = ExprList([])
        p[0] = Interpolation(lit, lst)
    # Numerical constants
    def p_integer(self, p):
        """ integer : INTEGER """
        p[0] = Constant('int', int(p[1]))
    def p_floatnumber(self, p):
        """ floatnumber : DOUBLE """
        p[0] = Constant('double', float(p[1]))
    def p_boolean_1(self, p):
        """ boolean : TRUE """
        p[0] = Constant('bool', True)
    def p_boolean_2(self, p):
        """ boolean : FALSE """
        p[0] = Constant('bool', False)
    ##
    ## Enclosures
    ##
    def p_enclosure(self, p):
        """ enclosure : set_range
                      | set_display
                      | set_comprehension
                      | list_range
                      | list_display
                      | list_comprehension
                      | parenth_form
        """
        p[0] = p[1]
    def p_parenth_form(self, p):
        """ parenth_form : LPAREN expression RPAREN """
        # Parentheses only group; no AST node is created.
        p[0] = p[2]
    ##
    ## Comprehension
    ##
    def p_comprehension_condition_1(self, p):
        """ comprehension_condition : PIPE expression """
        p[0] = p[2]
    def p_comprehension_condition_2(self, p):
        """ comprehension_condition : epsilon """
        p[0] = None
    # Set comprehension
    def p_set_comprehension(self, p):
        """ set_comprehension : LBRACE expression COLON \
                                iterator_chain comprehension_condition RBRACE
        """
        p[0] = Comprehension('set', p[2], p[4], p[5], p[2].coord)
    # List comprehension
    def p_list_comprehension(self, p):
        """ list_comprehension : LBRACKET expression COLON \
                                 iterator_chain comprehension_condition RBRACKET
        """
        p[0] = Comprehension('list', p[2], p[4], p[5], p[2].coord)
    ##
    ## Range
    ##
    # Set range
    def p_set_range_1(self, p):
        """ set_range : LBRACE expression RANGE expression RBRACE """
        # {a .. b}: no explicit step (None).
        p[0] = Range('set', p[2], None, p[4], p[2].coord)
    def p_set_range_2(self, p):
        """ set_range : LBRACE expression \
                        COMMA expression RANGE expression RBRACE
        """
        # {a, a2 .. b}: second element fixes the step.
        p[0] = Range('set', p[2], p[4], p[6], p[2].coord)
    # List Range
    def p_list_range_1(self, p):
        """ list_range : LBRACKET expression RANGE expression RBRACKET """
        p[0] = Range('list', p[2], None, p[4], p[2].coord)
    def p_list_range_2(self, p):
        """ list_range : LBRACKET expression \
                         COMMA expression RANGE expression RBRACKET """
        p[0] = Range('list', p[2], p[4], p[6], p[2].coord)
    ##
    ## Displays
    ##
    # Set Display
    def p_set_display_1(self, p):
        """ set_display : LBRACE expression RBRACE """
        p[0] = Set([p[2]], p[2].coord)
    def p_set_display_2(self, p):
        """ set_display : LBRACE expression COMMA argument_list RBRACE """
        # Prepend the first expression to the remaining arguments.
        lst = p[4].arguments
        expr = p[2]
        lst.insert(0, expr)
        p[0] = Set(lst, expr.coord)
    def p_set_display_3(self, p):
        """ set_display : LBRACE RBRACE """
        p[0] = Set([])
    def p_set_display_4(self, p):
        """ set_display : LBRACE expression PIPE expression RBRACE """
        # {head | rest}: matching/splitting pattern.
        p[0] = Pattern(p[2], p[4], p[2].coord)
    def p_set_display_5(self, p):
        """ set_display : LBRACE expression COMMA argument_list PIPE expression RBRACE """
        # Multiple heads before the '|' are collected into an ExprList.
        lst = p[4].arguments
        expr = p[2]
        lst.insert(0, expr)
        p[2] = ExprList(lst)
        p[0] = Pattern(p[2], p[6], p[2].coord)
    # List Display
    def p_list_display_1(self, p):
        """ list_display : LBRACKET expression RBRACKET """
        p[0] = List([p[2]], p[2].coord)
    def p_list_display_2(self, p):
        """ list_display : LBRACKET expression COMMA argument_list RBRACKET """
        lst = p[4].arguments
        expr = p[2]
        lst.insert(0, expr)
        p[0] = List(lst, expr.coord)
    def p_list_display_3(self, p):
        """ list_display : LBRACKET RBRACKET """
        p[0] = List([])
    def p_list_display_4(self, p):
        """ list_display : LBRACKET expression PIPE expression RBRACKET """
        p[0] = Pattern(p[2], p[4], p[2].coord)
    def p_list_display_5(self, p):
        """ list_display : LBRACKET expression COMMA argument_list PIPE expression RBRACKET """
        lst = p[4].arguments
        expr = p[2]
        lst.insert(0, expr)
        p[2] = ExprList(lst)
        p[0] = Pattern(p[2], p[6], p[2].coord)
##
## Lambda Definitions
##
def p_lambda_definition(self, p):
""" lambda_definition : lambda_parameters LAMBDADEF expression """
p[0] = Lambda(p[1], p[3], p[1].coord)
def p_lambda_parameters_1(self, p):
""" lambda_parameters : identifier """
param = p[1]
p[0] = ParamList([param], p[1].coord)
    # A list display reused as the parameter list; check_lambda validates
    # that every item is usable as a parameter.
    def p_lambda_parameters_2(self, p):
        """ lambda_parameters : list_display """
        lst = p[1].items
        params = ParamList(lst, p[1].coord)
        check_lambda(params)
        p[0] = params
##
## Assignment Statement
##
# TODO : recursive assignment
    # target ASSIGN expr: Assignment(op token, target, value).
    def p_assignment_statement(self, p):
        """ assignment_statement : target ASSIGN expression """
        p[0] = Assignment(p[2], p[1], p[3], p[3].coord)
    # Any expression may appear syntactically on the left-hand side;
    # check_target rejects those that are not assignable.
    def p_target(self, p):
        """ target : expression """
        ast = p[1]
        check_target(ast)
        p[0] = p[1]
##
## Augmented Assignment Statement
##
    # target augop expr: reuses Assignment with the augmented operator token.
    def p_augmented_assign_statement(self, p):
        """ augmented_assign_statement : augtarget augop expression """
        p[0] = Assignment(p[2], p[1], p[3], p[3].coord)
    # Augmented assignment targets are restricted by the grammar itself,
    # so no extra check_target call is needed here.
    def p_augtarget(self, p):
        """ augtarget : identifier
                      | attributeref
                      | subscription
        """
        p[0] = p[1]
    # Pass the augmented-assignment operator token through unchanged.
    def p_augop(self, p):
        """ augop : PLUS_EQUAL
                  | MINUS_EQUAL
                  | TIMES_EQUAL
                  | DIVIDE_EQUAL
                  | IDIVIDE_EQUAL
                  | MOD_EQUAL
        """
        p[0] = p[1]
##
## Assert
##
    # assert(cond, msg): Assert node with condition and message expressions.
    def p_assert_statement(self, p):
        """ assert_statement : ASSERT LPAREN expression COMMA expression RPAREN """
        p[0] = Assert(p[3], p[5], p[3].coord)
##
## Term
##
    # TERM(args): Term node keyed by the TERM token value.
    def p_term(self, p):
        """ term : TERM LPAREN argument_list RPAREN """
        p[0] = Term(p[1], p[3], p[3].coord)
    # TERM(): zero-argument term; gets an empty ArgumentList.
    # NOTE(review): no coord is passed here, unlike p_term — presumably
    # because no sub-expression carries one; confirm Term tolerates it.
    def p_term_2(self, p):
        """ term : TERM LPAREN RPAREN """
        lst = ArgumentList([])
        p[0] = Term(p[1], lst)
##
## Procedures
##
    # procedure(params) { body }: anonymous (empty-name) 'vanilla' procedure.
    def p_procedure_1(self, p):
        """ procedure : PROCEDURE LPAREN parameter_list RPAREN \
        LBRACE block RBRACE
        """
        p[0] = Procedure('', 'vanilla', p[3], p[6], p[6].coord)
    # cprocedure(params) { body }: anonymous procedure flagged 'cached'.
    def p_procedure_2(self, p):
        """ procedure : CPROCEDURE LPAREN parameter_list RPAREN \
        LBRACE block RBRACE
        """
        p[0] = Procedure('', 'cached', p[3], p[6], p[6].coord)
    # Non-empty parameter list passes through unchanged.
    def p_parameter_list_1(self, p):
        """ parameter_list : params """
        p[0] = p[1]
    # Empty production yields an empty ParamList.
    def p_parameter_list_2(self, p):
        """ parameter_list : epsilon """
        p[0] = ParamList([])
def p_params(self, p):
""" params : procedure_param
| params COMMA procedure_param
"""
if len(p) == 2: # single parameter
p[0] = ParamList([p[1]], p[1].coord)
else:
p[1].params.append(p[3])
p[0] = p[1]
    # Wrap the identifier's name into a Param node.
    def p_procedure_param(self, p):
        """ procedure_param : identifier """
        p[0] = Param(p[1].name, p[1].coord)
##
## Call
##
def p_call(self, p):
""" call : primary LPAREN argument_list RPAREN
| primary LPAREN RPAREN
"""
argumentlist = p[3] if len(p) == 5 else ArgumentList([])
p[0] = Call(p[1], argumentlist, p[1].coord)
def p_argument_list(self, p):
""" argument_list : expression
| argument_list COMMA expression
"""
if len(p) == 2: # single expr
p[0] = ArgumentList([p[1]], p[1].coord)
else:
p[1].arguments.append(p[3])
p[0] = p[1]
##
## Quantor
##
    # forall(iters | cond): universal quantifier ('all').
    def p_quantor_1(self, p):
        """ quantor : FORALL LPAREN iterator_chain PIPE expression RPAREN """
        p[0] = Quantor('all', p[3], p[5], p[3].coord)
    # exists(iters | cond): existential quantifier ('any').
    def p_quantor_2(self, p):
        """ quantor : EXISTS LPAREN iterator_chain PIPE expression RPAREN """
        p[0] = Quantor('any', p[3], p[5], p[3].coord)
##
## Iterator
##
    # A comparison reinterpreted as an iterator binding; check_iterator
    # validates the shape before left/right are lifted into Iterator.
    def p_iterator(self, p):
        """ iterator : comparison """
        ast = p[1]
        check_iterator(ast)
        p[0] = Iterator(ast.left, ast.right, ast.coord)
    # A single iterator is passed through; it is only promoted to an
    # IteratorChain when a second one is chained (see p_iterator_chain_2).
    def p_iterator_chain_1(self, p):
        """ iterator_chain : iterator """
        p[0] = p[1]
def p_iterator_chain_2(self, p):
""" iterator_chain : iterator_chain COMMA iterator """
if not isinstance(p[1], IteratorChain):
p[1] = IteratorChain('', [p[1]], p[1].coord)
p[1].iterators.append(p[3])
p[0] = p[1]
####
##
## Compound statement
##
####
    # Umbrella rule: every compound statement kind passes through unchanged.
    def p_compound_statement(self, p):
        """ compound_statement : if_statement
                               | switch_statement
                               | match_statement
                               | scan_statement
                               | while_loop
                               | do_while_loop
                               | for_loop
                               | class
                               | try_statement
                               | check_statement
        """
        p[0] = p[1]
def p_block(self, p):
""" block : statement_list
| epsilon
"""
if p[1] is None:
p[0] = Block([])
else:
p[0] = Block(p[1])
##
## If Statements
##
    # if (cond) { body }: no else branch (None).
    def p_if_statement_1(self, p):
        """ if_statement : IF LPAREN expression RPAREN LBRACE block RBRACE """
        p[0] = If(p[3], p[6], None, p[3].coord)
    # if (cond) { body } else { body }: both branches are blocks.
    def p_if_statement_2(self, p):
        """ if_statement : IF LPAREN expression RPAREN \
        LBRACE block RBRACE \
        ELSE LBRACE block RBRACE """
        p[0] = If(p[3], p[6], p[10], p[3].coord)
    # if (cond) { body } else if ...: else-branch is a nested If node.
    def p_if_statement_3(self, p):
        """ if_statement : IF LPAREN expression RPAREN \
        LBRACE block RBRACE \
        ELSE if_statement """
        p[0] = If(p[3], p[6], p[9], p[3].coord)
##
## Switch Statement
##
    # switch { cases default }: default may be None (see p_default_case_2).
    def p_switch_statement(self, p):
        """ switch_statement : SWITCH LBRACE case_statements default_case RBRACE """
        p[0] = Switch(p[3], p[4], p[3].coord)
def p_case_statements(self, p):
""" case_statements : case_list
| epsilon
"""
if p[1] is None:
p[0] = CaseList([])
else:
p[0] = CaseList(p[1])
    # First case starts a plain Python list (wrapped by p_case_statements).
    def p_case_list_1(self, p):
        """ case_list : case_statement """
        p[0] = [p[1]]
def p_case_list_2(self, p):
""" case_list : case_list case_statement """
if p[2] is not None:
p[1].append(p[2])
p[0] = p[1]
    # case expr: block — single switch arm.
    def p_case_statement(self, p):
        """ case_statement : CASE expression COLON block """
        p[0] = Case(p[2], p[4], p[2].coord)
    # default: block — explicit default arm.
    def p_default_case_1(self, p):
        """ default_case : DEFAULT COLON block """
        p[0] = Default(p[3], p[3].coord)
    # Missing default arm reduces to None.
    def p_default_case_2(self, p):
        """ default_case : epsilon """
        p[0] = None
##
## Match
##
    # match (expr) { matchees default }: a single matchee arrives unwrapped
    # (see p_match_list_1), so it is promoted to a CaseList here.
    def p_match_statement(self, p):
        """ match_statement : MATCH LPAREN expression RPAREN \
        LBRACE match_list default_case RBRACE
        """
        if not isinstance(p[6], CaseList):
            p[6] = CaseList([p[6]], p[3].coord)
        p[0] = Match(p[3], p[6], p[7], p[3].coord)
    # Single matchee passes through; promoted to CaseList by the consumer.
    def p_match_list_1(self, p):
        """ match_list : matchee """
        p[0] = p[1]
def p_match_list_2(self, p):
""" match_list : match_list matchee """
if not isinstance(p[1], CaseList):
p[1] = CaseList([p[1]], p[1].coord)
p[1].cases.append(p[2])
p[0] = p[1]
    # A matchee is either a plain case or a regex branch.
    # NOTE(review): method name looks like a typo for p_matchee; harmless,
    # since PLY only requires the 'p_' prefix and reads the docstring.
    def p_matche(self, p):
        """ matchee : match_case
                    | regex_branch
        """
        p[0] = p[1]
    # case expr [| guard]: block — guard may be None (see p_case_condition_2).
    def p_match_case(self, p):
        """ match_case : CASE expression case_condition COLON block """
        p[0] = MatchCase(p[2], p[3], p[5], p[2].coord)
# Regex case
    # regex expr [as binding] [| guard]: block — 'as' and guard may be None.
    def p_regex_branch(self, p):
        """ regex_branch : REGEX expression as case_condition COLON block """
        p[0] = Regex(p[2], p[3], p[4], p[6], p[2].coord)
    # as expr: binding target for a regex match.
    def p_as_1(self, p):
        """ as : AS expression """
        p[0] = As(p[2], p[2].coord)
    # Missing 'as' clause reduces to None.
    def p_as_2(self, p):
        """ as : epsilon """
        p[0] = None
    # | expr: optional guard condition on a match arm.
    def p_case_condition_1(self, p):
        """ case_condition : PIPE expression """
        p[0] = p[2]
    # Missing guard reduces to None.
    def p_case_condition_2(self, p):
        """ case_condition : epsilon """
        p[0] = None
##
## Scan
##
    # scan (expr) [using id] { regex branches default }: a single branch
    # arrives unwrapped (see p_regex_list_1) and is promoted to a CaseList.
    def p_scan_statement(self, p):
        """ scan_statement : SCAN LPAREN expression RPAREN using \
        LBRACE regex_list default_case RBRACE
        """
        if not isinstance(p[7], CaseList):
            p[7] = CaseList([p[7]], p[7].coord)
        p[0] = Scan(p[3], p[5], p[7], p[8], p[3].coord)
    # using id: binding identifier wrapped in an As node.
    def p_using_1(self, p):
        """ using : USING identifier """
        p[0] = As(p[2], p[2].coord)
def p_using_2(self, p):
""" using : epsilon """
    # Single regex branch passes through; promoted to CaseList by the consumer.
    def p_regex_list_1(self, p):
        """ regex_list : regex_branch """
        p[0] = p[1]
def p_regex_list_2(self, p):
""" regex_list : regex_list regex_branch """
if not isinstance(p[1], CaseList):
p[1] = CaseList([p[1]], p[1].coord)
p[1].cases.append(p[2])
p[0] = p[1]
##
## Loops
##
    # while (cond) { body }.
    def p_while_loop(self, p):
        """ while_loop : WHILE LPAREN expression RPAREN LBRACE block RBRACE """
        p[0] = While(p[3], p[6], p[3].coord)
    # do { body } while (cond); — note DoWhile takes (condition, body).
    def p_do_while_loop(self, p):
        """ do_while_loop : DO LBRACE block RBRACE \
        WHILE LPAREN expression RPAREN SEMICOLON
        """
        p[0] = DoWhile(p[7], p[3], p[3].coord)
    # for (iterators) { body }.
    def p_for_loop(self, p):
        """ for_loop : FOR LPAREN iterator_chain RPAREN LBRACE block RBRACE """
        p[0] = For(p[3], p[6], p[3].coord)
##
## Class
##
    # class id(params) { body [static { ... }] }: Class(name, params,
    # instance block, static block).
    def p_class(self, p):
        """ class : CLASS identifier LPAREN parameter_list RPAREN \
        LBRACE block static_block RBRACE
        """
        p[0] = Class(p[2], p[4], p[7],p[8], p[2].coord)
    # static { body }: the inner block passes through unchanged.
    def p_static_block_1(self, p):
        """ static_block : STATIC LBRACE block RBRACE """
        p[0] = p[3]
    # Missing static section reduces to an empty Block.
    def p_static_block_2(self, p):
        """ static_block : epsilon """
        p[0] = Block([])
##
## Try/Catch
##
    # try { body } catch...: one or more catch clauses (see p_catches_*).
    def p_try_statement(self, p):
        """ try_statement : TRY LBRACE block RBRACE catches """
        p[0] = Try(p[3], p[5], p[3].coord)
    # First catch clause starts the Catches container.
    def p_catches_1(self, p):
        """ catches : catch_clause """
        p[0] = Catches([p[1]], p[1].coord)
def p_catches_2(self, p):
""" catches : catches catch_clause """
p[1].clauses.append(p[2].clauses)
p[0] = p[1]
    # catch-kind (id) { body }: CatchClause(kind token, binding, block).
    def p_catch_clause(self, p):
        """ catch_clause : catch_type LPAREN identifier RPAREN \
        LBRACE block RBRACE
        """
        p[0] = CatchClause(p[1], p[3], p[6], p[3].coord)
    # Pass the catch-kind token (generic / user / language) through unchanged.
    def p_catch_type(self, p):
        """ catch_type : CATCH
                       | CATCH_USR
                       | CATCH_LNG
        """
        p[0] = p[1]
##
## Backtrack
##
    # check { body }: backtracking block.
    def p_check_statement(self, p):
        """ check_statement : CHECK LBRACE block RBRACE """
        p[0] = Check(p[3], p[3].coord)
# ---------------------------------------------------------------------------
# (separator between concatenated sources)
import json
import time
class CbFeed(object):
    """Container pairing feed metadata with its list of reports."""

    def __init__(self, feedinfo, reports):
        # Keep the exact two-key layout that dump() serializes.
        self.data = dict(feedinfo=feedinfo, reports=reports)

    def dump(self):
        """Serialize the whole feed as pretty-printed JSON."""
        return json.dumps(self.data, indent=2)
class CbFeedInfo(object):
    """Feed-level metadata; unspecified fields get MineMeld-flavoured defaults.

    The ``name`` keyword is mandatory; everything else is optional.
    """

    def __init__(self, **kwargs):
        # Fields emitted by iterate(), in output order.
        self.yieldable_atts = ("category", "icon", "icon_small", "version", "provider_url",
                               "display_name", "summary", "tech_data", "name")
        self.data = kwargs
        # Fill every known field that the caller did not supply.
        defaults = {
            "category": "MineMeld",
            "icon": MinemeldIcon.MM_icon_png,
            "icon_small": MinemeldIcon.MM_icon_small_png,
            "version": "0.1",
            "provider_url": "https://live.paloaltonetworks.com/t5/MineMeld/ct-p/MineMeld",
            "display_name": "MineMeld Feed",
            "summary": "Indicators routed through MineMeld",
            "tech_data": "Indicators routed through MineMeld",
        }
        for key, value in defaults.items():
            self.data[key] = self.data.get(key, value)
        if "name" not in kwargs:
            raise ValueError("Mandatory 'name' attribute not provided")

    def dump(self):
        """Return the raw metadata dict (not JSON — see CbFeed.dump)."""
        return self.data

    def iterate(self):
        """Yield each known field as a '"key": value' JSON fragment.

        Integers are emitted bare, everything else quoted; every fragment
        but the last carries a trailing comma.
        """
        last = len(self.yieldable_atts) - 1
        for pos, att in enumerate(self.yieldable_atts):
            tail = "," if pos < last else ""
            if isinstance(self.data[att], int):
                yield "\"{}\": {}{}".format(att, self.data[att], tail)
            else:
                yield "\"{}\": \"{}\"{}".format(att, self.data[att], tail)
class CbReport(object):
    """A single feed report with Carbon Black report fields defaulted.

    Requires an ``id`` keyword.  ``timestamp`` defaults to the current UTC
    epoch, ``score`` to 100 (and is coerced to 100 if not an int).
    """

    def __init__(self, **kwargs):
        # these fields are optional
        self.optional = ("tags", "description")
        # Fields emitted by iterate(), in output order.
        self.yieldable_atts = ("timestamp", "id", "link", "score", "description", "title")
        if "timestamp" not in kwargs:
            # BUG FIX: the original used int(time.mktime(time.gmtime())),
            # but mktime() interprets its tuple as *local* time, so the
            # result was off by the machine's UTC offset.  time.time()
            # already returns the UTC epoch.
            kwargs["timestamp"] = int(time.time())
        if "id" not in kwargs:
            raise ValueError("Mandatory 'id' attribute not provided")
        self.data = kwargs
        self.data["link"] = self.data.get("link", "https://live.paloaltonetworks.com/t5/MineMeld/ct-p/MineMeld")
        self.data["score"] = self.data.get("score", 100)
        if not isinstance(self.data["score"], int):
            # Non-integer scores are silently normalized to the default.
            self.data["score"] = 100
        self.data["description"] = self.data.get("description", "MineMeld Generated Report")
        self.data["iocs"] = self.data.get("iocs", None)
        self.data["title"] = self.data.get("title", "MineMeld Generated Report")

    def dump(self):
        """Return the raw report dict (not JSON — see CbFeed.dump)."""
        return self.data

    def iterate(self):
        """Yield each known field as a '"key": value' JSON fragment.

        Integers are emitted bare, everything else quoted; every fragment
        but the last carries a trailing comma.
        """
        last_element = len(self.yieldable_atts) - 1
        for idx, att in enumerate(self.yieldable_atts):
            final_comma = "," if idx < last_element else ""
            if isinstance(self.data[att], int):
                yield "\"{}\": {}{}".format(att, self.data[att], final_comma)
            else:
                yield "\"{}\": \"{}\"{}".format(att, self.data[att], final_comma)
class MinemeldIcon(object):
MM_icon_small_png = ("iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAMFmlDQ1BJQ0MgUHJvZmlsZ"
"QAASImVVwdYU8kWnltSCAktEAEpoTdBehUIHQQB6WAjJAFCCZAQVOzIooJrQcWCFV0Bsa"
"0FkLUiioVFwF4XRFRW1sWCDZU3KaDP1753vm/u/Dlzzpn/zD13MgOAsi07NzcLVQEgW5A"
"vjAryZSYkJjFJPUABUAEFGACczRHl+kRGhgEoo/0/y7tbAJH0160lsf51/L+KKpcn4gCA"
"REKcwhVxsiE+BgCuyckV5gNAaIN6o9n5uRI8CLG6EBIEgIhLcJoMa0pwigxPkNrERPlBz"
"AKATGWzhWkAKEl4Mws4aTCOkoSjrYDLF0C8FWIvTjqbC/EDiCdkZ+dArEyG2Dzluzhp/x"
"QzZSwmm502hmW5SIXszxflZrHn/p/L8b8lO0s8OochbNR0YXCUJGe4bjWZOaESTIX4pCA"
"lPAJiNYgv8blSewm+ly4OjpXbD3BEfnDNAAMAFHDZ/qEQ60DMEGfG+sixPVso9YX2aDg/"
"PyRGjlOEOVHy+GiBICs8TB5neTovZBRv54kCokdtUvmBIRDDSkOPFabHxMt4oi0F/Lhwi"
"JUg7hBlRofKfR8VpvuFj9oIxVESzsYQv00VBkbJbDDNbNFoXpgNhy2dC9YCxspPjwmW+W"
"IJPFFC2CgHLs8/QMYB4/IEsXJuGKwu3yi5b0luVqTcHtvOywqKkq0zdlhUED3q25UPC0y"
"2DtjjDPbkSPlc73LzI2Nk3HAUhAE/4A+YQAxbCsgBGYDfPtAwAH/JRgIBGwhBGuABa7lm"
"1CNeOiKAz2hQCP6CiAdEY36+0lEeKID6L2Na2dMapEpHC6QemeApxNm4Nu6Fe+Bh8MmCz"
"R53xd1G/ZjKo7MSA4j+xGBiINFijAcHss6CTQj4/0YXCnsezE7CRTCaw7d4hKeETsJjwk"
"1CN+EuiANPpFHkVrP4RcIfmDPBFNANowXKs0v5PjvcFLJ2wn1xT8gfcscZuDawxh1hJj6"
"4N8zNCWq/Zyge4/ZtLX+cT8L6+3zkeiVLJSc5i5SxN+M3ZvVjFL/v1ogL+9AfLbHl2FGs"
"FTuHXcZOYg2AiZ3BGrE27JQEj1XCE2kljM4WJeWWCePwR21s62z7bT//MDdbPr9kvUT5v"
"Dn5ko/BLyd3rpCflp7P9IG7MY8ZIuDYTGDa29q5ACDZ22VbxxuGdM9GGFe+6fLOAuBWCp"
"Vp33RsIwBOPAWA/u6bzug1LPc1AJzq4IiFBTKdZDsGBPiPoQy/Ci2gB4yAOczHHjgDD8A"
"CAWAyiAAxIBHMhCueDrIh59lgPlgCSkAZWAM2gC1gB9gNasABcAQ0gJPgHLgIroIOcBPc"
"h3XRB16AQfAODCMIQkJoCB3RQvQRE8QKsUdcES8kAAlDopBEJBlJQwSIGJmPLEXKkHJkC"
"7ILqUV+RU4g55DLSCdyF+lB+pHXyCcUQ6moOqqLmqITUVfUBw1FY9AZaBqahxaixegqdB"
"Nahe5H69Fz6FX0JtqNvkCHMIApYgzMALPGXDE/LAJLwlIxIbYQK8UqsCrsINYE3/N1rBs"
"bwD7iRJyOM3FrWJvBeCzOwfPwhfhKfAteg9fjLfh1vAcfxL8SaAQdghXBnRBCSCCkEWYT"
"SggVhL2E44QL8LvpI7wjEokMohnRBX6XicQM4jziSuI24iHiWWInsZc4RCKRtEhWJE9SB"
"IlNyieVkDaT9pPOkLpIfaQPZEWyPtmeHEhOIgvIReQK8j7yaXIX+Rl5WEFFwUTBXSFCga"
"swV2G1wh6FJoVrCn0KwxRVihnFkxJDyaAsoWyiHKRcoDygvFFUVDRUdFOcqshXXKy4SfG"
"w4iXFHsWPVDWqJdWPOp0qpq6iVlPPUu9S39BoNFMai5ZEy6etotXSztMe0T4o0ZVslEKU"
"uEqLlCqV6pW6lF4qKyibKPsoz1QuVK5QPqp8TXlARUHFVMVPha2yUKVS5YTKbZUhVbqqn"
"WqEarbqStV9qpdVn6uR1EzVAtS4asVqu9XOq/XSMboR3Y/OoS+l76FfoPepE9XN1EPUM9"
"TL1A+ot6sPaqhpOGrEaczRqNQ4pdHNwBimjBBGFmM14wjjFuPTON1xPuN441aMOziua9x"
"7zfGaLE2eZqnmIc2bmp+0mFoBWplaa7UatB5q49qW2lO1Z2tv176gPTBefbzHeM740vFH"
"xt/TQXUsdaJ05uns1mnTGdLV0w3SzdXdrHted0CPocfSy9Bbr3dar1+fru+lz9dfr39G/"
"0+mBtOHmcXcxGxhDhroGAQbiA12GbQbDBuaGcYaFhkeMnxoRDFyNUo1Wm/UbDRorG88xX"
"i+cZ3xPRMFE1eTdJONJq0m703NTONNl5k2mD430zQLMSs0qzN7YE4z9zbPM68yv2FBtHC"
"1yLTYZtFhiVo6WaZbVlpes0KtnK34VtusOicQJrhNEEyomnDbmmrtY11gXWfdY8OwCbMp"
"smmweTnReGLSxLUTWyd+tXWyzbLdY3vfTs1usl2RXZPda3tLe459pf0NB5pDoMMih0aHV"
"45WjjzH7Y53nOhOU5yWOTU7fXF2cRY6H3TudzF2SXbZ6nLbVd010nWl6yU3gpuv2yK3k2"
"4f3Z3d892PuP/tYe2R6bHP4/kks0m8SXsm9XoaerI9d3l2ezG9kr12enV7G3izvau8H7O"
"MWFzWXtYzHwufDJ/9Pi99bX2Fvsd93/u5+y3wO+uP+Qf5l/q3B6gFxAZsCXgUaBiYFlgX"
"OBjkFDQv6GwwITg0eG3w7RDdEE5IbcjgZJfJCya3hFJDo0O3hD4OswwThjVNQadMnrJuy"
"oNwk3BBeEMEiAiJWBfxMNIsMi/yt6nEqZFTK6c+jbKLmh/VGk2PnhW9L/pdjG/M6pj7se"
"ax4tjmOOW46XG1ce/j/ePL47sTJiYsSLiaqJ3IT2xMIiXFJe1NGpoWMG3DtL7pTtNLpt+"
"aYTZjzozLM7VnZs08NUt5FnvW0WRCcnzyvuTP7Ah2FXsoJSRla8ogx4+zkfOCy+Ku5/bz"
"PHnlvGepnqnlqc/TPNPWpfWne6dXpA/w/fhb+K8ygjN2ZLzPjMiszhzJis86lE3OTs4+I"
"VATZApacvRy5uR05lrlluR257nnbcgbFIYK94oQ0QxRY746POa0ic3FP4l7CrwKKgs+zI"
"6bfXSO6hzBnLa5lnNXzH1WGFj4yzx8Hmde83yD+Uvm9yzwWbBrIbIwZWHzIqNFxYv6Fgc"
"trllCWZK55Pci26LyordL45c2FesWLy7u/Snop7oSpRJhye1lHst2LMeX85e3r3BYsXnF"
"11Ju6ZUy27KKss8rOSuv/Gz386afR1alrmpf7bx6+xriGsGaW2u919aUq5YXlveum7Kuf"
"j1zfen6txtmbbhc4VixYyNlo3hj96awTY2bjTev2fx5S/qWm5W+lYe26mxdsfX9Nu62ru"
"2s7Qd36O4o2/FpJ3/nnV1Bu+qrTKsqdhN3F+x+uiduT+svrr/U7tXeW7b3S7Wgursmqqa"
"l1qW2dp/OvtV1aJ24rn//9P0dB/wPNB60PrjrEONQ2WFwWHz4z1+Tf711JPRI81HXoweP"
"mRzbepx+vLQeqZ9bP9iQ3tDdmNjYeWLyieYmj6bjv9n8Vn3S4GTlKY1Tq09TThefHjlTe"
"GbobO7ZgXNp53qbZzXfP59w/kbL1Jb2C6EXLl0MvHi+1af1zCXPSycvu18+ccX1SsNV56"
"v1bU5tx393+v14u3N7/TWXa40dbh1NnZM6T3d5d5277n/94o2QG1dvht/svBV7687t6be"
"773DvPL+bdffVvYJ7w/cXPyA8KH2o8rDikc6jqj8s/jjU7dx9qse/p+1x9OP7vZzeF09E"
"Tz73FT+lPa14pv+s9rn985P9gf0df077s+9F7ovhgZK/VP/a+tL85bG/WX+3DSYM9r0Sv"
"hp5vfKN1pvqt45vm4cihx69y343/L70g9aHmo+uH1s/xX96Njz7M+nzpi8WX5q+hn59MJ"
"I9MpLLFrKlRwEMNjQ1FYDX1QDQEuHZoQMAipLs7iUVRHZflCLwn7DsfiYVZwCqWQDELgY"
"gDJ5RtsNmAjEV9pKjdwwLoA4OY00uolQHe1ksKrzBED6MjLzRBYDUBMAX4cjI8LaRkS97"
"INm7AJzNk935JEKE5/udEyWoo++PQfCD/AMf7G3o0obnYAAAAAlwSFlzAAAWJQAAFiUBS"
"VIk8AAAAgVpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD"
"0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjp"
"SREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50"
"YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgI"
"CAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgIC"
"AgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIj4"
"KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjEwMjI8L2V4aWY6UGl4ZWxZRGlt"
"ZW5zaW9uPgogICAgICAgICA8ZXhpZjpQaXhlbFhEaW1lbnNpb24+OTE2PC9leGlmOlBpe"
"GVsWERpbWVuc2lvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcm"
"llbnRhdGlvbj4KICAgICAgPC9yZGY6RGVzY3JpcHRpb24+CiAgIDwvcmRmOlJERj4KPC9"
"4OnhtcG1ldGE+Cj6cgV0AAA+rSURBVGgFzVprcFXHfd/dc859IfGSBAaHGlkgg4TAiZKm"
"7bSxnDpN/MhrYhHbsfOa1E0m0/RbP3Xq637px2YmM52J+6GeTp2HNAmNXRtjZxKRh5PJo"
"EkISIDAwjgEMEQCrMe9uuec3f5+/z1HlpBkE7dOvXDv2bOP//u1e6XV26g5p/Sg6jeXh9"
"eZzqkx19c3lGqt3NuIxDcmxbmqWW5Vtbr8+HJr/9/HBgb6g5yIp0fue++zo/d/4cCxT96"
"Tj71lzNAEMuA6R/ZmnzkTT4/037B/9BPPPz92rxs6s9f9+Own3f6RTzx74MJDqwj7jZhZ"
"Vp0rEUVgA64/oN2ib8GQWskkVoKxcJwC2bt3MD3kHo6MtvvWtkR3xHGa1qbjeOpKI17bW"
"vhgOln7a+7ZdM9T81pbCCPvXzcjlByJ36sH04GRauH5F/vXkCGtq7a6gn3nSFZ6PjbcG3"
"Lud6OTd5Ur5k8u/26uobTWkE+Eh6rXElUx7k+55nzvcMrnSu26GCETlNyzI/3rnxnt/2q"
"TOjIS19Kx/aP3/veB4/3tVTDzZjTT2dssEcmq9F3awEqdMlCx9BxkZK1TrUW3g7CrWlmY"
"wYr0rjiRc05p50xYlQytXR/8XVRQ20ygNqxrje62afotrqVmaCr5vut46j41lElZ714YZ"
"AkHHGobW7UqtFtV7eRmgffIyIrw35CRLuU3W23/cX1bqefyxFwtbtjUpi6ZuFS3kOQf/3"
"DsgVuJiDngOhiQJRAQrceNjPQXYEw7k8SCdEyRA+oFojFQSXPFrJ54Nb2Fm4aGLr05RsQ"
"Z4ROAoZ2zfz47HVNUEd7p8AFwurVNgWovq24iansdRJxf2HIBjWt7E6zophjSBzwRBKkF"
"D6ZgVApGVGLj3Qv3Ltd/AwnCMtGeO37fJnRuSmJarvLWDOZSq2xTUaly2Hgn1/X1tYGW6"
"2s505EynYWSKUHDFjgEXw6hHCjvMNoJI5curQxfoka+8drn0NAQGbU2adwSRLoV6gcfYr"
"8ZRq0iZ8FZKozoTHvYc90MwbH2FIqBatTTFJsQrWhWaEBUCZ2mppSzu5xDgNQ6FZvDGi5"
"Z2F5XI2PNU15CRu8ulYXnlAP8EBLgGQO1wMZ3nD376RYCRoTxe/jyOu1g3xAoBAzt9oDI"
"RZRZgGTSKAXazM0Rvr55cvLBGz245eG/LiPrxm8WZIiCu4W6BXLAuwtgZiZNKUJElbntR"
"JRp0eNc4ZtSBTn20KGHI/R3JpS6l4//hoQKMKuCgUYS54JQrW80YoEPDMvSvOwgoRIZw6"
"7H4HaJ4vmSNaKOIDZYRbIKDm9MQyJXc67FfOEyz8FBH92uFKa3ANNWREFqJPc9JAzANYg"
"qoA7dtAJnCXTaQ1DDw5mVXAN3RUZyZN8/cv9G7OkQW9WMXrAoqIPISkAWos9kBkC9hN3b"
"67V4DZ5Fr239PozGodpRKJhmCEkcHSDnG2jPONPi8HAO8cPxzErmF2adFRlR/X4FDKfDB"
"LolVz+R0cz4JDKYl0oSvBknEqPDC7N++7LfY5lUtUp2RUUhgYx4uBQUGl0Sjk/T0An9UL"
"kujtNKloO/IiN5eHSh7S5V6HoqAVwKH3B8KwWCVM/BNMDZ9osX+2/gzOhof8SyhghZZF5"
"biz2c1U0IgT4/ZACFQMoEiMrIUtC6ZPhGA8HAuY4LFx7aQPi5tbCftxXDbx6zAfxW460X"
"shEhSYgRZKHwpKEtBxNZ34gTSu1Cd/dgI0cAtPCzQRBSNVJgol7y5YzTB0b3dqfUpuiAU"
"lKOwkcihI8IBAn3NOsoMi2plYBysa1taYZfnhFIglUuQUESu4hM6gmInWjxcRGQExmkZj"
"AZr1oVRsmVmBn+B8+d2HtXmqT3gcAtyKAnQFsVxF/IC8tqVannT/ztJuTTrUyyaJrSF23"
"jlY4eArbMeDaTVauC8NWplPB/2tfHLYub53vxmKqqqsjoqRMPtyLOb/NRxclaSVgZsiyq"
"yG7mAlRH73nq6N5/KJXV05Xm4KFCUfeta4v+BpH2uWdOfmo1NTE8fE7sVLmr28NIr/U1F"
"nMdiwYQj2fue6JvzwizIaW2x5O6NMMvq5HbsoxeSGodqbKbYy81b2BEhk+ODOkYPLhw6m"
"pDTTSC+4tFF85MJxhUDSw09Xpqm5qinpmZ+P3Y9l/jjcvEGVub9lQQtqcbCReH0J7QTXn"
"QP/g+rxIwIQwrX6rkAYXMA5a0ZTUy1twpGnFB0lVeJbx6M8s2cXeJw1jFviyGxV1tuLBW"
"Zxmr+ClggkGtMDsTI6jZCYypprZpzmGf7YG20cMXFgkMvGKDKwUBEgvnME4GcUZpSK6x2"
"86f72+TicxqfB8BIu8sfPIqRt6t3RPQECAbIuKHEuOzDDtmH10ZoBvVIVsQwlwj2gMQFz"
"LRODMZhZVxrv3FE/tjPsFrN8O2mAzfuBaVllPBK0UT/SjE4Ze88IMmjGDtDTDfDg5cW0E"
"sYYTE3X77UMLFTrseVKXsSkNPHB3lgy6Fws88gzSxOWQDjpJ0rsViGzIEKfPS+ztvPk8g"
"1aqyPz31xQ2Ygu8hSyHfcZxrWTyiBH0xNG5fAG8H4fPIAZcOj6iQSMi+toJYwghQiZR/c"
"vzvm+Fc27MDDyGSSmsA36hwIjLBBPtCAr6gDU1XwkLZT+LQg0aAwphjdPQBHKI4/Go82w"
"EaN0rEYtTjUgxkxI9XIv0TmhLkIXMULiUT0FC1FUaurSCWMDKYnQhr7uJWbIKjSx0kxIH"
"ItAKfiV34I8B8gX0oQrJyHXUxlcdcQML4zU00HZB4RMauKolYxsH3fJIV38M6uIGAwWlN"
"j61p3TCSxOoKcodAEt2jx3oPYHdxMHd49tmWMJJndOtiImN2ZukujBBMhEoxToOfQf0/j"
"2hj3oxUTUjyCylB/MPDhQ2U4aEpHCay3NFTl96KMz8XCfXCOdIk1wZBCO19bQ42+yKSLL"
"dJcMBTZ1rqPHPmgXWcyK2HvSWMHOQoGqNKII4qgMgI8Rn6TKyjX4c6PSJZmWOguA7FySK"
"hCiuRPExg9NycqylTHsOImv5lk/geproIB2rwRSh0hr1BvWYbFV06ybUAcDRi1vV4ZUAK"
"V+U2RVG9nQMqsx72ljDyCC6OOeFc2sPrmLyhh6SkgpmpVCX16BSEeXJqKmF4ChLQU08QT"
"4gXH6yj8hwJwb+XN8UbzxIOC77vHf887oDstsz3ZD2xRZA+aujfNBebXuZaqPMwPmyEyh"
"6ZTpubeWVgpYBU6rVSZREjlBCJOHSuWgEyHHiEJ1kDaBYhEedede7iml1n67Z0FuHzYhE"
"ENFCP0jeJkSjxn6woOjqC0rHu7mrj64d6SYEq2fgmDN6Y+V6O39JMgf/YO9u/eoXrEEgO"
"17y9wn4lehG8ZTgHjXL2wXUHGZSWA5KXQbVX3iemzv0RKNnC0gQ0CVFCGBgBp2Nf2f6Vu"
"Q0bBqcB8iTtGPicODpQEXL2kOIJFP2awDtbmsXREzt7S7mMjIdCC+tInOccPWOc+BKH0r"
"R4Ej4xBcKB3+sGPXF4ECkOj2I095/FpsXfJQjEuPotvNngLQmwiJY4zvAHA/IRSBaqoxy"
"rscBf0MgMjN740iYQRpTaKisCHe6m72EH/YWi5mFDGhL4L3xH6c2bd/wWyF7KHF4kD3aQ"
"GCX3dJ6U2k3xDtrT7EH4785en9FTF++JCkAG+4KIRa9Api0tzYa/yveAw18mUAUtINMbp"
"UuyXGCAtGbrQaJHub6WvCLEwKnO0ozQWORABE6vay1FM6+mP7yze/BJTjhc9DHvoDsqeU"
"jGOKMQPChbtWX16tl2DjyS3T4KNxxgO/jokKgKPtWTOZqf8N9BbQYUR2YkH4xUcvTqdKq"
"Q0RnffEMHFMN5Jb6eqc+Vz3Dizm3vldLkQzu/9W+4rfwPzBtkcjBjLC6vnwhLLR/nOn8I"
"y5zY6iM0JzYPFkJlLquEOAn5E+Pw8PhijVBFLB8GBgYCZ92ORVEF8RJ1EMFdKOu14wIZX"
"wjmp+qJe0VB/MDkJe6RSmAAnLEPv/uxWSCH8qrzd8N3dX33MzbR727U3Ud0EnTf1f3dBz"
"/Q8dhVf1nNUn/+GuqIaADc5jjxlGiIiJpleH8RTvVK6+ryd7wtuw68o+GyAw9NGeQhytt"
"CIQhQhpx83/avXcq2aL3xyVeOnP74CdRTG01sYxCMe0ex+QQRjj8NvMC1Q0N9UM8QArS4"
"rcgYmXkYU/x4LTwKPN6csguMYTi/PlavJ7PwwwovKLBUGGJaQKj3x2RchFNQ84zkx8dY1"
"bbDP5rgVGRBNjJosA4CGWJWPI9zPYtLBLbHipXgfQiVRayDJ6pg7fpi+epkfBqR5+sk9G"
"BfH4gYYnfel3iWZxXRh7mcAVkgXz4azcw0/6ZcunIGDr+T5xqQAd/BYQZFHZ6dzn22pPX"
"jdWpyocoyOHF3AbcK8MH8VpEmI0UbJCwRKGOCrq97O/Y9MXVl7p9gRq4QSdGfgolnorB4"
"xz27v3FZfiDKJJ0hkAeP0hTEUiY8s5Rye/vjddjECUYuvOemK9eoCN9bLl2a2uJhjniJ8"
"yW/bOBdbFY9Yy8jFv+rsD6bKhMUXwu91E8G+q927nsENVJH2tB/meryzju7vnP3HZ3fGM"
"9/IPLIfr9vb45Eog5DgPOb8Y7LTWdR0hdtHO/kBH9uENPCJNb6ywZs7ZJ7KjDhd+McCkd"
"PYzdZUuYUx3KmX7P5KvZXT2OKHyLXvLLJbyo59mabMeYwcwcaYzZFR76SIqJePbC8jJCQ"
"nflIFURX3ffH798Y1+baJaOz9PESByP4oSJRp2/vevwCNqr+/gHYvOeTzHBv7jdkEkLBf"
"C4Y7vj9W/4TBaR/fLaGDIaaDpjo8IKYDoMkJw5PPxNGsmOjbcym22BsG7JbxfxkgYxOV7"
"JHSQ4JhkhERHzP2/+F9HNY/ukdfvPmfcfOn/voC62thb+YmGjMQXAR/CNB5MIvGvodXEs"
"/E2fPLxsQDxYdeEQn4B/Mgw9fB12+2Scgj+yt+/Zm6/+QIAjd5ycnG6daWopl3J+Fa9ZE"
"ZSlIjf53UuAQAUUj63ovU2VkrUdqcUQIAJIGHrTcazktoTf/JdbPvrXf9FtaAArUU6dPf"
"+w9brLxJWD8M1COFO3+84Ybn8x+iB1MhZHRRwcpc5rhVj7RkeM3ng0cZ8u1aXu6bDZKcj"
"v46Gs5Qda+xV9yaU1zbh9kef/PC9H5IOU9WUzrttv65Inj9zc5XGkKy0VcLq1eUyjPTqf"
"1wJjP3d79r9NyIY0/GlgI7A/R13IDXzXO9fG2GTmNff8XGDn+zIDklX23/+i996EQ+DL4"
"KYGpQ84E/3L3zm+P/W9yQo7sD/Ykt8shoyaWG39bj2VEC0PsUxNva4Iz4v4H8vqTm++oi"
"2AAAAAASUVORK5C")
MM_icon_png = ("iVBORw0KGgoAAAANSUhEUgAAASwAAAFLCAYAAABsjLGXAAAMFmlDQ1BJQ0MgUHJvZmlsZ"
"QAASImVVwdYU8kWnltSCAktEAEpoTdBehUIHQQB6WAjJAFCCZAQVOzIooJrQcWCFV0Bsa"
"0FkLUiioVFwF4XRFRW1sWCDZU3KaDP1753vm/u/Dlzzpn/zD13MgOAsi07NzcLVQEgW5A"
"vjAryZSYkJjFJPUABUAEFGACczRHl+kRGhgEoo/0/y7tbAJH0160lsf51/L+KKpcn4gCA"
"REKcwhVxsiE+BgCuyckV5gNAaIN6o9n5uRI8CLG6EBIEgIhLcJoMa0pwigxPkNrERPlBz"
"AKATGWzhWkAKEl4Mws4aTCOkoSjrYDLF0C8FWIvTjqbC/EDiCdkZ+dArEyG2Dzluzhp/x"
"QzZSwmm502hmW5SIXszxflZrHn/p/L8b8lO0s8OochbNR0YXCUJGe4bjWZOaESTIX4pCA"
"lPAJiNYgv8blSewm+ly4OjpXbD3BEfnDNAAMAFHDZ/qEQ60DMEGfG+sixPVso9YX2aDg/"
"PyRGjlOEOVHy+GiBICs8TB5neTovZBRv54kCokdtUvmBIRDDSkOPFabHxMt4oi0F/Lhwi"
"JUg7hBlRofKfR8VpvuFj9oIxVESzsYQv00VBkbJbDDNbNFoXpgNhy2dC9YCxspPjwmW+W"
"IJPFFC2CgHLs8/QMYB4/IEsXJuGKwu3yi5b0luVqTcHtvOywqKkq0zdlhUED3q25UPC0y"
"2DtjjDPbkSPlc73LzI2Nk3HAUhAE/4A+YQAxbCsgBGYDfPtAwAH/JRgIBGwhBGuABa7lm"
"1CNeOiKAz2hQCP6CiAdEY36+0lEeKID6L2Na2dMapEpHC6QemeApxNm4Nu6Fe+Bh8MmCz"
"R53xd1G/ZjKo7MSA4j+xGBiINFijAcHss6CTQj4/0YXCnsezE7CRTCaw7d4hKeETsJjwk"
"1CN+EuiANPpFHkVrP4RcIfmDPBFNANowXKs0v5PjvcFLJ2wn1xT8gfcscZuDawxh1hJj6"
"4N8zNCWq/Zyge4/ZtLX+cT8L6+3zkeiVLJSc5i5SxN+M3ZvVjFL/v1ogL+9AfLbHl2FGs"
"FTuHXcZOYg2AiZ3BGrE27JQEj1XCE2kljM4WJeWWCePwR21s62z7bT//MDdbPr9kvUT5v"
"Dn5ko/BLyd3rpCflp7P9IG7MY8ZIuDYTGDa29q5ACDZ22VbxxuGdM9GGFe+6fLOAuBWCp"
"Vp33RsIwBOPAWA/u6bzug1LPc1AJzq4IiFBTKdZDsGBPiPoQy/Ci2gB4yAOczHHjgDD8A"
"CAWAyiAAxIBHMhCueDrIh59lgPlgCSkAZWAM2gC1gB9gNasABcAQ0gJPgHLgIroIOcBPc"
"h3XRB16AQfAODCMIQkJoCB3RQvQRE8QKsUdcES8kAAlDopBEJBlJQwSIGJmPLEXKkHJkC"
"7ILqUV+RU4g55DLSCdyF+lB+pHXyCcUQ6moOqqLmqITUVfUBw1FY9AZaBqahxaixegqdB"
"Nahe5H69Fz6FX0JtqNvkCHMIApYgzMALPGXDE/LAJLwlIxIbYQK8UqsCrsINYE3/N1rBs"
"bwD7iRJyOM3FrWJvBeCzOwfPwhfhKfAteg9fjLfh1vAcfxL8SaAQdghXBnRBCSCCkEWYT"
"SggVhL2E44QL8LvpI7wjEokMohnRBX6XicQM4jziSuI24iHiWWInsZc4RCKRtEhWJE9SB"
"IlNyieVkDaT9pPOkLpIfaQPZEWyPtmeHEhOIgvIReQK8j7yaXIX+Rl5WEFFwUTBXSFCga"
"swV2G1wh6FJoVrCn0KwxRVihnFkxJDyaAsoWyiHKRcoDygvFFUVDRUdFOcqshXXKy4SfG"
"w4iXFHsWPVDWqJdWPOp0qpq6iVlPPUu9S39BoNFMai5ZEy6etotXSztMe0T4o0ZVslEKU"
"uEqLlCqV6pW6lF4qKyibKPsoz1QuVK5QPqp8TXlARUHFVMVPha2yUKVS5YTKbZUhVbqqn"
"WqEarbqStV9qpdVn6uR1EzVAtS4asVqu9XOq/XSMboR3Y/OoS+l76FfoPepE9XN1EPUM9"
"TL1A+ot6sPaqhpOGrEaczRqNQ4pdHNwBimjBBGFmM14wjjFuPTON1xPuN441aMOziua9x"
"7zfGaLE2eZqnmIc2bmp+0mFoBWplaa7UatB5q49qW2lO1Z2tv176gPTBefbzHeM740vFH"
"xt/TQXUsdaJ05uns1mnTGdLV0w3SzdXdrHted0CPocfSy9Bbr3dar1+fru+lz9dfr39G/"
"0+mBtOHmcXcxGxhDhroGAQbiA12GbQbDBuaGcYaFhkeMnxoRDFyNUo1Wm/UbDRorG88xX"
"i+cZ3xPRMFE1eTdJONJq0m703NTONNl5k2mD430zQLMSs0qzN7YE4z9zbPM68yv2FBtHC"
"1yLTYZtFhiVo6WaZbVlpes0KtnK34VtusOicQJrhNEEyomnDbmmrtY11gXWfdY8OwCbMp"
"smmweTnReGLSxLUTWyd+tXWyzbLdY3vfTs1usl2RXZPda3tLe459pf0NB5pDoMMih0aHV"
"45WjjzH7Y53nOhOU5yWOTU7fXF2cRY6H3TudzF2SXbZ6nLbVd010nWl6yU3gpuv2yK3k2"
"4f3Z3d892PuP/tYe2R6bHP4/kks0m8SXsm9XoaerI9d3l2ezG9kr12enV7G3izvau8H7O"
"MWFzWXtYzHwufDJ/9Pi99bX2Fvsd93/u5+y3wO+uP+Qf5l/q3B6gFxAZsCXgUaBiYFlgX"
"OBjkFDQv6GwwITg0eG3w7RDdEE5IbcjgZJfJCya3hFJDo0O3hD4OswwThjVNQadMnrJuy"
"oNwk3BBeEMEiAiJWBfxMNIsMi/yt6nEqZFTK6c+jbKLmh/VGk2PnhW9L/pdjG/M6pj7se"
"ax4tjmOOW46XG1ce/j/ePL47sTJiYsSLiaqJ3IT2xMIiXFJe1NGpoWMG3DtL7pTtNLpt+"
"aYTZjzozLM7VnZs08NUt5FnvW0WRCcnzyvuTP7Ah2FXsoJSRla8ogx4+zkfOCy+Ku5/bz"
"PHnlvGepnqnlqc/TPNPWpfWne6dXpA/w/fhb+K8ygjN2ZLzPjMiszhzJis86lE3OTs4+I"
"VATZApacvRy5uR05lrlluR257nnbcgbFIYK94oQ0QxRY746POa0ic3FP4l7CrwKKgs+zI"
"6bfXSO6hzBnLa5lnNXzH1WGFj4yzx8Hmde83yD+Uvm9yzwWbBrIbIwZWHzIqNFxYv6Fgc"
"trllCWZK55Pci26LyordL45c2FesWLy7u/Snop7oSpRJhye1lHst2LMeX85e3r3BYsXnF"
"11Ju6ZUy27KKss8rOSuv/Gz386afR1alrmpf7bx6+xriGsGaW2u919aUq5YXlveum7Kuf"
"j1zfen6txtmbbhc4VixYyNlo3hj96awTY2bjTev2fx5S/qWm5W+lYe26mxdsfX9Nu62ru"
"2s7Qd36O4o2/FpJ3/nnV1Bu+qrTKsqdhN3F+x+uiduT+svrr/U7tXeW7b3S7Wgursmqqa"
"l1qW2dp/OvtV1aJ24rn//9P0dB/wPNB60PrjrEONQ2WFwWHz4z1+Tf711JPRI81HXoweP"
"mRzbepx+vLQeqZ9bP9iQ3tDdmNjYeWLyieYmj6bjv9n8Vn3S4GTlKY1Tq09TThefHjlTe"
"GbobO7ZgXNp53qbZzXfP59w/kbL1Jb2C6EXLl0MvHi+1af1zCXPSycvu18+ccX1SsNV56"
"v1bU5tx393+v14u3N7/TWXa40dbh1NnZM6T3d5d5277n/94o2QG1dvht/svBV7687t6be"
"773DvPL+bdffVvYJ7w/cXPyA8KH2o8rDikc6jqj8s/jjU7dx9qse/p+1x9OP7vZzeF09E"
"Tz73FT+lPa14pv+s9rn985P9gf0df077s+9F7ovhgZK/VP/a+tL85bG/WX+3DSYM9r0Sv"
"hp5vfKN1pvqt45vm4cihx69y343/L70g9aHmo+uH1s/xX96Njz7M+nzpi8WX5q+hn59MJ"
"I9MpLLFrKlRwEMNjQ1FYDX1QDQEuHZoQMAipLs7iUVRHZflCLwn7DsfiYVZwCqWQDELgY"
"gDJ5RtsNmAjEV9pKjdwwLoA4OY00uolQHe1ksKrzBED6MjLzRBYDUBMAX4cjI8LaRkS97"
"INm7AJzNk935JEKE5/udEyWoo++PQfCD/AMf7G3o0obnYAAAAAlwSFlzAAAWJQAAFiUBS"
"VIk8AAAAgRpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD"
"0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjp"
"SREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50"
"YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgI"
"CAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgIC"
"AgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIj4"
"KICAgICAgICAgPGV4aWY6UGl4ZWxZRGltZW5zaW9uPjk5NjwvZXhpZjpQaXhlbFlEaW1l"
"bnNpb24+CiAgICAgICAgIDxleGlmOlBpeGVsWERpbWVuc2lvbj45MDI8L2V4aWY6UGl4Z"
"WxYRGltZW5zaW9uPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaW"
"VudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g"
"6eG1wbWV0YT4KTlGaRAAAQABJREFUeAHsvQmYXNlVJvi2WHPRmirtS2qXajNZA8ZuqJDB"
"XxsGA20mhdtuFg9QYMyOh6U/BoWmjcHdA3xA21DFzLDaMMpv+gNc2GCXrZRtDIbKqrJdU"
"qm0lfaUlJJyz4h46/z/vfFSKeUWEflexI1U3CplbO/dd+6555577lk1rdVaGGhhoIWBFg"
"ZaGGhhoIWBFgZaGGhhoIWBFgZaGGhhoIWBFgZaGGhhoIWBFgZaGGhhoIWBFgZaGGhhoIW"
"BFgZaGGhhoIWBFgZaGGhhoIWBFgZaGGhhoIWBFgYeCgzoD8UoW4NsYUARDARBgDV3VO/r"
"O6l3dR2cXn9DQyeD3t6DgablA13X8NpqLQy0MNDCQJ0xEASafizoNY8fz1n5QDMqeTyv5"
"z/eW8n1D9M1LYQ8TLPdGmvdMJAP8saGgU+aP/7UgDPzoZCwrK9c+4P1CTfTYSTc1UEwld"
"b1dKFgF+52ZFcMP/bIj92ccb1+/HjePHQo78747qF+22JYD/X0twYfNQbIqJ7u7zcOHeq"
"fZjJfPpff5enFb/W94rd6vn3QDSZ3ekExbeiJFI5/hh8E+N8pWXp2zDCSr+pG6gupoOPv"
"3rrnw68QvnxeM44cyWu6nvejhrfZ+msxrGabsRa8SmKAx7d+LWce0iWjOnvnLzuv3vnKu"
"xxv/D1+UHxrpk3P6oanua6tObamBWA9vu8LZRWYlmbopmaYgZZMmZppWtrkuOaZWuazyc"
"Ta//b0rv/z8xz08SBnhf0riYQ6ANViWHVAcusRyxsD1Dcd1vs8jvLkrWPtg8P9P+75Yz+"
"ZyjrdWuBpxaKn+R7eaLqvQ06CRt0Q2imhgJ+JG/wEaQvsTNMNzcq2W5pdMPEp/acr/L0f"
"/Kb9//kOdWEzpbeZdz8M71sM62GY5dYYY8PAsy/2JEI91Qun3/+fvGD0aLbD7y4Vbc0u+"
"WRS4E9gP1pQkcJ9GlAdzC3QPPxvda5M6ZOj+uWEseo9b9v3R/+UP65Z+UPa9JFz+p6H4E"
"2LYT0Ek9waYjwYOA7GcQiM4+zg73ddHHv5jxKpqXe5rqPZRd8Fj8LaCswInkwXByeVMZJ"
"2UfdSxqp3vW3fH//dwypptRhWBBTV6uLhwgD1VX041h3WNe9L537y0KQz/PFMu7thYtzx"
"tIASlWZFjpFAd6yklvCh2Uqaa7/rbXv+8O8fRqZVnZga+Sy0OmxhoLkwQGZ19Kimk1mdO"
"PPMeybsW5+zkqUNE2OurQc8+sXArIgiPUh4juYYpqeV3Nt9J87/8mPUZZFpNRcGlwZti2"
"EtDX+tux8yDBzt10y4GfjHz/zAT3jm8Mc1w9PtokZfqySVVXGiAxr5hOsEdqZNyxRKV/7"
"sxeDFBJlWPp9/aNZxrAiOc/JafbcwUG8MhDqrL5372e8vaYN/bTsO3ROo/K63lON0rEgm"
"7ImV//u37Xv2Q8cCDVZKTVgp642Tej/voeHM9UZs63nLCwO0BlLB/qULv/otE/aNv3KgX"
"Ncaw6wgx+nG1KStuf74L754/fe2klkdO9YbhYJf+UlrMSzlp6gFYKMxQGZA14WvXfr4qq"
"nStT9Pph098HQHR8B6S1YSFUFgeq7mZDv9lePjZ9/HL7t6hx6K01JjEN5oCmw9v4WBSjE"
"AJfthrU+ExAwVjv/3dHth+8RYAF91LVlpFzFdZ9i2rcFBtRfxiR/WdTBQOKKi0Uq5bFtL"
"wlq2U9saWBQYeHbgGQuq9OALZ3/+e4zkxHvGxxwyr0QUfS+xDwP+XhrCfg7864Vfe4J99"
"fcfWvbHwpaEtUSqad2+fDFQllgouVifOf1D/0fCcsC7dDKshq8bxh9C+POybZo5XrzxjY"
"DpxeU7E/dG1pKw7uGi9a6FgfswEEos/Wd/5j+lsqXHi1OeBz6hhBQjcmUhdlp4fvnuXgJ"
"+Itcvjq73DWKZfWj4TrHM8NkazjLBQFm6cvFqfvb0D33AQrQNvTfj9rWqFn0IqoaxsiCO"
"hHkdJ0SIXcs5Y2lLwqqWQlrXPxQY6O8/KiSpL1745ZxhlZ4qTrnMbayEdDU9AdS0u65m6"
"enNQXBshfwebGsZtxbDWsaT2xpa7RgYyuWFtc12br8rmYbYopmuatIVbAFGqRRoCb24bX"
"j4lS1ytCdbDKv2aW/d2cJA82GAx6rQc9z1x3o8HLvwn3KMgAzUx4kwm9EtvNklMX2vsEX"
"zYX5xiFsS1uI4al3xkGGgr69XrIvPvd67KQi8XY4tMoMqt1bIQeF65XW24dUfOsBpGhj4"
"pHKMNUryaSnda8SmsNJg3w1LNrEbWabpCLZi0EyrVFONmG38bV1d0mvccYNupLVa47m+n"
"NHGgzYXBHoS4Yy6Ye3jjz09A8s6prDFsOYigTm+Y0T8009rxpmOT+qDIApYYmBCFpuZ0H"
"XIW5AlScvzrX480KychrBU7bC/nK02HOxybZCrnmhrY3515LlSTeEOgEh4TBPIJMqePy4"
"YFumybOGcQZfLZ4ZaDGuRuQyQr7uvr087fDhPor3Pz4Um76/cPdo2OvzVtGms0dfoB0tP"
"bv+5cRhvvEM6o/gPi94fxkRri6BV6Z9P5HKY537CeAAMgJyBWnflWplhGboL0tT1Lbduf"
"XT9unUfuNHXd5jH12UpaSk4DWrQBerBWYdO5H0ITIJJXbny5cxVv/8bbGf0m31/6smSO7"
"QDkKZBzSuCwM4iw4dhGtYUnKDHTCM7lDRWfDGZ7Pj7N2/79QGOKJ/XjAMHevXDh2WxAjV"
"G2YLiQQzM9GP61Ml3fTHbZv67qQn4DuiN925/EFYo3DUU2fEPrvQMCwdD3dz/VFfXfxlY"
"zhtkS8J6gAoYmX/qVF8QFq986epvPzE2dek9r008+92BVtqX7QCP9x0tDU2VTxMNLUiiv"
"gA99myUanI0wyiCeMa+fWRcP/LC6R/5x2RixW9/687f+Zym9WnLmZgeQGVTfjyq5THB+e"
"BTZ9/Xpduj2x2GDrLGjWIHLEoaBCkF2CBO+ZmsaRSKE4/iK7FBNiXyKwC6xbBCJIHfPDv"
"QYx1+qk9U6v3nSx9+61TxygeHxgfemWn3TNN2tBLKNY2P6DjqgVxIMfwrX8NeQEWCssHO"
"PLzzE4kO5ztKxYnv+MxrP/DX7cbWD75l729cI9PK5fqpB1NsGdwbxsP67oAm/ZgMR9uCy"
"dnkkmFVWGK+3jgjqaWNQDdBbFbCM7Spyd2EAbS1bOlKOVNtvSedz2O1Xs4wcx69dPkjGz"
"//+o/+6UTx1S9ZmaHv9bVJc2K05JaKyICk4TJdZ/Q+GT3/Ud95/z+pnLXQHyL6jWBq3Hd"
"su6RlOkvvHvPODrxw5offzrS2zAtO5Sj6aDWFMDA8sEqsCRz7H01nTcbiUBek5DyRZtNl"
"kYOSPtI1P05UYiPkbqkkzIRvKe2hl7DEEU3Pu3lg8bOv/+gP3pp4+XezHd7qwhgyOo6bD"
"vYvE6xF4IkEUmVDcqIgwRxF4yOOg8Lkj/jexGc+//qPfOBte//vj4G0DDAtENjyzmFUJc"
"4aevmenjNimk3D2GMluOrpmxlJua5Ix0UgaSFMcasFh3I9FOzxi9tBT0nQE2tL44rlx7M"
"eaglLKNYh7ZCSPnPqB/4wkRr7MzNhr0YFFIfSFJlNWUHFS5bSSDlJpxi4PmprGqmJj/af"
"+emfyUOhD0Fr+VHVUjDV4HtzWr+wrgWa8wQqyWPZqykFkx1ZZFiIboSgbqBoK77xd9y69"
"aEtRGHZUsi3y6o9tAzreAAr4KG8++L531rxmdPvfSG7wv6JqSnbc2yWFBcJ2qJnJDhKYh"
"F4U5NFzTdv/96Jsx88nIfu/niQe+glXRVWFY/o1CuePHkM2USN7XQYRYueDpY4WAKE3VR"
"LYPVa+Eem6iLS0TTddsNwtrL7rq7lGaLzUDKs8Bj4tUu/ueqO/crnM+3Ot42NQKsuswvF"
"HZGP/g3X84ta0b3+xy8P/u72Q3q/ewz+Xkuk49btS8QAvO3EehhM/AOyH3g7yiE5yjEsD"
"pMSFo+DlLKkVkF329oMvL/1GH/P5a4rCTdhW0p76BgW3Rao9L5y5VjmxtSrn850+N8wPu"
"qUgESmva3XJFuOrdlglJ13Rk/9N07gYR15w5epopTja4bW1S9DcowgscO09DbPUzckhxJ"
"WmltfmWJRSAfiIbQbvr5L4vo5IR42A96rgfGhYlgU+UPHzdMTn/rLTIfzTROjYB3YrKpB"
"WkTXJibGbLhwjf8vXzj7q0+jz+B4f74lZUWE3KV0Y3uTsBByaejKWgi5tZJhkV9R2sJfn"
"VklNG1yWVsKHyqG1d+vC4bwudefOZLuKLwLkhWU6w0rKAB1iekm055mezc/QJI7kYNnfa"
"s1DAMnTvQL/MMXcx8lGDT5t2EQzf1gAsWFmzahvSpfQvWbg5BHfLP5zp1Pdcqv82X5q3z"
"RMnh5aBiW0FuhEOYXzv1vb3X9kfzEGE+BOqa8bsfAWeSCZ5vFgouCmIV3/PMbv7kd5OU/"
"LAUxZyGjwV9QD0SrLcHwA/sgpHFF2ZUEi7qr5P2rFwyL7Ku0NQi+vInj6O/vv/8Kftnkb"
"dkNaK754FGQeqsgOG4VnGu/l4BUg2SN8GgXMTVz3VKX72CQgnVH89NZv6PkXD7Eh4apTe"
"oCQOsh0xiQITkoPfN6fi1MhdscGzRiqOlyQl5KZkUrId9LMUoeCZNJLYHvhMf79OCW0Zu"
"HgmE9N/CUcBv4/Nm/fn+23espTnowAsPHqsGNtAZXat+AnOdpzrcQnPBY0mDQHrrHhyE5"
"U8HwRi9wN7uUVmSQqFK4IHOiGJgCzSAkZ/pIiK/AZ3U3kyHYQ/sl0P3yZRn9XfYMK0DYD"
"UNuzp79y07XG//5UgmCFb1tlGmIo4CXsueVRMZIHksoESoD3kMCSJcm/ZZsvXQgnTENTA"
"L5gprzAF4qFO73MywwKgbki9ODoKVcbvmF6Cx7hhWe4y+5X/qP6ay3Ax7BmFHE/6nSYI/"
"m8cPQjS0n3/joegnWUTUXiio4iwGOM2Fq4cDenUgy2tkgnajZQB2hhfBBAH14Jvv+WJjM"
"j2Lig5c09WdxVGrqESwAPCUVism8xA1Gf8gUvEq1uD2hx4IDYHG9b42sA6g3+soZAxYYW"
"uuniDEw+LxMLYxAhMeZNgj/KblpkP1wt6XT6GxWpKOKjocIVWPr5OSzG9rafnyQKbxx+e"
"xLI8Zfvbpb1gyr7Lnsff619/cUg6FvKhbEpqmOdCVmGelJfc/vbE8abVaGO+PXesXxhOm"
"WW60eGChvbNJC6LvdmA5xFlRxlVPJznAcKt35/v4mNz9NK6yfmrpMSyEY1vIq+7Wsj4Td"
"5VQhdnDnze0dhgGtBKUtpXZOpsSCP5aXTTsgwuui5PjAQF4pGO9fFMvvUxiS8y9X85uh0"
"hYhOWBZyq0NEgW5apIhOWRYeP8AoYh0ONksEozoUwc5UwPlNcD3y6EpNylRIvVCz7DcNQ"
"NXTB6Oh7P2pCifV2tf2CmhLXXhweoKL+Xnn1+e+bhrxU/c94UhOa7jgGG5q+gxrpJZZub"
"4ScBprFpaCOduhp9MetBjTe3i7z09zylJ83PDvvi3y/ZISCag6zJ/OqSYx0QqY0X1EuCq"
"yApIxfvozvB4Un5dVsS2ODk29oqSfeexTJupTU46SNYYMNOUeg1AhRZC5nSfzbfgj8Ua1"
"b77RBl4dY0HNWB3GUtY8lj1wmv/YQ2Y13am38Dszp7fGpAW+S1grcjfgDyBXvfw8HNbZP"
"8yc0Dkz2p1OAsDQ0P9IA6hE9qlG9g+AiaDVY9WCCQlP/pgLUDIiClkiE5pGwZh4nq8qGl"
"AmDURFXyxbCWssjuDb/vGViSNlLm559qQKkBSnJeQCJEURMQIGUZhhavd2YavLveXMwfE"
"+exW34JJYbuQR3A/KD4RQGzBvgZ+JXiYUigStAJORQlrfuh0vYRK1dTFjYz8Fje/i8gFQ"
"sFkWUhay1jCwhShwUx9EI6AOvxTSIMLbEzy+nr/FUQIRorqJx5KSkGFckvoseoNx8P6vK"
"NH84ImguDFLISRLS5ipTAnSq4LcFEtAWjDkJy554whOhiAYXdoTmkrr1lOm5+SEzP3RFT"
"37dBQl9iEzMDaa4koUTVThQgixCwkDRoE4IXvu8LpD5VPuE22WswYOHBAmv2/cuFvNnlB"
"qRsZZ/HExsaYzjVkclVCJpL2gV74fp7dV1gK27D5edqQSOY3V3/N+t2yPBJS/xAq3APdf"
"8JHBaSFZrhRkzdNhBCsTIDoUQjUx8N8RogO4TgE5I0Ccdk/V6YS7tN8w+g2LS2NjAdQ+K"
"iJdG5uzOFOCyFTX83DsDBnukjmh0uEpXA5bX5YJsuxcf/RtE+d/ekUXnbQEXCB2RXXNuq"
"PIELYo6Adhe4BbmKBv31s7BNrJTz5+WmyUQAvs+d2dHxS4Lho3z6QxqEQxUekeUbBcZKq"
"p0NyJInPC6Uo8htMJ/MTm9+8FzfRD8uSYYUVQwz7xhZQ4w6bSkhQoorzco8IA8Nmcaags"
"KVYPAN/ILbl5aUsx6TW3wsXBuTRWzf2+fCFo1SrFoQSGtIJ0yEzad/iTSbz0wJvaxAsr2"
"R+y/JIOJ1TSte3myZyc7s+7brKeY0+QIQ8h3jQPZjF0jgdXV8peykvC+vO4ous/leQOYU"
"WQs+f2M/CbmqyK6nR4FEQxhmpI8D7+Rs3P1KXve3u3S9y8zu1XDY/JaWO+Seisl86OsbF"
"dGL9P4bqvbhJV1LMJ0ndR4Sa9FIOggmheF9uXsqVzV49r8oLOrk08vwq0MhWh0dysjAFG"
"3kprYMLWwinAdcRhgaP9yDh+6bQY2nl9DnTVzTpm2XJsC70dAsxH+rTPSrPy2wiLBcS8O"
"0ny3C3pKtYJ1AeuW8OvbTR86c2eeBXEF+UY1gEiJtbBRbCe9hCTH02i/vKyfwGwvQ5965"
"oynfLjmFRzEfJrPJC95UNyZmLCEFBSJmMODDN7g6CsylaCPP5/LKbI1VWSuif5CfcfcmU"
"iSK33ELU9AonZFS4V0oMXAczk/n19Mj0OargvlY4Kh1/rf034D4p5v/DyR9ZDel+s8vqv"
"YqG5MwmQl2n7kHXnO2jo38tFO9HjrQU73ER0VCun4KLViiN7k6kuMep6asXjj+MIRRAh1"
"/O88qDLYO4A21cnDLwGexYTWY8zxDm/HrZMawww2jgO5sxQ1tEbm4oJuYcvQJf3k+EsuR"
"4IhFkPW9MFBIIpQAFQF1uIOiHGQiBBl89JO3D20o4QQOwQLBoIZw7ad98AHHzc3m+3TY+"
"/udMDKmF1vP57miG75cdwwqRrhvFfekUcnOLkBz1dpZ5iJCM1WMhAd8dFilxwvG0XqPFA"
"KQNSt6CRbneRLdgWFzeCjYCKYwztB9V2Dg6RBlBrCqsLxTOMpnfsqjItOwY1lBOhuQg/9"
"reZJoEqKaYfx8RimUzTYlYS6xa7wuPd3gpi7PK9K+tNxFh4KjgTq9c+cNNkL+77ZKwECq"
"5HshbZ5f1WgQNOP4xvXtb1kLG5MKy2fyUnKBFpmL+n6lw1/rKFkIXCndcej8zmP/eOv8i"
"iTCQZur7ns3KJ6C0oIjqw8xJL9KD3HdF68PSMRDmzbftSVoI1woLoYIKd3JVEjSzjFLKq"
"oKcxa2JBFLNeDKZHza/Km5fOo7j6GFZMSyREkQucFQb13cKMV/so3GgrvY+CRIph7smK/"
"jy/T0wWUiA+hR/5927R4UoXy4kUPsDW3fOwkCYPtvR7jyaaUvQqVhdSRbACQthmVZmDWa"
"BL6gRQVC9SOaHzY920HuktsB9qv60rBhWuLA/9dp7t+BYJUNyKrcE13WOKGHdr3APHx+I"
"kuOG4axx3clt8tuWpTDETlSv4+MydbDrFbsN04a7gCkk86j6j7ofQSvslPyn4oZEX3STC"
"QrbIa2zcDDurqqDip9UrwuXGcOSCzthZDYbprZKpIqdKbzUC6uVPAf73DxEyB3QbW+nhn"
"W4nBtr1TKbp0oQFO81h3IyoR183p5UOX022UsYQyhYTVXyEUxPkNZ13e8eHf2QcJNpdkv"
"hsloIz5UrhPje5GOZjDCpQJOqnggsiBCAMdXtXESIhMmBAVsBCE2E6Gjac0rv/vGyluh7"
"p24QVIGXwPL84hZPKrCqYgXRQzV3j6QPqg1ESjdBLHNfN/e3MnICDg6dtu1t4TUync7cV"
"zfDt8uKYe0ZPyOm1DDMXZCwQJPKxTsLmuBxkETIQNa5GwgNugffL4S6B6/ZdQ9zj7Mx3/"
"ZpfQLzLw9+dFOgueWkfdDwKNYIEGklCVoWMYT4XCWQuFyHtM4cBzKZXy53vcou1ELKvEt"
"GLTArgkY/JF0AcGa3H5devur5XwkixHBIhKK2HAjyQQoic3IcUcxz+8jIxxCYy5Z/8DL5"
"detv1Rjo6v+YwGXg2lv9oNTBzWHWJFTdazw3ULSmw2iVFsJpYCitI9kMTI2+CILu75e6u"
"+kLmuzNsmFYiLkTYv6L15/NYr1vRUoZToWSizwkwtBCOJtmZIgO0oNs9v1LQvcQevDPvr"
"b1Ta0YmCzdfDQLXSE2CNj+1UuLLMYFXjq3cabSUctjYaDLZH6HDiFZaRNbCpcNwzpQjrk"
"bGX0RC9zbJkNy1GRYVFwtTIR0+guCTMYwPXvk0UpJs3VdZRg4Uc6Xj7q6OwPNBpEoqjrA"
"cHhQncc4U9FgyZwYUA+V3eYg+GobbgL15ZXcyCsZ0LJJ4BdW7zV1o9s3jTSq+JanuxI01"
"O8aUIuQ+5g5cgGqwU+6i3xGVmHK2c9bloPTH8fR6MYFTH8kwuF4Y0+wDCEZlpiXRgM3x/"
"MpUaRxJAzpZo5LFvmK0jockf3S9tu3+yitv97MyfyWjYQVzlrJm0BZL+wniobkEE7aL6c"
"zR86zUrCqAhEGqRdFbiwssqYW5cP5afRrWNZraOh0B3z1NruOTVpRch2Qi1LPmQDB1M5R"
"5ZEwmYTa1NR2Ev/NHFCv5ETVQtQnTvSLXdMwdYS0YNcsb0q19BXnPSERUukuiHBeMYsOp"
"CK2aOf1659EKja4xZZr6MUJ33LvOyzrda30wgYvKO4QgriiZb1CXSdrEXJfm5dUFp407H"
"26y2R+jjN0YOFL1f91WTAsLHw9n5divufZ+1m9V8VGgiMRMi5sMSLEmMxybqwdicRxoXg"
"/Uq6hp+LYmgWm0A/JcUu7DMtLgVZgRVMTelIxJXE6ji6lcWNkMj8wrlC9wJ2wKduyYFih"
"EvGlwV/tQmj6NiGZIJpQxRkJiZBm6kUaKvj6AUT5jOY5e8W1vQcXv2uRTh/2n6fLejmD+"
"9KwJ4MduNzwVMQLGQ0V7mRYhLT2xoB6HDnKtQLAoOk0q+SYFxvjsmBYYeT95NjExsC3Nw"
"kLYVBxNtnFcBTt76A8OuFzV1+MCOFD42aojwvGhSi/XPJyR4vQ6nr7SLmsV6Cb+zwPhhl"
"l5SvJqFg4VbTFiGVBNOioosNkfvq2mzf/+BF56dGmZFjLwkrYJSqC9Gm2XjiQyiT0QqHk"
"Y3KUY8akOTKq+4hwAbLhzs+ME6xeTSJbLnm55YKp/19KFWjiOGS7w/utBGZkAfzXH8J7T"
"ySt1B5DeK8f+Y76UL4rbNSNyxvx5mazWgqVW9QSwdX9PVOuCOIFzu5EkvK9oewZnUQ4Xw"
"zhHKPmsRBSgL2s8nLPMc46fSWlipHga4weQFkvrGJwsDo9vKrHUA1LPWeFZb0W65tjRCZ"
"b1hefKkvrzRlQvywY1uDzsiJI4HvIzc29Sc3zOXUSJEIGsvJ9BU340MBDY+edO78kFO+a"
"dnhZzFkFY4/hEpnN48LlL2/w/MmNTCFMn4YYHrSkLgkQyaOqsl6LPtHwUyy04U+KWgEIq"
"F/0DhUvaPojYVnMFy4NQeDuYKoQNCWJkGKfIEJARygrABJeylDG6f5K6Ft24JarzexDA/"
"gb2kLcBcbEvkRKt4pFbhuKbm7AFFUH3J0ERS8Rc1wnMr7WFSmLnn9eptdZYrd1v73pGVa"
"flDi8E+d+YUvBvrbDgVcvJlhJOZ+EJ4iwzLAqmG24YOteW4dhToyNktC+WME9rUvmwUBY"
"1muieGt3IuNqxaJQHai5BkAsNM5QhRBR2XIwLIZMFneAeRk4G8KhQ+j0ouCH82A8+q+b/"
"ngxHZITWEgV4q3iLqKoWkJslZVaCGdMNagKNGX4QveAEB0hTc74vfW2MgxMl/WC3fUxWW"
"S0shvrfRU5iDTOROn9LEN0wKe2F+7+ziY5puZTLzQ9wwqJqejefjzbZiK1jOZTLAm/V+V"
"1JhFWC5OwFPrFx3gfCLnp83JXO/4oroc0wTM4p0Gz/THk++cBXTkyEUMlkPTTmz9fmris"
"qj9kgEJnp9krC1phC28Oj8hVddTgi5ueYQ0N9QsixEruRuofkiDV7spRItdLLUQIaVEvl"
"bC49KD7xo28KIgZOso2mHaa7PFHBU1cHPr/NuBcVC7rZShJ/6SVmWW9okA0j4Ho12vDpu"
"6618XmF0W/9e5DzfN7hVjABPD0x60SXKr4pLAQkn0px66k4rQWIiShOQ69lJ1NpjW6FaO"
"71aw+NJynRrU+7YCgijtTN1HWa6KLCVdANcpRCgEiCTN8SyR4xPvogNQDw3A1sOld6JYZ"
"QJpOvaDkDkNkVtLCYGAGB0Os2sIKIRR9VWshEdZopmbuE49FKQJ3XOyMA+Xc9aqNU2V4u"
"gdeELTuasMH020mcBooeyakhBVNSM79M4LdHRlAwBB9mcwPa6Xp1AtNzbDCyPvzBQYHOy"
"I3N+Y6DGa4f7Ya/GkpRIgQHd+yIE4avgheDUtUNXhITfX4EGcle2ynYZbgLtAkZb0ixDJ"
"o0HCpyAqCrUFwvF12nVdwi59/0E3NsMLIe983t5uWlsaRUOHIMCZiK4v3lPmraJAGhA8N"
"jr0iRCcnS1Q1FaFVMdxYLj10QmbzCFBUVAQCR3nSihBikoaIhgiT9kXYNzgVivTCKgXXh"
"tu3P1e2FDZXzcumZlhh5L3j3nk0nWV6Bl3JyHsSIY+qDMkRrXpWo7siN5bfHQTH2tEXeF"
"hz7YyRrrsqOyPD1/I8/kAf6E5uVbqsF0hEGGdiOCeQBrmpIwNICu+EHksTcbhVIrSBlzc"
"1w7pQjrzX9cRuqiQg8lbPCuqAfLIpUdYLRFhmWVU+FT40UvG+9c6dr5RDdJprZ6xywJFe"
"Hpb1Onf34xsR5gTVAZ2LuXzVa6SPmcaZKIHk+mAGECbz0/y75ZqXn4zyEbEjtGmthBL50"
"kKIuLBHTYbkKIh6gkQ/i+nacjVxLHEkDDJpI1m0Jw+iy9PN6EMTOzXP84CwrNd4YWKLF0"
"ytYDy5YFc1zcU8D4nga9IKQVpKWa/FwOC6EZu7rglH5L6+gaayFDaxhCWPRG8M/8lKKK4"
"2O7AQgmEpyLJmE2ENQGJkupdKk9pKYmdsFaVYbGnO/r1k3zhI52IsWmXLegG2WCyEITbI"
"qEVMYTAhkkIePtxctQKalmH19/cL2K/dvrAJSsQtzMWmajK2SIgQhCyUxYH9DWXigzZCT"
"QYdLg5VXj9Wdi52PXtXoJe4rykmW0lMCaDAUBapqLREtKKsKvWhgb59bOzZtbKzfA176B"
"LBqPH2pj0STo/XKO1Noh5IqWRjvmlfUavdI8KlnVi5M7osDotUM0FwMqnrB22hTG7xrAU"
"nnEwduBPOxZ42+Tid2siwxLwseGdjfuQuzAD5uODj8OmIjGR+61335ga8uV3e/JviaNi0"
"ElYYeW+7o3uTOCpxJfMP/inXJBEuNZBVh0kayuLA3XXr1p9u4SD7+poveLXekxM6F4O5w"
"7nY3uKoXtYLFFxFvrQa0BkW6UXRL2+M+tCmas3KsKYj78GiDjI4OLYtaYnTya2cFsKlEi"
"GlKegegkRCQ/VeW5ike1tFKRadndC5+NUrz23wvKJwLqaIteiNdb6AAFHEScL1RWQZxfu"
"YgES3updMekhbM7WTw2wmfWhTMiwsXs4mxSrN8ad2C4YV1/TyIUtokgjLqW7RzxKIEEcb"
"3W1rQwJobVyUr28VpVh8YkLnYjdwujXDzjJJAw6ES5iGxZ9Z6xUkaGZoqKCiUq2PmL6P+"
"lCk3hZFevFl0+hDm5JhoaSoILiXLv/BRrzZbotsBuoVnSCQkghRZDCCJUL9A6oVo09XxB"
"S2ilJMr79533R0XBeYn3Ku78uA2YNdRZQPb95H1vwD9+E4YghnAwSlHpL5aUFpOzZ/JHo"
"nmZJS1W9NybDCsl4FZ3iT7xfXeczNrWiqW9IBiZBK86WSBPugSRq6GBFTiM/03hYLUn1S"
"awyEFy48J5TJwNJez6eFUF2S5/ySVuJvZX2o5u0aHv5IOUTnaFPQkbqzt8CsdZezFTje8"
"KOZtgQZgbIKd54BBcPieJbIsbADo74c9XV+99DQf6aFB62leJd4mP2XEin8jATDctyRA2"
"VVwuwLFfiGpEFeRafRJZJJBaMJUCuAl9krHafYVJETTcmwwsh7Tysh8h75fZA/uIJZqvs"
"lIRGmQyJc4h5G+7zj+Mja4K01DHs7B9TyeF9oWvPEOPjUUIev+VsdFucjEhVsPA4y/xUj"
"Ivg+5kYcuB0dJnJj3Son82uOsl9NybByMlsBJtZ+glVy8J/SRJiIiAix8pA1Uvc62g3Nd"
"+4KxXvMhN3k3ct4y1euPr/eCwqbheqAbliKNQLEHZdJ+1gGjvwqbiARUyiS+SGH4U6io7"
"//ufjZJB+0xNZ0DIs6G+yReAksz3cQec/TYOzzWzWaQyKssqxXJc8RRSnApAXDasaskZU"
"MMoprQulT1yf3mpaf8qhuV3VzA2Shwj2KsS/WB9cRl44fTAhLYVkIiJtPLgbWor83HcMq"
"l/XS/uXib+Hs7e3gEUmqtBcda90v4PKg13IUFsIQeHBrZI2kpbAgGBaYd9OYpMMx1Ov1h"
"NYvHjVRGtyVSjMbrU7NjZqLEsQi8qUBujqJOtBjuUCGv/nKlS9npBCQVxM3Mwim6RjWvb"
"Je3lYEGXQ0jYUwOiqUinct2DU29uE1ci7VJ7QZNFe/t+WkfXjgo5DGVRWu5BEQrCLeGMI"
"H0c6YQkpY9o5U6jOb5K/qpyxq2ljCKe/2wUzW1MbHNQ9zDTlGrUb+RPUuFe5RblsQ5cGw"
"OFZnk+eN0sJzp1WUgvi4v/HIgyaMMSV3eDeOhJgPFo6Jbue4/4m1fyJEYdK++kFHS6GvJ"
"RJBWvPcboBwLjxC1z6S+O9sOgnrxHSlj2A3JCwyg/rNcRXzMYsIo+NacB4NvGzWNG37ti"
"g73ipKMdfEHBUYHxz/QhfkCJT1EhZCJemdVkEq20VITv2oGU9EVXEEevnBLZEbay4sqvZ"
"dU0lYmFhsmtKvxvUnH8OmCYal6K5JIsTyiIMIWZQCO6OpT/kip1FPT3NYeOpJ/KFz8a3h"
"M4+43vh6yFew1PjKMSxyVfIoYSEEdHwvOG19kMW9D4ZCK8yxJiTS+jy6tqcoN4ELDeOol"
"hdzefbOpzqhed7q4hCOCVZuDCER0kIYRyArjzsyN1YhzI0lTKUL4e5h+y10Lna10YOpjA"
"mLMmKaxCFdPUxQwgothGRY9WvY/RlT6I8JCYvCAIWC+j2/+icpt9gXGsKBPqkUHBn9+gY"
"vKG6n0hB7kpJjIBHSQsgMXTEQIRSmtBRqO65cORZaeJTEw0LzGedvA9pzovvJ0lC3mbCh"
"IzLB1GOYiQgGQaiiioaoBhwq+WwbhlNd39YsyfyaisjDyHsnsHdbiSAZYD9QdTsgEWbAs"
"AR8ka8TEhqld2d7KvVvVLyjqW/hkXDW5+/g8zJpn6b7KOuFjMh1PWlVPkaSBjc1Wgjr3S"
"Cplw04hU2FwuBG+Xy16aipdFhhWa+SN7Qvjcoftm3QFTBR74le7HkhEU6X9Vrshqp/p4U"
"n0NJpI+16U9RjnW0GC0/Vw6zxBh6Z0YQ+xvbGtplgWNw46s8SFh8AYaKFUORL4+Vih1v8"
"voiuYOpVP5NBxl57jAH1XysbcJRVMTQVw/pIWNZLM/Z7PkRZnrfrO8EV0UlIhMxtJBZJ9"
"DDqhqG7mUxgjY2IKjrPR5mELZ/PG0x819uLkA3UrTszIFO07Bk/EwwNdQWneg+iKmKeQx"
"PDqwgpdbyor6+PJwfvjVt/v/7C3b/dxWNPoCONIs/pijWpOijrOhsAHgw4XjrlGaViYbd"
"EjTxKK4amaXCahmGVd03B+R1/bG+CysLoGcE0YpbyhkS4tLJeiz8d+EDSBnAM3RXVoHGH"
"kCgWv3P2FcRtGEHQq/X5up6f0Vff7Bs0fIfN4nh/ziIDO3y4T6kdubdXwjw6NbjBCSZXB"
"/TUU5BaCBV5FKUrBj7zfb1JmnMPgwQe7AoXmZ6e8lEasKjYmoZhlZP2BVfHXlhz6vontj"
"kOlYU4/Su2a4ZEKGIIYyVCXXNFEjZ3TxAct3T9EKpei6MQ6b6ihurRRr/Wb+D4BGTeI9S"
"Tt4613x15aZ1hJtbY7s1NU/btjkQiXUyYnUP4N2gkskNv0fN3D2n9vE87hvyEWl+vpgrj"
"Co/HRf3Wo/BXM8Yn6M8QKFkqh+RLXSf1WOQb9W50bvZYqDEo7OCzpaWwOjqqJ8xNw7DCy"
"h6DIy9v8IOpDQGXiqImWNJdaKaOjwhlEjbsjDuHh/+BCtPLIVNfjIDyYFRPo0waJCliUU"
"hTJ87//GOOM/W2QHO+5dqdvz/o+VNb4JebSqQMK4s0JAHqqBVLt7WifbuoF81bnz39Qy9"
"ZZvsLmczev32z/jNXURJDO348Z+Fo6oHoG7D07o16qFzWy7Ynd2hpkbSP06AcrQskgWHQ"
"moyXBklYugj18gOve3LyIxvb2n75ejnHmlJSczi7yk1iCNh8r65bOJBKmWahIORYzrOSL"
"bQQkijjAVIq3k0zWOl5hZ14zOX+fuGTNuM49wBqeIzT8uahMqP68pXfWV2cev3drjf5Hw"
"ulq2/OthsWrGowZkDnwzgzoLhUwjqydeGfAz5kQHeWRpjL1mTK36rrpe8dHfuXD33+9fd"
"/fHX28d98csv7r3EToeR2/7HyATji/IjnHy4zYdQgfJLnZtXLetFC2CgOTwmLBhxYnFcX"
"Cs5mvLkeSqhxTlOtfePQ0hwtLOuFtMi7rCQFA4M7QDy8YAko4dQTqXXIHMmxu+3IjRV4Y"
"+VUM5+cFx/HjvViH9c0MqsbN/687XOvP/Mr4+MDL5uZkY8mssV/B0nNmhi3vckxz7GLyD"
"zik0npcBtBlzIlPYbFTBHgZY7mTY17zvhY0dPMqZWJtrsfuDnxxZePv/5zP4rLcZ7I+8c"
"C+Tw+s55NLH0qCpB+yPFQmh6SIXjYvHipJ2wPPovHwdBC+OBv9fpMNQLg8JnMz/cHlc+x"
"1hwMa8auaWge/Gp45q7XlFb3nGkiFOyhunurv5rrkEvUL2eNHJhTujoe5C2hX8JC/vxrz"
"7znlbuf+mqybew3jcTU1snxkluY9FwfDAqdca+Hm0jAeu6kDep9Zi52vic7ENeBmZmua/"
"jjI7ajm1NdZvbmH79w+sf+MgjeSB/W+1CUrBFM66iA99TgX+CY7O62UcsRECtJ5yTh6ZC"
"cxtEz8KX7BvZ/IGkXQNLCIzXfq9aUnMgHkTRj10TJk8ntPpXNam6ago/S6hPGEM5c7Q+O"
"a6mfiQLJvKeLUoBJ3MdghF6JUtXXLn181WdP/8jHjczox62Us3N8pIjIJhaf1akWwL/aI"
"gbAvQw8MeE6hjc+VvAynWPvfeH1D30aOzeqU/d5oWS31LFWfv8BgXI7KG72vKkVLOsFBh"
"vnNFQO2owrCRB3CLq+sG4l+VWjgAQdUSIFLU0KizPy4M+ioxmgN/RtUzCssKzX1679X5u"
"A1p22LU6DysE+TYSQP0IijHl2dVaDZvn6wcFfRVYCVoPuncYLleCHDvW7Xzn/oaduTH76"
"5XT7+HsmJ0u+XYQTmw5GBUkpOvgQjA2z7cjdkp3pHM999vQPC98CSnY8dkT3nIV7Ghh4Q"
"Yy/YF9/lMYCcALqD+r2/IWhu/9XSuNpbBXceBonYAl+btDqjrW1LQhehEs2VtzRvJI4my"
"bu+1Gp2qfyrmmPb/T8whrlk/YBq/UgQvAB4fGO8vXrLauwhbNGZ0826pDIrE6c/YXvHrF"
"PfdFKT20bH3VsiEMkREpVcTQ4muiJsWHHyXQWv/szr73vKB8S+njF8cAH+3x+XGaugGNx"
"d6AV8XOEPPnBhy3xMydCZBllP43kWJCSbZsAFHeMjv6NqMYUVswmaCq1pmBY4a5Z0u4+l"
"m2HYCB9hpTcATi59QtkZdYGzW9vt6A4HRV6LFp4jgc5izqkz51+f68dXPtb3SikiwWNoe"
"JJ4C5WvMn+dRN6LWjfx3/9M6d/5K2EhdJeHQhfz+ekP5nrj0HXiSfGOtraR0T2EMYQ8n2"
"j4US9y8A0tSy0LTsJTm/vQSUx1xQMKyzr5XqFbniNQDlokhSVayERMoawTkRIovItPA9e"
"hyKn0ebNGbgt9Lv9Z37x7Y5/5xhS4Gquw2MRlen1agHWou6kMtSxTf5XPpXSXtxHQ/iX0"
"aiJxwQp37NRJYc8WkiU9Rp4xc/hcZBqA+o6GytdCZABie61t0MScIZEkV5Nm9/iXPEgY7"
"iwKRjWofKuiSrPT7LyMf5TkvsLIgRGqXSvFxHCS51JK7BKp0T1k927P136yvkP7yl51/v"
"MhKP5rkjLWg/p5j7yBESJqQnPS2f9t3zutR/7Yf7Yrx2K9Xx2QJOqg1du/OkGXyuhQEmd"
"to37Rr74B7HL4LIkNhoyLDUEQXJ6ZLXQ9TLDmtvivPjo4r1CeYYldmW5ayJ8sATLD3Woj"
"RagZ0/KNBHCEUBYCOsEJPAD/YMgtF03bnywjZCN2Wf+LN3mrHBsDZqJekpWs/ACZurhjG"
"b/FH+h5Cfmc9Zl0XzR1f8xuZG5xe2BbrexZiV2DjU3N0BG/RX9sFRpMpnfuGBYkEuVtBQ"
"qz7D6NBF5r71y449gIXS6HZEHSlExn0QIGYJEKPb2+lCiVJgGpe2PPPJI22dP/+yvZVcU"
"3zwJx048PlkfEOZ9ilmYRGVuy+75l3NHD/Gq/v54pSw+Y7x4eX+mDbscjjn4qBBLIHTlB"
"gIRus760kr49NmvENWF9R3J/EZGPr5KXoAjtmJNeYYV7pqBU9qKGJF24Vej6K5JLtUIIu"
"RR1IDf56XBwm/Y3t1fmhwvgMz0WI9fFdIxloEBXRbCfYKJd/KeMGKhwvuruiwsUIL4IQS"
"EI4ZQzXhnsZlxy6WuUxmOAFBQphDHwuIm172wXiJevWR+dddtVEWBMy6edG48mmkztLEx"
"tct6hYGsM0CP+y1iaEQ4kDXinftRX5tE1SY6qNfmCBo1sNCCI50zmYfzFh5fwcGEXxZ1b"
"1E+C0wbXcpAbscbPsjSJCqX9aKkwM0tUiQsCaGsBO0HaUSKejLH+2sqxhQqL2GFuyaUIb"
"sgYWFHipbQlzTHD9wsiRBaI35fJ0qkdMUj6ISra9fGRnxEhIml+gBoDfsI8Awa6zxvbPu"
"ZwedWS0CORi5YhAVK7gRnO6GL2WbbiCFUNSQHSKGeU+g660QnFRAApWEvnfY1zx7bxeuj"
"TApZwfMrukRpCWvmrun6o48bdOoGw1Jnju/hmFLOfUQY+ZK896yZ70Jt3p0itKS+aSRQq"
"Ucl/BAaHadT27+rnb/xLwIrR2cOIKL3YYGSKxf+cb0XTG4VGjwF9VdEAOdHxBDiA98LpE"
"SEh6V0AwlYhHpB1pqZFDIEeSldR3av0hJWuGsGwekOVGna4sDsBbOrcjCHMyrKetWRCEN"
"iL0G1POzokLTUYlb3USlOg15bMTZe2lV2dHRNZ7dh+WlIWUxDoAovuB8VwEKMFZXue1Z1"
"H1j2i9WYit28D5sheFhsU1YdaOWrlVv8M0cR7ppfu/4PG32tuE34AcosAjMvU+I95zUkw"
"roBhGfSW3oczIohhXyvWiP/Rg4tLILgjtYxMUX4jmj5yFfBmQHp6Fj0bu2mkl8PDDo1KI"
"gRKVVRfyXmK3JMLIUCmBSSBTv8nbdvf2iT7Cn64/tSIFSaYYW7JhwAd5mml6KHZHgEWsq"
"g47iXdBcSYb1okLjgUXTUjmNEEfUJTmUhYTkW56V3bvykYFhxcJEXygVKQCIHfR9KM0UJ"
"hbRBRpXG0V20OJBR89QxKSRvLq1JaD6z2KKpZSlUmmF1lHfNgju4L5UVUw3HQ/V2zVlEK"
"Gc61r98JmmdySKnUGSBE8nvlGsQrgzpHTlG2JiNFIBHCiqOLXofUqKwf8cd2a3aMYZwzW"
"xEB6XxSJEw8wG1v6eCGCE6ll7yb4rY1HLZr9p7jPhOpRlWWNYLiqv9zBxJBVbE44+sO+6"
"a9SZCYoP6K5FhRlHMyOA+Cjz6dSK7L5Yd+6gY/XDw8koIdDscWAj5wMgmN8KOhHEGkClm"
"IZwxQh1B0BSz/J38sqdHZr+YcUFD3yprJeSuiSZ2TThD7jMYeq8kCcpjWb2S9s2kFqKDD"
"IvKGinEzPxVjfdgIHCJglhspF8lRF3IJhE1ZCETPH/xX2EhnFgfwMUD4kvkz1kq3ASIUh"
"VppVFlvSoZg8hy4U+K2FRcL9ZgJffV4xqFJSy5a44GX14d6MZWWdZLvV0zJMKZZb3qMXH"
"hM4o4DnIRKLc6JUwCNBHT57t3Qpijfp1mgubIvmTKSMLSBfOguhbCtOUw26GqDXosxKZq"
"/pYgOImssVrAwrqqAKsMILMRIpV9Z658FUn7JjaImGcFd03CzVUpFO6zBxHbN6R3Hi+KD"
"PVXtGG6oL/SjFJBKyYSm84SzKFcF9EVaQvDfSbtOzutJI6DKFCChyjHEiBtCqg603txeE"
"hB8KTwohqYjCnkFDndIyN9QvF+5Ig6indlGVYYFuAH4/uxa1rCr0bRXZN0WG8LIVe8YFi"
"gedVInrCJBhWuCeUe0DPRlknfEN/JoszlC6J5OXVU7BkwPASPq1qghA7POuI9TT0RJMwd"
"x7HNFUwhZtEhRqXGEJ1AMwy/zXX9HYQsXIsqQKkswwp3zZJ9G2W9HCxKVXdNaaaeTtpXh"
"1klhZNJMd2T7StsIQR8JrLUmUb2wjdu+vAwUdPbeyxSmZC6znxexhCWvLEdvoiOV4+FU7"
"pCUDZcGkw30BOfwBY3nEgQTuVCzcqWQmw07o0DnDOVmqoMa7oYJs7QjwkiVGwfCieR+6O"
"wEBKTdYSR2jwH0pVDW4R663MaPSa0y0gxeAVnIB+MhcHPEWPpqBj9ufH/sQ4+E7sYQwiE"
"KEjXGDckrITRMbGq7al/Qm2MG8kkbV6qSVhi6jBHvmaY5l5+QkxhpJuMeEKNfxScWE4h8"
"AUGz93T9ka6Vd01iXOuvmkLYY2TUMttXKV0ZyAlqcqv5CyiBpi15gzHeOBI9KD2lbOMjo"
"3e2uD6413IsIonKalwDxIJFNbWjRtrO3peC7QV1xgBQGD5R6UGGANWzPX88YOECxuiiKh"
"XAUYlGVZY1uvMVSj9Am2nXYJfiA55WrEmlgZgYiBrI8zUwkKoHLnfmyShvYJmCQvgNL8d"
"HuiJfA67y2W9iqVbB9NZy4JeT0VNNodPAwTSIq+4yA9AxGkTVR/QlJtByAtGSZTS07aPj"
"HxMqWR+kRMQZ2DpTebmngqGNrrBBMp6kTVAWaNgozBYb4W7wAaei0Lx0Yss0eEYEPpmcd"
"LUOpIbL7LbPePvjHxxhgVKnGBih2Ei/ZCiSfs0xJVZVprs+xRx4RtrztsousZ3/KxWC1C"
"nEFo3FqMt3BJlv1QJ0VEQWZo2XdbLu/toJosSVhrtv+pqasiwBDeNfDnOT8a029OlQV2s"
"wGOaYqemD2czm85xJLmcVI7PP6rqfzlxQvYZoEAJzjHKIgR8FHnBdC1prXuNo9SDxKmiT"
"F5BMauOlFMRjkX5OCTzSwbGuKjGpGlqlP1SkmFN75reVLduFrErWaBE1eZUQkQ1BANZBX"
"R1kAH5HE4ale2OyhZCoMeETtkyO6/sXfvuu3KZHIl0EmdaCF1/aquHonp1mIKKVvzMi0T"
"SycA3XdtCBVtTMCzL6r7sealbCRRfo0Fi5vVqvNd9JvPz9YndhGdgIK8EapVkWIfKZb08"
"v/QkvaTxnxLIepCQuProSpOs9x6JZ7IWh+IWQj+ZSlACPIcFWYzDQtjXd1jQ78lbf4Ic5"
"P5OBwhBYhnlaJoOWBZdGILkzbXtj14iHa1a9QPXQNjXaCmkdelB2lLhs8iNFXiPE5aeHj"
"VCdNSbXFgGsU1yDi3Xn9ziChd39Q4+ID8Aeb+FkN/Vo/E51F8pSeUhAgKIf4GJgPAucRw"
"8cCAG4adXPsx1Jze6fmEVNzf1KAUwQn9FCyGcRq9vXvO2QUINJu4ZRscp6Twqx6HSX+JR"
"OuEWuiW8tBQ2XnBQjmGFZb1ODf4FwgLcbu6aaPXiBVXRDBkGHUYpZdWTefBZJcYQ4o2Si"
"AF80NmA5qncM2TQc1cuclDDGMKJycGD2TaT8fHkkpE/pyqimOdigw60esdJHv+Ow6jMy+"
"A4cNYw8DZq17R5YKjua4bocO0F3Tdv/sYjvDeUaKvrJ9qrlWNYYVkvxyls9bRCh6KOy2I"
"WyDBYDJN6rHoxLK5GhuQUlDZDgFvRQjhlae1paSHM5X4ychQNDfWLPl3EvWmGKFCijL/Q"
"/csUITlGGv9MIW1uPvcdVCJoprn264UChoAIJnyMHD/3w1DdJ9A2gqAJkr02mfRETGFvW"
"aKtrqdor1aOYYXDm/KvcdekFIHQcTVKVoWw8VVQF7jHtIVw5o8xveczybBoIRQ5sGJ6zp"
"K7JbeysAYDYzSRWC0WaV9f5EGE+uFeaSH0g8knOCFCub1k4CPvgNNm2piwhNn1dfZ+1Xq"
"E08gj14WSbfhwIGXjdQq1QFgK2yC52vbNRwmYCjGFyjGssKyX73ko61VUlQgFYXFbrGcM"
"IR9K3UJoISTVK0blBFEAZVo49+grLu9f+y6RVuZU78FIQRV6aqnrTHi+A10nszQoqMEiI"
"4LA6TspL6mZbxA966a2Cz1HMvmWS7pmXU4mBf+KFD98zhIbgIKxwPJglQ52si8Vyn4pxb"
"AohuaxOxM5CLN4nEcuRXdNoT8ygb0U/hHOurTyc6haoLSu4PKUaAA3SSSSDE44D8kBpY4"
"4r/mIsXRUrPJXr39sva+Vdtsl4VqiFD2XaSJIMHYrMK9uW/HON/jdyZMHcKDXtJUrv2tY"
"hyLeojRaPyrioytqoC9E6CAzljYVJvNr+JFbqQk+ehTsCi0IbrV7gbPFsQWtKwVjONPkq"
"knD9S2DGqX6NSIoDMkRyKrfoyt+EhXuOA5qKWvN67zpmHQdixRP/f3CHU0ruvZWlKXvFE"
"kCFVS40+vZws5mmdnrq1btGCE+ent7ufDJpcDL01+3oJDH9hMpftj30psukvkhbcO2IDi"
"bIgMjxI1sSjGDAwdkorBXrv6PDX5Q3MHwACBIKRg5WUzo4geFYHX7k0Y6uQ2l4qewQusA"
"JuiaGFHepQF7sqGnNCMwRRhK98AzkSOno+O64NcF+/r+TJtILOWKqeEEKdS4wq0Eshhqq"
"a8RrGM0KuOYOFDGiaGZ5+Fei01avdAzMFWU/SLF2TtHRj4uQnQabSmMnJCWQitdXdL9H0"
"LoLt1w0oi0oG1cwcZE/ZOoZ7wLZunNI3wPIYIzG2sjKijPkWHJQP9YH1dj50K8MqcmDb+"
"zbZtQuI+Pb4wcN8+Py+IIEGCg67RV3NcE/pBVFCSc0pKJzAV+ETLvECeGtuJrhQLldfUs"
"hYAJyfzgiqsHHVBCbCf84Rrl+0Y0pRhWuGtO2df2prNAF/AFLq8cy/KhYetoWw9v86ljh"
"ua92ta2BnOHRNgxNq54IsIFbTNpH99HzgWigR8hOVAl66nhtN4pFmkuJ/WS0XQve8mXoy"
"Fcf1SUo1KPSiSc4FbCQgjG9FV+EzKqXO6kmD6kZL1cKpkl5CJR0FJIrOpuR4euwVIokvn"
"lcrJgrRxd/f8qxbA+cuE5bjVUP+5HWA6wpRR407NDBY3vwXPZ2vBFzci8ZllJQj39e1xv"
"KKirA7YAAEAASURBVG1KCyHIiBxLwQawEIbCqjDt57d3fedtCeKRSJEjiiJgEtCS+LfFd"
"XgaVJBl6QbEE1/3nHSpPb36CoEcKjMqBBMLnKxZ872XYJy4lGKNOOi4eY1ajYSGGE1dJv"
"NDVGFDYVSGI4Dw7hXD9Ib34bOKJIjJo5nah5k6g2wJyZPQ05zW9TZ8z3mMn4vQ/0pYCPE"
"0FRuOaEEymYGF0HwDIoObhx6EOpsoYT1QLoowcOH310OXuIMOjpiU+JFf5SCQaxjxlPBk"
"D7QrB9a/4yJvP1VmVJoGzKDp+lNTWpC+YkIxT5JXrnEHAs0FiiTzU4ZhhUn7hoM3VgI9W"
"xUuhulLM7V1+c2bv/OqZ7S/ViySzKiDiJfkuCJVLuslF5uPdapraWutCMnBOSJyRhI6XP"
"tGaQc8nNo9MCwIXJE/R46n9r9ko2REycSKS7q+cYo9HdGktAn2Ghw7Ji2Fmm5+TVoKa39"
"WbHcCtSyxh01n29DQ30KXhUFp+YbhWiGGJS2EVwb71yPoeZNLKZ90qFqD6Gchb4pltl8l"
"aAhgvVQspqGD4IKJVpKYOXQiIlS4z/xepfeUpGCvhz0gQ1OYSIvc1Z+LfA5Dj+sJ5+reL"
"IRbkAn1h5E/Z8m4xf5lWSJsUDBvMqiZ0mZXOb7SNLPnXFccCaHJIkNQqelgWJSyiqhT+G"
"/IisHWuLJfyjCskAg9d2KflQwSwmNNwV2Ty4OJ6Qwj8TKnrlDIXcb2czlF0V/sPnyJp/H"
"QqXKWUTAO6JADozChuR2ZXYJhDcUQQxhGQxi6tZeRW9gmFFvkPOpB3EbSPt+1IG2uEdbS"
"kEGF1JHLyXe+n361iGyMuAfnwvBXVV4D+GL5WiplpCy9JIpShGu1ERAqw7DCsl6T9o1dy"
"RTFK1PJXVM6RaYQyNomiHDdunUTutZx3hLZNeMhN9IwxQf1LYSI4gUeLDNzN5veKJTMCP"
"GPlq7BFfNlKcTxRg5Q18miCdE+ZOm9UTmAOROphv3AFBLW7F5lUdlkcu0Vz0uOSGEsPil"
"99vMr+gY2Ad3NZLg1jItkfogqrOjGOC5ShmEdllprrHj/cc8Hw1KOBAX6ee4zSxBzDL1N"
"mKnFt7r/KkNRAHRsUFOlHFoIVfXBgvIKTpJwudBTZzd3fpOIIYy6DmFeRkMEt4Lj7WAJ2"
"xycV4B0Zej43iJl1WuIfl5mdG12h2Des6VNWaNxxYoP4vfgagIOpnHS0D3YqntHqubGEP"
"iOSOYXpqWurpdorlZiooEMOhaJxY5dU+GyXpIIdT87viaz6XI4BYax6lTcBQUoYZUgc8b"
"q7BUOqMZXnswsK4Xae22X2EU+DgthORri7Ll/e8TzJ7e5IuZZCKA1Qh3bbYghpJpAv75r"
"3fcLfPRqvTzVTzfqs8AH8KK7ht5+rqx4j23Tm35w1W8Q4Is8T4Ff2MVb83moUwF31d1Ec"
"IMSDCu0EL5x6+/XAxEo68VimOqV9eI+QyIElV0BEV4GZZUnTYelULwVW2QE8zJnF8rHEA"
"If3HsSescrHMDT/dFLPqGntWUVdiFbZ1ZYCPlQ9RqiITQw7xVMEU0dBxnTLGY0MKAJ5ac"
"XeKdlELTcuNUajo4QHfBaXdsZJvNrlKVQCYYVFsMcnRrc4Abj6zxk08QJUVEiROUTcwV9"
"jJw+SBAkrFTqTRehg7iZxKkQ39+3i0ZBeAIbIHVRJSeKDmPog4sRWg5d87JaKtF+no/o6"
"Hgm8jk80yE9rafs27vTWW7zuppuadjVEokU0SBqMj77omRM/GJm6+mBHIpmWStfK5WILv"
"ViZwFTOZlfaV0y6TS07JcSDCsshunpwwfTGdiZqN/D0p85sUq8x5nHohu3rkkl6qmDlKi"
"0trbvvAlwrzFvN+h01i4aBeyhS4OCWBHDw6BhIcROXAym2qxNYpFe6Pn2yJn3qgvS0xoy"
"1QFf6bJevuHYupZNyLJee8qMaTYtyBAdqOFep6UQLVYpffbzK/qGyfy8bNYyvXIyv4GBV"
"Q3hHQ156IMoGu+RgawFd6TbsFjWy8TMxbLuH3x0VZ/hpGh4DgorGGuFyb678FayJwgXDP"
"jNfD2OggLEAjk3E1cwhpATph5mCBSC5pC0L2l13t61/onr+AZe3aciBZW4PnxYxiV6/tg"
"eUYeQD1KsYb6wtQUmaQWHZJFiJ6eFjOlBYGWIThBsu+L7yZsKl/2CxAg9VjmZX095zT44"
"mrg/K8GwDh0tE2Fgo6wXUjTINRr32KvqXxChSHUL6AxLpE3pERJEr8ChoZtnobFAnxHrU"
"4gMPJyVw+nWoKqEBZuJl0xy/AxVknmfQq/uqhC94MV58u5Aelzr212Hgrjg5wveVe8fwV"
"dhLYVSw0veXt2x74p8/nwZV48Ipr527U8N4rbrapf9gg0/mBTJ/I6W12y9cdtwhgURRde"
"E1YHn5ImtwqVBQYYFToF0sdw400NrEruuyomiBDHERQQ21fHVEsx4+CJakR698wGskhP5"
"+UoOIpK/zEtpmkk4Sa56gx0+6NUdxUP6yh7Wlwqvr/P8QjkaQkUWHvhUD2ATu75z9fcIa"
"RNBOYIxzcYDPUyllG5o7Sj7NfsKRb5BqhlsEBAFsWYNaSmMeHOuYKANZ1hhWa8zd/o2wm"
"jf7UCU4PxVAHtdL8EkwUKY4IZ+bdemd00TYX9/TsBhmqmLTBMist1iq4wSOHZG9QYIWz1"
"xojxQsHJ4aTOyJCkshKdiLOtVdIb3Wgkj7VHkjFqijWjiDOQUtIyO16AuwCqff9pA6Zxe"
"sQ6hJjpnGCaoP1LyiWhEGsp+kQDt7rt3j2KtsslCtvJ9ff42nGH1atITuliY2Oz6EyiGC"
"ZagHLuSkwGPX5ipBRFypVB3pYVOdLb9zbQcXkkyf7dgLdFMIFFBRiVCchTFC2YMEHqGa6"
"e1tpSUsJ7WctEgYEYvJ8rvS/bIzlSGoSzSXWDGJYq8ZdAzMq7qsqwXsowumLECm56YWUt"
"b83UG0mO+o5XSI8AKaV2W/fJXQKO6NYIua+qi4QwrjEsquTcOZNstzYPpB5uOgkuTRJgg"
"ZELhTiIExoMjR/JiO1y//t9PoujXZYbolHfNmiZk5k3smIhgWS+1YwhhIYTFAU6cE6a+R"
"SiZ7+V9mjmiJb7P9XOjQHID7VEIvMD+EvuL4XbMFwEzHaQWRlkvYU2uNAAcdsU3SiUNZb"
"/ml8hiALmiLnkMBMq9jg6kB3eGRDK/cO1W1EFEFzWcYYXFMG2vsCvQYSFE0jMgRjWGJYi"
"QlVkyyXX3ESGZE+AVzn+6oX89kWBeo+hkeiJC+bJeXKHQ7yXMzsF9699xi7TZO533KRpK"
"JU3ky2o8x5vYxTLqSsYQcrgwGrt2Av56UsLSFpE2c7mcYMSJxP/0BjyyIKXzlBG9P18EM"
"4GU5QBV10UQNMp+Cbgj6LfiLhrKsEiEoZka1geU9eKWGd1irxgLi11ItwXQkGubqHqkn5"
"eX52bclS+/zyBNCCWsCJ1ewbGUL+sFFp1kWa9AP4uFxrxPgPoIJzOydrScg+nkld9ZDQr"
"ZKXSdKqakxeQnEE+p6dbV9Su/9SIRkMvBIrFgk1L6ypXvHUaEx6C6Zb8gZGGN+v6EKKxa"
"3qy5p9atNZRhlWV6IiHt+MWtrmNj4HqDYZoT94HFXU9LXN2y6ulLvGImEfb394ubEPQLH"
"QTcMiLUQZAaGJJDx9G6UoYYUaV/CBz80xJr4dohLIQL6mwq7XXmdQf6ZA6mscLkI55WXO"
"8x2EXBiuA8ELLqdUJPD27sfKqiFNHlhS/sg4aWKSfzU2/jBl3DGdalWmR7EBxD8LkWhKX"
"5Zs5VnO8byhzCkkGv3/yLDUFgw0IoVqVy61JkIQARggwHt6x4y105IXJX5PuhIZkmJJFY"
"ccVxrFGkCcEYoiE4YETor/iqaoOFEGsOFlRdF6XYh7t7Iqerrl5ZUck17u5LJs2U0HUqR"
"yliU4H7C6RsIy1w0ftA0r755jAs+wWm8EbUnjHzPbPa7zHJus1g86C0bWzoJZHMLyzNV2"
"1ftV4fOWFVA0gYyFpymeq22E43DxE9W00ndbmWZb0wW0ZW1pYTRMi9VLZTp6RT4IoV33U"
"Fs3kNCwqLd+kFBbgew5AcVVPKSMbsmcUpS8umtojj8p7xd07jJsTRUl/PDMgYwpJX6E6k"
"oL9C2AEWt3Isi/5orMlomSmBi2/vrqwm4/j4GYkzY8VXpZQeUC8aOR6XMg84CbHsFytZZ"
"2y9KPRYveWNZCn9VnNvQxlWWNZr0r6CYpikPTUDWSURJpG0z7pA5HY/QIShpRAFBbD/ZM"
"6SuWExLbkRI7QQql7WizoXU7OG015WJDXMLaqzqR41z/QMYDtjs6HrxItyrAogyWk3bSg"
"dE0GH2Nz2VFiTMZeTUrrl61dKJcNR0VIosW54WSbz8yZ2czYGyhsJ39ejNZRhXQjLemnB"
"Hs8vggYbCs6c+CYRYqOzStBNJfXV99WWC28QhCr1VrjUe52FB5baxJrEoqTCnf/wDFUbw"
"lAweKPjIhxqhYUwaoU7d3aMXyiuPd/eTguhio1BG9C5G14p5ZjJjkuEsXL3Dimlu8b/fB"
"F+pJelP99iyvrGYAHzgZUaiHqQF8rB6PWCZOkrq0ZIKc6HFkLblaluVdw1uZtjsaC2XML"
"OJMzLHO5cRBg6/+lm5ylbpgmJxPmPMYSUshTmV6gilAaOzAvgKjZCNiJXuIf50mghRBa5"
"XS79PDAnNZJenLf5VAdguq5uS7/9Ih90r6zXYo89wj1KW7fuEFJuJ67EmXJ7MUgW+p0bJ"
"4POA10m88Marmux44YxrHICMDDrW+2+74pUt6B6BYkQZuoU0WRc2ph55yVO5lxEmMvxF6"
"hLTaQJQbIzDGXJY2EHwkKI1yV3RuDiaBQr8H/SWiX80w4ciR7UMIZwzLv7iK85az1ycOr"
"5lWtI0QDpOmG2X+rq2jdO8CoNAAe5SMEF9+BA+XVly36BFHnkxcaxc3QUG4ho+brNRcMY"
"Vpi079VbnwIRFrZIM3X0xC4RuoS/ICN4cSMkJ3t13bqDE+xpbiLMiXOK72++jMzmN5gmB"
"HMrdk2+q7aRAngzPdzVUr3eG4lYZPA5QxVCHBEM4eFeqVf3vV4Wf9fVLwPMC4XbB1JpM+"
"lzTyeXVK0BLLo0YLcSzDu/SEjOg+BDShfrERWZznueMNxQjqyZhh7sP4rPPJ3LEB1nneu"
"OCUthPct+NYxhdfV/TBBcYE/tMUxP6UBWKtF9FLvkhM9PhPgFbe3aXxrEYgLDEifCJRGb"
"sBDy9KPe0uRQuZJQhDAwC5OB15naKkKWhsrKY3FBRH/CPn3D324luC8sHFAc0WOr6gZzR"
"FnT8MFokJFW4OLpMgOqtKNQSkcl8a8VsVOR/6nFrjgSmGcRPpfJIE7Nu3mQ39QzRKdhDI"
"sDZRstDe5mICt8BihLqLc0USAVyUK0pJG9QHjnI0JBsNM7avYURXpIIFxdVTdyOSJClPV"
"iMDjeL4nzVQ1BxTeIsl6mnhm29FWXxV0RV/Vin6eO9onh+15R5GICPpRDB2U+wAXHSnil"
"+YaQsCrG4vSF0lIYBNmrrmuNq1r2CyOFrg65JTS/m6AjRKdu89EwhnXiRL9YzDDfHoTlh"
"341dRv0NH0s/gbUp5k28lyZZvu9sl7z3Dcw0EOxCvMZiDQhVErU2ihVUVXATKOq+mAJh1"
"qEoRh6+uzuDd85xLFGXdYLKNSZe4l9wwtoKy2E/I6f1WoQNZkvzU2OdCa2XyJss8t6LQa"
"xLPvV1fVfLmKMV6nAx6hrJ6LFHlf772XSnnpTuYuaNuZaHt8QhoXRThNhyb4rUt2qGMhK"
"ziNqy/mpsZS15hoRvBAR9vR0COLS9ZWvlhAoDWKTDKyGmeGKDC2ENdxel1sEN0fSPstIi"
"wWan5Ywo3x8XjCnfzr3wXVB4Oy2YdCAGqUhdLvwqFiTESEOun79sa3vu8xrHyzrtfD9oD"
"apeMeL7hh65g2qItCUY1jcMJjMD9XZt2EtwyoMrsoDcR1agyb+qBjc2NhX1sAps1uW9VK"
"TCMUu5wdXv2HzT17kfCxMhFKkB91dKBQwq9KxrHqCK99RRAy12jGExAglrPbYynqFFkIY"
"ZVACzhcWQmGW5KNVauDeVAOgIjhSRCNPF6cfhFADiGKTA+2cZOYPtFr6qOGx1dwiy35hA"
"9lz586vbZB3yjVdTS+1XNsQhhUS4YXhr673/IkNnksW7TcElkWQBgmLpdc73hBEKDbBhY"
"hQOv+lUgcvwcEBITrky9XrsUCs0yE5grMvAmQjfuZi9JG6SfMycGnInCcMcZT1Gi5XZyn"
"4sBBmjAROhDx+qIgWWJNRNUnzhcL92YEekXKo+rnJy1uMjrOOzSWBNK7KNVn2CyralaDU"
"zRI8GZweN6gNQUZ3mQhdbepAIq0nEBQGIuQyVayJXdMk8zhJyI7TarNgOyJ2w87OH78NV"
"fn1paQJ4apUOcsojgA4Cmoo6+UXssmtoihHHGW99vTIGDtsHLAQgkoYJ6VggwrWZLKRpL"
"WyHJJTazzlSUFDvm+eoj8fmlBkKTZkqC81r70dyevcmyLVjKbVp+xXQxjWgPacwH/JGdp"
"hJR1qijAz6km+LOvlolRTOrla7JpwaliQbiB1sIy3wKmvZV6txfmPWCDnFhZCHAn5Xj3M"
"EA2YNcgQkD6HNmU2wJUj+rJe7POEJo0zvl94AotEIoc/qNWowDHsIkNWNBFPOVc0RCUg9"
"5WtrJ2J9cz8cVdRSyGGgvk3OCHGLo6rv/+5upBpQxjWYA8LpdKj13sSXu4Yu3rCFcATOm"
"WHXuueLiSI+WvLcTSiBU8/LZ3/8OmCTNdUvTKS6FDeQoiIoWQKHEszTq9c+S3DHP0Reql"
"F2KjIzZdjCMGshIUwwu6j64pJ+8Tx37q2NvP4G+y41oyroZU1veqXr+HULTJ/gBTVlCox"
"KUEw8QTHm8vJNc33cba6M6yZRGh7o9t8X/CuOMdYY9+ytpzmJ293ZndclZ1IHdVCHWLiR"
"DPNlSj7JeiMOK5qIZN9i7JeVd0ln1uvv+Tmhs40wB1igcqyXtWNczFYw3xpXzj7011Q8O"
"5y4eMEkVO93Q1ELTzcA+Pa3o3vqShp3/xjl8ODtO7BgRSB9OoNtww7cvhj7er6liD4VAq"
"bLLCQj52fxP6AByemr69PPPPG+D+ugzpil40MhrSnPHhd4z/L2nKQj649uvF9wqWhkiwE"
"5eSj8NvyLyFNiM00IWhVsR5eTP0Vj0AKIkZMDRgW3MM4lbKsV1cMZb0gpshnGRZCQILVy"
"MWEFwVpBfMkHIUNS0ZDQC1Q7ZzLkXIpcOGXdaWBf4ZhYVXud2FXcb8imR8p1dkxMvJPdQ"
"vRqTvD0nrlIX1o5AYthGtY1gtNyXUpasvpHa+D+CgqAcbFwcyVCwqsWvW2N3A9RHoWpSD"
"rqazxCbxaxBAu/rjKOo38KjDgwENlmKTWnll9QXafi/wpYQyh7QztT2dNixZCJV0aIPXR"
"yI0XoeucoRaoCSfTmT/0ladQRYdNRa4lYgqxIbd53uROAlmPEJ26M6yQCEve4KMgQlZ18"
"EH+Ci5NbHMwU4MURVCvrC1XyZEnL5iTrn/POHPAV1P2izcSEaKsV1nhTkJQrQFOOEmaGt"
"K8TCS09acJX61K5oXGFsYQYia2cyrQVHRLIzpk0j4jI/zRFhpTJb/lcvIq3QzOMPsopC4"
"F1wdh0mkpRDK/UZF9tJKxLfWaujOssKxXwZ3YYRgljBplvSDnL3UgEd8viRC7W8pYJxTu"
"lWYhoEh//Lgs+6UZ1qusoALdMXlRRY2IoIWQKZ84ORXfWFHv0VwEGOFzhG3faL92YEOvD"
"MmJuKwXIQ1jCFGWXtkYQu5oJhxGUZNxXDfNi4Q7ZLR8X1vLUaLX0ulHL+PIfb3sz6cgKf"
"AsAGkj0ESdwnqU/ao3w9IP90o/miAovolJuTnhtU1qvHdR8eSAYZmaJyrBaFqu4geGDpS"
"GkTwHfxrcB3Gp0oYrhYUQJKugtqY8igB5vekUpbGsV4E0K/JYVDrGCq4DaUyHb6H/LT70"
"V/yuglvrfAlwAWkTsbBXv7X7By/y4b2ajAmsHZC8WBPt7T92EyNWvuxXoBX3cayg19jz7"
"NeVYQlVjlAqwsnOn9zieSzBoeCyhM6KTopgOFfXtD2GXQ7sKicZLd8v1sKCAoaR/XrZUl"
"ixDoIrUvmkfQIBSNpnrJLHZaCKkuVieKnu97xgTi98/UcfCTR3t80sCFLLX103cV8NsCh"
"h6YaJkJynHDJVtCXhgrhEP2Jt6lrbSYQoslMhdcU9nGr6J4wlJAaAlNU9PPy78Hpnk/Mm"
"30f/t64MK0x1e+7uxzcgrB2BrMKloc4wLI5EkYVA1OoyB3dvOCyOPNVIEGFBAYTDXikWj"
"QKOT1h8lRExKT20EC4OaWOuIOtgWS9MnEij0j3wTORzGIZv4TGPYLGuooRFJDZmxAs8Fc"
"yF4VsI1hCqg+dqDsm5/xlh5g84lJ/X4aFbhd3m/o5i/RQYDiyFKNG3ySsMbpCPijdEJ3J"
"CWxg/BwTBTRUKm11/cqV0wVJOf4UhUC9BHU2bWJCytlw1EoT011qz5vsuYWVXXPaLyMGu"
"pUHPqqLcGU4tJsxH0j7NzyY2iOPyeIWVYcIOKnkdHrggaNPxR5Bl1EiCX1HCUI5hYboQk"
"oPSXkZSpB/aMy4zdlQyxoWuCTN/mNoaFOflNlZ75o+FnrOk3zAbNJqlUmZKM4fFsVDTZP"
"3IJfW7wM11ZVgDAy+I5xXd6wez7ZCjoV/GPwWJUBAgATtP3FVaW47XyiZ1ELr+lgLU9iJ"
"NCBnRYk0gBNdR8CSiKrhlsS5j+J3MnCZ8c9gLkhf4gFwVx+VKAdoTpurRElstGC6AG+WO"
"RBiLcEYrTfmuoaXE8XjpCvcQQzLzh6/7FwvQEmLjU48khE5R9zNpyIGat4uQx132SzCQE"
"EVxvz4/LuONSo6zE4VTldw0iAMwCrOEc1lCXyuq91ZaWy7EH3UQ9Pwu9/VauQJK+POcr2"
"ROuE9YBx3IMGReKjYyDoahmEbmXM+2Z2Ip68Vx57ScYFB+UJIxhCoio4wLXU9e2rD6TYJ"
"5z1WgpDbQpZSeTPZcBDleqTXzR23Pru4u+ppogS1CdHqm60dW10elV9eVYeXL8UZ+MPZE"
"oKJHjcQadrPAcG3LQz2Bi/yqFh+j0PNb17OvO45gP8T1okITk/bRrYHMS8VGjUUCFTbAU"
"lnWy4MsWbNX93zjgzSKrvOCYZl6ehMSxc13aWO/BzISyAqaMNv+9eC6D0xgdvXo4imPCF"
"pZufK9w1DoX1tK5o84kUQ6lXUi3R18Dj7DUhifmqduDCufz/McgbEECc93NruqWghFvmp"
"x5LnctfKtlzgJteyauRzvpKUxcUqURaogrxF5lEjah1e+V7PBqQz0mLA6hc4GDjgxgJoX"
"fQ4HL69MWCt22A6syWBhiuGDByLDw2HQ1Ns/SdiO9+dMQLnoplTJODBc6tnl+gxSNWX+q"
"OQ5EVwjyn4FgbtrcPBXu9hfGAMaQd+zuqgbwzpwRFoPTg3+xQZfK3YzkBUzqxoRQgaSZb"
"0MI3Vt5+q3jxJjc5f1moXLB76QOohE4pFLrmMN04EUw12QmPmjCMlZ8KoHHlPHjxgBVXG"
"oDINUwJomFO6VOtRWB+YRcbk5nkbsbxYmMgUlLGZoSGmGW0pdXpF4+tMEOFcOy6purPNf"
"jRAdsT4hYZ0HVeLC+CSX+aFY+BdsXiJEB+hYk077G3l1bzkGdOE7a/u1bgwrHIOrlbYFm"
"t3OYpjYjNRjWMCj8KvRLaG/qv3II50HV6z4jqtgzSj7RVRzvc/diAieksmwlJMlyiBjxg"
"JY8I3ClGdnrc3lkBzJmOceVc3fCjx1du6/4/nFK8yVvgDqan7Ikm6EH0M6k0S2ilX/75t"
"2/IcRRjeEx9gl9Tvj5lwu/NApyn7hU8X+fOGd8b+y7Jfmt7WZZqk0eJDPizOmsG4MKxzE"
"ZOny/kwb5Wb1asuJyWX5nsBiYQVhIZyvrNdihFAW6fHylGMYmdPlZH5zMix+SYY1M2nfY"
"v035HdghszcNFK3VnSsFxksas37tBD8xF0+L49Dum5eFzGdYFkL3VPP30C7vmH6iamx1P"
"iKxL6P8dlDQ70xwCc3A9OzLtu2VRSJGxaR0uuJh/KzxF5rmYj41K2d/C7Osl91Y1gncv1"
"Crg+8YA8kLKB94eNRAxDPR5LoTBtijml0iFQhS4MjJy2FvndOKk3nX3SUqhg/yLAcvo+B"
"+pc2FN5NJTMkRfx3Zufqw+K4XEnKnVoe/PQReRwyjew/g0WKh9fSTxz3YLv12trTkK46f"
"/9NO95/EYHx5uHDfZCNo21Hj0pL4er1vW+AJlD2a2EpPdqnV96b2Jxxua9NCkuheFv57V"
"VdWReGhYPQdObIkj/8KLWJHGRVkNblYpgHsVP4bmoqrXde4SNzuZ9cApw5CbXR+apd4tu"
"FCwow3x8zNXDLUrVBnwJDQlqkAZZJ++KZx5wm8Z4I0p+bmoC/HvIpAidLmItoMArrqJPJ"
"GpCurJPbE9/3IfZ6KuJMqyGkR47kxXghpU/Bn+9yNZk/wj7q88qiFODXvrE9CF5MgLlii"
"cej7qkLwzp6NC/WYBDcasfRf6tj26A8Nct6iUBW7GY927/vopzsXiEZ1jbxJ8MFdm6xmE"
"IiiFlGqeVSmWEROrB1IX0Od/fESD+HBd6/eXf+JUhYX85mEdxJ/8RGNmRSweATpSLiKFN"
"rf2LHjkPF40HeypddMKIGTS58mfkDJT8QU8gQoHg2iKXADpgQU0jCLe28fv0T5WR+R2Mh"
"4xgJ7h4KDhyQFsKXrv7dej+Y3I5UHILu712hzDvhxY2QnEu6vgOerQATVFN7kyJ9e/vuN"
"xzXvEXnP/Q3JwMkZxMWwtofFved2DR9q1SALd/ICgvhnp5aK8MsDioX6/HjOUpVwFnyj2"
"Cc5P+cjXATWLyTSK/QRSaCbFsCB9TsTx/a+XtfInyH9DyjNWJsedE3rNZnXY/LtYrMHzF"
"CdX/XlLBENo2ViYSznb/19/fHwlti6fT+wWhaV28YXzS5C4WBMog/ks48D17Y6M/Q0Qiv"
"9EA/SVDKea1qXiBHj8oBZbM/dxOM6sZ8zn9kiTwKqmwhBKtA0j5D871gzDJXiDCUHNUWM"
"bZc7riQqN6+7//568Kk/nIGCR/BvmJmEHMOCLMT+CtWJWEhtX7n7fv//L/jsGZAuRy7xI"
"eFLwDS9QyqiQt0q6XQk+gCeetuZyd2FH9ExBTmcshfGUOrC8PqGPikAH6qdGNPKgukB4a"
"aqhrdRwlui4UVhASh5fJLQjl1EFJdR6lKOv/xyDez8SORM9NC+MAlMy9v3Hsyc8b06amL"
"G3e89aYE5EisoGIRCCmLrwk988vMTwYNI+vBx8oo70OyfJbf3pmwJkeNZ9+x/xO/KH/PU"
"9yLdfx8zr3MH51XbDsxXk3mj/vGEfOHkK59XS8HQQ/EMkd1YVgXLkwDf8D3cR5c0jErHs"
"yDaZD4kKccoq2WEhIW5NolPUwSdK/AMdQfCGWRZ5oHOyU6aCFkTn9UdlC1+Qn4QyHy4vw"
"O/VAxn48+JGeugR861O9Suf/tB/70s76f+O0Vq1IM/ohdshGwIPQIQmTQuSppTo2bf/zv"
"9//VT/B7VoeJS281GwdSrdDV9Y1XIOQh+yhyFbHotpKNS6ggso/ijThCRw1m7AyL1oLDh"
"6WytOTe3UuRQ8UGIwAshJBo3dToys4NwkI4NBSFU+SQYEGBvuoVivQY/yyRnhcwQwPFTn"
"UbdSf0wWoXCvcDR3rrxlpPncoLzLxj/199cGJE+0wHNCX4wo4XVzoiQD2zvT1lFkaTv/u"
"OA3/1DJ937Fgvwm9knGO8zw97l2PX9e8s6Wb2LP35FF1CusNd19d3XL/+bFZu1vnIaSR2"
"hhUm7bt798UVgH67YwsJK/KBhNNb+yvKenH30vxrj617+ir7OXVK7m6193nvTsPwLhaLM"
"PwyN+ccjVlGSYhz/jjH9fX8isRHqy5jkD3fFfqrsJhIPeBAHKp/LOglo9ey7o53T47pL3"
"WuSCTxkUwrajaPrSNw4dycsMx2zSus++Db9//FL/DZlKzi8Ldi3/M1gXv4efF3GD1Qp7A"
"OS3Y+YBb+HgwLegPN2ZLJvLZOXhp9Mr/YRx9mjrw89dIjKLKy0WNZL1D/wmNvzK8kBsvo"
"RJ7yg2L3PnLkSASLISfE97Vrv/E8Dnxw/qMe6J6ymogIQ3LUxAoXSjkkZ8J3E+aq1zg70"
"eV9qmyuD+t9Hq1y3/L4bw2vTb3pbYWx1InOlckklghXyVIV8Zxn9BG4yZRutnWkLLfY8W"
"8rUo+99dDeP/ht0msezKq+ktU9vIRlvyy9E4H0Yulw3UZAm/eeEcE7YSmEY3HWtmUVnTC"
"6JYK+p7uInWGFO7HjD+9LpowkUt0C0fE4lU2PqqY32Mag0Qx0T0gQ0TlF5gVh6fr7RhBm"
"gjQhAuX3EZtgWGBrSnJx4rIckgP47mQ9SxyXNVlesiZM13oT9VlkWk/t/JXR1ft/9u2ly"
"dV/1JZtM9Jt8FXShV4LDqaomLzIYpa/w8IimJRgVDqcQa32zrTl2ZlzfmHtT3373j/55m"
"/c/itfziNGkByxfjqr2djJ5eR3umW+TrUCdFixr9vZUCz6DdCpe9ksXjRv56JX13hB7AM"
"fyvWLxVko3d6ZSOE4iIwc+MNRKdMADE9jyIGlo6xXl5AgwnxWSwVypkiP+sBfZ2gLJRb2"
"yz9EBJXtNpYPJ0P8gFelGlYsXRp0PfH6N+3/zTuErbd3qZVhahuhUMLjePgUYjS/bc8fv"
"j9p7H5n4Kx7KZPJmNl2FIw3A8Qq8D+hN3XwhkQ3/YpfRP0dHNGNZEYHk0pYlpXUnGLmK/"
"5U1zNrVr77iUN7P/pRWCaFRJc/pLm0UtYGbTR39fVJXWoQbLjk+8lblWT+iObJ1fbCZYQ"
"WOI/zJY6yX8Ixj53H1U4dDddg8BjO4PdWaVwPrKFfYhmMw3BwEEymNSFh1dDNArfk0H0/"
"HmKg7BdZlFRg8wYwNM0BCxd1CPmTos0wRKGFCwSvLH1y42lI4/GQ+iREUGhv2ZZ/HoaMT"
"//zhV///oJ9472BN/HN6UywyrSQWAKpqsSOwHVERKOxjgCrNJcKZtErmecC2/x80lr7N0"
"/v+Ui/ZEx/oB0PclZOO+7h81KPmpHgR24OurZ69S9dv3nz3Tfh8b4OOdYaykTnGhis4Jg"
"KbAnIjcXfqfrAR6Ax5AFz3VXdd7EyLAAPYKXPjO2O7bRE1Qn1ViVwiqBeHFO91M215q7L"
"RGE0FkI5Gf398hWlMb9aKk3ygwksTC8jFp3A/4qF+IYw33s1jeRL/CRDcgYaxrAIQ6hP4"
"hGxzFg+ga8/8cqVP9w0MnXycd/xdxa8oQNYPrBYmYHvuTrSOk+lrDWn4TV+ecOKnacf2/"
"AM6yqWx/Ff4Sics04gp5X0XleHTgEj+QAzu/q3bv2vJ62E8xjf40uhjCc+1GgBQnR4ZNV"
"3jY7+/OoVK373LqpNAZH5yJhrrAyrbCEMzg4e67o0+tmddgkbFshGillqoJhQ0AgmSq/7"
"+o09m3/wuqb9UPnIEw3Rhs5/UJFdLpW0KWalg/GB4qY4a4QWQiENqIOWEBIQW2CWCijMo"
"ftn+WVYICK8oJGvPCJyY+zXDpmH9H7vyS3vZ9obkfpmcbh+XGOmha7+vJ7LHSlLVP2L39"
"aAK2TZL/gz+t55ptsJAnjRKtbIVOnagNf1sBg+AvDAsKK1FMbKsEILYcEdXu/5k+tkam7"
"1FO6w2/lgIkhD0HEKu4PIxYRXCj0RNbpH9Glr1nzzxVs3n0fZL2N3sQhsYIIpZikdkgPp"
"EzmwDCRcHEvo7SJpX04UiOiPCDdL74Zzhl7AuDS9r6/X7OqSvm/Tlsw+WAjKaTBDIxB/O"
"4X0LYf1PCSsPG7nP3Vbj4jbHIDiYjWS+V0FoJSuotlQIxy1SOaXThupQvHWo+j3tYGBVV"
"TNRiaNx8qwusvATro3DqQyZmJySihvOAClGuVt00zgWOafJ2AbvqvH1PLT3vkRwCpFYlg"
"Ki7duHIbzn/b/t3ctwHFd5fme+9inVi9bluTYsWVLduxAEqLwKBC8TlKYSR/DlK5oM9BC"
"mCYlbaADocx0OmjVTttpIRAIBUISkiYpBYlJQ2GghQySeTUhUbETbMd2/I5tET+l1Ur7u"
"I9+/zl7beNIiXb37u6Rfc6Mdle7d+/9z7f3fvc//7OP7Hl0ulGwKAWN0mu66uQbohU7ZD"
"ygRbomhHzBqfhBzlfYSuarSzWXW3Ou94KUKMh9icofuu7uF30Kz/poZGMt9ClE9Yac0UO"
"z7+/fHehpXVPyyPSLtl5FO7tGN3nRvgC1luBOBgozKqI8RsTsKLX1CrYKAV1IxIkkMQhq"
"J4JHufD0SMb2ApTOmv4Q/GiVPjCXPIRYde3Z3JMupeRIyq2VTnFRfE8EMYdCG/fDgHEEp"
"ACpg1wFBAcCrRo8bfYNYo9jgV7zNb1OtghbMoSn3nKYBa5cCQfoAi23C6hE4Dn7Sb5K2n"
"q99rySfPK6bj5HEcEYFPPuUYVRmdt64dQTcjMmuuTUMSXntTG9lLYY5CdNc/PtJ2DCODZ"
"f5Y9GI0KXOLX9wjL9cpIF/5NzILALv2aERUKmSxHdBTu7Cl4avuxpNKCvOD5sVSaizxFp"
"dGRZ7LcO0Oe1qFPuHxe/5QszMzbcp6RUebBfnRf27m8kzzNdJLpt44mhsCaGbwOSR8RLQ"
"xIQgJZOC0Xc1SLPl3oEyDh5tP2i0Aanb2Liz3mKTpBtv2pGWL6QL048sgwBsL3ce0Coyz"
"bArHS30pl1rK/75uNCPHE3C1bUJFeNbTv6IjhA1OcGWZL9ihZY8gHDZ4+EcKbn0CXHsMO"
"csM4asoMFR+3ttRHwNm1K8usVD/s8HuwenOby2odf2BbQU3iKDk7qdgTnLqdvBdn2q2aE"
"BTWFj9O5M5c53mybIy5MKa9LeAhRRTPK7VcgD9Bq8GIiboir9CtXPngKAaS7qaEAQv88h"
"K1IulLmPx/IHAsQzXppWcdVh+idWmqf/IjqYV4EkknxETOaeeUP/EfXLz+vxCcyPIq2X0"
"1NuKrcDC81E2ROYc0Iy1865ItHN0ZjpoEqo6jtJeEdATYapJzgl9f308+dRvdePNXiJIA"
"ulyx5ZUPPUE4hzHpoIiTDSTaPDJDPArGG0OX56q5PZGlJ4gdszvMN9XYNEfADkGEjOpzP"
"6wVE8tPNVbYziO72CBPCA9PXEhx+HGIQ0NSMsI4fH+NAup69hunwEMKjgQs0eNWlOhSIQ"
"fUC1BxEQG+lXW2qbn+v+m3/hGOsaWx2hsKwPCTW1oQcX1WOBX+IG4yLOuKm1vxt+o7fem"
"vB31cbBopAElH4tMNly67fCy39pXCYbnq4qiQbPom6bv4aIdoIlzsIMWtGWFi3CpvN2V5"
"l0t0JaC2Gfh+4KItWETerQwLQZBC4zrkP/4Rj7LKf5QtsXzRCGToCpzm/0NA3mWOh1Eph"
"1jp0+bLXccLawgNGGyrUJX7wNCcnxgamdc06TOWQYL2QjrBwD+Z2LCgpl3vP3ldq+xWMs"
"lITwgLpg2Q5+ZsoiXy5LdrkyKZd0cnPlzz41Q+v6RnYR29sSdaOQGg5hfuhsWxZelpjoc"
"cjYUNDmXT5bpHAAWK5kUgIAbWJB3vaPngmjTy7RpZYod/mUh90TY2i3A3hgDTCX51f+UM"
"mbHBCl3IK7d5TPc9Qig5GOpDrvyaE5VcZffrIvWjrVegtws2JUaNj0a4rHGBWXrSPRY92"
"sCsytJfBABM155ZKeCOKWuSxzJRto90eloWSBQAiIRiB/1Y2Y57oaLkKLbaAS2k5Mvec1"
"Lv1QiCZTPNDwav9oiNt2y/EBXDjLGtxnMJqEjiotl81IpGNnE1DmrMShNXqUBxPKQCRhJ"
"dnoHUVL6hn8jrlVKyt1io246VRNH1F59e2wTr0eGebSUxOPlR5BjwkpF1FjPZ/ubrrwy+"
"LigjpwOwQ8kx08Unit/1ytPDzeR4TI2WRD7r+7UQC5e/dfKBtv2pCWOPjT/L9ZmYPb4g1"
"8TwUqisUiEoY6CnmUSF1qBJGE18Odiduq4uMY2NJjo9pRj/rzDpaSPcsCnEIdG4V7gx2h"
"2IUhfDy2dj/ber7wudoN1u2JBVZVYhn0F/zPW6GEUbbL2MawQM4Z+WzD/t2DthA1gsM1g"
"RyDtWEsL6bETmE0Kr6RLntmhymqnMBvzL5LI0C7lLMjXANa11md11IQ5RE0fSWpY8+DSv"
"RIyuWmCg1wOy6sOWroUa1oZhr5WdMLx5e9WEYInlJYmoC8WpfU5/VEwGRU7h06U0HYchC"
"2y8yvMvc9su5UqAzEkiUQE2YZDApljhFN4MSE3XhgLLPGLgF4CHEWrsYmm1uaj5MO6hvF"
"Ldok9URafmUmbPPRMKMWlc1cmkIi56roa65Fja6PvHWNelfkIGXyLVscNUXaohAml9QvO"
"0Xi7xIQc++NlPDg1ay61IXHXcVqsNGhKmlesN74ISFuzFP6sXZH8IfPIT8fG+48nAh4qj"
"h6FJTSsj4Ulv3ew/S5zu04Np6XXi8C/8nW9azz/ZbrO3Bg60R85PdCZ2SoMGjDWF4XASe"
"3dIaNvLZpq9uXv/5uyEvitrxxg4Xiq7+byACdH6AoBCWSZU/nBdKTU0aKNG8hxZ9CjVv1"
"bFjh0uewuqL+QVOWBsHhVDPHbl3mevleiiigbJd5p1Wgz6g2xTy5DTLTOxfyVbOkhiD2i"
"C/e9VLpP7+cc7mnZ0jX7Xy7uNL2y0Ttiw08KzroDnbzW1hKz/d/sRN6x+4nY6OuyJWhPL"
"ZRuqKjKQHgw2UnyIIQN5Z4A3puAe+rufuAqBBLJanIbUrruvZXto+iBSdwAmrlEIIbWG6"
"B/eAhAO1ATeFOl+DC4ATtylKesavzJN6g2vrtYBjlzahuyV1EqZ/r0gs/6Ax42yPxk302"
"qt1V2NfRrJZeS51Ui5klnzzhnVfeg99Qk1LVQqOj5F8z8mkkElH5Q9RQ13CkCHhZHPicf"
"QBYR4nrCCQDJywfBadmp1YH2sinuJF/qUiLK5WI9/ZsQ247lt3E5BBtfUq90ehTsKUY8i"
"W3Du1pjX+B2beOYXqrCHsR9w7y93hArbHjwFOZEXDdNGKPWbkMy1fumH9l/4IGhXvsExd"
"aRawG7VJgxDw234Z4eUHXNdA2y8SREptmGt9nlfkhnc/06Ma2AInLD+HUGf6eo9syJK46"
"88HiYyUuGj1Ii+VoG8//7NGvGZszCZ7VkfHI7tXxiM3GgX3eDRucE1LkEtgUtEJBEXdc+"
"MJ3dKcWNbNLb/1pivu/ws6AmlWiqwCw7pmO0qlRG5ee/vgUfyWE5ZFSjqd1XINMimQwQ1"
"Uuo4kK2V6VKW8BE1YbKCUQ5h3z2wkYZGkKR2QdDdC4xFctuFT7dHeQwTm8eN3NFTO664b"
"LxJp9az4+ta+ROv1Rt7Y1YJW7OQLBulTyEMV8iGSnuJL4AWMxnUzFo8axdm27y9pvfa65"
"LrPPITPeCt2RVZ0Jsg/SoZ33vaLsch2XgII2rFskuP616mYH0Jl1p46lWoR8lXnKQyUsO"
"AhpOJJ3oS3LY6ggdXFIrcpB3qMYH4U0VgBUB67ousDPKQhlUo1/Acn0roPpHXZZV/bdf3"
"St71Jn2GPNsUtFolj8QbCoqBO4EvLNSIv+ptr8M/AQSWS8mz0vNFjCcOMRCOsOBt9Riuu"
"GLhp/f03X7f8rhfoePQFlSc4F5TyvifafuEk8Ny9osntfKdD4+ZAXptS26/lth0vVR+tz"
"lMIPSO4sXGjEObgwf9ahqJ9q3iTcE5hwR0jkD1Bj0brKiyOmnYBU4c0DAwpfvHbQVo8FW"
"bJR6Yw1z/55f73PX4mn/+bWNR8o2GhUA8CXYsoBA9hSfmiFtLnyU0vefsnFgppzAoZOvW"
"wy07pBScXe9LSWx66YcPd3/IxLC0Bi9wt6L+pnhcFAv39Cf67W3oCzXlPkszceSOT8NCw"
"KKfQQ/hQzClmKEVnTyolWrBVKmeghNXRcSWunxGsPPJ9uqHFiV3RvhrvSTeQ9EzR5S43u"
"N833m/ero0jAEOOQcGa5D0c2DHivaHnsScg1RNP7b3l5uxMMYUGrJuga/VEooZuotQDBY"
"zwlTehDK4izIsw17u2ebTohJ93WfQHrfGe/37Lqru4N5RmOOqlzaTGG4cq4zoBsihHByc"
"slzn7Z1BbDecB9bik92S63iALs6NRw5wssjUE8/h4pir5AiWs3YnvcGFmiqd6IzEYTaYY"
"rQkDPQZNuuqho0sO6ClidXCD+7oMtfVCk0qJBnkPSRzStojA3rL269/Dv9/bs+fO5sPe9"
"AZWYL2zucwGx9ORrSnshNFQZzZiLN0esjKHY7H1u67tvrVUo15MjPaVTI6WOhynxZvqcZ"
"EiIJrzUtuvnP1z9AhwV+TzPEVHQk0LEHv21QT0vn2UU1j5tRYombTtO9t89EpucJeK7M+"
"elxCN+hAiaFTPv0jvJpOiSeXZLSR6Uco7ZAgWNI4f7/D6+u6lpeLTpb9XlxRL3VEN39M6"
"vJQ2ghLVlGZT1Q3u1Y+nPq0jAoO4yaa1lpaPnZo49odHEPEOwpKv4LZYAXBlsIfAEWE8Z"
"II535SxcNgCIyyooyQEN1wXncw63oN94XLUb0t4UyyTGZ5jTVzWdt0BcWBxt6qfEOUdSf"
"y4IqePUp82bRrjjgye+zhC+8LDb7RiT4KEYd9CwcDNmsoFLA/txbE12VxxzVEanKuzENp"
"+5d8so+TECwVuc/V6T5/+QGtb28NnRDG/NGexcmUOjLBKQrgve8NNW1/47mq7iBWNlAZ3"
"0dYLtqBjqztSvxaAibtVueA1YvtS5YQ5PJqcuUoijTVCNHXMOiMArRs3rjG0uvT2uuR+k"
"VN95ik6kK7DtrOUUwjCqtxTyO/UQeA8UhLi4KG98BDmVlDRPqqUHMS+A90HeJ08hDqLbe"
"eeQdwB5PydA5212tlFiAC0aD50L7qNtBgYiuh6rkhzEXuqySP3FEajehjl0jfQEfxsmEq"
"OFhhhnWvrdWI9ak1HbElzCD0EiDEGxZKxvQTYsJbi1SUqAU99RyHQWASEpxBxOQfzeW1G"
"0mJ+ZCpywiF0+HFYD+HlFyGsBLvACMs/eM7JrAlFQPKIVsR7smlYvK0XpeSEjLZtJLNPt"
"L786lkhsHgQGOamga6u1+/HDZgX84OCJZuGBThhb6NH5nFPoVZFGafACGtLckzYVTzv9a"
"SVcpOgbL88EKOifTYarxuefkCIl5RNSiWPQmBBCJDqQoOxj80yT99bqo0lHWHBO8DQJoD"
"WqquEvLx7VEXKTCCERZ6AdMlD6Lgza6lyJQJGpQMOYHkW9dXS9COdS96xj8DbUsO2XrR/"
"NRQCNUQAXJDkjjNcbzvp3MaQ8brjbb+gxaybmvrjpQIPMEYFIxDCGir1HHvu4D+1gfXXU"
"idlrL0C2XcFc5r/KxSAhRpYhh5+aW37b0/ShrVv6zW/OOoThUBQCHjM2EnB0BjyXXdQAn"
"nbL09bMjOT6xJzrsxTGMjkNo6Ig085WXgI8928rRcytYVgMj1SWy9iUuN5kqoebb1kmr2"
"S5eJDYGxMzIk0LCrmh0HXnWxaFhGWixSdMHSGjSTk+Pi+ivihoi/RAc8fHSnKIdS0ontm"
"AxIdI1ivImjx/C0keQ2xEIWl6XqYewjr1dZLktkrMS5CBCj7gaZlGM0HUC75JJaFdOXJR"
"lgkohuydLApW0X/+Mnb9LqcEQhh7R4XOYS2U+ixwsTyLJCWPuVMZEHboq1XseAgBita17"
"ZeC5JNbaQQqACBHTtE45T29uRR8NQxal8PyhIOsAr2V8uvEIvC/H6NOEayIhkDIazb+sc"
"phAFVGgpXy+hUFQAxV3gIzdmQFeY1sPy7k/hcPSoEFh8Cg4NprLLAUWwA16DJi/nJeA2S"
"3keeQhRxWkEoV1p9tGrCEmAJRve8wmryEMo44BtEWy8Er3nakRWrbj5AMu5I1a+tl4yYK"
"JkWPwIgAiguKXEdM28PhToQOcg2iCd4HTem9U1OptpJvpGRktxlCFs1YYkcQk0jDyE4tJ"
"eEQriodJCROqobumbqkb0r2Vsb0tarjN9FbaoQKAMBURQPaf1byfBOznB8mU55mQazqZi"
"Ep7VnszlefbSUr1+WjFUTlp9DKDyEdodwX8rI8ZRDCA1L03YSQo1o61XWL6M2VggsEIGz"
"nkLT3gvCKqJkMhQtOSronjeFUk6hEYbqUHFOYdWE5ae25Iu/3ogqmCEXJVHJtHaeoA1/C"
"WEglKc7aFEa0hMvkECNauvVcDCUABcdAn77LMPo2oegaBTzwzqRLFtyDeIEbpYxGbusUt"
"GqJixek4kk0dzVJm9nIF8fQvrlgJZOy1VUsOMaVqWAqe8pBGRDwDdgL0FvS5zme8hWiyE"
"bYZ2VCcFFV9E/PtHS64WOqglrhzbCgXGdWe6uJG1moQev33YIGEXagutaJ1qaug/QcZWH"
"sH7oqyPVA4EkL42s66EfURcdYYyvx3EXfgxfJgRDck8h7N/cw7nwPYCOy9n4wm3J8o+MI"
"O4WRNmWy10qJYb3Ltyu8f+Lon3oyj7R3/3JIyRPKiUy3Rsvm5JAIRAEAiKAVNPi/zM5id"
"pOmkb9oKVSHsAN6FNIaXv6yomJd8Z9Aitn9lURlp9D+OM9d3ag2M26ktuyqn2WI/xCt4X"
"T0qWifYYe2wljpJNOU2lZ6YySC52O2k4hMAcCohv0smX3b3U9/WfxOFe4eHzkHBs36i1e"
"fRRaTZeumwkhBFSeMkZV5LKxVGXUYeFOZOMspRxCHL0sAcqQteJNqbCFgZAG17P30E66f"
"7ef/5oV71B9USEgGQKkrZyr3BB5mMcWyRdehPZ6JBQ7OTnZNkMQDg2VB2RVhHV6vI1/37"
"Yn4CHUQw4s7zi8jISFlBxNC1ttpZQc0YSyPKjU1goBuREYGkpy84zr9n0jO63tikTQHVT"
"jncJlEVyUd2LW0b6+f4eDANVSEKlfjnBVEda6/t38YNBf4CHkTMUBK0eAOmzLq4zmc1g7"
"O8V9dDzfs1mHY6tDKATqhgA1KCEta/ny9AzyZT+DQgR07LIIoZbCkhnGQF9lqIKHuGCi6"
"09Z8lVFWFvQsYMO7Lqz10gX9XEOedeCmxcWrCNNiTX76e1UFSVaz+1WvVIIyIjAKLdbLe"
"189MHJSe3ppiZYbxkTlbIaLC6ISscqTDNY9Cckyhjv+lOeUBUT1vkeQjgbV5KHUNKBon2"
"YpseOXLf8rhNCxsGyWF3SeSmxFAKvQIC0GOrwTc+mlfhoNku9qzzyGDb6AnVRi87IZNwZ"
"j7WOkeCVNKOomLD8xMXRXbctRVP6XrsIDpDPyMcVYjL06Zr1KwJJeQgJBTUuZgREt/Cku"
"XTpQ097WvhvW1pCFJdFmlcjb9RuLIYlKjOe7Oy8dy8pPEh/LptEKyYsrKv48GyrE4vSJQ"
"6FfsjYh5BIFMVPUVHwRRJ406Zk5XMWU1aPCoFFgIBYGnZ1feMfz5z2Hm9ttUjLQqJHQwa"
"I0jNyOfAVi39RSJA06hqH5ecQFo2TG8Ix3cKKkDNWQ+CY/6DUzRUeQlez9Cbe1mv+TdUn"
"CoGLBwFaEg4Pp7jVvWC/9X2nT3tPEWlBs4G/vO6jCC0P5WXMxzs7H/6h0K7GKooRq1jb8"
"D1tsPuvojrpGKTeSRbSgOZHkA3F+WdN3T5AQvpy02s1FAIXMwIDAyMoRZA0V6782Gw01v"
"o7MMJvbWuzQpgzGeHrtTy0kdsYmpx0Mq7W9nGBd+XNiysmLD+HEE0nSjmEMkaOe4j7IA+"
"hdqSn98b9BFZKUyk54qRRj5cCAoyN2URaLS0PngpHOm6YmmSj7e0h3whfkZZTBm60f5OC"
"tnUj/Gfd3V85QLIwNlLxcXlPszIE4JuSSof1Z8lgxuAhpJUXwp0k068gLPc5wq7KAAAJP"
"0lEQVQQQhVF0b4BXrSPBFdDIXApIeCTFmNfPo3r9MZfT7z3gaYm69ZCwdYKBY+0LeKBoC"
"8MG5ea2dxsaVOT+l2d3d/4Ji1RQVZV2dEq0rD8HMLv7flghydxDiGUXrT1gobliZIy5O7"
"FD1MvVfhSuibUXCVHgEjLD3fo6h7+UGY6cmuxaJxpaaGQb3jLeKwWKykhVU2Grq8CanKZ"
"4bAJsmJ/3dk9fDcpOalU+V7BCyWpiLD8PoRG0e7EDttlzSHEPUO3wec6M3fwiScvnL76X"
"yFw6SAgwh3SOpFHd/djD5nWqqumJqOPGEZIa242LMPgvURJA6K/cm/scMTzAFXW1mqFig"
"XjdC4ff3dn98inhZGdPIRl7/MVP05FhHV6jWiC6OqzG8IRPewg8xl7DlqlfIWwZb7BU3I"
"KeSyXdUsU7Rsrcw9qc4XARYYAFfujKT37bL+1ZMlnD3d2P/anurf8bVOZ2IjjmPmWFstE"
"pQeTl9RiHpZ1rAiiIQIju5OD1/yZvwaxlUjKgeNNb242LROd1U9PGt8MRTqu7ep65NueJ"
"zyVQZAVjsnXrvRc1ljXL5KHmaNfbsU0LZ+jgHvEZso0YGNDUoLh2OxEyAhxg7uocDgmk5"
"RKFoVA3REQ5DFeJDIZGhrxlnR+4ecQ4ucnTvzVhqnMSURY5n4fq8SrEgkdbVk93p6L7NT"
"UrwHaEjdgGwiYQOl4VEEBmyFoPJt1Tk9Oed/X9cR9XV3/9mOaVMnATmQX2KjI6J7UkiCo"
"Meh39tVeZZwX2ATm3VHJfuUU3SMnt90woWlfKbuUxbz7Vh8oBC4CBHxvHRELrmcs6e6hl"
"cjfed7wP7z88k9el53OvtnVnGs0d/pKpptxGOxb0Xs0ojE9j1XlJJStSbx+HtmBT7le/K"
"fLux85RLCA1KC8pKB8VWdgnwvispdxtB711bvvb3/Pj6JN+uaZabIUScdcdjxhmrlp87F"
"3bfyP93O5CYEA1tFzAaneUwgsdgTOLd9eGXZAicunTg015fPjoUiku9jWdtO0aN56btZ0"
"jSGh2SBb2bl3g31VgYaVBsmlvf/85btbocSsPZtDWK6JLth5vGJvpLnSKtXQ47vpwyEAq"
"dUQyFcIoN5QCCwyBM5pXHTtpPTx8X16f/84NC+yXXEPIq9hJab1Vf5EJCe2+z1sk8ZlVz"
"uyogOWTVh+H8L26Iq2rHOsg3IIz2ovYiYyPMLg7hqFnK41Wav+lwTaJINUSgaFwCJAQKy"
"guJbFAzzp+hZOwyE8+2OQ6wQlksN24/4HNX0um7D8HEJPN3ot3YwWi3nMBlOUaeBugHQA"
"w7PDe9f2vf0pEm1LUtjdZBJTyaIQWAwICALjlzhIyh9p/0Vdn8smLF+6kBHpMJCWVCjmX"
"BCWXB5Cz3PDkZBhzyaeWMY2T4+OauZmlq7ZutrHRD0rBBQCtUWgbKLxi25Z+tKjszOOMB"
"QJfbG2ki5077TWZq41kzGdFmP1o/S148nUeXeGhe5IbacQUAjIhkDZhDVSmkHRMV6CIjO"
"rIw4DS1xpCAFNGp14IqRZRvvwtWs+vo2EG2DVpwTI9sMpeRQClyICZROWXw99zdobjxl6"
"9CiVlgFlSUJY0K5018plQsXWWN8/0w/qacM0R0nkuxRPMTVnhUBwCJRNWBTan0ZgWBe7O"
"su08C+sEMxgiG8ITqQq9gTtKtEc1cJm5+euXfGRbWnYri6MFali7+qrCgGFQIMRKJuwSN"
"5NWpp/L2QmvuM6VKeZ0vYaO6DnFcMxZs1mYjs39d0yRNIMJkXeVGMlU0dXCCgEgkKgIsL"
"aMqTxBMr2xBu/U8yF9oTCjW7YyBxmuFYxH3JaQ2s+wNh1M6OjaWhXirCCOlHUfhQCMiBQ"
"sWZEtXUoBH909x0fNaMn78lM5m2EY1UcJlEpGLCpI6zCYbF4DHnlnR/a1HfP10aRG7UZ9"
"X8q3af6nkJAISAnAhVpWDSVLVsoEFPTkn3/+sVsRn8uGufm9/o2bGTQrEBWTYkoc3Mtny"
"KyIvvaZq2yAvc0HzUUAgoBeRGoWMOiKfla1tiuO6/PORM/9pitoTIWhfPzbh01nTZKRWi"
"6ZzYlIloh2/T3N13xwKdg+kd8BR6oJoYaCgGFwEWHQMUaFiFBS8I0lobJ9ff+xNBid8Tj"
"1JADVQuh9tQKKTAskVEhFNFRK8zSijPNd3KywpvpoTS4SpFVrbBX+1UINBqBqjWh0YcPU"
"PtU/UOpbc/ccls/iyW0ZCFvgwihbgVY1E/kM2koIaaxRLNl5mbZcUtvTd24/v6vp9Oank"
"ymWTqtjOyNPqHU8RUCtUSgqiWhLxh1zPE1mx/sfP/HzXDuM47jaMWCR00bqaVQxcchjQo"
"kBY3NY+EoGgYxQ4NncqQ10v2RN/V8emLY0wyUSKQSGGoZ6P8g6lkhcJEiUNWS0MeEyAra"
"DS9u/84Nj97tOi0pp2ieTLRQ00ZSwBiKqJI3b8GkQuTjYGubbFKRGModJiKGnY/+ys23D"
"rxzw2MDRFb3oS71AK/Vs+D9+iKrZ4WAQmARIlCx5jPXXKluzsiIpg8MaM5PD6WXZ7N7P6"
"1ps7dE4kzLzdqabXsuLEzQhnQiJAo3ReofbOTEarxzNNETvfTMUNjQQmgTVMjhAzv8TNh"
"c8pV39L7vUcRYkSeSDXspHTmCNbOVzTU/9Z5CQCHQWAQCJSx/KufHQY3uuuPtBW/6LzXX"
"eVck7rUyHZ5EuBJtFP5zqag9vmSAvwx029ARfwpzmDabBUm57KCuh34YMVq/dX3vPU9Ci"
"+PkxPdNYQtqCejDrZ4VApcMAjUhLEKPurxS40TftrT18JcvyxReekfePvF218326iy8xP"
"PsDjQNhL4VOm17uRNho3lCN5qeDZmRZxLRjVuvXDYw7f8SFEJBsV/KsO4jop4VAgqBwBE"
"goknzLhq/uWvP2xPePvlA+86XvgjiOhz9zU/Ff8PDmjHqpU0y6s/1uXpPIaAQUAjUBAHS"
"uIi8YLOa19BP4Qnk9RPbpefdriYCqp0qBBQC0iPQEM2FDO3kMBzShvjxB7VBmLJggVd2K"
"elPGCWgQkAhoBBQCCgEFAIKAYWAQkAhoBBQCCgEFAIKAYWAQkAhoBBQCCgEFAIKAYWAQk"
"AhoBBQCCgEFAIKAYWAQkAhoBBQCCgEFAIKAYWAQkAhoBCoOQL/D8pAub21P/M1AAAAAEl"
"FTkSuQmCC")
|
|
#!/usr/bin/env python
# encoding: utf-8
import json
import os
import collections
import datetime
import mmap
import re
import requests
import shlex
import sys
import time
try:
import requests
except ImportError as e:
print("Error! requests module could not be imported. Perhaps install it with\n\n pip install requests")
exit()
try:
import requests_toolbelt
except ImportError as e:
print("Error! requests_toolbelt module could not be imported. Perhaps install it with\n\n pip install requests-toolbelt")
exit()
from requests_toolbelt.multipart.encoder import MultipartEncoder, MultipartEncoderMonitor
from talus_client.models import *
import talus_client.models
class TalusClient(object):
    """An api client that will communicate with Talus"""

    def __init__(self, api_base="http://localhost:8001", user=None):
        """Create a client that talks to the Talus API server.

        :api_base: Base URL of the Talus API (e.g. ``http://localhost:8001``)
        :user: Optional username; when set it is appended to the ``tags`` of
            every model created through this client (see ``_prep_model``)
        """
        object.__init__(self)
        self._api_base = api_base
        self._user = user
        # TODO this was annoying, revisit this
        talus_client.models.API_BASE = api_base

    def model_iter(self, cls, **search):
        """Yield every ``cls`` model matching the ``search`` filters."""
        for item in cls.objects(api_base=self._api_base, **search):
            yield item

    # -------------------------
    # fileset handling
    # -------------------------

    def fileset_iter(self, **search):
        """Return an iterator over all existing FileSet models in Talus

        :returns: iterator
        """
        for fileset_ in FileSet.objects(api_base=self._api_base, **search):
            yield fileset_

    def fileset_find(self, name_or_id, **search):
        """Find a single FileSet by name or id (None if not found)."""
        return self._name_or_id(FileSet, name_or_id, **search)

    def fileset_create(self, name, files):
        """Create a new fileset named ``name`` containing files ``files``

        :returns: the saved FileSet model
        """
        fileset = FileSet(api_base=self._api_base)
        self._prep_model(fileset)
        fileset.name = name
        fileset.files = files
        fileset.job = None
        now = time.time()
        fileset.timestamps = {"created": now, "modified": now}
        fileset.save()
        return fileset

    def fileset_delete(self, fileset_id, all_files=False):
        """Delete a fileset by ``fileset_id`` which may be the id or name

        :fileset_id: The name or id of the fileset to delete
        :all_files: When True, also delete every corpus file the fileset references
        """
        fileset = self._name_or_id(FileSet, fileset_id)
        if fileset is None:
            raise errors.TalusApiError("Could not locate FileSet with name/id {!r}".format(fileset_id))
        if all_files:
            for file_id in fileset.files:
                self.corpus_delete(file_id)
        fileset.delete()

    # -------------------------
    # corpus handling
    # -------------------------

    def corpus_list(self, **filters):
        """List all files in the corpus, using the ``filters`` to search/filter
        the results
        """
        try:
            res = requests.get(self._api_base + "/api/corpus/", params=filters)
        except requests.ConnectionError as e:
            raise errors.TalusApiError("Could not connect to {}".format(self._api_base + "/api/corpus/"))
        # any non-2xx status is treated as failure
        if res.status_code // 100 != 2:
            raise errors.TalusApiError("Could not list corpus files", error=res.text)
        return res.json()

    def corpus_upload(self, file_path, **extra_attrs):
        """Upload the file found at ``file_path`` into the talus corpus, adding extra_attrs
        to the file as well (TODO).

        :param str file_path:
        :returns: The id of the uploaded file
        """
        if not os.path.exists(file_path):
            raise errors.TalusApiError("Could not locate new corpus file {!r} on disk".format(file_path))
        file_id = self._upload_file(file_path, api_endpoint="corpus", **extra_attrs)
        return file_id

    def corpus_get(self, file_id):
        """Fetch the file with id ``file_id`` from the corpus

        :returns: a ``(filename, contents)`` tuple
        """
        try:
            res = requests.get(self._api_base + "/api/corpus/{}".format(file_id))
        except requests.ConnectionError as e:
            raise errors.TalusApiError("Could not connect to {}".format(self._api_base + "/api/corpus/{}".format(file_id)))
        if res.status_code // 100 != 2:
            raise errors.TalusApiError("Could not fetch corpus file with id {}".format(file_id), error=res.text)
        # pull the original filename out of the Content-Disposition header
        filename = res.headers["content-disposition"].split("attachment;")[1].strip().split("filename=")[1].strip()
        return filename, res.text

    def corpus_delete(self, file_id):
        """Delete the file with id ``file_id`` from the corpus
        """
        try:
            res = requests.delete(self._api_base + "/api/corpus/{}".format(file_id))
        except requests.ConnectionError as e:
            raise errors.TalusApiError("Could not connect to {}".format(self._api_base + "/api/corpus/{}".format(file_id)))
        if res.status_code // 100 != 2:
            raise errors.TalusApiError("Could not delete corpus file with id {}".format(file_id), error=res.text)
        return res.json()

    # -------------------------
    # VM image handling
    # -------------------------

    def image_iter(self, **search):
        """Return an iterator that iterates over all existing images in Talus

        :returns: iterator over all existing images
        """
        for image in Image.objects(api_base=self._api_base, **search):
            yield image

    def image_import(self, image_path, image_name, os_id, desc="desc", tags=None, username="user", password="password", file_id=None):
        """Import a VM image into Talus.

        :image_path: The path to the image to be uploaded
        :image_name: The name of the resulting image
        :os_id: The id or name of the operating system document (string)
        :desc: A description of the image
        :tags: An array of tags associated with this VM image (e.g. ["browser", "ie", "ie10", "windows"])
        :username: The username to be used in the image
        :password: The password associated with the username
        :file_id: The id of the file that has already been uploaded to the server
        :returns: The configured image
        """
        # NOTE: named ``os_model`` (not ``os``) so the os module isn't shadowed
        os_model = self._name_or_id(OS, os_id)
        if os_model is None:
            raise errors.TalusApiError("Could not locate OS by id/name {!r}".format(os_id))
        uploaded_file = file_id
        if uploaded_file is None:
            print("uploading file {!r}".format(image_path))
            image_path = self._clean_path(image_path)
            uploaded_file = self._upload_file(image_path)
            print("uploaded file id: {}".format(uploaded_file))
        if tags is None:
            tags = []
        image = Image(api_base=self._api_base)
        self._prep_model(image)
        image.name = image_name
        image.os = os_model.id
        image.desc = desc
        image.tags = tags
        # the "import" status tells the master to pick up the uploaded tmpfile
        image.status = {"name": "import", "tmpfile": uploaded_file}
        image.username = username
        image.password = password
        image.timestamps = {"created": time.time()}
        image.md5 = "blahblah"
        image.save()
        return image

    def image_configure(self, image_id_or_name, vagrantfile=None, user_interaction=False, kvm=False):
        """Configure the image with id ``image_id``. An instance of the image will
        be spun up which you can then configure. Shutting down the image will commit
        any changes.

        :image_id_or_name: The id or name of the image that is to be configured
        :vagrantfile: The contents of a vagrantfile that is to be used to configure the image
        :user_interaction: If the user should be given a chance to manually interact
        :returns: The configured image
        """
        image = self._name_or_id(Image, image_id_or_name)
        if image is None:
            raise errors.TalusApiError("image with id or name {!r} not found".format(image_id_or_name))
        #if image.status["name"] != "ready":
            #raise errors.TalusApiError("Image is not in ready state, cannot configure (state is {})".format(
                #image.status["name"]
            #))
        image.status = {
            "name": "configure",
            "kvm": kvm,
            "vagrantfile": vagrantfile,
            "user_interaction": user_interaction
        }
        image.save()
        return image

    def image_create(self, image_name, base_image_id_or_name, os_id, desc="", tags=None, vagrantfile=None, user_interaction=False):
        """Create a new VM image based on an existing image.

        :image_name: The name of the new VM image (to be created)
        :base_image_id_or_name: The id or name of the base image
        :os_id: The id of the operating system
        :desc: A description of the new image
        :tags: A list of tags associated with the new image
        :vagrantfile: The Vagrantfile to run when creating the new image
        :user_interaction: Allow user interaction to occur (vs automatically shutting down the VM after the vagrantfile is run)
        :returns: The created image
        """
        base_image = self._name_or_id(Image, base_image_id_or_name)
        if base_image is None:
            print("Base image with id or name {!r} not found".format(base_image_id_or_name))
            return
        base_image_id = base_image.id
        # essentially use the base_image as the base for the new image
        base_image.clear_id()
        image = base_image
        self._prep_model(image)
        # required
        image.name = image_name
        image.base_image = base_image_id
        if os_id is not None:
            # NOTE: named ``os_model`` (not ``os``) so the os module isn't shadowed
            os_model = self.os_find(os_id)
            if os_model is None:
                raise errors.TalusApiError("No os found by name/id '{}'".format(os_id))
            image.os = os_model.id
        if desc is not None:
            image.desc = desc
        if tags is not None:
            image.tags = tags
        image.status = {
            "name": "create",
            "vagrantfile": vagrantfile,
            "user_interaction": user_interaction
        }
        image.save()
        return image

    def image_delete(self, image_id_or_name):
        """Delete the image with id ``image_id`` or name ``name``

        :image_id: The id of the image to delete
        :returns: The image marked for deletion, or None if it was not found
        """
        # use the shared lookup helper (id first, then name) for consistency
        image = self._name_or_id(Image, image_id_or_name)
        if image is None:
            print("image with id or name {!r} not found".format(image_id_or_name))
            return
        image.status = {
            "name": "delete"
        }
        image.save()
        return image

    # -------------------------
    # VM os handling
    # -------------------------

    def os_iter(self, **search):
        """Return an iterator that iterates over all existing OS models in Talus

        :returns: iterator
        """
        for os_ in OS.objects(api_base=self._api_base, **search):
            yield os_

    def os_find(self, name_or_id, **search):
        """Find a single OS by name or id (None if not found)."""
        return self._name_or_id(OS, name_or_id, **search)

    def os_delete(self, os_id):
        """Delete an os by ``os_id`` which may be the id or name

        :os_id: The name or id of the os to delete
        """
        os_ = self._name_or_id(OS, os_id)
        if os_ is None:
            raise errors.TalusApiError("Could not locate os with name/id {!r}".format(os_id))
        # refuse to delete an OS that any image still references
        if len(Image.objects(api_base=self._api_base, os=os_.id)) > 0:
            raise errors.TalusApiError("Could not delete OS, one or more images reference it")
        os_.delete()

    # -------------------------
    # code handling
    # -------------------------

    def code_iter(self, type_=None, **search):
        """Return an iterator that iterates over all existing Code models in Talus

        :type_: optional code type to filter on (e.g. "tool")
        :returns: iterator
        """
        filter_ = search
        if type_ is not None:
            filter_["type"] = type_
        for code in Code.objects(api_base=self._api_base, **filter_):
            yield code

    def code_find(self, name_or_id, **search):
        """Find a single Code model by name or id (None if not found)."""
        return self._name_or_id(Code, name_or_id, **search)

    def code_create(self, code_name, code_type, tags=None):
        """Create the code, and return the results"""
        data = {
            "name": code_name,
            "type": code_type,
        }
        # tag the new code with the current user, like _prep_model does
        if self._user is not None:
            if tags is None:
                tags = []
            if self._user not in tags:
                tags.append(self._user)
        if tags is not None:
            data["tags"] = json.dumps(tags)
        e = MultipartEncoder(fields=data)
        try:
            res = requests.post(self._api_base + "/api/code/create/",
                data = e,
                headers = {"Content-Type": e.content_type}
            )
        except requests.ConnectionError as e:
            raise errors.TalusApiError("Could not connect to {}".format(self._api_base + "/api/code/create"))
        if res.status_code // 100 != 2:
            raise errors.TalusApiError("Could not create code!", error=res.text)
        return json.loads(res.text)

    # -------------------------
    # task handling
    # -------------------------

    def task_find(self, name_or_id, **search):
        """Find the task
        """
        return self._name_or_id(Task, name_or_id, **search)

    def task_iter(self, **search):
        """Return an iterator that iterates over all existing Task models in Talus

        :returns: iterator
        """
        for task in Task.objects(api_base=self._api_base, **search):
            yield task

    def task_create(self, name, tool_id, params, version=None, limit=1, vm_max="30m"):
        """Create a new task with the supplied arguments

        :name: The name of the task
        :tool_id: The id or name of the tool the task will run
        :params: A dict of params for the task
        :version: The version of code to use. None defaults to the HEAD version (default: None)
        :limit: The default limit of any jobs that use this task
        :vm_max: Maximum VM runtime, either seconds or a duration string like "30m"
        :returns: The task model
        """
        tool = self._name_or_id(Code, tool_id, type="tool")
        if tool is None:
            raise errors.TalusApiError("Could not locate Tool by id/name {!r}".format(tool_id))
        if not isinstance(params, dict):
            raise errors.TalusApiError("params must be a dict!")
        task = Task(api_base=self._api_base)
        self._prep_model(task)
        task.name = name
        task.tool = tool.id
        task.version = version
        task.params = params
        task.limit = limit
        task.vm_max = self._total_seconds_from_string(vm_max)
        task.save()
        # return the saved model, as the docstring promises (was missing)
        return task

    def task_delete(self, task_id):
        """Delete a task by ``task_id`` which may be the id or name

        :task_id: The name or id of the task to delete
        """
        task = self._name_or_id(Task, task_id)
        if task is None:
            raise errors.TalusApiError("Could not locate task with name/id {!r}".format(task_id))
        task.delete()

    # -------------------------
    # result handling
    # -------------------------

    def result_iter(self, **search):
        """Iterate through result matching the search criteria

        :search: optional search parameters
        """
        for result in Result.objects(api_base=self._api_base, **search):
            yield result

    # -------------------------
    # slave handling
    # -------------------------

    def slave_iter(self, **search):
        """Iterate through all of the slaves

        :search: optional search parameters
        """
        for slave in Slave.objects(api_base=self._api_base, **search):
            yield slave

    # -------------------------
    # master handling
    # -------------------------

    def master_get(self):
        """Return the (single) Master model, raising if none exists."""
        res = Master.objects(api_base=self._api_base)
        if len(res) == 0:
            raise errors.TalusApiError("No master model has been created in the DB! Is it not running?")
        return res[0]

    # -------------------------
    # job handling
    # -------------------------

    def job_find(self, name_or_id, **search):
        """Find a single Job by name or id (None if not found)."""
        return self._name_or_id(Job, name_or_id, **search)

    def job_iter(self, **search):
        """Iterate through all of the jobs

        :search: optional search parameters
        """
        for job in Job.objects(api_base=self._api_base, **search):
            yield job

    def job_create(self, task_name_or_id, image=None, name=None, params=None, priority=50, queue="jobs", limit=1, vm_max=None, network="whitelist", debug=False, tags=None):
        """Create a new job (run a task)

        :task_name_or_id: The name or id of the task to run
        :image: Optional image name/id override (defaults to the task's image)
        :name: Optional job name (defaults to "<task name> <timestamp>")
        :params: Optional params that UPDATE (not replace) the task's defaults
        :vm_max: Optional max VM runtime override (seconds or "1h2m3s" string)
        :returns: The created Job model
        """
        task = self._name_or_id(Task, task_name_or_id)
        if task is None:
            raise errors.TalusApiError("could not locate task with id/name {!r}".format(task_name_or_id))
        if task.image is None and image is None:
            raise errors.TalusApiError("No image was defined in the task, and no image was specified. Give me mah image!")
        # an explicit ``image`` argument wins over the task's default image
        if task.image is not None and image is None:
            image_obj = self._name_or_id(Image, task.image)
        else:
            image_obj = self._name_or_id(Image, image)
        if image_obj is None:
            raise errors.TalusApiError("could not locate image with id/name {!r}".format(image))
        image = image_obj
        if image.status["name"] != "ready":
            raise errors.TalusApiError("image '{}' ({}) is not in ready state (state is {})".format(
                image.name,
                image.id,
                image.status["name"]
            ))
        if name is None:
            name = task.name + " " + str(datetime.datetime.now())
        if limit is None:
            limit = task.limit
        # any params set will UPDATE the default params, not override them
        base_params = task.params
        if params is not None:
            base_params = self._dict_nested_updated(base_params, params)
        # inherit from the task if not specified
        if vm_max is None:
            vm_max = task.vm_max
        else:
            vm_max = self._total_seconds_from_string(vm_max)
        job = Job(api_base=self._api_base)
        self._prep_model(job)
        job.name = name
        job.image = image.id
        job.params = base_params
        job.task = task.id
        job.status = {"name": "run"}
        job.timestamps = {"created": time.time()}
        job.priority = priority
        job.queue = queue
        job.limit = limit
        job.vm_max = vm_max
        job.network = network
        job.debug = debug
        if tags is not None and isinstance(tags, list):
            job.tags += tags
        job.save()
        return job

    def job_cancel(self, job_name_or_id, job=None):
        """Cancel the job ``job_name_or_id`` in talus

        :job_name_or_id: The job name or id to cancel
        :job: Optionally, an already-fetched Job model (skips the lookup)
        """
        if job is None:
            job = self._name_or_id(Job, job_name_or_id)
        if job is None:
            raise errors.TalusApiError("could not locate job with name or id {!r}".format(job_name_or_id))
        job.status = {"name": "cancel"}
        job.save()
        return job

    # -------------------------
    # utility
    # -------------------------

    def _prep_model(self, model):
        """Tag ``model`` with the current user (if any) before saving."""
        if hasattr(model, "tags") and self._user is not None and self._user not in model.tags:
            model.tags.append(self._user)

    def _total_seconds_from_string(self, val):
        """Convert a duration to seconds.

        Accepts an int/float number of seconds, a string built from
        ``<N>h``/``<N>m``/``<N>s`` components (e.g. ``"1h30m"``), or a plain
        numeric string (e.g. ``"90"``).
        """
        # already numeric - nothing to parse
        if isinstance(val, (int, float)):
            return int(val)
        match = re.match(r'(\d+h)?(\d+m)?(\d+s)?', val)
        unit_mult = {
            "h": 60 * 60,
            "m": 60,
            "s": 1
        }
        # NOTE: every component of the pattern is optional, so ``match`` always
        # succeeds (possibly matching nothing). Require at least one component to
        # have actually matched; otherwise treat ``val`` as a plain number of
        # seconds. (The original silently returned 0 for inputs like "90".)
        if match is not None and any(match.groups()):
            total_seconds = 0
            for item in match.groups():
                if item is None:
                    continue
                amount = int(item[:-1])
                unit = item[-1]
                total_seconds += amount * unit_mult[unit]
        else:
            total_seconds = int(val)
        return total_seconds

    def _verify(self, prompt):
        """Ask the user a yes/no question, reprompting until a valid answer.

        :returns: True if the user answered yes
        """
        while True:
            # input() - raw_input was the Python 2 spelling
            answer = input(prompt)
            # guard against empty input before indexing the first character
            if not answer or answer.lower()[0] not in ["y", "n"]:
                print("incorrect answer, y/n only. please.")
                continue
            break
        # compare the normalized first character so "Y"/"yes" count as yes
        # (the original compared the raw string to "y", rejecting "Y"/"yes")
        return answer.lower()[0] == "y"

    def _dict_nested_updated(self, base, new):
        """Update a nested dictionary

        :base: the base dict
        :new: the new values for the dict
        :returns: the updated dict
        """
        # .items() / collections.abc.Mapping - the Py2 spellings
        # (iteritems, collections.Mapping) crash on Python 3
        for k, v in new.items():
            if isinstance(v, collections.abc.Mapping):
                base[k] = self._dict_nested_updated(base.get(k, {}), v)
            else:
                base[k] = v
        return base

    def _name_or_id(self, cls, name_or_id, **extra):
        """Find model by name or id

        :name_or_id: The name or id of the model
        :extra: Any additional search/filter arguments
        :returns: The first model if found, else None
        """
        # try the id first, then fall back to the name
        res = cls.find_one(api_base=self._api_base, id=name_or_id, **extra)
        if res is None:
            res = cls.find_one(api_base=self._api_base, name=name_or_id, **extra)
        return res

    def _upload_file(self, path, api_endpoint="upload", **extra_params):
        """Upload the file found at ``path`` to talus, returning an id

        :path: The (local) path to the file
        :api_endpoint: The api endpoint to POST the file to
        :returns: An id for the remote file
        """
        if not os.path.exists(path):
            raise errors.TalusApiError("Cannot upload image, path {!r} does not exist".format(path))
        total_size = os.path.getsize(path)
        self.last_update = ""

        def print_progress(monitor):
            # erase the previously printed percentage before writing the new one
            sys.stdout.write("\b" * len(self.last_update))
            percent = float(monitor.bytes_read) / monitor.len
            update = "{:0.2f}%".format(percent * 100)
            # right-pad to a fixed width of 7 so successive updates line up
            # (the original assigned this to an unused local, losing the padding)
            if len(update) < 7:
                update = " " * (7 - len(update)) + update
            if len(update) < len(self.last_update):
                update += " " * (len(self.last_update) - len(update))
            sys.stdout.write(update)
            sys.stdout.flush()
            self.last_update = update

        data = {
            "file": (os.path.basename(path), open(path, "rb"), "application/octet-stream")
        }
        data.update(extra_params)
        e = MultipartEncoder(fields=data)
        m = MultipartEncoderMonitor(e, print_progress)
        try:
            res = requests.post(
                self._api_base + "/api/{}/".format(api_endpoint),
                data=m,
                headers={"Content-Type":e.content_type},
                timeout=(60*60) # super long timeout for uploading massive files!
            )
        except requests.ConnectionError as e:
            raise errors.TalusApiError("Could not connect to {}".format(self._api_base + "/api/{}/".format(api_endpoint)))
        # clear out the last of the progress percent that was printed
        print("\b" * len(self.last_update))
        if res.status_code // 100 != 2:
            raise errors.TalusApiError("Could not upload file!", error=res.text)
        # strip surrounding quotes if the server returned a JSON-quoted id
        if res.text[0] in ["'", '"']:
            return res.text[1:-1]
        return res.text

    def _api(self, path):
        """Join the api base with path"""
        return self._api_base + "/" + path

    def _clean_path(self, path):
        """Expand ``~`` and resolve symlinks to get a canonical local path."""
        return os.path.realpath(os.path.expanduser(path))
|
|
def quickview(results, n = 25):
    """View the top n entries of an interrogation result.

    Ideally, pass it interrogator() or plotter output. It will also accept
    DataFrames or Series (i.e. .results or .totals branches), or, as a
    string, the filename of a saved interrogation or dictionary.

    :param results: data to preview (see above for accepted types)
    :param n: number of entries to show; 'all' shows up to 9999
    :raises ValueError: if a filename is given but not found
    """
    import pandas
    import numpy
    import os
    # handle dictionaries too:
    dictpath = 'data/dictionaries'
    savedpath = 'data/saved_interrogations'
    # too lazy to code this properly for every possible data type:
    if n == 'all':
        n = 9999
    if type(results) == str:
        if os.path.isfile(os.path.join(dictpath, results)):
            # a pickled Counter: print its most common entries and stop
            import pickle
            from collections import Counter
            unpickled = pickle.load(open(os.path.join(dictpath, results), 'rb'))
            print('\nTop %d entries in %s:\n' % (n, os.path.join(dictpath, results)))
            for index, (w, f) in enumerate(unpickled.most_common(n)):
                fildex = '% 3d' % index
                print('%s: %s (n=%d)' % (fildex, w, f))
            return
        elif os.path.isfile(os.path.join(savedpath, results)):
            # a saved interrogation: load it and fall through to the logic below
            from corpkit import load_result
            print('\n%s loaded temporarily from file:\n' % results)
            results = load_result(results)
        else:
            # bug fix: the filename was never interpolated into this message
            raise ValueError('File %s not found in data/saved_interrogations or data/dictionaries' % results)
    # work out which display option to use, based on what made the data
    if 'interrogation' in str(type(results)):
        clas = results.query['function']
        if clas == 'interrogator':
            datatype = results.query['datatype']
            if datatype == 'int64':
                option = 'total'
            else:
                option = '%'
            if results.query['query'] == 'keywords':
                option = 'keywords'
            elif results.query['query'] == 'ngrams':
                option = 'ngrams'
            try:
                results_branch = results.results
                resbranch = True
            except AttributeError:
                resbranch = False
                results_branch = results
        elif clas == 'editor':
            # currently, it's wrong if you edit keywords! oh well
            datatype = results.query['datatype']
            if results.query['just_totals']:
                resbranch = False
                if results.results.dtype == 'int64':
                    option = 'total'
                else:
                    option = '%'
                results_branch = results.results
            else:
                if datatype == 'int64':
                    option = 'total'
                else:
                    option = '%'
                try:
                    results_branch = results.results
                    resbranch = True
                except AttributeError:
                    resbranch = False
    if type(results) == pandas.core.frame.DataFrame:
        results_branch = results
        resbranch = True
        if type(results.iloc[0][0]) == numpy.int64:
            option = 'total'
        else:
            option = '%'
    elif type(results) == pandas.core.series.Series:
        resbranch = False
        results_branch = results
        if type(results.iloc[0]) == numpy.int64:
            option = 'total'
        else:
            option = '%'
        if results.name == 'keywords':
            option = 'series_keywords'
    # DataFrames iterate over column names, Series over index entries
    if resbranch:
        the_list = list(results_branch)[:n]
    else:
        the_list = list(results_branch.index)[:n]
    for index, w in enumerate(the_list):
        fildex = '% 3d' % index
        if option == 'keywords':
            print('%s: %s' % (fildex, w))
        elif option == '%' or option == 'ratio':
            if 'interrogation' in str(type(results)):
                tot = results.totals[w]
                totstr = "%.3f" % tot
                print('%s: %s (%s%%)' % (fildex, w, totstr))
            else:
                print('%s: %s' % (fildex, w))
        elif option == 'series_keywords':
            tot = results_branch[w]
            print('%s: %s (k=%d)' % (fildex, w, tot))
        else:
            if resbranch:
                tot = sum(i for i in list(results_branch[w]))
            else:
                tot = results_branch[w]
            print('%s: %s (n=%d)' % (fildex, w, tot))
def concprinter(df, kind = 'string', n = 100):
    """Print concordance lines nicely, as plain text, LaTeX or CSV.

    :param df: DataFrame of concordance lines with 'l', 'm', 'r' columns
    :param kind: output format, chosen by first letter: 's'tring,
                 'l'atex or 'c'sv
    :param n: how many lines to print: an int, 'all', or False for all
    :raises ValueError: if kind or n is not recognised
    """
    # only compare when n is a number: 'all'/False are handled below
    # (comparing a str with an int would also crash on Python 3)
    if type(n) == int and n > len(df):
        n = len(df)
    # bug fix: the original joined these three checks with 'and', which can
    # never all be true at once, so invalid kinds slipped through silently
    if not kind.startswith('l') and not kind.startswith('c') and not kind.startswith('s'):
        raise ValueError('kind argument must start with "l" (latex), "c" (csv) or "s" (string).')
    import pandas as pd
    if type(n) == int:
        # .iloc replaces the removed .ix: we always want the first n rows
        # by position, whatever the index labels are
        to_show = df.iloc[range(n)]
    elif n is False:
        to_show = df
    elif n == 'all':
        to_show = df
    else:
        raise ValueError('n argument "%s" not recognised.' % str(n))
    print('')
    if kind.startswith('s'):
        print(to_show.to_string(header = False, formatters={'r':'{{:<{}s}}'.format(to_show['r'].str.len().max()).format}))
    if kind.startswith('l'):
        print(to_show.to_latex(header = False, formatters={'r':'{{:<{}s}}'.format(to_show['r'].str.len().max()).format}).replace('llll', 'lrrl', 1))
    if kind.startswith('c'):
        # bug fix: DataFrame.to_csv has no formatters argument (TypeError)
        print(to_show.to_csv(sep = '\t', header = False))
    print('')
def save_result(interrogation, savename, savedir = 'data/saved_interrogations'):
    """Save an interrogation (or a plain table/series) as a pickle in savedir.

    :param interrogation: interrogator()/editor() output, or a DataFrame,
        Series, dict, Counter or nltk Text
    :param savename: filename to save under ('.p' is appended if missing)
    :param savedir: directory to save into (created if needed)
    :raises TypeError: if asked to save a plain string
    :raises ValueError: if the data shape is not recognised
    """
    import collections
    from collections import namedtuple, Counter
    import pickle
    import os
    import pandas
    from time import localtime, strftime
    import nltk
    if type(interrogation) == str or type(interrogation) == unicode:
        raise TypeError('First argument (i.e. the thing to save) cannot be a string.')
    if savename.endswith('.p'):
        savename = savename[:-2]
    def urlify(s):
        "Turn savename into a safe filename"
        import re
        s = s.lower()
        s = re.sub(r"[^\w\s-]", '', s)
        s = re.sub(r"\s+", '-', s)
        s = re.sub(r"-(textbf|emph|textsc|textit)", '-', s)
        return s
    savename = urlify(savename)
    if not os.path.exists(savedir):
        os.makedirs(savedir)
    if not savename.endswith('.p'):
        savename = savename + '.p'
    # this feature creeps me out, i don't think it's needed any more
    savename = savename.replace('lemmatised', '-lemmatised')
    fullpath = os.path.join(savedir, savename)
    # interactively resolve filename collisions: overwrite or rename
    while os.path.isfile(fullpath):
        selection = raw_input("\nSave error: %s already exists in %s.\n\nType 'o' to overwrite, or enter a new name: " % (savename, savedir))
        if selection == 'o' or selection == 'O':
            os.remove(fullpath)
        else:
            if not selection.endswith('.p'):
                selection = selection + '.p'
            fullpath = os.path.join(savedir, selection)
    # decide what to pickle: plain data structures go in as a one-element
    # list; namedtuple interrogations are unpacked into their branches
    if type(interrogation) == pandas.core.frame.DataFrame or \
        type(interrogation) == pandas.core.series.Series or \
        type(interrogation) == dict or \
        type(interrogation) == collections.Counter or \
        type(interrogation) == nltk.text.Text:
        temp_list = [interrogation]
    elif len(interrogation) == 2:
        temp_list = [interrogation.query, interrogation.totals]
    elif len(interrogation) == 3:
        if interrogation.query['function'] == 'interrogator':
            if interrogation.query['query'].startswith('k'):
                # keyword interrogations carry a table, not totals
                temp_list = [interrogation.query, interrogation.results, interrogation.table]
            else:
                temp_list = [interrogation.query, interrogation.results, interrogation.totals]
        else:
            temp_list = [interrogation.query, interrogation.results, interrogation.totals]
    elif len(interrogation) == 4:
        temp_list = [interrogation.query, interrogation.results, interrogation.totals, interrogation.table]
    else:
        # bug fix: previously fell through with temp_list unbound (NameError)
        raise ValueError('Could not work out how to save this kind of data.')
    # bug fix: pickles are binary, so open 'wb' (was 'w'); the with-block
    # guarantees the handle is closed even if dump fails
    with open(fullpath, 'wb') as f:
        pickle.dump(temp_list, f)
    time = strftime("%H:%M:%S", localtime())
    print('\n%s: Data saved: %s\n' % (time, fullpath))
def load_result(savename, loaddir = 'data/saved_interrogations'):
    """Reloads a save_result as namedtuple.

    Pickles written by save_result contain a list of 1-4 'branches'; this
    rebuilds the matching 'loaded_interrogation' namedtuple, or returns the
    bare object for single-item saves. If the file is missing, similarly
    named files in loaddir are suggested interactively.

    :param savename: filename to load ('.p' is appended if missing)
    :param loaddir: directory to look in
    :returns: the loaded data, or None if the user exits the prompt
    """
    import collections
    import pickle
    import os
    import pandas
    import nltk
    if not savename.endswith('.p'):
        savename = savename + '.p'
    notfound = True
    def namesuggester(entered_name, searched_dir):
        # Suggest the files in searched_dir closest to entered_name by edit
        # distance; returns the chosen name (without '.p'), or None to exit.
        import nltk
        from itertools import groupby
        from operator import itemgetter
        names = os.listdir(searched_dir)
        res = {}
        for n in names:
            sim = nltk.metrics.distance.edit_distance(entered_name, n, transpositions=False)
            res[n] = sim
        # keep only the group of names sharing the smallest distance
        possibles = sorted([v.replace('.p', '') for k,v in groupby(sorted((v,k) for k,v in res.iteritems()), key=itemgetter(0)).next()[1]])
        sel = raw_input('\n"%s" not found. Enter one of the below, or "e" to exit:\n\n%s\n\n' % (entered_name.replace('.p', ''), '\n'.join(['    %d) "%s"' % (index + 1, sug) for index, sug in enumerate(possibles[:10])])))
        if sel.startswith('e') or sel.startswith('E'):
            return
        else:
            try:
                # a numeric reply indexes into the suggestion list (1-based)
                s = int(sel)
                return possibles[s - 1]
            except ValueError:
                return sel
    # keep prompting until a file opens successfully (or the user exits)
    while notfound:
        try:
            unpickled = pickle.load(open(os.path.join(loaddir, savename), 'rb'))
            notfound = False
        except IOError:
            sel = namesuggester(savename, loaddir)
            if not sel:
                return
            else:
                savename = sel + '.p'
    # a bare object saved directly (not wrapped by save_result's branches)
    if type(unpickled) == pandas.core.frame.DataFrame or \
        type(unpickled) == pandas.core.series.Series or \
        type(unpickled) == dict or \
        type(unpickled) == collections.Counter or \
        type(unpickled) == nltk.text.Text:
        output = unpickled
    # otherwise, rebuild the namedtuple that save_result unpacked
    if len(unpickled) == 1:
        if type(unpickled[0]) == pandas.core.frame.DataFrame or \
            type(unpickled[0]) == pandas.core.series.Series or \
            type(unpickled[0]) == dict or \
            type(unpickled[0]) == collections.Counter or \
            type(unpickled[0]) == nltk.text.Text:
            output = unpickled[0]
    elif len(unpickled) == 4:
        outputnames = collections.namedtuple('loaded_interrogation', ['query', 'results', 'totals', 'table'])
        output = outputnames(unpickled[0], unpickled[1], unpickled[2], unpickled[3])
    elif len(unpickled) == 3:
        # keyword interrogations have a table branch instead of totals
        if unpickled[0]['function'] == 'interrogator':
            if unpickled[0]['query'].startswith('k'):
                outputnames = collections.namedtuple('loaded_interrogation', ['query', 'results', 'table'])
            else:
                # not presently possible, i think:
                outputnames = collections.namedtuple('loaded_interrogation', ['query', 'results', 'totals'])
        else:
            outputnames = collections.namedtuple('loaded_interrogation', ['query', 'results', 'totals'])
        output = outputnames(unpickled[0], unpickled[1], unpickled[2])
    elif len(unpickled) == 2:
        outputnames = collections.namedtuple('loaded_interrogation', ['query', 'totals'])
        output = outputnames(unpickled[0], unpickled[1])
    return output
def report_display():
    """Displays/downloads the risk report, depending on your browser settings"""
    class PDF(object):
        # Wraps a path to a PDF so IPython renders it inline as an iframe
        # (HTML frontends) or as an included graphic (LaTeX export).
        def __init__(self, pdf, size=(200,200)):
            self.pdf, self.size = pdf, size
        def _repr_html_(self):
            return '<iframe src={0} width={1[0]} height={1[1]}></iframe>'.format(self.pdf, self.size)
        def _repr_latex_(self):
            return r'\includegraphics[width=1.0\textwidth]{{{0}}}'.format(self.pdf)
    # the report lives at a fixed path relative to the project
    return PDF('report/risk_report.pdf',size=(800,650))
def ipyconverter(inputfile, outextension):
    """ipyconverter converts ipynb files to various formats.

    This function calls a shell script ('ipython nbconvert'), rather than
    using an API.

    :param inputfile: the ipynb file to convert
    :param outextension: output format extension: 'py', 'html', 'tex' or 'md'
    :raises ValueError: if outextension is not one of the four above

    Example usage: ipyconverter('infile.ipynb', 'tex')
    This creates a .tex file called infile-converted.tex
    """
    import os
    # map extension -> nbconvert '--to' argument; the trailing space is
    # important when the command string is assembled below
    if outextension == 'py':
        outargument = '--to python '
    elif outextension == 'tex':
        outargument = '--to latex '
    elif outextension == 'html':
        outargument = '--to html '
    elif outextension == 'md':
        outargument = '--to md '
    else:
        # bug fix: an unrecognised extension used to crash later with
        # NameError on outargument; fail early with a clear message
        raise ValueError("Output extension '%s' not recognised: use 'py', 'tex', 'html' or 'md'." % outextension)
    outbasename = os.path.splitext(inputfile)[0]
    output = outbasename + '-converted.' + outextension
    shellscript = 'ipython nbconvert ' + outargument + inputfile + ' --stdout > ' + output
    print("Shell command: " + shellscript)
    os.system(shellscript)
def conv(inputfile, loadme = True):
    """A .py to .ipynb converter that relies on old code from IPython.
    You shouldn't use this: I only am while I'm on a deadline.

    :param inputfile: path to the .py file to convert
    :param loadme: if True, open the converted notebook when done
    """
    import os, sys
    import pycon.current as nbf
    import IPython
    # x.py -> x.ipynb; nbconvert writes x.nbconvert.ipynb, renamed below
    outbasename = os.path.splitext(inputfile)[0]
    output = outbasename + '.ipynb'
    badname = outbasename + '.nbconvert.ipynb'
    print '\nConverting ' + inputfile + ' ---> ' + output + ' ...'
    # read as a v3-era notebook, write it, then upgrade to nbformat 4
    # through the nbconvert shell command
    nb = nbf.read(open(inputfile, 'r'), 'py')
    nbf.write(nb, open(output, 'w'), 'ipynb')
    os.system('ipython nbconvert --to=notebook --nbformat=4 %s' % output)
    os.system('mv %s %s' % (badname, output))
    if loadme:
        os.system('ipython notebook %s' % output)
    #nbnew = open(output, 'r')
    #IPython.nbformat.v4.convert.upgrade(nbnew, from_version=3, from_minor=0)
    print 'Done!\n'
def pytoipy(inputfile):
    """A .py to .ipynb converter.

    Converts a .py file to an IPython notebook via IPython's nbformat API.
    Comments in the .py file can be used to delimit cells, headings, etc.,
    for example:

        # <headingcell level=1>
        # A heading
        # <markdowncell>
        # *This text is in markdown*
        # <codecell>
        # print 'hello'

    Example usage: pytoipy('filename.py')
    """
    import os
    import IPython.nbformat.current as nbf
    stem = os.path.splitext(inputfile)[0]
    outfile = stem + '.ipynb'
    print('\nConverting ' + inputfile + ' ---> ' + outfile + ' ...')
    # read the script as a 'py'-format notebook, then serialise as ipynb
    notebook = nbf.read(open(inputfile, 'r'), 'py')
    nbf.write(notebook, open(outfile, 'w'), 'ipynb')
    print('Done!\n')
def new_project(name):
    """Make a new project in the current directory.

    Creates the directory layout, copies the bundled BNC dictionary in,
    writes a renamed copy of the blank notebook, and (on OSX) writes an
    executable launcher.sh.

    :param name: name/path of the project directory to create
    """
    import os
    import shutil
    import stat
    import platform
    import corpkit
    path_to_corpkit = os.path.dirname(corpkit.__file__)
    thepath, corpkitname = os.path.split(path_to_corpkit)
    # make project directory
    os.makedirs(name)
    # make other directories
    dirs_to_make = ['data', 'images']
    subdirs_to_make = ['dictionaries', 'saved_interrogations', 'corpus']
    for directory in dirs_to_make:
        os.makedirs(os.path.join(name, directory))
    for subdir in subdirs_to_make:
        os.makedirs(os.path.join(name, 'data', subdir))
    # copy the bnc dictionary to data/dictionaries
    shutil.copy(os.path.join(thepath, 'dictionaries', 'bnc.p'), os.path.join(name, 'data', 'dictionaries'))
    # make a blank ish notebook, renamed after the project
    newnotebook_text = open(os.path.join(thepath, corpkitname, 'blanknotebook.ipynb')).read()
    fixed_text = newnotebook_text.replace('blanknotebook', str(name))
    # bug fix: the old code followed writes with 'handle.close' (no parens,
    # a no-op); the with-blocks close the files properly
    with open(os.path.join(name, name + '.ipynb'), 'wb') as handle:
        handle.write(fixed_text)
    if platform.system() == 'Darwin':
        shtext = '#!/bin/bash\n\npath=$0\ncd ${path%%/*.*}\nipython notebook %s.ipynb\n' % name
        with open(os.path.join(name, 'launcher.sh'), 'wb') as handle:
            handle.write(shtext)
        # permissions for sh launcher: add execute bits
        # (0o111 replaces the Python-2-only octal literal 0111)
        st = os.stat(os.path.join(name, 'launcher.sh'))
        os.chmod(os.path.join(name, 'launcher.sh'), st.st_mode | 0o111)
        print('\nNew project made: %s\nTo begin, either use:\n\n ipython notebook %s.ipynb\n\nor run launcher.sh.\n\n' % (name, name))
    else:
        print('\nNew project made: %s\nTo begin, either use:\n\n ipython notebook %s.ipynb\n\n' % (name, name))
def searchtree(tree, query, options = ('-t', '-o')):
    """Searches a tree with Tregex and returns matching terminals.

    :param tree: a bracketed parse tree, as a string
    :param query: the Tregex query to run
    :param options: Tregex command-line options (default was a mutable
        list; now an equivalent tuple, which callers cannot accidentally
        share and mutate)
    :returns: tregex_engine output for the query over the tree
    """
    import os
    from corpkit.other import tregex_engine
    from corpkit.tests import check_dit
    # detect IPython: get_ipython is only defined inside an IPython session
    try:
        get_ipython().getoutput()
    except TypeError:
        have_ipython = True
    except NameError:
        import subprocess
        have_ipython = False
    # write the tree to a temp file so tregex can read it as a corpus;
    # the with-block replaces an open/write/close triple so the handle is
    # closed even if the write fails
    with open('tree.tmp', "w") as fo:
        fo.write(tree + '\n')
    # first validate the query interactively, then actually run it
    result = tregex_engine(query = query, check_query = True)
    result = tregex_engine(query = query, options = options, corpus = "tree.tmp")
    os.remove("tree.tmp")
    return result
def quicktree(sentence):
    """Parse a sentence and return a visual representation in IPython.

    Uses pyStatParser to parse, NLTK's canvas widgets to draw a postscript
    tree, and ImageMagick's 'convert' to produce a PNG for display.

    :param sentence: the string to parse
    :returns: IPython Image of the parse tree (reads tree.png from disk)
    :raises ValueError: if pyStatParser is not installed
    """
    import os
    from nltk import Tree
    from nltk.draw.util import CanvasFrame
    from nltk.draw import TreeWidget
    try:
        from stat_parser import Parser
    except:
        raise ValueError('PyStatParser not found.')
    try:
        from IPython.display import display
        from IPython.display import Image
    except:
        pass
    # detect IPython: get_ipython is only defined inside an IPython session
    try:
        get_ipython().getoutput()
    except TypeError:
        have_ipython = True
    except NameError:
        import subprocess
        have_ipython = False
    parser = Parser()
    parsed = parser.parse(sentence)
    # draw the tree into a postscript file via a throwaway canvas
    cf = CanvasFrame()
    tc = TreeWidget(cf.canvas(),parsed)
    cf.add_widget(tc,10,10) # (10,10) offsets
    cf.print_to_file('tree.ps')
    cf.destroy()
    # convert .ps -> .png with ImageMagick
    if have_ipython:
        tregex_command = 'convert tree.ps tree.png'
        result = get_ipython().getoutput(tregex_command)
    else:
        tregex_command = ["convert", "tree.ps", "tree.png"]
        result = subprocess.check_output(tregex_command)
    os.remove("tree.ps")
    # bug fix: an os.remove("tree.png") placed after this return was
    # unreachable dead code and has been deleted; tree.png is deliberately
    # left on disk because the returned Image reads it by filename
    return Image(filename='tree.png')
def multiquery(corpus, query, sort_by = 'total', quicksave = False):
    """Count a list of named Tregex queries over a corpus.

    :param corpus: path to the corpus
    :param query: list of [name, pattern] pairs, e.g.
        [[u'NPs in corpus', r'NP'], [u'VPs in corpus', r'VP']]
    :param sort_by: how editor() sorts the combined results
    :param quicksave: filename to save results under, or False
    :returns: editor() output with one totals column per named query
    """
    import collections
    import os
    import pandas
    import pandas as pd
    from time import strftime, localtime
    from corpkit.interrogator import interrogator
    from corpkit.editor import editor
    if quicksave:
        savedir = 'data/saved_interrogations'
        if not quicksave.endswith('.p'):
            quicksave = quicksave + '.p'
        fullpath = os.path.join(savedir, quicksave)
        while os.path.isfile(fullpath):
            # bug fix: the prompt referenced an undefined name 'savename'
            selection = raw_input("\nSave error: %s already exists in %s.\n\nPick a new name: " % (quicksave, savedir))
            if not selection.endswith('.p'):
                selection = selection + '.p'
            fullpath = os.path.join(savedir, selection)
    results = []
    for name, pattern in query:
        result = interrogator(corpus, 'count', pattern)
        result.totals.name = name # rename count
        results.append(result.totals)
    results = pd.concat(results, axis = 1)
    results = editor(results, sort_by = sort_by, print_info = False, keep_stats = False)
    time = strftime("%H:%M:%S", localtime())
    print('%s: Finished! %d unique results, %d total.' % (time, len(results.results.columns), results.totals.sum()))
    if quicksave:
        # NOTE(review): a new name chosen at the prompt above is used only
        # for the existence check; save_result still receives the original
        # quicksave name -- confirm whether that is intended
        from corpkit.other import save_result
        save_result(results, quicksave)
    return results
# if nothing, the query's fine!
def interroplot(path, query):
    """Interrogates path with Tregex query, gets relative frequencies, and plots the top seven results"""
    from corpkit import interrogator, editor, plotter
    # interrogate, relativise against totals, then hand off to the plotter
    raw_interrogation = interrogator(path, 'words', query)
    relativised = editor(raw_interrogation.results, '%', raw_interrogation.totals, print_info = False)
    plotter(str(path), relativised.results)
def datareader(data, plaintext = False, **kwargs):
    """
    Returns a string of plain text from a number of kinds of data.

    The kinds of data currently accepted are:
        path to corpus : all trees are flattened
        path to subcorpus : all trees are flattened
        conc() output (DataFrame of concordance lines)
        csv file generated with conc()
        a string of text

    :param data: the input (see above)
    :param plaintext: if True, treat a directory as plain text files to be
        tokenised, rather than as parse trees
    :returns: unicode text, or a list of lowercased tokens for plaintext
        directory input
    """
    import os
    import pandas
    from corpkit.other import tregex_engine
    from corpkit.tests import check_dit
    # detect IPython: get_ipython is only defined inside an IPython session
    try:
        get_ipython().getoutput()
    except TypeError:
        have_ipython = True
    except NameError:
        import subprocess
        have_ipython = False
    tregex_engine_used = False
    # bug fix: a unicode argument used to 'return good' before good was
    # ever assigned (NameError), and unicode paths were never handled;
    # str and unicode now share one branch
    if type(data) == str or type(data) == unicode:
        # if it's a file, read it
        if os.path.isfile(data):
            good = open(data).read()
        # if it's a dir, flatten all trees
        elif os.path.isdir(data):
            # get all sentences newline separated
            query = r'__ !< __'
            options = ['-o', '-t']
            # if lemmatise, we get each word on a newline
            if 'lemmatise' in kwargs:
                if kwargs['lemmatise'] is True:
                    query = r'__ <# (__ !< __)'
                    options = ['-o']
            if not plaintext:
                tregex_engine_used = True
                results = tregex_engine(corpus = data,
                                        options = options,
                                        query = query,
                                        **kwargs)
            else:
                # plain text: tokenise every file (recursively) with NLTK
                results = []
                fs = [os.path.join(data, f) for f in os.listdir(data)]
                # do recursive if need
                if any(os.path.isdir(f) for f in fs):
                    recursive_files = []
                    for dirname, dirnames, filenames in os.walk(data):
                        for filename in filenames:
                            recursive_files.append(os.path.join(dirname, filename))
                    fs = recursive_files
                import nltk
                sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
                for f in fs:
                    raw = unicode(open(f).read(), 'utf-8', errors = 'ignore')
                    sents = sent_tokenizer.tokenize(raw)
                    tokenized_sents = [nltk.word_tokenize(i) for i in sents]
                    for sent in tokenized_sents:
                        for w in sent:
                            results.append(w.lower())
                # NOTE: this path returns a token list, not a joined string
                return results
        # if a string of text,
        else:
            good = data
    # if conc results, turn into string...
    elif type(data) == pandas.core.frame.DataFrame:
        # if conc lines:
        try:
            if list(data.columns) == ['l', 'm', 'r']:
                conc_lines = True
            else:
                conc_lines = False
        except:
            conc_lines = False
        if conc_lines:
            # may not be unicode!?
            good = [' '.join(list(data.ix[l])) for l in list(data.index)]
        else:
            good = data
    # make unicode (tregex output is already unicode, so skip it)
    if not tregex_engine_used:
        try:
            good = unicode(good, 'utf-8', errors = 'ignore')
        except TypeError:
            pass
    return good
def tregex_engine(query = False,
                  options = False,
                  corpus = False,
                  check_query = False,
                  check_for_trees = False,
                  lemmatise = False,
                  just_content_words = False,
                  return_tuples = False):
    """This does a tregex query.
    query: tregex query
    options: list of tregex options
    corpus: place to search
    check_query: just make sure query ok (interactive; returns the
        validated query, or False/None if the user exits)
    check_for_trees: find out if corpus contains parse trees
    lemmatise: wordnet-lemmatise each match (forces -o output)
    just_content_words: with lemmatise, keep only wordnet-taggable words
    return_tuples: return (word, TAG) tuples instead of plain strings"""
    import subprocess
    import re
    from time import localtime, strftime
    from corpkit.tests import check_dit
    from dictionaries.word_transforms import wordlist
    # on the cloud, tregex.sh must be invoked through sh
    on_cloud = check_dit()
    def find_wordnet_tag(tag):
        # map a Penn Treebank tag prefix to a wordnet POS letter;
        # False means "not a wordnet-lemmatisable word class"
        if tag.startswith('j'):
            tag = 'a'
        elif tag.startswith('v') or tag.startswith('m'):
            tag = 'v'
        elif tag.startswith('n'):
            tag = 'n'
        elif tag.startswith('r'):
            tag = 'r'
        else:
            tag = False
        return tag
    # if check_query, enter the while loop
    # if not, get out of it
    an_error_occurred = True
    while an_error_occurred:
        # rebuild the command from scratch on every retry
        if on_cloud:
            tregex_command = ["sh", "tregex.sh"]
        else:
            tregex_command = ["tregex.sh"]
        if not query:
            query = 'NP'
        # if checking for trees, use the -T option
        if check_for_trees:
            options = ['-T']
        filenaming = False
        #try:
        #    if '-f' in options:
        #        filenaming = True
        #except:
        #    pass
        # lemmatisation/tuple output needs plain -o output to parse below
        if return_tuples or lemmatise:
            options = ['-o']
        # append list of options to query
        if options:
            [tregex_command.append(o) for o in options]
        if query:
            tregex_command.append(query)
        if corpus:
            tregex_command.append(corpus)
        # do query
        try:
            res = subprocess.check_output(tregex_command, stderr=subprocess.STDOUT).splitlines()
        # exception handling for regex error
        except Exception, e:
            res = str(e.output).split('\n')
        if check_query:
            # define error searches
            tregex_error = re.compile(r'^Error parsing expression')
            regex_error = re.compile(r'^Exception in thread.*PatternSyntaxException')
            # if tregex error, give general error message
            if re.match(tregex_error, res[0]):
                tregex_error_output = ""
                time = strftime("%H:%M:%S", localtime())
                selection = raw_input('\n%s: Error parsing Tregex expression "%s".\nWould you like to:\n\n' \
                    '    a) rewrite it now\n' \
                    '    b) exit\n\nYour selection: ' % (time, query))
                if 'a' in selection:
                    query = raw_input('\nNew Tregex query: ')
                elif 'b' in selection:
                    print ''
                    return False
            # if regex error, try to help
            elif re.match(regex_error, res[0]):
                # pull the failing index out of the Java stack trace and
                # point a caret at the offending spot in the regex
                info = res[0].split(':')
                index_of_error = re.findall(r'index [0-9]+', info[1])
                justnum = index_of_error[0].split('dex ')
                spaces = ' ' * int(justnum[1])
                remove_start = query.split('/', 1)
                remove_end = remove_start[1].split('/', -1)
                time = strftime("%H:%M:%S", localtime())
                selection = raw_input('\n%s: Error parsing regex inside Tregex query: %s'\
                '. Best guess: \n%s\n%s^\n\nYou can either: \n' \
                '    a) rewrite it now\n' \
                '    b) exit\n\nYour selection: ' % (time, str(info[1]), str(remove_end[0]), spaces))
                if 'a' in selection:
                    query = raw_input('\nNew Tregex query: ')
                elif 'b' in selection:
                    print ''
                    return
            else:
                # query parsed fine: hand it back to the caller
                an_error_occurred = False
                return query
        # if not query checking, leave this horrible while loop
        else:
            an_error_occurred = False
    # counting is easy, just get out with the number
    if '-C' in options:
        return int(res[-1])
    # remove errors and blank lines
    res = [s for s in res if not s.startswith('PennTreeReader:') and s]
    # find end of stderr
    regex = re.compile('(Reading trees from file|using default tree)')
    # remove stderr at start
    std_last_index = res.index(next(s for s in res if re.search(regex, s)))
    res = res[std_last_index + 1:]
    # this is way slower than it needs to be, because it searches a whole subcorpus!
    if check_for_trees:
        if res[0].startswith('1:Next tree read:'):
            return True
        else:
            return False
    # return if no matches
    if res[-1] == 'There were 0 matches in total.':
        return []
    # remove total
    res = res[:-1]
    # make unicode and lowercase
    make_tuples = []
    # NOTE(review): filenaming is always False here (the code that could
    # set it is commented out above), so the tuple branch below is dead
    if filenaming:
        for index, r in enumerate(res):
            if r.startswith('# /'):
                make_tuples.append((r, res[index + 1]))
        res = make_tuples
    if not filenaming:
        res = [unicode(w, 'utf-8', errors = 'ignore').lower() for w in res]
    else:
        res = [(unicode(t), unicode(w, 'utf-8', errors = 'ignore').lower()) for t, w in res]
    if lemmatise or return_tuples:
        # CAN'T BE USED WITH ALMOST EVERY OPTION!
        allwords = []
        from nltk.stem.wordnet import WordNetLemmatizer
        lmtzr=WordNetLemmatizer()
        # turn this into a list of words or lemmas, with or without closed words
        for result in res:
            # remove brackets and split on first space
            result = result.lstrip('(')
            result = result.rstrip(')')
            tag, word = result.split(' ', 1)
            # get wordnet tag from stanford tag
            wordnet_tag = find_wordnet_tag(tag)
            short_tag = tag[:2]
            # do manual lemmatisation first
            if lemmatise:
                if word in wordlist:
                    word = wordlist[word]
                # do wordnet lemmatisation
                if wordnet_tag:
                    word = lmtzr.lemmatize(word, wordnet_tag)
            if just_content_words:
                if wordnet_tag:
                    if return_tuples:
                        allwords.append((word, tag))
                    else:
                        allwords.append(word)
            else:
                if return_tuples:
                    allwords.append((word, tag))
                else:
                    allwords.append(word)
        res = allwords
    if return_tuples:
        res = [(w, t.upper()) for w, t in res]
    return res
def load_all_results(data_dir = 'data/saved_interrogations'):
    """Load every saved interrogation in data_dir into a dict.

    :param data_dir: directory containing .p files written by save_result
    :returns: dict mapping filename stems to loaded interrogations
    :raises ValueError: if data_dir contains no .p files
    """
    import os
    import time
    from time import localtime, strftime
    r = {}
    fs = [f for f in os.listdir(data_dir) if f.endswith('.p')]
    if len(fs) == 0:
        # bug fix: this referenced an undefined name 'datadir'
        raise ValueError('No results found in %s' % data_dir)
    # imported after the emptiness check so the cheap validation above
    # does not depend on corpkit being importable
    from corpkit.other import load_result
    for finding in fs:
        try:
            r[os.path.splitext(finding)[0]] = load_result(finding, loaddir = data_dir)
            time = strftime("%H:%M:%S", localtime())
            print('%s: %s loaded as %s.' % (time, finding, os.path.splitext(finding)[0]))
        except:
            time = strftime("%H:%M:%S", localtime())
            print('%s: %s failed to load. Try using load_result to find out the matter.' % (time, finding))
    return r
def texify(series, n = 20, colname = 'Keyness', toptail = False, sort_by = False):
"""turn a series into a latex table"""
import pandas as pd
if sort_by:
df = pd.DataFrame(series.order(ascending = False))
else:
df = pd.DataFrame(series)
df.columns = [colname]
if not toptail:
return df.head(n).to_latex()
else:
comb = pd.concat([df.head(n), df.tail(n)])
longest_word = max([len(w) for w in list(comb.index)])
tex = ''.join(comb.to_latex()).split('\n')
linelin = len(tex[0])
try:
newline = (' ' * (linelin / 2)) + ' &'
newline_len = len(newline)
newline = newline + (' ' * (newline_len - 1)) + r'\\'
newline = newline.replace(r' \\', r'... \\')
newline = newline.replace(r' ', r'... ', 1)
except:
newline = r'... & ... \\'
tex = tex[:n+4] + [newline] + tex[n+4:]
tex = '\n'.join(tex)
return tex
def make_nltk_text(directory,
                   collapse_dirs = True,
                   tagged = False,
                   lemmatise = False,
                   just_content_words = False):
    """Turn a lot of trees into an nltk-style Text.

    :param directory: corpus path (its subdirs are used), or list of paths
    :param collapse_dirs: if True, return one Text for everything; if
        False, return a dict of {basename: Text} per directory
    :param tagged: return (word, TAG) tuples inside the Text
    :param lemmatise: wordnet-lemmatise the words
    :param just_content_words: with lemmatise, drop non-content words
    :returns: nltk.Text, or dict of them when collapse_dirs is False
    """
    import nltk
    import os
    from corpkit.other import tregex_engine
    if type(directory) == str:
        dirs = [os.path.join(directory, d) for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))]
        if len(dirs) == 0:
            dirs = [directory]
    elif type(directory) == list:
        dirs = directory
    return_tuples = False
    if tagged:
        return_tuples = True
    if just_content_words:
        lemmatise = True
    # match every terminal that contains an alphanumeric character
    query = r'__ < (/.?[A-Za-z0-9].?/ !< __)'
    if not return_tuples and not lemmatise:
        options = ['-o', '-t']
    else:
        options = ['-o']
    # filthy code.
    all_out = []
    for d in dirs:
        print("Flattening %s ... " % str(d))
        res = tregex_engine(corpus = d,
                            query = query,
                            options = options,
                            lemmatise = lemmatise,
                            just_content_words = just_content_words,
                            return_tuples = return_tuples)
        all_out.append(res)
    if collapse_dirs:
        tmp = []
        for res in all_out:
            for w in res:
                tmp.append(w)
        all_out = tmp
        textx = nltk.Text(all_out)
    else:
        textx = {}
        for name, text in zip(dirs, all_out):
            # bug fix: this used to build every per-directory Text from
            # 'all_out' (the list of lists) instead of that directory's
            # own token list 'text'
            t = nltk.Text(text)
            textx[os.path.basename(name)] = t
    return textx
def get_synonyms(word, pos = False):
    """Return a deduplicated list of lowercased WordNet lemma names for word."""
    import nltk
    from nltk.corpus import wordnet
    # restrict by part of speech only when one was given
    if pos:
        synsets = wordnet.synsets(word, pos = pos)
    else:
        synsets = wordnet.synsets(word)
    names = set()
    for synset in synsets:
        for lemma in synset.lemmas():
            names.add(lemma.name().replace('_', ' ').lower())
    return list(names)
def synonym_dictmaker(df, corpus_path = None, pos = False):
    """Interactively build a {synonym: headword} dict for df's columns.

    For each column not yet covered, propose WordNet synonyms plus words
    used similarly in the corpus, let the user prune the list, and map
    every survivor to the column headword.

    :param df: DataFrame whose column names are the headwords
    :param corpus_path: corpus handed to make_nltk_text (was an undefined
        name 'd' in the original; now an explicit parameter)
    :param pos: optional wordnet POS restriction for get_synonyms (was an
        undefined name in the original)
    :returns: the {synonym: headword} dict, or None if the user exits
    """
    syn_dict = {}
    text = make_nltk_text(corpus_path)
    for w in list(df.columns):
        if w not in syn_dict.keys() and w not in syn_dict.values():
            wds = get_synonyms(w, pos = pos) + text.similar(w)[:10]
            # bug fix: the candidate list is now interpolated into the
            # prompt; previously '%' was applied to raw_input's *return*
            sel = raw_input('Enter the indexes to remove from this list of proposed synonyms, or type "exit" to quit:\n\n%s\n' % '\n'.join(wds))
            if sel.startswith('e'):
                return
            # bug fix: parse numeric indexes and delete from the highest
            # down, so earlier deletions do not shift later positions
            # (the original deleted with raw string characters as indexes)
            for i in sorted((int(x) for x in sel.split()), reverse = True):
                del wds[i]
            for word in wds:
                syn_dict[word] = w
    return syn_dict
def pmultiquery(path,
    option = 'c',
    query = 'any',
    sort_by = 'total',
    quicksave = False,
    num_proc = 'default',
    function_filter = False,
    **kwargs):
    """Parallel process multiple queries or corpora.

    This function is used by interrogator if:
        a) path is a list of paths
        b) query is a dict of named queries.
    This function needs joblib 0.8.4 or above in order to run properly.

    :param path: corpus path, or list of paths for parallel corpora
    :param option: interrogator option; 'c' (count) returns one DataFrame,
        anything else returns a dict of named interrogations
    :param query: query string, or dict of named queries
    :param sort_by: how editor() sorts combined count results
    :param quicksave: directory name to save under (must be a string)
    :param num_proc: number of processes, or 'default' to auto-pick
    :param function_filter: dict of named function filters for the
        multiple-option mode
    """
    import collections
    import os
    import pandas
    import pandas as pd
    from collections import namedtuple
    from time import strftime, localtime
    from corpkit.interrogator import interrogator
    from corpkit.editor import editor
    from corpkit.other import save_result
    try:
        from joblib import Parallel, delayed
    except:
        raise ValueError('joblib, the module used for multiprocessing, cannot be found. ' \
                         'Install with:\n\n        pip install joblib')
    import multiprocessing
    num_cores = multiprocessing.cpu_count()
    def best_num_parallel(num_cores, num_queries):
        """decide how many parallel processes to run
        the idea, more or less, is to distribute the jobs evenly over
        the available cores without oversubscribing"""
        if num_queries <= num_cores:
            return num_queries
        if num_queries > num_cores:
            if (num_queries / num_cores) == num_cores:
                return int(num_cores)
            if num_queries % num_cores == 0:
                return max([int(num_queries / n) for n in range(2, num_cores) if int(num_queries / n) <= num_cores])
            else:
                import math
                if (float(math.sqrt(num_queries))).is_integer():
                    square_root = math.sqrt(num_queries)
                    if square_root <= num_queries / num_cores:
                        return int(square_root)
        return num_queries / ((num_queries / num_cores) + 1)
    # are we processing multiple queries or corpora?
    # find out optimal number of cores to use.
    multiple_option = False
    multiple_corpora = False
    if type(path) != str:
        multiple_corpora = True
        num_cores = best_num_parallel(num_cores, len(path))
    elif type(query) != str:
        multiple_corpora = False
        num_cores = best_num_parallel(num_cores, len(query))
    elif type(function_filter) != str:
        multiple_option = True
        num_cores = best_num_parallel(num_cores, len(function_filter.keys()))
    if num_proc != 'default':
        num_cores = num_proc
    # make sure quicksaves are right type
    if quicksave is True:
        raise ValueError('quicksave must be string when using pmultiquery.')
    # the options that don't change
    d = {'option': option,
         'paralleling': True,
         'function': 'interrogator'}
    # add kwargs to query
    for k, v in kwargs.items():
        d[k] = v
    # make a list of dicts to pass to interrogator,
    # with the iterable unique in every one
    ds = []
    if multiple_corpora and not multiple_option:
        # one job per corpus path, all running the same query
        path = sorted(path)
        for index, p in enumerate(path):
            name = os.path.basename(p)
            a_dict = dict(d)
            a_dict['path'] = p
            a_dict['query'] = query
            a_dict['outname'] = name
            a_dict['printstatus'] = False
            ds.append(a_dict)
    elif not multiple_corpora and not multiple_option:
        # one job per named query, all on the same corpus
        import collections
        for index, (name, q) in enumerate(query.items()):
            a_dict = dict(d)
            a_dict['path'] = path
            a_dict['query'] = q
            a_dict['outname'] = name
            a_dict['printstatus'] = False
            ds.append(a_dict)
    elif multiple_option:
        # one job per named function filter
        import collections
        for index, (name, q) in enumerate(function_filter.items()):
            a_dict = dict(d)
            a_dict['path'] = path
            a_dict['query'] = query
            a_dict['outname'] = name
            a_dict['function_filter'] = q
            a_dict['printstatus'] = False
            ds.append(a_dict)
    time = strftime("%H:%M:%S", localtime())
    if multiple_corpora and not multiple_option:
        print ("\n%s: Beginning %d parallel corpus interrogations:\n              %s" \
           "\n          Query: '%s'" \
           "\n          Interrogating corpus ... \n" % (time, num_cores, "\n              ".join(path), query) )
    elif not multiple_corpora and not multiple_option:
        print ("\n%s: Beginning %d parallel corpus interrogations: %s" \
           "\n          Queries: '%s'" \
           "\n          Interrogating corpus ... \n" % (time, num_cores, path, "', '".join(query.values())) )
    elif multiple_option:
        print ("\n%s: Beginning %d parallel corpus interrogations (multiple options): %s" \
           "\n          Query: '%s'" \
           "\n          Interrogating corpus ... \n" % (time, num_cores, path, query) )
    # run in parallel, get either a list of tuples (non-c option)
    # or a dataframe (c option)
    res = Parallel(n_jobs=num_cores)(delayed(interrogator)(**x) for x in ds)
    res = sorted(res)
    # turn list into dict of results, make query and total branches,
    # save and return
    if not option.startswith('c'):
        out = {}
        print ''
        for (name, data), d in zip(res, ds):
            if not option.startswith('k'):
                # non-keyword results get a totals branch summed per row
                outputnames = collections.namedtuple('interrogation', ['query', 'results', 'totals'])
                stotal = data.sum(axis = 1)
                stotal.name = u'Total'
                output = outputnames(d, data, stotal)
            else:
                outputnames = collections.namedtuple('interrogation', ['query', 'results'])
                output = outputnames(d, data)
            out[name] = output
        # could be wrong for unstructured corpora?
        num_diff_results = len(data)
        time = strftime("%H:%M:%S", localtime())
        print "\n%s: Finished! Output is a dictionary with keys:\n\n         '%s'\n" % (time, "'\n         '".join(sorted(out.keys())))
        if quicksave:
            for k, v in out.items():
                save_result(v, k, savedir = 'data/saved_interrogations/%s' % quicksave)
        return out
    # make query and total branch, save, return
    else:
        # count mode: concatenate per-job counts into one frame
        out = pd.concat(res, axis = 1)
        out = editor(out, sort_by = sort_by, print_info = False, keep_stats = False)
        time = strftime("%H:%M:%S", localtime())
        print '\n%s: Finished! %d unique results, %d total.' % (time, len(out.results.columns), out.totals.sum())
        if quicksave:
            from corpkit.other import save_result
            save_result(out, quicksave)
        return out
def as_regex(lst, boundaries = 'w', case_sensitive = False, inverse = False):
    """Turn a wordlist into an uncompiled regular expression string.

    :param lst: iterable of words to match (each is re.escape'd)
    :param boundaries: 'w' for word, 'l' for line, 's' for space
        boundaries; a (start, end) tuple/list for custom boundaries; or
        anything falsy for none
    :param case_sensitive: if False, prefix the pattern with (?i)
    :param inverse: build a negative lookahead instead of a plain match
    :returns: the pattern as a raw string (not compiled)
    :raises ValueError: if a string boundary spec is not recognised
    """
    import re
    case = r'' if case_sensitive else r'(?i)'
    # work out the start/end anchors
    if not boundaries:
        boundary1, boundary2 = r'', r''
    elif type(boundaries) == tuple or type(boundaries) == list:
        boundary1, boundary2 = boundaries[0], boundaries[1]
    elif boundaries.lower().startswith('w'):
        boundary1 = boundary2 = r'\b'
    elif boundaries.lower().startswith('l'):
        boundary1, boundary2 = r'^', r'$'
    elif boundaries.lower().startswith('s'):
        boundary1 = boundary2 = r'\s'
    else:
        raise ValueError('Boundaries not recognised. Use a tuple for custom start and end boundaries.')
    # escape, deduplicate and order the alternatives deterministically
    escaped = sorted(set([re.escape(w) for w in lst]))
    if inverse:
        # each alternative carries its own boundaries inside the lookahead
        joined = (r'%s|%s' % (boundary2, boundary1)).join(escaped)
        return case + r'(?!' + r'(' + boundary1 + joined + boundary2 + r')' + r')'
    return case + boundary1 + r'(' + r'|'.join(escaped) + r')' + boundary2
def show(lines, index, show = 'thread'):
    """Render the link stored at lines.ix[index]['link'] as an inline frame.

    NOTE(review): the `show` parameter shadows the function name and is
    unused in this body -- presumably consumed elsewhere; confirm.
    """
    # Strip the anchor markup wrapped around the raw URL, leaving just
    # the bare address.
    # NOTE(review): uses the deprecated pandas .ix indexer -- looks like
    # `index` is a row label here; confirm before porting to .loc/.iloc.
    url = lines.ix[index]['link'].replace('<a href=', '').replace('>link</a>', '')
    # HTML is expected to be in scope (IPython display object) at the call site.
    return HTML('<iframe src=%s width=1000 height=500></iframe>' % url)
|
|
import json
import os
from cStringIO import StringIO
from xml.parsers.expat import ExpatError
from django import http
from django.shortcuts import render, get_object_or_404, redirect
from django.db import transaction
from django.conf import settings
from django.utils import timezone
from django.db.models import Count
from django.contrib.auth.decorators import login_required
from django.utils.functional import wraps
from django.template.base import TemplateDoesNotExist
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.core.cache import cache
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from jsonview.decorators import json_view
from funfactory.urlresolvers import reverse
import xmltodict
from sorl.thumbnail import get_thumbnail
from PIL import Image
from airmozilla.manage import vidly
from airmozilla.base.utils import get_base_url, prepare_vidly_video_url
from airmozilla.main.models import (
Event,
VidlySubmission,
Template,
Picture,
EventOldSlug,
Channel,
Approval,
get_profile_safely,
)
from airmozilla.comments.models import Discussion
from airmozilla.uploads.models import Upload
from airmozilla.manage import videoinfo
from airmozilla.base.helpers import show_duration
from airmozilla.manage import sending
from . import forms
def xhr_login_required(view_func):
    """Decorator for XHR endpoints: like
    django.contrib.auth.decorators.login_required, but answers an
    unauthenticated request with a 403 JSON payload instead of a
    redirect."""
    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        if request.user.is_authenticated():
            return view_func(request, *args, **kwargs)
        payload = json.dumps({'error': "You must be logged in"})
        return http.HttpResponse(
            payload,
            content_type='application/json',
            status=403
        )
    return wrapper
def must_be_your_event(f):
    """Decorator that resolves the `id` argument to an Event (404 if
    unknown) and rejects the request with a 403 unless request.user is
    the event's creator.  The wrapped view receives the Event instance
    in place of the raw id."""
    @wraps(f)
    def wrapped(request, id, **kwargs):
        assert request.user.is_authenticated()
        event = get_object_or_404(Event, pk=id)
        if event.creator == request.user:
            return f(request, event, **kwargs)
        return http.HttpResponseForbidden(
            "Not your event to meddle with"
        )
    return wrapped
@login_required
def home(request):
    """Render the landing page of the "new event" flow, sidebar hidden."""
    request.show_sidebar = False
    return render(request, 'new/home.html', {})
@xhr_login_required
def partial_template(request, template_name):
    """Serve a partial template from new/partials/, 404 on unknown names.

    The details partial additionally gets an unbound DetailsForm in its
    context."""
    context = {}
    if template_name == 'details.html':
        context['form'] = forms.DetailsForm()
    try:
        return render(
            request,
            os.path.join('new/partials', template_name),
            context,
        )
    except TemplateDoesNotExist:
        raise http.Http404(template_name)
@json_view
@xhr_login_required
@require_POST
@transaction.atomic
def save_upload(request):
    """Record a finished file upload and create its initial Event.

    Expects a JSON body validated by forms.SaveForm (url, file_name,
    mime_type, size, upload_time; optional extra key: duration).
    Returns {'id': <event id>} or a 400 carrying the form errors.
    """
    data = json.loads(request.body)
    form = forms.SaveForm(data)
    if not form.is_valid():
        return http.HttpResponseBadRequest(form.errors)
    url = form.cleaned_data['url']
    # fall back to the URL's basename if no explicit file name was sent
    file_name = form.cleaned_data['file_name'] or os.path.basename(url)
    mime_type = form.cleaned_data['mime_type']
    size = form.cleaned_data['size']
    upload_time = form.cleaned_data['upload_time']
    # duration is read straight from the raw body, not the form
    duration = data.get('duration')
    new_upload = Upload.objects.create(
        user=request.user,
        url=url,
        size=size,
        file_name=file_name,
        mime_type=mime_type,
        upload_time=upload_time,
    )
    # now we can create the event to start with
    event = Event.objects.create(
        upload=new_upload,
        status=Event.STATUS_INITIATED,
        start_time=timezone.now(),
        privacy=Event.PRIVACY_PUBLIC,
        creator=request.user,
        duration=duration,
    )
    # link back so upload and event reference each other
    new_upload.event = event
    new_upload.save()
    return {'id': event.id}
@never_cache
@xhr_login_required
@transaction.atomic
@must_be_your_event
@json_view
def event_edit(request, event):
    """Read or update an initiated event's details.

    GET returns the serialized event; POST (JSON body for
    forms.DetailsForm) saves the changes.  Only events still in
    STATUS_INITIATED may be edited.  A title change clears the slug
    (and any colliding EventOldSlug) so a fresh one is generated.

    Note: previously decorated with `transaction.commit_on_success`,
    which was deprecated and later removed from Django;
    `transaction.atomic` is its replacement and matches the other
    views in this module.
    """
    if request.method == 'POST':
        if event.status != Event.STATUS_INITIATED:
            return http.HttpResponseBadRequest(
                "You can't edit events that are NOT in the state of initiated."
            )
        title_before = event.title
        form = forms.DetailsForm(json.loads(request.body), instance=event)
        if form.is_valid():
            form.save()
            if event.title != title_before:
                # Un-setting it will automatically pick a good slug.
                # But first we need to forget any EventOldSlug
                EventOldSlug.objects.filter(slug=event.slug).delete()
                event.slug = None
                event.save()
        else:
            return {'errors': form.errors}
    context = {
        'event': serialize_event(event),
    }
    return context
def serialize_event(event, extended=False):
    """Build a JSON-safe dict describing *event*.

    Compact form (extended=False): channels/topics are {id: True} dicts.
    Extended form: channels/topics are lists of detail dicts, and
    pending approvals, a 160x90 picture thumbnail and upload info are
    included.  Tags are flattened to one comma-separated string either
    way.
    """
    data = {
        'id': event.id,
        'title': event.title,
        'slug': event.slug,
        'description': event.description,
        'privacy': event.privacy,
        'privacy_display': event.get_privacy_display(),
        'status': event.status,
        'status_display': event.get_status_display(),
        'additional_links': event.additional_links,
        'duration': event.duration,
        'tags': [],
        'channels': {},
        'topics': {},
    }
    if extended:
        # When it's the extended version, we return a list of dicts
        # that contain the id, name, etc.
        data['channels'] = []
        data['topics'] = []
    if event.slug:
        data['url'] = reverse('main:event', args=(event.slug,))
    for tag in event.tags.all():
        data['tags'].append(tag.name) # good enough?
    # lastly, make it a string
    data['tags'] = ', '.join(sorted(data['tags']))
    for channel in event.channels.all():
        if extended:
            data['channels'].append({
                'id': channel.id,
                'name': channel.name,
                'url': reverse('main:home_channels', args=(channel.slug,)),
            })
        else:
            data['channels'][channel.id] = True
    for topic in event.topics.all():
        if extended:
            data['topics'].append({
                'id': topic.id,
                'topic': topic.topic,
            })
        else:
            data['topics'][topic.id] = True
    if extended:
        # get a list of all the groups that need to approve it
        data['approvals'] = []
        for approval in Approval.objects.filter(event=event, approved=False):
            data['approvals'].append({
                'group_name': approval.group.name,
            })
        if event.picture:
            # thumbnail the chosen picture at a fixed size
            geometry = '160x90'
            crop = 'center'
            thumb = get_thumbnail(
                event.picture.file, geometry, crop=crop
            )
            data['picture'] = {
                'url': thumb.url,
                'width': thumb.width,
                'height': thumb.height,
            }
        if event.upload:
            data['upload'] = {
                'size': event.upload.size,
                'url': event.upload.url,
                'mime_type': event.upload.mime_type,
            }
    return data
@require_POST
@login_required
@transaction.atomic
@must_be_your_event
@json_view
def event_archive(request, event):
    """Send the event's uploaded file to Vid.ly for transcoding.

    Re-uses the most recent VidlySubmission for this upload if one
    exists; otherwise submits the video (with a webhook notify URL) and
    records a new submission.  Either way the default archive template
    is assigned with the submission's tag.  Only valid while the event
    is still STATUS_INITIATED.

    Fix: `tag` was previously referenced in the template_environment
    update but only bound inside the "no submission yet" branch, so
    re-archiving an event with an existing submission raised NameError.
    `vidly_submission.tag` is correct in both branches.
    """
    if event.status != Event.STATUS_INITIATED:
        return http.HttpResponseBadRequest(
            "You can't archive events that are NOT in the state of initiated."
        )
    submissions = VidlySubmission.objects.filter(
        event=event,
        url__startswith=event.upload.url
    )
    # for/else: grab the newest submission if any; the else branch runs
    # only when there are none and performs the actual Vid.ly submission.
    for vidly_submission in submissions.order_by('-submission_time'):
        break
    else:
        # we haven't sent it in for archive yet
        upload = event.upload
        base_url = get_base_url(request)
        webhook_url = base_url + reverse('new:vidly_media_webhook')
        video_url = prepare_vidly_video_url(upload.url)
        tag, error = vidly.add_media(
            video_url,
            hd=True,
            notify_url=webhook_url,
            # Note that we deliberately don't bother yet to set
            # token_protection here because we don't yet know if the
            # event is going to be private or not.
            # Also, it's much quicker to make screencaptures of videos
            # that are not token protected on vid.ly.
        )
        # then we need to record that we did this
        vidly_submission = VidlySubmission.objects.create(
            event=event,
            url=video_url,
            tag=tag,
            hd=True,
            submission_error=error or None
        )
    default_template = Template.objects.get(default_archive_template=True)
    # Do an in place edit in case this started before the fetch_duration
    # has started.
    Event.objects.filter(id=event.id).update(
        template=default_template,
        template_environment={'tag': vidly_submission.tag}
    )
    return {
        'tag': vidly_submission.tag,
        'error': vidly_submission.submission_error
    }
@require_POST
@login_required
@must_be_your_event
@json_view
def event_screencaptures(request, event):
    """Kick off (or report on) duration fetching and screencapture
    generation for an initiated event's uploaded video.

    Returns {'seconds': <duration>, 'no_pictures': <Picture count>}.
    """
    if event.status != Event.STATUS_INITIATED:
        return http.HttpResponseBadRequest(
            "Events NOT in the state of initiated."
        )
    upload = event.upload
    video_url = upload.url
    context = {}
    cache_key = 'fetching-{0}'.format(event.id)
    # This function sets the cache `fetching-{id}` before and after calling
    # those functions in the videoinfo module.
    # The reason is that those calls might take many many seconds
    # and the webapp might send async calls to the event_picture view
    # which will inform the webapp that the slow videoinfo processes
    # are running and thus that the webapp shouldn't kick if off yet.
    seconds = event.duration
    if not event.duration:
        # it's a poor man's lock
        if not cache.get(cache_key):
            cache.set(cache_key, True, 60)
            seconds = videoinfo.fetch_duration(
                event,
                video_url=video_url,
                save=True,
                verbose=settings.DEBUG
            )
            cache.delete(cache_key)
            # reload: fetch_duration updated the row, not this instance
            event = Event.objects.get(id=event.id)
    context['seconds'] = seconds
    # The reason we can't use `if event.duration:` is because the
    # fetch_duration() does an inline-update instead of modifying
    # the instance object.
    no_pictures = Picture.objects.filter(event=event).count()
    if event.duration and not no_pictures:
        # duration known but no screencaptures yet: generate them
        if not cache.get(cache_key):
            cache.set(cache_key, True, 60)
            event = Event.objects.get(id=event.id)
            no_pictures = videoinfo.fetch_screencapture(
                event,
                video_url=video_url,
                save=True,
                verbose=settings.DEBUG,
                set_first_available=not event.picture,
                import_immediately=True,
            )
            cache.delete(cache_key)
            event = Event.objects.get(id=event.id)
            if no_pictures and not event.picture:
                # no picture has been chosen previously
                pictures = Picture.objects.filter(event=event).order_by('created')[:1]
                for picture in pictures:
                    event.picture = picture
                    event.save()
                    break
    context['no_pictures'] = no_pictures
    return context
# Note that this view is publically available.
# That means we can't trust the content but we can take it as a hint.
@csrf_exempt
@require_POST
def vidly_media_webhook(request):
    """Webhook Vid.ly POSTs to when a media task changes state.

    Parses the 'xml' POST parameter.  For a Finished task it stamps the
    matching VidlySubmission as finished, enforces privacy/token
    protection, promotes a pending event to scheduled, and kicks off
    duration/screencapture fetching.  For an Error task it records the
    error time.  Payloads that don't match a known submission, or that
    lack a Result/Task, are deliberately ignored (always answers 200
    "OK" unless the XML itself is missing/unparseable).
    """
    if not request.POST.get('xml'):
        return http.HttpResponseBadRequest("no 'xml'")
    xml_string = request.POST['xml'].strip()
    try:
        struct = xmltodict.parse(xml_string)
    except ExpatError:
        return http.HttpResponseBadRequest("Bad 'xml'")
    try:
        task = struct['Response']['Result']['Task']
        try:
            # must match on BOTH the source url and the tag
            vidly_submission = VidlySubmission.objects.get(
                url=task['SourceFile'],
                tag=task['MediaShortLink']
            )
            if task['Status'] == 'Finished':
                if not vidly_submission.finished:
                    vidly_submission.finished = timezone.now()
                    vidly_submission.save()
                event = vidly_submission.event
                if (
                    task['Private'] == 'false' and
                    event.privacy != Event.PRIVACY_PUBLIC
                ):
                    # the event is private but the video is not
                    vidly.update_media_protection(
                        vidly_submission.tag,
                        True # make it private
                    )
                    if not vidly_submission.token_protection:
                        vidly_submission.token_protection = True
                        vidly_submission.save()
                # Awesome!
                # This event now has a fully working transcoded piece of
                # media.
                if event.status == Event.STATUS_PENDING:
                    event.status = Event.STATUS_SCHEDULED
                    event.archive_time = timezone.now()
                    event.save()
                # More awesome! We can start processing the transcoded media.
                if not event.duration:
                    videoinfo.fetch_duration(
                        event,
                        save=True,
                        verbose=settings.DEBUG
                    )
                    # reload: fetch_duration updates the row in place
                    event = Event.objects.get(id=event.id)
                if event.duration:
                    if not Picture.objects.filter(event=event):
                        videoinfo.fetch_screencapture(
                            event,
                            save=True,
                            verbose=settings.DEBUG,
                            set_first_available=True,
                        )
            elif task['Status'] == 'Error':
                if not vidly_submission.errored:
                    vidly_submission.errored = timezone.now()
                    vidly_submission.save()
        except VidlySubmission.DoesNotExist:
            # remember, we can't trust the XML since it's publicly
            # available and exposed as a webhook
            pass
    except KeyError:
        # If it doesn't have a "Result" or "Task", it was just a notification
        # that the media was added.
        pass
    return http.HttpResponse('OK\n')
@never_cache
@login_required
@must_be_your_event
@json_view
def event_picture(request, event):
    """List the event's screencapture thumbnails; POST selects one.

    POST body is JSON for forms.PictureForm (saved onto the event).
    The response also carries 'fetching': whether a background
    duration/screencapture job (see event_screencaptures) is flagged
    as running in the cache.
    """
    if request.method == 'POST':
        form = forms.PictureForm(json.loads(request.body), instance=event)
        if not form.is_valid():
            return http.HttpResponseBadRequest(form.errors)
        with transaction.atomic():
            form.save()
    # if it has screen captures start returning them
    pictures = Picture.objects.filter(event=event).order_by('created')
    thumbnails = []
    # geometry = request.GET.get('geometry', '160x90')
    # crop = request.GET.get('crop', 'center')
    geometry = '160x90'
    crop = 'center'
    for p in pictures:
        thumb = get_thumbnail(
            p.file, geometry, crop=crop
        )
        # mark the currently chosen picture
        picked = event.picture and event.picture == p
        thumbnails.append({
            'id': p.id,
            'url': thumb.url,
            'width': thumb.width,
            'height': thumb.height,
            'picked': picked,
            # 'large_url': large_thumb.url,
        })
    context = {}
    if thumbnails:
        context['thumbnails'] = thumbnails
    # flag set by event_screencaptures while videoinfo work is running
    cache_key = 'fetching-{0}'.format(event.id)
    context['fetching'] = bool(cache.get(cache_key))
    return context
@never_cache
@login_required
@must_be_your_event
@json_view
def event_summary(request, event):
    """Return the extended serialization of the event together with the
    number of screencapture pictures it has."""
    picture_count = Picture.objects.filter(event=event).count()
    return {
        'event': serialize_event(event, extended=True),
        'pictures': picture_count,
    }
def _videos_by_tags(tags):
    """Return a list of dicts where each dict looks something like this:
    {'id': 123, 'tag': 'abc123', 'Status': 'Processing', 'finished': False}
    And if there's no VidlySubmission the dict will just look like this:
    {'id': 124}
    The advantage of this function is that you only need to do 1 query
    to Vid.ly for a long list of tags.

    *tags* maps Vid.ly tag -> Event.  Side effect: the newest
    VidlySubmission per tag gets its finished/errored timestamp stamped
    (and the event its archive_time) as soon as Vid.ly reports a final
    status.
    """
    # one batched status query for every tag at once
    all_results = vidly.query(tags.keys())
    video_contexts = []
    for tag, event in tags.items():
        video_context = {
            'id': event.id,
        }
        if event.duration:
            video_context['duration'] = event.duration
            video_context['duration_human'] = show_duration(event.duration)
        qs = VidlySubmission.objects.filter(event=event, tag=tag)
        # only consider the most recent submission for this tag
        for vidly_submission in qs.order_by('-submission_time')[:1]:
            video_context['tag'] = tag
            results = all_results.get(tag, {})
            video_context['status'] = results.get('Status')
            video_context['finished'] = results.get('Status') == 'Finished'
            if video_context['finished']:
                # persist completion the first time we notice it
                if not vidly_submission.finished:
                    vidly_submission.finished = timezone.now()
                    vidly_submission.save()
                if not event.archive_time:
                    event.archive_time = timezone.now()
                    event.save()
            elif results.get('Status') == 'Error':
                if not vidly_submission.errored:
                    vidly_submission.errored = timezone.now()
                    vidly_submission.save()
            else:
                # still transcoding; report an estimate instead
                video_context['estimated_time_left'] = (
                    vidly_submission.get_estimated_time_left()
                )
            break
        video_contexts.append(video_context)
    return video_contexts
@never_cache
@login_required
@must_be_your_event
@json_view
def event_video(request, event):
    """Return the Vid.ly transcoding status for this event's video tag,
    or an empty dict when the event has no tag in its template
    environment."""
    env = event.template_environment
    tag = env and env.get('tag')
    if not tag:
        return {}
    return _videos_by_tags({tag: event})[0]
@require_POST
@login_required
@json_view
def videos(request):
    """Similar to event_video except it expects a 'ids' request parameter
    and returns a dict of videos where the event ID is the keys.

    Fix: a JSON body without an 'ids' key used to raise an uncaught
    KeyError (500); it now yields a 400 just like malformed JSON.
    """
    try:
        ids = json.loads(request.body)['ids']
    except (ValueError, KeyError) as x:
        # ValueError: body is not valid JSON; KeyError: no 'ids' key
        return http.HttpResponseBadRequest(str(x))
    events = Event.objects.filter(
        id__in=ids,
        creator=request.user,
        template__name__icontains='vid.ly',
    )
    tags = {}
    for event in events:
        tag = (
            event.template_environment and
            event.template_environment.get('tag')
        )
        tags[tag] = event
    return dict(
        (x['id'], x)
        for x in _videos_by_tags(tags)
    )
@require_POST
@login_required
@must_be_your_event
@json_view
def event_publish(request, event):
    """Finalize an initiated event.

    Verifies/enforces Vid.ly token protection for non-public events,
    moves the event to SCHEDULED (or PENDING if transcoding is still
    running), assigns a default placeholder picture and default
    channel(s) where missing, opens a moderated discussion, and creates
    approval requests for every group attached to a public event's
    topics.  Returns True on success.
    """
    if event.status != Event.STATUS_INITIATED:
        return http.HttpResponseBadRequest("Not in an initiated state")
    # there has to be a Vid.ly video
    tag = event.template_environment['tag']
    submission = None
    # newest submission for this tag wins
    qs = VidlySubmission.objects.filter(event=event, tag=tag)
    for each in qs.order_by('-submission_time'):
        submission = each
        break
    assert submission, "Event has no vidly submission"
    groups = []
    with transaction.atomic():
        results = vidly.query(tag).get(tag, {})
        # Let's check the privacy/tokenization of the video.
        # What matters (source of truth) is the event's privacy state.
        if event.privacy != Event.PRIVACY_PUBLIC and results:
            # make sure the submission of the video IS token protected
            if not submission.token_protection:
                submission.token_protection = True
                submission.save()
            if results['Private'] == 'false':
                # We can only do this if the video has been successfully
                # transcoded.
                if results['Status'] == 'Finished':
                    vidly.update_media_protection(
                        tag,
                        True
                    )
        if results.get('Status') == 'Finished':
            event.status = Event.STATUS_SCHEDULED
            # If it's definitely finished, it means we managed to ask
            # Vid.ly this question before Vid.ly had a chance to ping
            # us on the webhook. Might as well set it now.
            if not event.archive_time:
                event.archive_time = timezone.now()
        else:
            # vidly hasn't finished processing it yet
            event.status = Event.STATUS_PENDING
        event.save()
        if not event.picture:
            # assign the default placeholder picture if there is one
            try:
                event.picture = Picture.objects.get(default_placeholder=True)
                event.save()
            except Picture.DoesNotExist: # pragma: no cover
                pass
        if not event.channels.all():
            # forcibly put it in the default channel(s)
            for channel in Channel.objects.filter(default=True):
                event.channels.add(channel)
        if not Discussion.objects.filter(event=event):
            # every published event gets a moderated discussion
            discussion = Discussion.objects.create(
                event=event,
                enabled=True,
                notify_all=True
            )
            discussion.moderators.add(event.creator)
        if event.privacy == Event.PRIVACY_PUBLIC:
            # collect the distinct groups attached to the event's topics
            for topic in event.topics.all():
                for group in topic.groups.all():
                    if group not in groups:
                        groups.append(group)
            for group in groups:
                Approval.objects.create(event=event, group=group)
    # note: the emails are sent outside the atomic() block above
    for group in groups:
        sending.email_about_approval_requested(
            event,
            group,
            request
        )
    return True
@never_cache
@login_required
@json_view
def your_events(request):
    """List the requesting user's initiated (not-yet-published) events.

    First adopts any lingering video uploads that have no event yet by
    creating empty initiated events for them, then returns each event
    with its upload info, chosen thumbnail and screencapture count.
    """
    # If you have some uploads that are lingering but not associated
    # with an event, we might want to create empty events for them
    # now.
    lingering_uploads = Upload.objects.filter(
        mime_type__startswith='video/',
        user=request.user,
        event__isnull=True,
        size__gt=0
    )
    with transaction.atomic():
        for upload in lingering_uploads:
            event = Event.objects.create(
                status=Event.STATUS_INITIATED,
                creator=upload.user,
                upload=upload,
                start_time=upload.created,
                privacy=Event.PRIVACY_PUBLIC,
                created=upload.created
            )
            # event.channels.add(default_channel)
            # We'll pretend the event was created at the time the
            # video was uploaded.
            # Doing this after the create() is necessary because the
            # model uses the auto_now_add=True
            event.created = upload.created
            event.save()
            upload.event = event
            upload.save()
    events = (
        Event.objects.filter(
            creator=request.user,
            status=Event.STATUS_INITIATED,
            upload__isnull=False,
        )
        .select_related('upload', 'picture')
        .order_by('-created')
    )
    # count pictures per event in one aggregated query
    all_possible_pictures = (
        Picture.objects
        .filter(event__in=events)
        .values('event_id')
        .annotate(Count('event'))
    )
    pictures_count = {}
    for each in all_possible_pictures:
        pictures_count[each['event_id']] = each['event__count']
    serialized = []
    for event in events:
        upload = event.upload
        upload = {
            'size': upload.size,
            'mime_type': upload.mime_type
        }
        thumbnail = None
        if event.picture:
            # thumbnail the chosen picture at a fixed size
            geometry = '160x90'
            crop = 'center'
            thumb = get_thumbnail(
                event.picture.file, geometry, crop=crop
            )
            thumbnail = {
                'url': thumb.url,
                'width': thumb.width,
                'height': thumb.height,
            }
        serialized.append({
            'id': event.id,
            'title': event.title,
            'upload': upload,
            'picture': thumbnail,
            'pictures': pictures_count.get(event.id, 0),
            'modified': event.modified,
        })
    return {'events': serialized}
@require_POST
@login_required
@must_be_your_event
@json_view
def event_delete(request, event):
    """Soft-delete: flip the event's status to removed instead of
    deleting the database row."""
    with transaction.atomic():
        event.status = Event.STATUS_REMOVED
        event.save()
    return True
@transaction.atomic
def unsubscribe(request, identifier):
    """Two-step email unsubscribe keyed by an opaque identifier.

    GET: look up the user id cached under the identifier; when absent,
    cache the current requester's id for 60 seconds (the confirming
    POST must arrive within that window) and render the page with no
    user.  POST: mark the cached user's profile as opted out of event
    emails and redirect to the confirmation page.
    """
    context = {}
    cache_key = 'unsubscribe-%s' % identifier
    user_id = cache.get(cache_key)
    if user_id:
        user = get_object_or_404(User, id=user_id)
    else:
        user = None
        # remember who hit this link so the confirming POST can act on it
        cache.set(cache_key, request.user.id, 60)
    context['user'] = user
    if request.method == 'POST':
        if not user:
            return http.HttpResponseBadRequest('No user')
        user_profile = get_profile_safely(user, create_if_necessary=True)
        user_profile.optout_event_emails = True
        user_profile.save()
        cache.delete(cache_key)
        return redirect('new:unsubscribed')
    return render(request, 'new/unsubscribe.html', context)
def unsubscribed(request):
    """Static confirmation page shown after a successful unsubscribe."""
    return render(request, 'new/unsubscribed.html', {})
@require_POST
@login_required
@must_be_your_event
@json_view
@transaction.atomic
def event_pictures_rotate(request, event):
    """Rotate every screencapture picture of the event by 90 degrees.

    Optional JSON body: {"direction": "left"|"right"} (default "left").
    Each picture file is rotated and written back in place.

    Fix: rotate() is now called with expand=True -- without it PIL keeps
    the original canvas size, so rotating a non-square image cropped it
    to the old bounding box.
    """
    try:
        post = request.body and json.loads(request.body) or {}
    except ValueError:
        return http.HttpResponseBadRequest('invalid JSON body')
    direction = post.get('direction', 'left')
    for picture in Picture.objects.filter(event=event):
        img = Image.open(picture.file.path)
        # keep PNGs as PNG, everything else is saved as JPEG
        # (renamed from `format` to avoid shadowing the builtin)
        image_format = (
            picture.file.name.lower().endswith('.png') and 'png' or 'jpeg'
        )
        # PIL rotates counter-clockwise: 90 == left, 270 == right.
        # expand=True swaps the width/height so nothing is cropped.
        img = img.rotate(direction == 'left' and 90 or 270, expand=True)
        f = StringIO()
        try:
            img.save(f, format=image_format)
            picture.file.save(
                picture.file.name,
                ContentFile(f.getvalue())
            )
        finally:
            f.close()
    return True
|
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_concurrency import processutils as putils
from os_brick.encryptors import luks
from os_brick.tests.encryptors import test_cryptsetup
class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
    """Unit tests for luks.LuksEncryptor (LUKS v1 cryptsetup wrapper).

    Fixtures (root_helper, connection_info, keymgr, dev_path, dev_name,
    symlink_path and self.encryptor) come from the base
    CryptsetupEncryptorTestCase.
    """
    def _create(self):
        # Hook the base class uses to instantiate the encryptor under test.
        return luks.LuksEncryptor(root_helper=self.root_helper,
                                  connection_info=self.connection_info,
                                  keymgr=self.keymgr)
    @mock.patch('os_brick.executor.Executor._execute')
    def test_is_luks(self, mock_execute):
        # is_luks() probes the device with `cryptsetup isLuks`.
        luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                      run_as_root=True, root_helper=self.root_helper,
                      check_exit_code=True),
        ], any_order=False)
    @mock.patch('os_brick.executor.Executor._execute')
    @mock.patch('os_brick.encryptors.luks.LOG')
    def test_is_luks_with_error(self, mock_log, mock_execute):
        # When cryptsetup fails, is_luks() must not raise -- it logs a
        # warning instead.
        error_msg = "Device %s is not a valid LUKS device." % self.dev_path
        mock_execute.side_effect = putils.ProcessExecutionError(
            exit_code=1, stderr=error_msg)
        luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                      run_as_root=True, root_helper=self.root_helper,
                      check_exit_code=True),
        ])
        self.assertEqual(1, mock_log.warning.call_count) # warning logged
    @mock.patch('os_brick.executor.Executor._execute')
    def test__format_volume(self, mock_execute):
        # _format_volume() formats as LUKS1, passphrase fed on stdin
        # via --key-file=-.
        self.encryptor._format_volume("passphrase")
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                      '--type', 'luks1', '--key-file=-', self.dev_path,
                      process_input='passphrase',
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True, attempts=3),
        ])
    @mock.patch('os_brick.executor.Executor._execute')
    def test__open_volume(self, mock_execute):
        # _open_volume() maps the device with luksOpen.
        self.encryptor._open_volume("passphrase")
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input='passphrase',
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
        ])
    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume(self, mock_execute):
        # Happy path: luksOpen succeeds, then a symlink to the mapped
        # device is created.
        fake_key = '0c84146034e747639b698368807286df'
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
        ])
    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_not_formatted(self, mock_execute):
        # If luksOpen fails AND the device is not LUKS, attach_volume()
        # must format it first, then open and symlink it (exact order).
        fake_key = 'bc37c5eccebe403f9cc2d0dd20dac2bc'
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=1), # luksOpen
            putils.ProcessExecutionError(exit_code=1), # isLuks
            mock.DEFAULT, # luksFormat
            mock.DEFAULT, # luksOpen
            mock.DEFAULT, # ln
        ]
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                      '--type', 'luks1', '--key-file=-', self.dev_path,
                      process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
        ], any_order=False)
    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_fail(self, mock_execute):
        # If luksOpen fails but the device IS LUKS, the original error
        # must be re-raised.
        fake_key = 'ea6c2e1b8f7f4f84ae3560116d659ba2'
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=1), # luksOpen
            mock.DEFAULT, # isLuks
        ]
        self.assertRaises(putils.ProcessExecutionError,
                          self.encryptor.attach_volume, None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
        ], any_order=False)
    @mock.patch('os_brick.executor.Executor._execute')
    def test__close_volume(self, mock_execute):
        # detach_volume() unmaps with luksClose; exit code 4 (not active)
        # is tolerated.
        self.encryptor.detach_volume()
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                      root_helper=self.root_helper,
                      attempts=3, run_as_root=True, check_exit_code=[0, 4]),
        ])
    @mock.patch('os_brick.executor.Executor._execute')
    def test_detach_volume(self, mock_execute):
        # Same expectation as test__close_volume, via the public API.
        self.encryptor.detach_volume()
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                      root_helper=self.root_helper,
                      attempts=3, run_as_root=True, check_exit_code=[0, 4]),
        ])
class Luks2EncryptorTestCase(LuksEncryptorTestCase):
    """Re-runs the LuksEncryptorTestCase suite against luks.Luks2Encryptor,
    overriding only the tests whose expected cryptsetup commands differ
    (LUKS2 formats with --type luks2)."""
    def _create(self):
        # Build the LUKS2 variant; everything else is inherited.
        return luks.Luks2Encryptor(root_helper=self.root_helper,
                                   connection_info=self.connection_info,
                                   keymgr=self.keymgr)
    @mock.patch('os_brick.executor.Executor._execute')
    def test__format_volume(self, mock_execute):
        # Same as the LUKS1 test but expects --type luks2.
        self.encryptor._format_volume("passphrase")
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                      '--type', 'luks2', '--key-file=-', self.dev_path,
                      process_input='passphrase',
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True, attempts=3),
        ])
    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_not_formatted(self, mock_execute):
        # Same open/probe/format/open/link sequence as LUKS1, with the
        # luksFormat call expecting --type luks2.
        fake_key = 'bc37c5eccebe403f9cc2d0dd20dac2bc'
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=1), # luksOpen
            putils.ProcessExecutionError(exit_code=1), # isLuks
            mock.DEFAULT, # luksFormat
            mock.DEFAULT, # luksOpen
            mock.DEFAULT, # ln
        ]
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                      '--type', 'luks2', '--key-file=-', self.dev_path,
                      process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                      self.dev_name, process_input=fake_key,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      root_helper=self.root_helper,
                      run_as_root=True, check_exit_code=True),
        ], any_order=False)
|
|
import argparse
from collections import OrderedDict
import os
import re
import sys
import types
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
__version__ = "0.9"
# argparse Action classes that represent value-less flags (eg. --verbose);
# the name says it: options of these types don't need a value appended.
ACTION_TYPES_THAT_DONT_NEED_A_VALUE = {argparse._StoreTrueAction,
    argparse._StoreFalseAction, argparse._CountAction,
    argparse._StoreConstAction, argparse._AppendConstAction}
# global ArgumentParser instances, keyed by name (see getArgumentParser)
_parsers = {}
def initArgumentParser(name=None, **kwargs):
    """Create and register a global ArgumentParser under *name*
    ("default" when name is None).

    Every keyword argument except "name" is forwarded to the
    ArgumentParser constructor; the instance can later be fetched with
    getArgumentParser(name).  Raises ValueError if that name is
    already registered.
    """
    key = "default" if name is None else name
    if key in _parsers:
        raise ValueError(("kwargs besides 'name' can only be passed in the"
            " first time. '%s' ArgumentParser already exists: %s") % (
            key, _parsers[key]))
    kwargs.setdefault('formatter_class', argparse.ArgumentDefaultsHelpFormatter)
    kwargs.setdefault('conflict_handler', 'resolve')
    _parsers[key] = ArgumentParser(**kwargs)
def getArgumentParser(name=None, **kwargs):
    """Return the global ArgumentParser registered under *name*
    ("default" when name is None).

    On first use for a given name -- or whenever extra kwargs are
    supplied -- a new parser is created via initArgumentParser, with
    the kwargs forwarded to the ArgumentParser constructor.
    """
    key = "default" if name is None else name
    if kwargs or key not in _parsers:
        initArgumentParser(key, **kwargs)
    return _parsers[key]
class ArgumentParser(argparse.ArgumentParser):
    """Drop-in replacement for argparse.ArgumentParser that adds support for
    environment variables and .ini or .yaml-style config files.

    Precedence (highest first): command line, environment variables,
    config files, argparse defaults.
    """

    def __init__(self,
                 prog=None,
                 usage=None,
                 description=None,
                 epilog=None,
                 version=None,
                 parents=None,
                 formatter_class=argparse.HelpFormatter,
                 prefix_chars='-',
                 fromfile_prefix_chars=None,
                 argument_default=None,
                 conflict_handler='error',
                 add_help=True,
                 add_config_file_help=True,
                 add_env_var_help=True,
                 default_config_files=None,
                 allow_unknown_config_file_keys=False,
                 args_for_setting_config_path=None,
                 config_arg_is_required=False,
                 config_arg_help_message="config file path",
                 ):
        """Supports all the same args as the argparse.ArgumentParser
        constructor, as well as these additional args.

        Additional Args:
            add_config_file_help: Whether to add a description of config file
                syntax to the help message.
            add_env_var_help: Whether to add something to the help message for
                args that can be set through environment variables.
            default_config_files: When specified, this list of config files will
                be parsed in order, with the values from each config file
                taking precedence over previous ones. This allows an application
                to look for config files in multiple standard locations such as
                the install directory, home directory, and current directory:
                ["<install dir>/app_config.ini",
                "~/.my_app_config.ini",
                "./app_config.txt"]
            allow_unknown_config_file_keys: Whether unknown config file keys
                should be ignored or whether it should be an error.
            args_for_setting_config_path: A list of one or more command line
                args that would allow a user to provide a config file path
                (eg. ["-c", "--config-file"]). Default: []
            config_arg_is_required: when args_for_setting_config_path is set,
                set this to True to always require users to provide a config path.
            config_arg_help_message: when args_for_setting_config_path is set,
                this will be the help message for the config_file_args.
        """
        # FIX: the old signature used mutable [] defaults, which are shared
        # across every instance of the class. Use None sentinels instead.
        parents = [] if parents is None else parents
        default_config_files = (
            [] if default_config_files is None else default_config_files)
        args_for_setting_config_path = (
            [] if args_for_setting_config_path is None
            else args_for_setting_config_path)

        self._add_config_file_help = add_config_file_help
        self._add_env_var_help = add_env_var_help

        # forward only the args the stock ArgumentParser understands
        kwargs_for_super = dict(
            prog=prog, usage=usage, description=description, epilog=epilog,
            version=version, parents=parents, formatter_class=formatter_class,
            prefix_chars=prefix_chars,
            fromfile_prefix_chars=fromfile_prefix_chars,
            argument_default=argument_default,
            conflict_handler=conflict_handler, add_help=add_help)
        if sys.version_info >= (3, 3):
            del kwargs_for_super["version"]  # version arg deprecated in v3.3
        argparse.ArgumentParser.__init__(self, **kwargs_for_super)

        # store the additional args
        self._default_config_files = default_config_files
        self._allow_unknown_config_file_keys = allow_unknown_config_file_keys
        if args_for_setting_config_path:
            self.add_argument(*args_for_setting_config_path, dest="config_file",
                required=config_arg_is_required, help=config_arg_help_message,
                is_config_file=True)

    def parse_args(self, args=None, namespace=None,
                   config_file_contents=None, env_vars=os.environ):
        """Supports all the same args as the ArgumentParser.parse_args(..),
        as well as these additional args.

        Additional Args:
            config_file_contents: String. Used for testing.
            env_vars: Dictionary. Used for testing.
        """
        args, argv = self.parse_known_args(args=args,
            namespace=namespace,
            config_file_contents=config_file_contents,
            env_vars=env_vars)
        if argv:
            self.error('unrecognized arguments: %s' % ' '.join(argv))
        return args

    def parse_known_args(self, args=None, namespace=None,
                         config_file_contents=None, env_vars=os.environ):
        """Supports all the same args as the ArgumentParser.parse_args(..),
        as well as these additional args.

        Additional Args:
            config_file_contents: String. Used for testing.
            env_vars: Dictionary. Used for testing.
        """
        if args is None:
            args = sys.argv[1:]
        elif isinstance(args, str):
            args = args.split()
        else:
            args = list(args)

        # maps string describing the source (eg. env var) to a settings dict
        # to keep track of where values came from (used by print_values())
        self._source_to_settings = OrderedDict()
        self._command_line_args_string = ' '.join(args)
        if args:
            self._source_to_settings["Command Line Args: "] = {
                '': self._command_line_args_string}

        # add env var settings to the command line that aren't there already
        env_var_args = []
        actions_with_env_var_values = [a for a in self._actions
            if a.option_strings and a.env_var
            and a.env_var in env_vars
            and not any(opt in args for opt in a.option_strings)]
        for a in actions_with_env_var_values:
            key = a.env_var
            value = env_vars[key]
            env_var_args += self.convert_setting_to_command_line_arg(
                a, key, value)
        args = env_var_args + args
        if env_var_args:
            self._source_to_settings["Environment Variables:\n"] = OrderedDict(
                [(a.env_var, env_vars[a.env_var])
                 for a in actions_with_env_var_values])

        # read settings from config file(s)
        if config_file_contents:
            stream = StringIO(config_file_contents)
            stream.name = "method arg"
            config_streams = [stream]
        else:
            config_streams = self._get_config_file_streams(args)

        # for each action, add its possible config keys to a dict
        possible_config_keys = {config_key: action for action in self._actions
            for config_key in self.get_possible_config_keys(action)}

        # parse each config file (reversed so that earlier files in the list
        # take precedence -- their args end up closest to the front)
        for stream in config_streams[::-1]:
            try:
                config_settings = self.parse_config_file(stream)
            finally:
                if hasattr(stream, "close"):
                    stream.close()

            # make sure config file doesn't use any unknown keys
            if not self._allow_unknown_config_file_keys:
                invalid_keys = list(
                    set(config_settings.keys()) - set(possible_config_keys.keys()))
                if invalid_keys:
                    self.error(("%s contains unknown config key(s): %s") % (
                        stream.name, ", ".join(invalid_keys)))

            # add config settings to the command line if they aren't there
            # already. FIX: iterate over a snapshot of the items because keys
            # are deleted from the dict inside the loop, which raises
            # "dictionary changed size during iteration" on Python 3.
            config_args = []
            for key, value in list(config_settings.items()):
                if key in possible_config_keys:
                    action = possible_config_keys[key]
                    already_on_command_line = any(
                        arg in args for arg in action.option_strings)
                    if already_on_command_line:
                        del config_settings[key]
                    else:
                        config_args += self.convert_setting_to_command_line_arg(
                            action, key, value)
            args = config_args + args
            if config_args:
                self._source_to_settings[
                    "Config File (%s):\n" % stream.name] = config_settings

        # save default settings for use by print_values()
        default_settings = OrderedDict()
        for a in self._actions:
            already_on_command_line = any(arg in args for arg in a.option_strings)
            cares_about_default = a.option_strings or a.nargs in [
                argparse.OPTIONAL, argparse.ZERO_OR_MORE]
            # use "is None" rather than "== None": equality may be overridden
            # (or ambiguous) for exotic default objects
            if (already_on_command_line or not cares_about_default or
                    a.default is None or a.default == argparse.SUPPRESS or
                    type(a) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE):
                continue
            key = a.option_strings[-1] if a.option_strings else a.dest
            default_settings[key] = str(a.default)
        if default_settings:
            self._source_to_settings["Defaults:\n"] = default_settings

        # parse all args (including command-line, config file, and env var)
        return argparse.ArgumentParser.parse_known_args(
            self, args=args, namespace=namespace)

    def parse_config_file(self, stream):
        """Parses a config file and returns a dictionary of settings.

        Keys without a value (bare flags) are recorded as the string "true".
        Comment lines (#, ;), section headers ([...]) and yaml document
        markers (---) are skipped.
        """
        settings = OrderedDict()
        # enumerate from 1 so error messages use conventional line numbers
        for i, line in enumerate(stream, 1):
            line = line.strip()
            if not line or line[0] in ["#", ";", "["] or line.startswith("---"):
                continue
            # raw strings so the regex escapes are explicit
            white_space = r"\s*"
            key = r"(?P<key>[^:=;#\s]+?)"
            value1 = white_space + r"[:=]" + white_space + r"(?P<value>[^;#]+?)"
            value2 = white_space + r"[\s]" + white_space + r"(?P<value>[^;#\s]+?)"
            comment = white_space + r"(?P<comment>\s[;#].*)?"

            key_only_match = re.match("^" + key + comment + "$", line)
            if key_only_match:
                key = key_only_match.group("key")
                settings[key] = "true"
                continue

            key_value_match = re.match("^" + key + value1 + comment + "$", line) or \
                re.match("^" + key + value2 + comment + "$", line)
            if key_value_match:
                key = key_value_match.group("key")
                value = key_value_match.group("value")
                settings[key] = value
                continue

            self.error("Unexpected line %s in %s: %s" % (i, stream.name, line))
        return settings

    def convert_setting_to_command_line_arg(self, action, key, value):
        """Converts a config file or env var key/value to a list of
        command line args to append to the command line.

        Args:
            action: The action corresponding to this setting
            key: The config file key or env var name (used for error messages)
            value: The raw value string from the config file or env var
        """
        assert isinstance(value, str)
        args = []
        if value.lower() == "true":
            # "true" means "this flag is present" for value-less actions
            if type(action) not in ACTION_TYPES_THAT_DONT_NEED_A_VALUE:
                self.error("%s set to 'True' rather than a value" % key)
            args.append(action.option_strings[-1])
        elif value.startswith("[") and value.endswith("]"):
            # "[a, b]" expands to one --key per list element (append action)
            if type(action) != argparse._AppendAction:
                self.error(("%s can't be set to a list '%s' unless its action "
                            "type is changed to 'append'") % (key, value))
            for list_elem in value[1:-1].split(","):
                args.append(action.option_strings[-1])
                args.append(list_elem.strip())
        else:
            if type(action) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE:
                self.error("%s is a flag but is being set to '%s'" % (key,
                                                                      value))
            args.append(action.option_strings[-1])
            args.append(value)
        return args

    def get_possible_config_keys(self, action):
        """This method decides which actions can be set in a config file and
        what their keys will be. It returns a list of 0 or more config keys
        that can be used to set the given action's value in a config file.
        """
        keys = []
        for arg in action.option_strings:
            if arg.startswith(2 * self.prefix_chars[0]):
                keys += [arg[2:], arg]  # eg. for '--bla' return ['bla', '--bla']
        return keys

    def _get_config_file_streams(self, command_line_args):
        """Returns open streams for all applicable config files: existing
        default config files first, then any file named on the command line
        via an is_config_file arg. The caller is responsible for closing them.
        """
        # open any default config files
        cfg = [open(f) for f in self._default_config_files if os.path.isfile(f)]

        # if there's an arg for specifying config files, try to parse it from
        # the command line using a clean ArgumentParser that only knows this arg
        user_config_file_arg_actions = [
            a for a in self._actions if getattr(a, "is_config_file", False)]
        if user_config_file_arg_actions and command_line_args:
            for action in user_config_file_arg_actions:
                arg_parser = argparse.ArgumentParser(
                    usage=self.usage,
                    description=self.description,
                    epilog=self.epilog,
                    formatter_class=self.formatter_class,
                    prefix_chars=self.prefix_chars,
                    fromfile_prefix_chars=self.fromfile_prefix_chars)
                arg_parser._add_action(action)

                # make parser not exit on error by replacing its error method
                def error_method(self, message):
                    raise argparse.ArgumentError(None, message)
                arg_parser.error = types.MethodType(error_method, arg_parser)

                namespace, _ = arg_parser.parse_known_args(args=command_line_args)
                user_config_file = getattr(namespace, action.dest, None)
                if user_config_file:
                    if os.path.isfile(user_config_file):
                        cfg += [open(user_config_file)]
                    else:
                        self.error('File not found: %s' % user_config_file)
        return cfg

    def format_values(self):
        """Returns a string with all args and settings and where they came from
        (eg. command line, config file, environment variable or default)
        """
        r = StringIO()
        for source, settings in self._source_to_settings.items():
            r.write(source)
            for key, value in settings.items():
                if key:
                    r.write("  %-19s%s\n" % (key + ":", value))
                else:
                    r.write("  %s\n" % value)
        return r.getvalue()

    def print_values(self, file=sys.stdout):
        """Prints the format_values() string (to sys.stdout or another file)."""
        file.write(self.format_values())

    def format_help(self):
        msg = ""
        added_config_file_help = False
        added_env_var_help = False
        if self._add_config_file_help:
            default_config_files = self._default_config_files
            cc = 2 * self.prefix_chars[0]  # eg. --
            config_keys = [(arg, a) for a in self._actions for arg in
                a.option_strings if arg.startswith(cc) and a.dest != "help"]
            config_path_actions = [a for a in
                self._actions if getattr(a, "is_config_file", False)]

            if (default_config_files or config_path_actions) and config_keys:
                self._add_config_file_help = False  # prevent duplication
                added_config_file_help = True

                msg += ("Args that start with '%s' (eg. %s) can also be set in "
                        "a config file") % (cc, config_keys[0][0])
                config_arg_string = " or ".join(a.option_strings[0]
                    for a in config_path_actions if a.option_strings)
                if config_arg_string:
                    config_arg_string = "provided via " + config_arg_string
                if default_config_files or config_arg_string:
                    # FIX: don't join an empty config_arg_string into the
                    # message (it used to produce a dangling " or ")
                    sources = list(default_config_files)
                    if config_arg_string:
                        sources.append(config_arg_string)
                    msg += " (%s)" % " or ".join(sources)
                msg += " by using .ini or .yaml-style syntax "
                examples = []
                # FIX: compare the *action class* (type(a)) against the set of
                # action types; a.type is the value-converter callable (eg.
                # int) and is never a member of the set, so the flag example
                # was never shown and flags appeared as key=value examples.
                key_value_args = [arg for arg, a in config_keys
                    if type(a) not in ACTION_TYPES_THAT_DONT_NEED_A_VALUE]
                if key_value_args:
                    examples += ["%s=value" % key_value_args[0].strip(cc)]
                flag_args = [arg for arg, a in config_keys
                    if type(a) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE]
                if flag_args:
                    examples += ["%s=TRUE" % flag_args[0].strip(cc)]
                if examples:
                    msg += "(eg. %s)." % " or ".join(examples)

        if self._add_env_var_help:
            env_var_actions = [(a.env_var, a) for a in self._actions
                if getattr(a, "env_var", None)]
            for env_var, a in env_var_actions:
                env_var_help_string = " [env var: %s]" % env_var
                if not a.help:
                    a.help = ""
                if env_var_help_string not in a.help:
                    a.help += env_var_help_string
                added_env_var_help = True
                self._add_env_var_help = False  # prevent duplication

        if added_env_var_help or added_config_file_help:
            value_sources = ["defaults"]
            if added_config_file_help:
                value_sources = ["config file values"] + value_sources
            if added_env_var_help:
                value_sources = ["environment variables"] + value_sources
            msg += " Command-line values override %s." % (
                " which override ".join(value_sources))
        if msg:
            self.description = (self.description or "") + " " + msg
        return argparse.ArgumentParser.format_help(self)
def add_argument(self, *args, **kwargs):
    """
    This method supports the same args as ArgumentParser.add_argument(..)
    as well as the additional args below.

    Additional Args:
        env_var: The name of the environment variable to check.
        is_config_file: If True, this arg is treated as a config file path.
            This provides an alternative way to specify config files in place
            of the ArgumentParser(fromfile_prefix_chars=..) mechanism.
            Default: False
    """
    # Pull out the two extension kwargs before delegating the rest to the
    # stock argparse implementation (saved as original_add_argument_method).
    env_var = kwargs.pop("env_var", None)
    is_config_file = kwargs.pop("is_config_file", None)

    action = self.original_add_argument_method(*args, **kwargs)

    # Positional args have no option strings and so can't come from env vars.
    if env_var and not action.option_strings:
        raise ValueError("env_var can't be set for a positional arg.")
    if is_config_file and type(action) != argparse._StoreAction:
        raise ValueError("arg with is_config_file=True must have action='store'")

    # Stash the extension metadata on the action for parse_known_args().
    action.env_var = env_var
    action.is_config_file = is_config_file
    return action
# wrap ArgumentParser's add_argument(..) method with the one above; the
# original is saved as original_add_argument_method so the wrapper can
# delegate to it. Patching _ActionsContainer (the shared base class) makes
# the env_var / is_config_file kwargs available on argument groups too.
argparse._ActionsContainer.original_add_argument_method = argparse._ActionsContainer.add_argument
argparse._ActionsContainer.add_argument = add_argument
# create shorter aliases for the key methods and class names
getArgParser = getArgumentParser
getParser = getArgumentParser
ArgParser = ArgumentParser
Parser = ArgumentParser
argparse._ActionsContainer.add_arg = argparse._ActionsContainer.add_argument
argparse._ActionsContainer.add = argparse._ActionsContainer.add_argument
ArgumentParser.parse = ArgumentParser.parse_args
ArgumentParser.parse_known = ArgumentParser.parse_known_args
|
|
#!/usr/bin/env python
"""This file contains tests that do not fit into any specific file yet.
Feel free to make your own test file if appropriate.
Refer to conftest.py for shared helper methods.
resources/test_* : For tests related to resources
test_* : For other tests of the non-resource elements of the jira package.
"""
import logging
import os
import pickle
from time import sleep
from typing import cast
from unittest import mock
import pytest
import requests
from jira import JIRA, Issue, JIRAError
from jira.client import ResultList
from jira.resources import cls_for_resource
from tests.conftest import JiraTestCase, rndpassword
LOGGER = logging.getLogger(__name__)
class UniversalResourceTests(JiraTestCase):
    """Tests for the generic JIRA.find() lookup and for Resource behaviours
    (pickling, hashing, attribute access) that are independent of any
    particular resource type. Runs against a live test Jira instance."""

    def test_universal_find_existing_resource(self):
        # find() with a path template must return the same resource that the
        # dedicated issue() accessor returns.
        resource = self.jira.find("issue/{0}", self.test_manager.project_b_issue1)
        issue = self.jira.issue(self.test_manager.project_b_issue1)
        self.assertEqual(resource.self, issue.self)
        self.assertEqual(resource.key, issue.key)

    def test_find_invalid_resource_raises_exception(self):
        with self.assertRaises(JIRAError) as cm:
            self.jira.find("woopsydoodle/{0}", "666")
        ex = cm.exception
        # server versions differ: some report 400, others 404
        assert ex.status_code in [400, 404]
        self.assertIsNotNone(ex.text)
        self.assertRegex(ex.url, "^https?://.*/rest/api/(2|latest)/woopsydoodle/666$")

    def test_pickling_resource(self):
        # Pickle only the raw json, then rebuild an instance from it.
        resource = self.jira.find("issue/{0}", self.test_manager.project_b_issue1)
        pickled = pickle.dumps(resource.raw)
        unpickled = pickle.loads(pickled)
        cls = cls_for_resource(unpickled["self"])
        unpickled_instance = cls(
            self.jira._options, self.jira._session, raw=pickle.loads(pickled)
        )
        self.assertEqual(resource.key, unpickled_instance.key)
        # Class types are no longer equal, cls_for_resource() returns an Issue type
        # find() returns a Resource type. So we compare the raw json
        self.assertEqual(resource.raw, unpickled_instance.raw)

    def test_pickling_resource_class(self):
        # Pickle the Resource object itself (round-trips via __reduce__).
        resource = self.jira.find("issue/{0}", self.test_manager.project_b_issue1)
        pickled = pickle.dumps(resource)
        unpickled = pickle.loads(pickled)
        self.assertEqual(resource.key, unpickled.key)
        self.assertEqual(resource, unpickled)

    def test_pickling_issue_class(self):
        resource = self.test_manager.project_b_issue1_obj
        pickled = pickle.dumps(resource)
        unpickled = pickle.loads(pickled)
        self.assertEqual(resource.key, unpickled.key)
        self.assertEqual(resource, unpickled)

    def test_bad_attribute(self):
        # Unknown attributes must raise AttributeError, not return None.
        resource = self.jira.find("issue/{0}", self.test_manager.project_b_issue1)
        with self.assertRaises(AttributeError):
            getattr(resource, "bogus123")

    def test_hashable(self):
        # Distinct resources hash differently and behave as dict keys:
        # updating via an equal key replaces the value, not the entry.
        resource = self.jira.find("issue/{0}", self.test_manager.project_b_issue1)
        resource2 = self.jira.find("issue/{0}", self.test_manager.project_b_issue2)
        r1_hash = hash(resource)
        r2_hash = hash(resource2)
        assert r1_hash != r2_hash
        dict_of_resource = {resource: "hey", resource2: "peekaboo"}
        dict_of_resource.update({resource: "hey ho"})
        assert len(dict_of_resource.keys()) == 2
        assert {resource, resource2} == set(dict_of_resource.keys())
        assert dict_of_resource[resource] == "hey ho"

    def test_hashable_issue_object(self):
        # Same hashing contract for the concrete Issue subclass.
        resource = self.test_manager.project_b_issue1_obj
        resource2 = self.test_manager.project_b_issue2_obj
        r1_hash = hash(resource)
        r2_hash = hash(resource2)
        assert r1_hash != r2_hash
        dict_of_resource = {resource: "hey", resource2: "peekaboo"}
        dict_of_resource.update({resource: "hey ho"})
        assert len(dict_of_resource.keys()) == 2
        assert {resource, resource2} == set(dict_of_resource.keys())
        assert dict_of_resource[resource] == "hey ho"
class ApplicationPropertiesTests(JiraTestCase):
    """Tests for reading and writing Jira application properties."""

    def test_application_properties(self):
        # Every property record should be a dict carrying the standard fields.
        expected_fields = {"type", "name", "value", "key", "id"}
        for prop in self.jira.application_properties():
            self.assertIsInstance(prop, dict)
            self.assertTrue(expected_fields.issubset(set(prop.keys())))

    def test_application_property(self):
        heading_colour = self.jira.application_properties(
            key="jira.lf.text.headingcolour"
        )
        self.assertEqual("#172b4d", heading_colour["value"])

    def test_set_application_property(self):
        prop = "jira.lf.favicon.hires.url"
        valid_value = "/jira-favicon-hires.png"
        invalid_value = "/invalid-jira-favicon-hires.png"
        # Round-trip through both values, finishing on the valid one so the
        # server is left in its original state.
        for value in (invalid_value, valid_value):
            self.jira.set_application_property(prop, value)
            self.assertEqual(
                value, self.jira.application_properties(key=prop)["value"]
            )

    def test_setting_bad_property_raises(self):
        with self.assertRaises(JIRAError):
            self.jira.set_application_property("random.nonexistent.property", "666")
class FieldsTests(JiraTestCase):
    """Smoke test for the field catalogue endpoint."""

    def test_fields(self):
        # A stock Jira install ships well over ten fields; anything less
        # suggests a truncated or empty payload.
        field_list = self.jira.fields()
        self.assertTrue(len(field_list) > 10)
class MyPermissionsTests(JiraTestCase):
    """Checks for the my_permissions endpoint, scoped in various ways."""

    def setUp(self):
        super().setUp()
        self.issue_1 = self.test_manager.project_b_issue1

    def test_my_permissions(self):
        # Unscoped call: a logged-in user should hold a large permission set.
        response = self.jira.my_permissions()
        self.assertGreaterEqual(len(response["permissions"]), 40)

    def test_my_permissions_by_project(self):
        # The project can be addressed either by key or by numeric id.
        for scope in (
            {"projectKey": self.test_manager.project_a},
            {"projectId": self.test_manager.project_a_id},
        ):
            response = self.jira.my_permissions(**scope)
            self.assertGreaterEqual(len(response["permissions"]), 10)

    def test_my_permissions_by_issue(self):
        # The issue can be addressed either by key or by numeric id.
        for scope in (
            {"issueKey": self.issue_1},
            {"issueId": self.test_manager.project_b_issue1_obj.id},
        ):
            response = self.jira.my_permissions(**scope)
            self.assertGreaterEqual(len(response["permissions"]), 10)
class SearchTests(JiraTestCase):
    """Tests for JQL search: paging (sync and async), maxResults/startAt,
    field limiting and expand. Runs against a live test Jira instance."""

    def setUp(self):
        JiraTestCase.setUp(self)
        self.issue = self.test_manager.project_b_issue1

    def test_search_issues(self):
        issues = self.jira.search_issues("project=%s" % self.project_b)
        issues = cast(ResultList[Issue], issues)
        self.assertLessEqual(len(issues), 50)  # default maxResults
        for issue in issues:
            self.assertTrue(issue.key.startswith(self.project_b))

    def test_search_issues_async(self):
        # With async paging and maxResults=False the client must fetch every
        # page; the option is restored afterwards regardless of outcome.
        original_val = self.jira._options["async"]
        try:
            self.jira._options["async"] = True
            issues = self.jira.search_issues(
                "project=%s" % self.project_b, maxResults=False
            )
            issues = cast(ResultList[Issue], issues)
            self.assertEqual(len(issues), issues.total)
            for issue in issues:
                self.assertTrue(issue.key.startswith(self.project_b))
        finally:
            self.jira._options["async"] = original_val

    def test_search_issues_maxresults(self):
        issues = self.jira.search_issues("project=%s" % self.project_b, maxResults=10)
        self.assertLessEqual(len(issues), 10)

    def test_search_issues_startat(self):
        issues = self.jira.search_issues(
            "project=%s" % self.project_b, startAt=2, maxResults=10
        )
        self.assertGreaterEqual(len(issues), 1)
        # we know that project_b should have at least 3 issues

    def test_search_issues_field_limiting(self):
        # Only the requested fields should be present on the result.
        issues = self.jira.search_issues(
            "key=%s" % self.issue, fields="summary,comment"
        )
        issues = cast(ResultList[Issue], issues)
        self.assertTrue(hasattr(issues[0].fields, "summary"))
        self.assertTrue(hasattr(issues[0].fields, "comment"))
        self.assertFalse(hasattr(issues[0].fields, "reporter"))
        self.assertFalse(hasattr(issues[0].fields, "progress"))

    def test_search_issues_expand(self):
        # expand="changelog" should attach the changelog but nothing else.
        issues = self.jira.search_issues("key=%s" % self.issue, expand="changelog")
        issues = cast(ResultList[Issue], issues)
        # self.assertTrue(hasattr(issues[0], 'names'))
        self.assertEqual(len(issues), 1)
        self.assertFalse(hasattr(issues[0], "editmeta"))
        self.assertTrue(hasattr(issues[0], "changelog"))
        self.assertEqual(issues[0].key, self.issue)
class ServerInfoTests(JiraTestCase):
    """server_info() should expose basic deployment metadata."""

    def test_server_info(self):
        info = self.jira.server_info()
        for expected_key in ("baseUrl", "version"):
            self.assertIn(expected_key, info)
class OtherTests(JiraTestCase):
    """Tests that construct their own JIRA clients against public endpoints."""

    def setUp(self) -> None:
        pass  # we don't need Jira instance here

    def test_session_invalid_login(self):
        # Bad credentials with validate=True must raise JIRAError during
        # construction; falling through the try body is a test failure.
        try:
            JIRA(
                "https://jira.atlassian.com",
                basic_auth=("xxx", "xxx"),
                validate=True,
                logging=False,
            )
        except Exception as e:
            self.assertIsInstance(e, JIRAError)
            e = cast(JIRAError, e)  # help mypy
            # 20161010: jira cloud returns 500
            assert e.status_code in (401, 500, 403)
            str(JIRAError)  # to see that this does not raise an exception
            return
        # reached only if the constructor did NOT raise
        assert False
class SessionTests(JiraTestCase):
    """Tests for the session() endpoint, including failure modes."""

    def test_session(self):
        user = self.jira.session()
        self.assertIsNotNone(user.raw["self"])
        self.assertIsNotNone(user.raw["name"])

    def test_session_with_no_logged_in_user_raises(self):
        # An anonymous client has no session resource to fetch.
        anon_jira = JIRA("https://jira.atlassian.com", logging=False)
        self.assertRaises(JIRAError, anon_jira.session)

    def test_session_server_offline(self):
        # Port 1 on localhost is assumed unreachable; the constructor must
        # fail fast (max_retries=0) rather than succeed.
        try:
            JIRA("https://127.0.0.1:1", logging=False, max_retries=0)
        except Exception as e:
            self.assertIn(
                type(e),
                (JIRAError, requests.exceptions.ConnectionError, AttributeError),
                e,
            )
            return
        self.assertTrue(False, "Instantiation of invalid JIRA instance succeeded.")
class AsyncTests(JiraTestCase):
    """Tests for async paging, driven entirely by a mocked HTTP session."""

    def setUp(self):
        # validate/get_server_info are disabled so construction does no
        # network traffic; async_=True enables concurrent page fetching.
        self.jira = JIRA(
            "https://jira.atlassian.com",
            logging=False,
            async_=True,
            validate=False,
            get_server_info=False,
        )

    def test_fetch_pages(self):
        """Tests that the JIRA._fetch_pages method works as expected."""
        params = {"startAt": 0}
        total = 26
        expected_results = []
        for i in range(0, total):
            result = _create_issue_result_json(i, "summary %s" % i, key="KEY-%s" % i)
            expected_results.append(result)
        # three pages: 10 + 10 + 6 results
        result_one = _create_issue_search_results_json(
            expected_results[:10], max_results=10, total=total
        )
        result_two = _create_issue_search_results_json(
            expected_results[10:20], max_results=10, total=total
        )
        result_three = _create_issue_search_results_json(
            expected_results[20:], max_results=6, total=total
        )
        # replace the HTTP session with a mock that serves the three pages
        # in order, for both .request() and .get()
        mock_session = mock.Mock(name="mock_session")
        responses = mock.Mock(name="responses")
        responses.content = "_filler_"
        responses.json.side_effect = [result_one, result_two, result_three]
        responses.status_code = 200
        mock_session.request.return_value = responses
        mock_session.get.return_value = responses
        self.jira._session.close()
        self.jira._session = mock_session
        items = self.jira._fetch_pages(Issue, "issues", "search", 0, False, params)
        self.assertEqual(len(items), total)
        # every expected key appears exactly once across all pages
        self.assertEqual(
            {item.key for item in items},
            {expected_r["key"] for expected_r in expected_results},
        )
def _create_issue_result_json(issue_id, summary, key, **kwargs):
"""Returns a minimal json object for an issue."""
return {
"id": "%s" % issue_id,
"summary": summary,
"key": key,
"self": kwargs.get("self", "http://example.com/%s" % issue_id),
}
def _create_issue_search_results_json(issues, **kwargs):
"""Returns a minimal json object for Jira issue search results."""
return {
"startAt": kwargs.get("start_at", 0),
"maxResults": kwargs.get("max_results", 50),
"total": kwargs.get("total", len(issues)),
"issues": issues,
}
class WebsudoTests(JiraTestCase):
    """Tests for the websudo (admin re-authentication) endpoint."""

    def test_kill_websudo(self):
        # Smoke test: the call must simply not raise.
        self.jira.kill_websudo()

    # def test_kill_websudo_without_login_raises(self):
    #     self.assertRaises(ConnectionError, JIRA)
class UserAdministrationTests(JiraTestCase):
    """Integration tests for user and group administration endpoints.

    These tests mutate server state, so each one first tries to clean up
    leftovers from earlier failed runs, then cleans up after itself.
    """

    def setUp(self):
        JiraTestCase.setUp(self)
        self.test_username = "test_%s" % self.test_manager.project_a
        self.test_email = "%s@example.com" % self.test_username
        self.test_password = rndpassword()
        self.test_groupname = "testGroupFor_%s" % self.test_manager.project_a

    def _skip_pycontribs_instance(self):
        pytest.skip(
            "The current ci jira admin user for "
            "https://pycontribs.atlassian.net lacks "
            "permission to modify users."
        )

    def _should_skip_for_pycontribs_instance(self):
        return self.test_manager.CI_JIRA_ADMIN == "ci-admin" and (
            self.test_manager.CI_JIRA_URL == "https://pycontribs.atlassian.net"
        )

    def test_add_and_remove_user(self):
        if self._should_skip_for_pycontribs_instance():
            self._skip_pycontribs_instance()
        try:
            self.jira.delete_user(self.test_username)
        except JIRAError as e:
            # best-effort cleanup: the user may simply not exist yet
            print(e)
        result = self.jira.add_user(
            self.test_username, self.test_email, password=self.test_password
        )
        # FIX: was "assert result, True" -- the ", True" was a no-op message
        # and bare asserts are stripped under python -O
        self.assertTrue(result)
        try:
            # Make sure user exists before attempting test to delete.
            self.jira.add_user(
                self.test_username, self.test_email, password=self.test_password
            )
        except JIRAError:
            pass
        result = self.jira.delete_user(self.test_username)
        self.assertTrue(result)
        x = []
        # avoiding a zombie due to Atlassian caching
        for i in range(10):
            x = self.jira.search_users(self.test_username)
            if len(x) == 0:
                break
            sleep(1)
        self.assertEqual(
            len(x), 0, "Found test user when it should have been deleted. Test Fails."
        )
        # test creating users with no application access (used for Service Desk)
        result = self.jira.add_user(
            self.test_username,
            self.test_email,
            password=self.test_password,
            application_keys=["jira-software"],
        )
        self.assertTrue(result)
        result = self.jira.delete_user(self.test_username)
        self.assertTrue(result)

    def test_add_group(self):
        if self._should_skip_for_pycontribs_instance():
            self._skip_pycontribs_instance()
        try:
            self.jira.remove_group(self.test_groupname)
        except JIRAError:
            pass
        sleep(2)  # avoid 500 errors
        result = self.jira.add_group(self.test_groupname)
        self.assertTrue(result)
        x = self.jira.groups(query=self.test_groupname)
        self.assertEqual(
            self.test_groupname,
            x[0],
            "Did not find expected group after trying to add it. Test Fails.",
        )
        self.jira.remove_group(self.test_groupname)

    def test_remove_group(self):
        if self._should_skip_for_pycontribs_instance():
            self._skip_pycontribs_instance()
        try:
            self.jira.add_group(self.test_groupname)
            sleep(1)  # avoid 400
        except JIRAError:
            pass
        result = self.jira.remove_group(self.test_groupname)
        self.assertTrue(result)
        x = []
        for i in range(5):
            x = self.jira.groups(query=self.test_groupname)
            # FIX: was "if x == 0:" -- x is a list, so the condition was never
            # true and the retry loop always slept the full 5 seconds
            if len(x) == 0:
                break
            sleep(1)
        self.assertEqual(
            len(x),
            0,
            "Found group with name when it should have been deleted. Test Fails.",
        )

    def test_add_user_to_group(self):
        try:
            self.jira.add_user(
                self.test_username, self.test_email, password=self.test_password
            )
            self.jira.add_group(self.test_groupname)
            # Just in case user is already there.
            self.jira.remove_user_from_group(self.test_username, self.test_groupname)
        except JIRAError:
            pass
        result = self.jira.add_user_to_group(self.test_username, self.test_groupname)
        self.assertTrue(result)
        x = self.jira.group_members(self.test_groupname)
        self.assertIn(
            self.test_username,
            x.keys(),
            "Username not returned in group member list. Test Fails.",
        )
        self.assertIn("email", x[self.test_username])
        self.assertIn("fullname", x[self.test_username])
        self.assertIn("active", x[self.test_username])
        self.jira.remove_group(self.test_groupname)
        self.jira.delete_user(self.test_username)

    def test_remove_user_from_group(self):
        if self._should_skip_for_pycontribs_instance():
            self._skip_pycontribs_instance()
        try:
            self.jira.add_user(
                self.test_username, self.test_email, password=self.test_password
            )
        except JIRAError:
            pass
        try:
            self.jira.add_group(self.test_groupname)
        except JIRAError:
            pass
        try:
            self.jira.add_user_to_group(self.test_username, self.test_groupname)
        except JIRAError:
            pass
        result = self.jira.remove_user_from_group(
            self.test_username, self.test_groupname
        )
        self.assertTrue(result)
        sleep(2)
        x = self.jira.group_members(self.test_groupname)
        self.assertNotIn(
            self.test_username,
            x.keys(),
            "Username found in group when it should have been removed. Test Fails.",
        )
        self.jira.remove_group(self.test_groupname)
        self.jira.delete_user(self.test_username)
class JiraShellTests(JiraTestCase):
    """Sanity check that the bundled ``jirashell`` CLI is installed."""

    def setUp(self) -> None:
        # Override: no live Jira instance is needed for a CLI check.
        pass

    def test_jirashell_command_exists(self):
        exit_status = os.system("jirashell --help")
        self.assertEqual(0, exit_status)
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import xyzservices.providers as xyz
# Bokeh imports
from bokeh.models import WMTSTileSource
# Module under test
import bokeh.tile_providers as bt # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Names this module is expected to export once literals are converted to
# enums (see the commented-out verify_all below).
ALL = (
    'CARTODBPOSITRON',
    'CARTODBPOSITRON_RETINA',
    'STAMEN_TERRAIN',
    'STAMEN_TERRAIN_RETINA',
    'STAMEN_TONER',
    'STAMEN_TONER_BACKGROUND',
    'STAMEN_TONER_LABELS',
    'OSM',
    'ESRI_IMAGERY',
    'get_provider',
    'Vendors'
)

# Expected tile URLs and attributions per vendor constant, built from
# xyzservices so the tests stay in sync with the upstream provider data.
_CARTO_URLS = {
    'CARTODBPOSITRON': xyz.CartoDB.Positron.build_url(),
    'CARTODBPOSITRON_RETINA': xyz.CartoDB.Positron.build_url(scale_factor="@2x"),
}

_STAMEN_URLS = {
    'STAMEN_TERRAIN': xyz.Stamen.Terrain.build_url(),
    'STAMEN_TERRAIN_RETINA': xyz.Stamen.Terrain.build_url(scale_factor="@2x"),
    'STAMEN_TONER': xyz.Stamen.Toner.build_url(),
    'STAMEN_TONER_BACKGROUND': xyz.Stamen.TonerBackground.build_url(),
    'STAMEN_TONER_LABELS': xyz.Stamen.TonerLabels.build_url(),
}

_STAMEN_ATTR = {
    'STAMEN_TERRAIN': xyz.Stamen.Terrain.html_attribution,
    'STAMEN_TERRAIN_RETINA': xyz.Stamen.Terrain.html_attribution,
    'STAMEN_TONER': xyz.Stamen.Toner.html_attribution,
    'STAMEN_TONER_BACKGROUND': xyz.Stamen.TonerBackground.html_attribution,
    'STAMEN_TONER_LABELS': xyz.Stamen.TonerLabels.html_attribution,
}

_OSM_URLS = {
    'OSM': xyz.OpenStreetMap.Mapnik.build_url()
}

_ESRI_URLS = {
    'ESRI_IMAGERY': xyz.Esri.WorldImagery.build_url()
}
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# XXX This is commented out until version 2.0 and literals are converted to enums
# Test___all__ = verify_all(bt, ALL)
@pytest.mark.parametrize('name', [ 'STAMEN_TERRAIN', 'STAMEN_TERRAIN_RETINA', 'STAMEN_TONER', 'STAMEN_TONER_BACKGROUND', 'STAMEN_TONER_LABELS',])
class Test_StamenProviders:
    """Checks the Stamen vendor attributes exposed by bokeh.tile_providers."""

    def test_type(self, name) -> None:
        attr = getattr(bt, name)
        assert isinstance(attr, str)

    def test_url(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.url == _STAMEN_URLS[name]

    def test_attribution(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.attribution == _STAMEN_ATTR[name]

    def test_copies(self, name) -> None:
        # Each lookup must hand back a fresh tile source object.
        first = bt.get_provider(getattr(bt, name))
        second = bt.get_provider(getattr(bt, name))
        assert first is not second
@pytest.mark.parametrize('name', ['CARTODBPOSITRON', 'CARTODBPOSITRON_RETINA'])
class Test_CartoProviders:
    """Checks the CartoDB vendor attributes exposed by bokeh.tile_providers."""

    def test_type(self, name) -> None:
        attr = getattr(bt, name)
        assert isinstance(attr, str)

    def test_url(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.url == _CARTO_URLS[name]

    def test_attribution(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.attribution == xyz.CartoDB.Positron.html_attribution

    def test_copies(self, name) -> None:
        # Each lookup must hand back a fresh tile source object.
        first = bt.get_provider(getattr(bt, name))
        second = bt.get_provider(getattr(bt, name))
        assert first is not second
@pytest.mark.parametrize('name', ['OSM'])
class Test_OsmProvider:
    """Checks the OpenStreetMap vendor attributes on bokeh.tile_providers."""

    def test_type(self, name) -> None:
        attr = getattr(bt, name)
        assert isinstance(attr, str)

    def test_url(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.url == _OSM_URLS[name]

    def test_attribution(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.attribution == xyz.OpenStreetMap.Mapnik.html_attribution

    def test_copies(self, name) -> None:
        # Each lookup must hand back a fresh tile source object.
        first = bt.get_provider(getattr(bt, name))
        second = bt.get_provider(getattr(bt, name))
        assert first is not second
@pytest.mark.parametrize('name', ['ESRI_IMAGERY'])
class Test_EsriProvider:
    """Checks the Esri vendor attributes exposed by bokeh.tile_providers."""

    def test_type(self, name) -> None:
        attr = getattr(bt, name)
        assert isinstance(attr, str)

    def test_url(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.url == _ESRI_URLS[name]

    def test_attribution(self, name) -> None:
        provider = bt.get_provider(getattr(bt, name))
        assert provider.attribution == xyz.Esri.WorldImagery.html_attribution

    def test_copies(self, name) -> None:
        # Each lookup must hand back a fresh tile source object.
        first = bt.get_provider(getattr(bt, name))
        second = bt.get_provider(getattr(bt, name))
        assert first is not second
class Test_GetProvider:
    """Tests for the bokeh.tile_providers.get_provider lookup function."""

    @pytest.mark.parametrize('name', ['CARTODBPOSITRON', 'CARTODBPOSITRON_RETINA', 'STAMEN_TERRAIN',
                                      'STAMEN_TERRAIN_RETINA', 'STAMEN_TONER', 'STAMEN_TONER_BACKGROUND',
                                      'STAMEN_TONER_LABELS', 'OSM', 'ESRI_IMAGERY', ])
    def test_get_provider(self, name) -> None:
        # Lookup must accept the enum member, the vendor name in either
        # case, and the module attribute — all yielding equivalent sources.
        assert name in bt.Vendors
        enum_member = getattr(bt.Vendors, name)
        assert hasattr(bt, name)
        mod_member = getattr(bt, name)
        by_enum = bt.get_provider(enum_member)
        by_name = bt.get_provider(name)
        by_lower = bt.get_provider(name.lower())
        by_attr = bt.get_provider(mod_member)
        for source in (by_enum, by_name, by_lower, by_attr):
            assert isinstance(source, WMTSTileSource)
        # Every call returns a distinct copy...
        assert by_enum is not by_name
        assert by_name is not by_lower
        assert by_name is not by_attr
        assert by_attr is not by_enum
        # ...but all copies describe the same provider.
        assert by_enum.url == by_name.url == by_lower.url == by_attr.url
        assert by_enum.attribution == by_name.attribution == by_lower.attribution == by_attr.attribution

    def test_unknown_vendor(self) -> None:
        with pytest.raises(ValueError):
            bt.get_provider("This is not a valid tile vendor")

    def test_xyzservices(self) -> None:
        # get_provider also accepts an xyzservices provider object directly.
        xyzservices = pytest.importorskip("xyzservices")
        provider_data = xyzservices.providers.CartoDB.Positron
        tile_source = bt.get_provider(provider_data)
        assert isinstance(tile_source, WMTSTileSource)
        assert tile_source.url == provider_data.build_url()
        assert tile_source.attribution == provider_data.html_attribution
        assert tile_source.min_zoom == provider_data.get("min_zoom", 0)
        assert tile_source.max_zoom == provider_data.get("max_zoom", 30)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import telnetlib
import time
from savanna.tests.integration import base
import savanna.tests.integration.configs.parameters as param
class ClusterScalingTest(base.ITestCase):
    """Integration tests covering cluster scale-up and scale-down.

    Scaling is exercised two ways: by resizing existing node groups and by
    attaching new node groups built from node group templates.
    """

    def setUp(self):
        super(ClusterScalingTest, self).setUp()
        # Probe the controller endpoint so tests fail fast when it is down.
        telnetlib.Telnet(self.host, self.port)

    def create_cluster_for_scaling(self, node_processes):
        """Create a cluster running node_processes and return its id."""
        cluster_body = self.make_cl_body_node_processes(node_processes)
        cluster_body['name'] = param.CLUSTER_NAME_SCALING
        cluster_id = self.create_cluster_and_get_id(cluster_body)
        return cluster_id

    def implement_scaling(self, cluster_id, scaling_body):
        """PUT the scaling request and wait until the cluster is active."""
        self.put_object(self.url_cluster_with_slash, cluster_id,
                        scaling_body, 202)
        self.await_cluster_active(cluster_id)

    @staticmethod
    def _added_node_group(scaling_map):
        # One entry of the 'add_node_groups' scaling payload.
        return {
            'node_group_template_id': scaling_map['ngt_id'],
            'count': scaling_map['node_count'],
            'name': scaling_map['ng_name']
        }

    @staticmethod
    def _resized_node_group(scaling_map):
        # One entry of the 'resize_node_groups' scaling payload; any extra
        # keys in scaling_map (e.g. 'ngt_id') are deliberately ignored.
        return {
            'name': scaling_map['ng_name'],
            'count': scaling_map['node_count']
        }

    def implement_scaling_new_node_group_addition(self, cluster_id,
                                                  scaling_map1,
                                                  scaling_map2='',
                                                  multi_scaling=False):
        """Scale the cluster by adding one (or, if multi_scaling, two) new
        node groups described by the scaling maps."""
        node_groups = [self._added_node_group(scaling_map1)]
        if multi_scaling:
            # Bug fix: the second node group previously reused
            # scaling_map1['ngt_id']; it must use its own template id.
            node_groups.append(self._added_node_group(scaling_map2))
        self.implement_scaling(cluster_id, {'add_node_groups': node_groups})

    def implement_scaling_addition_to_existing_node_group(self, cluster_id,
                                                          scaling_map1,
                                                          scaling_map2='',
                                                          multi_scaling=False):
        """Scale the cluster by resizing one (or two) existing node groups."""
        node_groups = [self._resized_node_group(scaling_map1)]
        if multi_scaling:
            node_groups.append(self._resized_node_group(scaling_map2))
        self.implement_scaling(cluster_id, {'resize_node_groups': node_groups})

    def check_cluster_worker_nodes(self, cluster_id):
        """Return the worker map {worker_type: count} seen by the namenode."""
        ip_instances = self.get_instances_ip_and_node_processes_list(
            cluster_id)
        # Give freshly (de)provisioned instances a moment to settle.
        time.sleep(10)
        try:
            worker_map = self.get_namenode_ip_and_tt_dn_count(ip_instances)
            self.await_active_workers_for_namenode(worker_map)
            return worker_map
        except Exception as e:
            self.fail(str(e))

    def compare_worker_node_count_after_scaling(self,
                                                worker_map,
                                                worker_type,
                                                worker_node_count):
        # assertEquals is a deprecated unittest alias; the failure message
        # previously printed the worker type twice instead of the counts.
        self.assertEqual(
            worker_map[worker_type], worker_node_count,
            '%s: %s != %s after cluster scaling!' % (
                worker_type, worker_map[worker_type], worker_node_count))

    def check_cluster_worker_nodes_after_scaling(self,
                                                 cluster_id,
                                                 worker_type,
                                                 scaling_worker_node_count):
        """Assert the live worker count equals the expected post-scaling count."""
        worker_map = self.check_cluster_worker_nodes(cluster_id)
        self.compare_worker_node_count_after_scaling(
            worker_map, worker_type, scaling_worker_node_count)

    def test_scaling_addition_to_existing_ng(self):
        ng_name_for_tt = 'tt'
        tt_count = 1
        ng_name_for_dn = 'dn'
        dn_count = 1
        dn_replication_factor = 3
        cluster_id = self.create_cluster_for_scaling(
            {'JT': 1, 'NN': 1, 'TT': tt_count, 'DN': dn_count})
        try:
            # Scale tasktrackers up by one.
            self.implement_scaling_addition_to_existing_node_group(
                cluster_id, {
                    'ng_name': ng_name_for_tt,
                    'node_count': tt_count + 1
                })
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'tasktracker_count', tt_count + 1)
            # Scale datanodes up to satisfy the replication factor.
            self.implement_scaling_addition_to_existing_node_group(
                cluster_id, {
                    'ng_name': ng_name_for_dn,
                    'node_count': dn_count + dn_replication_factor
                })
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'datanode_count', dn_count + dn_replication_factor)
            # Resize both groups in a single request (scale down).
            multi_scaling = True
            self.implement_scaling_addition_to_existing_node_group(
                cluster_id, {
                    'ng_name': ng_name_for_tt,
                    'node_count': 0
                },
                {
                    'ng_name': ng_name_for_dn,
                    'node_count': dn_replication_factor
                },
                multi_scaling)
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'tasktracker_count', 0)
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'datanode_count', dn_replication_factor)
        except Exception as e:
            self.fail(str(e))
        finally:
            self.del_object(self.url_cluster_with_slash, cluster_id, 204)

    def test_scaling_new_node_group_addition(self):
        ng_name_for_tt = 'ng-tt'
        added_tt_count = 2
        ng_name_for_dn = 'ng-dn'
        added_dn_count = 2
        dn_replication_factor = 3
        cluster_id = self.create_cluster_for_scaling(
            {'JT+NN': 1, 'TT+DN': 1})
        self.create_node_group_templates()
        try:
            # Attach a brand new tasktracker node group.
            self.implement_scaling_new_node_group_addition(
                cluster_id, {
                    'ngt_id': self.id_tt,
                    'node_count': added_tt_count,
                    'ng_name': ng_name_for_tt,
                })
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'tasktracker_count', added_tt_count + 1)
            # Attach a brand new datanode node group.
            self.implement_scaling_new_node_group_addition(
                cluster_id, {
                    'ngt_id': self.id_dn,
                    'node_count': added_dn_count + dn_replication_factor,
                    'ng_name': ng_name_for_dn,
                })
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'datanode_count',
                1 + added_dn_count + dn_replication_factor)
            # Resize both new groups in one request; the 'ngt_id' keys are
            # ignored by the resize payload builder.
            multi_scaling = True
            self.implement_scaling_addition_to_existing_node_group(
                cluster_id, {
                    'ngt_id': self.id_tt,
                    'node_count': 0,
                    'ng_name': ng_name_for_tt,
                }, {
                    'ngt_id': self.id_dn,
                    'node_count': dn_replication_factor,
                    'ng_name': ng_name_for_dn,
                }, multi_scaling)
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'tasktracker_count', 1)
            self.check_cluster_worker_nodes_after_scaling(
                cluster_id, 'datanode_count', 1 + dn_replication_factor)
        except Exception as e:
            self.fail(str(e))
        finally:
            self.del_object(self.url_cluster_with_slash, cluster_id, 204)
            self.delete_node_group_templates()
|
|
# Copyright 2014 Alcatel-Lucent USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from gbpservice.neutron.services.grouppolicy.drivers.oneconvergence import (
nvsd_gbp_api as api)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_resource_mapping)
class MockNVSDApiClient(object):
    """No-op stand-in for the NVSD service API used by the driver tests.

    Every method accepts the same signature as the real client and does
    nothing; the tests patch and inspect these methods with mock.
    """

    def create_endpoint(self, context, endpoint):
        """Accept an endpoint create call without side effects."""

    def update_endpoint(self, context, updated_endpoint):
        """Accept an endpoint update call without side effects."""

    def delete_endpoint(self, context, endpoint_id):
        """Accept an endpoint delete call without side effects."""

    def create_endpointgroup(self, context, endpointgroup):
        """Accept an endpoint group create call without side effects."""

    def update_endpointgroup(self, context, endpointgroup):
        """Accept an endpoint group update call without side effects."""

    def delete_endpointgroup(self, context, endpointgroup_id):
        """Accept an endpoint group delete call without side effects."""

    def create_policy_classifier(self, context, policy_classifier):
        """Accept a classifier create call without side effects."""

    def update_policy_classifier(self, context, policy_classifier):
        """Accept a classifier update call without side effects."""

    def delete_policy_classifier(self, context, policy_classifier_id):
        """Accept a classifier delete call without side effects."""
class OneConvergenceGBPDriverTestCase(
        test_resource_mapping.ResourceMappingTestCase):
    """Base test case that installs the One Convergence GBP policy driver.

    The NVSD controller client is replaced with MockNVSDApiClient so the
    tests never talk to a real controller.
    """

    def setUp(self):
        drivers = ['implicit_policy', 'oneconvergence_gbp_driver']
        patcher = mock.patch.object(
            api, 'NVSDServiceApi', new=MockNVSDApiClient)
        with patcher as self.mockNVSDApi:
            super(OneConvergenceGBPDriverTestCase, self).setUp(
                policy_drivers=drivers)
class TestPolicyTarget(OneConvergenceGBPDriverTestCase,
                       test_resource_mapping.TestPolicyTarget):
    """Policy target tests; functionality and api results are covered by
    the base class, this class only checks controller API forwarding."""

    def test_oneconvergence_controller_api_invoked(self):
        # Patch all three endpoint hooks in a single with statement.
        with mock.patch.object(MockNVSDApiClient,
                               'create_endpoint') as create_ep, \
                mock.patch.object(MockNVSDApiClient,
                                  'update_endpoint') as update_ep, \
                mock.patch.object(MockNVSDApiClient,
                                  'delete_endpoint') as delete_ep:
            ptg = self.create_policy_target_group(name="ptg1")
            ptg_id = ptg['policy_target_group']['id']
            # Create policy_target with implicit port.
            pt = self.create_policy_target(
                name="pt1",
                policy_target_group_id=ptg_id)['policy_target']
            create_ep.assert_called_once_with(mock.ANY, pt)
            pt = self.update_policy_target(
                pt['id'], name="new_pt")['policy_target']
            update_ep.assert_called_once_with(mock.ANY, pt)
            self.delete_policy_target(pt['id'])
            delete_ep.assert_called_once_with(mock.ANY, pt['id'])
class TestPolicyTargetGroup(OneConvergenceGBPDriverTestCase,
                            test_resource_mapping.TestPolicyTargetGroup):
    """PTG tests adjusted for the driver's shared implicit subnet."""

    def test_subnet_allocation(self):
        first_ptg = self.create_policy_target_group(name="ptg1")
        first_subnets = first_ptg['policy_target_group']['subnets']
        second_ptg = self.create_policy_target_group(name="ptg2")
        second_subnets = second_ptg['policy_target_group']['subnets']
        # Both PTGs receive the driver's single implicit subnet.
        self.assertEqual(first_subnets, second_subnets)

    def test_no_extra_subnets_created(self):
        before = len(self._get_all_subnets())
        self.create_policy_target_group()
        self.create_policy_target_group()
        after = len(self._get_all_subnets())
        # One Convergence driver shares the same implicit subnet
        self.assertEqual(before + 1, after)

    def test_ip_pool_exhaustion(self):
        # One Convergence driver shares the same implicit subnet
        """Not applicable: the shared implicit subnet cannot be exhausted
        the way the base test expects."""

    def test_oneconvergence_controller_api_invoked(self):
        with mock.patch.object(MockNVSDApiClient,
                               'create_endpointgroup') as create_epg, \
                mock.patch.object(MockNVSDApiClient,
                                  'update_endpointgroup') as update_epg, \
                mock.patch.object(MockNVSDApiClient,
                                  'delete_endpointgroup') as delete_epg:
            ptg = self.create_policy_target_group(
                name="ptg1")['policy_target_group']
            create_epg.assert_called_once_with(mock.ANY, ptg)
            ptg = self.update_policy_target_group(
                ptg['id'],
                name="new_ptg")['policy_target_group']
            update_epg.assert_called_once_with(mock.ANY, ptg)
            self.delete_policy_target_group(ptg['id'])
            delete_epg.assert_called_once_with(mock.ANY, ptg['id'])
class TestPolicyClassifier(OneConvergenceGBPDriverTestCase):
    """Verifies classifier CRUD is forwarded to the NVSD controller API."""

    def test_oneconvergence_controller_api_invoked(self):
        with mock.patch.object(
                MockNVSDApiClient,
                'create_policy_classifier') as create_classifier, \
                mock.patch.object(
                    MockNVSDApiClient,
                    'update_policy_classifier') as update_classifier, \
                mock.patch.object(
                    MockNVSDApiClient,
                    'delete_policy_classifier') as delete_classifier:
            classifier = self.create_policy_classifier(
                name="classifier1")['policy_classifier']
            # The driver is handed the classifier with its rule list.
            classifier.update({"policy_rules": []})
            create_classifier.assert_called_once_with(mock.ANY,
                                                      classifier)
            classifier = self.update_policy_classifier(
                classifier['id'],
                name="new_classifier")['policy_classifier']
            classifier.update({"policy_rules": []})
            update_classifier.assert_called_once_with(mock.ANY,
                                                      classifier)
            self.delete_policy_classifier(classifier['id'])
            delete_classifier.assert_called_once_with(
                mock.ANY, classifier['id'])
class TestL2Policy(OneConvergenceGBPDriverTestCase,
                   test_resource_mapping.TestL2Policy):
    """Run the base L2 policy tests against the One Convergence driver."""
class TestL3Policy(OneConvergenceGBPDriverTestCase,
                   test_resource_mapping.TestL3Policy):
    """Run the base L3 policy tests against the One Convergence driver."""
class TestPolicyRuleSet(OneConvergenceGBPDriverTestCase,
                        test_resource_mapping.TestPolicyRuleSet):
    """Run the base policy rule set tests against the One Convergence driver."""
class TestServiceChain(OneConvergenceGBPDriverTestCase,
                       test_resource_mapping.TestServiceChain):
    """Run the base service chain tests against the One Convergence driver."""
class TestServiceChainAdminOwner(
    OneConvergenceGBPDriverTestCase,
    test_resource_mapping.TestServiceChainAdminOwner):
    """Run the admin-owned service chain tests against the driver."""
class TestPolicyAction(OneConvergenceGBPDriverTestCase,
                       test_resource_mapping.TestPolicyAction):
    """Run the base policy action tests against the One Convergence driver."""
class TestPolicyRule(OneConvergenceGBPDriverTestCase,
                     test_resource_mapping.TestPolicyRule):
    """Run the base policy rule tests against the One Convergence driver."""
class TestExternalSegment(OneConvergenceGBPDriverTestCase,
                          test_resource_mapping.TestExternalSegment):
    """Run the base external segment tests against the One Convergence driver."""
class TestExternalPolicy(OneConvergenceGBPDriverTestCase,
                         test_resource_mapping.TestExternalPolicy):
    """Run the base external policy tests against the One Convergence driver."""
|
|
#!/usr/bin/env python2
#
# Copyright (C) Microsoft Corporation, All rights reserved.
"""Urllib3 HttpClient."""
import sys
import http.client
import socket
import time
import traceback
import urllib.request, urllib.error, urllib.parse
from httpclient import *
from workerexception import *
# Indices into sys.version_info (e.g. sys.version_info[PY_MAJOR_VERSION] is
# the interpreter's major version). These are positions, not version numbers.
PY_MAJOR_VERSION = 0
PY_MINOR_VERSION = 1
PY_MICRO_VERSION = 2
# Module name used to check sys.modules for a successful ssl import.
SSL_MODULE_NAME = "ssl"
# On some system the ssl module might be missing
try:
    import ssl
except ImportError:
    ssl = None
class HttpsClientHandler(urllib.request.HTTPSHandler):
"""Https handler to enable attaching cert/key to request. Also used to disable strict cert verification for
testing.
"""
def __init__(self, cert_path, key_path, insecure=False):
self.cert_path = cert_path
self.key_path = key_path
ssl_context = None
if insecure and SSL_MODULE_NAME in sys.modules and (sys.version_info[PY_MAJOR_VERSION] == 3):
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
urllib.request.HTTPSHandler.__init__(self, context=ssl_context) # Context can be None here
def https_open(self, req):
return self.do_open(self.get_https_connection, req, context=self._context)
def get_https_connection(self, host, context=None, timeout=180):
"""urllib3's AbstractHttpHandler will invoke this method with the host/timeout parameter. See urllib3's
AbstractHttpHandler for more details.
Args:
host : string , the host.
context : ssl_context , the ssl context.
timeout : int , the timeout value in seconds.
Returns:
An HttpsConnection
"""
socket.setdefaulttimeout(180)
if self.cert_path is None or self.key_path is None:
return http.client.HTTPSConnection(host, timeout=timeout, context=context)
else:
return http.client.HTTPSConnection(host, cert_file=self.cert_path, key_file=self.key_path, timeout=timeout,
context=context)
def request_retry_handler(func):
    """Decorator retrying transient failures up to three times.

    SSL errors and DNS ("name resolution") URLErrors are retried with a
    small increasing delay; any other exception is re-raised immediately.
    After the final attempt a RetryAttemptExceededException carrying the
    traceback is raised.
    """
    def decorated_func(*args, **kwargs):
        max_retry_count = 3
        for iteration in range(0, max_retry_count, 1):
            try:
                ret = func(*args, **kwargs)
                return ret
            except Exception as exception:
                if iteration >= max_retry_count - 1:
                    raise RetryAttemptExceededException(traceback.format_exc())
                elif SSL_MODULE_NAME in sys.modules:
                    if type(exception).__name__ == 'SSLError':
                        time.sleep(5 + iteration)
                        continue
                    elif isinstance(exception, urllib.error.URLError):
                        # Bug fix: URLError.reason is usually an OSError
                        # instance, not a string, so the original
                        # `"..." in exception.reason` raised TypeError.
                        if "name resolution" in str(exception.reason):
                            time.sleep(5 + iteration)
                            continue
                raise exception
    return decorated_func
class Urllib3HttpClient(HttpClient):
    """urllib-based http client. Inherits from HttpClient.

    Targets:
        [2.7.9 - 2.7.9+] only due to the lack of strict certificate
        verification prior to this version.

    Implements the methods common to all classes inheriting HttpClient:
        get/post/put/delete (url, headers[, data])

    All verbs return a RequestResponse; HTTP errors are mapped to their
    status code and exhausted retries to 408.
    """

    def __init__(self, cert_path, key_path, insecure=False, proxy_configuration=None):
        HttpClient.__init__(self, cert_path, key_path, insecure, proxy_configuration)

    @request_retry_handler
    def issue_request(self, url, headers, method=None, data=None):
        """Issue a single request to the provided url.

        Args:
            url     : string , the url.
            headers : dictionary, contains the headers key value pair.
            method  : string , the HTTP verb to use for this request.
            data    : string , contains the serialized request body.

        Returns:
            The raw urllib response object.
        """
        https_handler = HttpsClientHandler(self.cert_path, self.key_path, self.insecure)
        opener = urllib.request.build_opener(https_handler)
        if self.proxy_configuration is not None:
            proxy_handler = urllib.request.ProxyHandler({'http': self.proxy_configuration,
                                                         'https': self.proxy_configuration})
            opener.add_handler(proxy_handler)
        if data is not None:
            data = data.encode("utf-8")
        req = urllib.request.Request(url, data=data, headers=headers)
        # Force the HTTP verb (urllib picks GET/POST from data otherwise).
        req.get_method = lambda: method
        response = opener.open(req, timeout=30)
        opener.close()
        https_handler.close()
        return response

    def _issue_and_wrap(self, url, headers, method, data=None):
        """Issue the request and translate failures into a RequestResponse.

        Shared by get/post/put/delete; this error handling was previously
        copy/pasted in all four verb methods.
        """
        try:
            response = self.issue_request(url, headers=headers, method=method,
                                          data=data)
        except urllib.error.HTTPError as e:
            if e is not None and e.code is not None:
                return RequestResponse(e.code)
            exception_type, error = sys.exc_info()[:2]
            return RequestResponse(error.code)
        except RetryAttemptExceededException:
            # Return an http timeout status code when all retries fail.
            return RequestResponse(408)
        return RequestResponse(response.getcode(), response.read())

    def _serialize_json_body(self, headers, data):
        """Serialize data to JSON and set the JSON content-type header."""
        serial_data = "" if data is None else self.json.dumps(data)
        headers.update({self.CONTENT_TYPE_HEADER_KEY: self.APP_JSON_HEADER_VALUE})
        return serial_data

    def get(self, url, headers=None):
        """Issue a GET request. Returns a RequestResponse."""
        headers = self.merge_headers(self.default_headers, headers)
        return self._issue_and_wrap(url, headers, self.GET)

    def post(self, url, headers=None, data=None):
        """Issue a POST request with a JSON body. Returns a RequestResponse."""
        headers = self.merge_headers(self.default_headers, headers)
        serial_data = self._serialize_json_body(headers, data)
        return self._issue_and_wrap(url, headers, self.POST, serial_data)

    def put(self, url, headers=None, data=None):
        """Issue a PUT request with a JSON body. Returns a RequestResponse."""
        headers = self.merge_headers(self.default_headers, headers)
        serial_data = self._serialize_json_body(headers, data)
        return self._issue_and_wrap(url, headers, self.PUT, serial_data)

    def delete(self, url, headers=None, data=None):
        """Issue a DELETE request with a JSON body. Returns a RequestResponse."""
        headers = self.merge_headers(self.default_headers, headers)
        serial_data = self._serialize_json_body(headers, data)
        return self._issue_and_wrap(url, headers, self.DELETE, serial_data)
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that computes statistics for features of image format.
Specifically, the following statistics are computed:
- Maximum image heigh and width
- Histogram of value count by image format
- If the rate of recognized formats is high enough and enough values
have been considered, features get marked with domain_info: image_domain
used for schema inference.
The current implementation is using imghdr for identifying image formats
(efficient, based on metadata) and tf.image.decode_image for image height,
width (possibly expensive, performs decoding).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
from typing import Iterable, List, Optional, Text
import numpy as np
import pandas as pd
import pyarrow as pa
import six
import tensorflow as tf
from tensorflow_data_validation import types
from tensorflow_data_validation.arrow import arrow_util
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import stats_util
from tensorflow_metadata.proto.v0 import statistics_pb2
# Names of the custom stats / domain info emitted by this generator.
_DOMAIN_INFO = 'domain_info'
_IMAGE_DOMAIN = 'image_domain {}'
_IMAGE_MAX_WIDTH_STATISTICS = 'image_max_width'
_IMAGE_MAX_HEIGHT_STATISTICS = 'image_max_height'
_IMAGE_FORMAT_HISTOGRAM = 'image_format_histogram'
# ImageStatsGenerator default initialization values.
_IS_IMAGE_RATIO = 0.8
_VALUES_THRESHOLD = 100
# Magic bytes (hex) signature for each image format.
# Source: https://en.wikipedia.org/wiki/List_of_file_signatures.
_IMAGE_FORMAT_SIGNATURES = {
    'bmp': b'\x42\x4d',
    'gif': b'\x47\x49\x46\x38',
    # The 4th byte of JPEG is '\xe0' or '\xe1', so check just the first three.
    'jpeg': b'\xff\xd8\xff',
    'png': b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'
}
class ImageDecoderInterface(six.with_metaclass(abc.ABCMeta)):
  """Abstract contract for image format detection and size extraction."""

  @abc.abstractmethod
  def get_formats(self, values: np.ndarray) -> np.ndarray:
    """Maps each byte string in values to an image format name.

    Args:
      values: byte strings to inspect for an image format.

    Returns:
      An array aligned with the input holding format names such as 'jpeg'
      or 'bmp', with None for entries that are not recognized images.
    """
    raise NotImplementedError

  @abc.abstractmethod
  def get_sizes(self, values: np.ndarray) -> np.ndarray:
    """Maps each byte string in values to its image size.

    Args:
      values: byte strings to inspect for an image size.

    Returns:
      An array of (image_height, image_width) pairs aligned with the
      input values.
    """
    raise NotImplementedError
class TfImageDecoder(ImageDecoderInterface):
  """ImageDecoderInterface implementation based on tensorflow library.

  This image decoder only supports image formats supported by:
  tf.image.decode_image, ['bmp', 'gif', 'jpeg', 'png'].

  Image sizes are computed using tf.image.decode_image, which requires tf.
  Initializating and pickling tf objects can be non-trivial, so:
  - Initialization is done lazily when get_sizes computation is needed.
  - __reduce__() is overridden so that tf state is ignored. It is lazily
    initialized as needed, after deserialization.
  """

  def __init__(self):  # pylint: disable=super-init-not-called
    # The tf session/callable are created lazily on first get_sizes call.
    self._lazy_get_sizes_callable = None

  def __reduce__(self):
    # Pickle as a fresh instance; tf state is rebuilt lazily after
    # deserialization.
    return TfImageDecoder, tuple()

  def _initialize_lazy_get_sizes_callable(self):
    # Initialize the tensorflow graph for decoding images.
    graph = tf.Graph()
    self._session = tf.compat.v1.Session(graph=graph)

    def get_image_shape(value):
      image_shape = tf.shape(input=tf.image.decode_image(value))
      # decode_image returns a 3-D array ([height, width, num_channels]) for
      # BMP/JPEG/PNG images, but 4-D array ([num_frames, height, width, 3])
      # for GIF images.
      return tf.cond(
          pred=tf.equal(tf.size(input=image_shape), 4),
          true_fn=lambda: image_shape[1:3],
          false_fn=lambda: image_shape[0:2],
      )

    with self._session.graph.as_default(), self._session.as_default():
      self._batch_image_input = tf.compat.v1.placeholder(
          dtype=tf.string, shape=[None])
      self._image_shapes = tf.map_fn(
          get_image_shape,
          elems=self._batch_image_input,
          dtype=tf.int32,
          infer_shape=False)
    graph.finalize()
    self._lazy_get_sizes_callable = self._session.make_callable(
        fetches=self._image_shapes, feed_list=[self._batch_image_input])

  def get_formats(self, values: List[object]) -> np.ndarray:
    """Returns the image format name for each value if it represents an image.

    Args:
      values: a list of value in bytes to check the image format.

    Returns:
      A list of image format names (e.g. 'jpeg'/'gif'/etc, or None if the
      value is not an image) in the same order as the input value list.
    """

    def get_image_format(image_bytes):
      # Compare the leading magic bytes against each known signature.
      for image_format, signature in _IMAGE_FORMAT_SIGNATURES.items():
        if bytes(image_bytes[:len(signature)]) == signature:
          return image_format
      return None

    # np.object was merely a deprecated alias for the builtin object and was
    # removed in NumPy 1.24; use object directly (identical behavior).
    return np.vectorize(get_image_format, otypes=[object])(values)

  def get_sizes(self, values: np.ndarray) -> np.ndarray:
    """Returns the image size for each value if it represents an image.

    Args:
      values: a list of value in bytes to check the image size.

    Returns:
      A numpy array containing (image_height, image_width) tuples (if the value
      represents an image) in the same order as the input value list.

    Raises:
      ValueError: If any of the input value does not represents an image.
    """
    if not self._lazy_get_sizes_callable:
      self._initialize_lazy_get_sizes_callable()
    return self._lazy_get_sizes_callable(values)
class _PartialImageStats(object):
"""Partial feature stats for images.
Attributes:
total_num_values: The total number of values processed for this feature.
max_width: The maximum image width among all the values for this feature.
max_height: The maximum image height among all the values for this feature.
counter_by_format: A dict from image format string to the number of images
in this format. The format / key '' is used for non supported.
invalidate: True only if this feature should never be considered, e.g: some
value_lists have inconsistent formats.
"""
def __init__(self):
self.total_num_values = 0
self.max_width = 0
self.max_height = 0
self.counter_by_format = collections.Counter()
self.invalidate = False
def __iadd__(self, other: '_PartialImageStats') -> '_PartialImageStats':
"""Merge two partial image stats."""
self.total_num_values += other.total_num_values
self.max_width = max(self.max_width, other.max_width)
self.max_height = max(self.max_height, other.max_height)
self.counter_by_format += other.counter_by_format
self.invalidate |= other.invalidate
return self
class ImageStatsGenerator(stats_generator.CombinerFeatureStatsGenerator):
    """Computes the statistics for features of image format."""

    def __init__(self,
                 image_decoder: Optional[ImageDecoderInterface] = None,
                 name: Text = 'ImageStatsGenerator',
                 is_image_ratio_threshold: float = _IS_IMAGE_RATIO,
                 values_threshold: int = _VALUES_THRESHOLD,
                 enable_size_stats: bool = False):
        """Initializes an image statistics generator.

        Args:
          image_decoder: ImageDecoderInterface instance for fetching image metadata.
          name: The unique name associated with this statistics generator.
          is_image_ratio_threshold: In order for a feature to be considered "image"
            type and respective stats to be generated, at least this ratio of values
            should be supported images.
          values_threshold: In order for a feature to be considered "image" type
            and respective stats to be generated, at least so many values should be
            considered.
          enable_size_stats: If True statistics about image sizes are generated.
            This currently requires decoding through TF that could have performance
            implications.
        """
        super(ImageStatsGenerator, self).__init__(name)
        if image_decoder is None:
            # Default decoder inspects the values through TensorFlow.
            image_decoder = TfImageDecoder()
        self._image_decoder = image_decoder
        self._is_image_ratio_threshold = is_image_ratio_threshold
        self._values_threshold = values_threshold
        self._enable_size_stats = enable_size_stats

    def create_accumulator(self) -> _PartialImageStats:
        """Return a fresh, empty accumulator.

        Returns:
          An empty accumulator.
        """
        return _PartialImageStats()

    def add_input(self, accumulator: _PartialImageStats,
                  feature_path: types.FeaturePath,
                  feature_array: pa.Array) -> _PartialImageStats:
        """Return result of folding a batch of inputs into accumulator.

        Args:
          accumulator: The current accumulator.
          feature_path: The path of the feature.
          feature_array: An arrow array representing a batch of feature values
            which should be added to the accumulator.

        Returns:
          The accumulator after updating the statistics for the batch of inputs.
        """
        # A previously invalidated accumulator stays invalid; skip all work.
        if accumulator.invalidate:
            return accumulator
        feature_type = stats_util.get_feature_type_from_arrow_type(
            feature_path, feature_array.type)
        # Ignore null array.
        if feature_type is None:
            return accumulator
        # If we see a different type, invalidate.
        # Images can only appear in STRING (bytes) features.
        if feature_type != statistics_pb2.FeatureNameStatistics.STRING:
            accumulator.invalidate = True
            return accumulator
        # Consider using memoryview to avoid copying after upgrading to
        # arrow 0.12. Note that this would involve modifying the subsequent logic
        # to iterate over the values in a loop.
        values = np.asarray(arrow_util.flatten_nested(feature_array)[0])
        accumulator.total_num_values += values.size
        image_formats = self._image_decoder.get_formats(values)
        # Entries the decoder could not classify come back as null.
        valid_mask = ~pd.isnull(image_formats)
        valid_formats = image_formats[valid_mask]
        format_counts = np.unique(valid_formats, return_counts=True)
        for (image_format, count) in zip(*format_counts):
            accumulator.counter_by_format[image_format] += count
        # Unrecognized values are tallied under the '' key (see class docstring).
        unknown_count = image_formats.size - valid_formats.size
        if unknown_count > 0:
            accumulator.counter_by_format[''] += unknown_count
        if self._enable_size_stats:
            # Get image height and width.
            image_sizes = self._image_decoder.get_sizes(values[valid_mask])
            if image_sizes.any():
                max_sizes = np.max(image_sizes, axis=0)
                # Update the max image height/width with all image values.
                accumulator.max_height = max(accumulator.max_height, max_sizes[0])
                accumulator.max_width = max(accumulator.max_width, max_sizes[1])
        return accumulator

    def merge_accumulators(
        self, accumulators: Iterable[_PartialImageStats]) -> _PartialImageStats:
        """Merges several accumulators to a single accumulator value.

        Args:
          accumulators: The accumulators to merge.

        Returns:
          The merged accumulator.
        """
        it = iter(accumulators)
        result = next(it)
        # Fold the remaining accumulators into the first via __iadd__.
        for accumulator in it:
            result += accumulator
        return result

    def extract_output(self, accumulator: _PartialImageStats
                      ) -> statistics_pb2.FeatureNameStatistics:
        """Return result of converting accumulator into the output value.

        Args:
          accumulator: The final accumulator value.

        Returns:
          A proto representing the result of this stats generator.
        """
        result = statistics_pb2.FeatureNameStatistics()
        # Only generate an image statistics proto if the ratio of image feature
        # values is at or above a threshold.
        if (accumulator.invalidate or
            accumulator.total_num_values < self._values_threshold or
            (1 - (float(accumulator.counter_by_format['']) /
                  accumulator.total_num_values)) < self._is_image_ratio_threshold):
            return result
        result.custom_stats.add(name=_DOMAIN_INFO, str=_IMAGE_DOMAIN)
        # Image format histogram.
        custom_stats = result.custom_stats.add(name=_IMAGE_FORMAT_HISTOGRAM)
        # Add the buckets with sorted image format.
        for image_format in sorted(accumulator.counter_by_format):
            custom_stats.rank_histogram.buckets.add(
                # LINT.IfChange
                label=image_format if image_format else 'UNKNOWN',
                # image_domain_util.cc relies on unsupported image formats being
                # being assigned to the bucket labeled 'UNKNOWN'. If this labeling
                # changes, change the corresponding code accordingly.
                # LINT.ThenChange(../../anomalies/image_domain_util.cc)
                sample_count=accumulator.counter_by_format[image_format])
        if self._enable_size_stats:
            result.custom_stats.add(
                name=_IMAGE_MAX_WIDTH_STATISTICS, num=accumulator.max_width)
            result.custom_stats.add(
                name=_IMAGE_MAX_HEIGHT_STATISTICS, num=accumulator.max_height)
        return result
|
|
#!/usr/bin/env python
"""
TwoDelegateDatasetAgent: improved generalized implementation of external dataset agent:
- avoids artificial agent/driver communication and state model
- pluggable behavior to find new data files (Poller) and parse contents (Parser)
- hands-off interrupt/resume state mechanism: memento generated by poller only has to make sense to that poller
- RPC and database calls removed
classes defined:
- BistableDatasetAgent
- abstract base class
- implements two-state model (streaming or idle)
- acts as agent, driver client, and driver; simplifies control flow
- TwoDelegateDatasetAgent
- usable implementation takes two delegates for pluggable behavior
- poller delegate knows how to find new data
- parser delegate knows how to handle data
- Poller
The poller may read files, call web services, or otherwise figure out whether new datasets are available.
When new data is found, it invokes a callback with an open file-like object and a memento.
Data is parsed from the file-like object and published, and
if successful the memento is saved.
Should the agent (or container or system) crash and be restarted,
the memento will be passed to the new poller so it knows its position
and can resume looking for only new datasets.
- Parser
The parser reads records from the file-like object provided by the poller so they can be published.
interrupt/resume state:
- on each callback, the poller provides a memento it can use to keep its position after resume
- after successful parsing, the memento is persisted as part of the agent state
- upon restart, the agent reads the memento and passes to the new poller
"""
__author__ = 'Christopher Mueller, Jonathan Newbrough'
import os
from mi.core.log import get_logger ; log = get_logger()
from ooi.reflection import EggCache
from pyon.agent.agent import ResourceAgentEvent
from pyon.agent.agent import ResourceAgentState
from pyon.core.exception import InstStateError
from pyon.public import OT
from pyon.core.bootstrap import IonObject
from pyon.ion.stream import StandaloneStreamPublisher
from ion.agents.instrument.exceptions import InstrumentStateException
from ion.agents.instrument.instrument_agent import InstrumentAgent
from ion.core.includes.mi import DriverEvent
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from coverage_model import ParameterDictionary
# TODO: make unique for multiple processes on same VM
EGG_CACHE=EggCache('/tmp/eggs%d' % os.getpid())
class TwoDelegateDatasetAgent(InstrumentAgent):
    """
    this dataset agent has two states: autosampling and idle
    based on InstrumentAgent but override methods to simplify flow control: agent == driver client == driver
    abstract base class, subclass is expected to override: set_configuration, start_sampling, stop_sampling
    implement functionality with two pluggable delegates:
    - poller -- waits for new datasets to be available and alerts system when found
    - parser -- reads and parses data files into record dicts
    """
    # currently running poller plugin instance, or None while idle
    _poller = None
    # last exception reported by the poller (set by exception_callback)
    _polling_exception = None
    ORIGIN_TYPE = "ExternalDataset"

    def __init__(self, *args, **kwargs):
        super(TwoDelegateDatasetAgent, self).__init__(*args, **kwargs)
        # allow execute_resource while streaming (see handler below)
        self._fsm.add_handler(ResourceAgentState.STREAMING, ResourceAgentEvent.EXECUTE_RESOURCE, self._handler_streaming_execute_resource)
        log.warn("DRIVER: __init__")

    def _start_driver(self, dvr_config):
        """Configure this agent as its own driver/driver client."""
        log.warn("DRIVER: _start_driver: %s", dvr_config)
        try:
            self.set_configuration(dvr_config)
        except:
            log.error('error in configuration', exc_info=True)
            raise
        self._dvr_client = self
        self._asp.reset_connection()

    def _stop_driver(self):
        """Tear down the 'driver': stop any active polling (safe when idle)."""
        self.stop_sampling()
        self._dvr_client = None

    def _handler_streaming_execute_resource(self, command, *args, **kwargs):
        """
        Handler for execute_resource command in streaming state.
        Delegates ACQUIRE_SAMPLE / STOP_AUTOSAMPLE to
        InstrumentAgent._handler_execute_resource; rejects other commands.
        """
        if command == DriverEvent.ACQUIRE_SAMPLE or command == DriverEvent.STOP_AUTOSAMPLE:
            return self._handler_execute_resource(command, *args, **kwargs)
        else:
            raise InstrumentStateException('Command \'{0}\' not allowed in current state {1}'.format(command, self._fsm.get_current_state()))

    def _handler_active_unknown_go_inactive(self, *args, **kwargs):
        self.stop_sampling()
        return (ResourceAgentState.INACTIVE, None)

    def _handler_inactive_go_active(self, *args, **kwargs):
        self.start_sampling()
        return (ResourceAgentState.IDLE, None)

    def cmd_dvr(self, cmd, *args, **kwargs):
        """Driver-client entry point; maps driver commands onto start/stop sampling."""
        log.warn("DRIVER: cmd_dvr %s", cmd)
        if cmd == 'execute_start_autosample':
            # Delegate to BaseDataHandler.execute_start_autosample()
            self.start_sampling()
            return (None, None)
        elif cmd == 'execute_stop_autosample':
            self.stop_sampling()
            return (ResourceAgentState.IDLE, None)
        elif cmd == 'execute_resource':
            log.warn("cmd_dvr.execute_resource %r %r", args, kwargs)
            return (None, None)

    def _validate_driver_config(self):
        """Return True if the driver/agent configuration has the required keys and sane values."""
        out = True
        for key in 'poller', 'parser':
            if key not in self._dvr_config:
                log.error('missing key: %s', key)
                out = False
        for key in ('stream_config', ):
            if key not in self.CFG:
                log.error('missing key: %s', key)
                out = False
        max_records = get_safe(self._dvr_config, 'max_records', 100)
        if max_records < 1:
            # was logging self.max_records, which is only assigned later by
            # set_configuration and may not exist during validation
            log.error('max_records=%d, must be at least 1 or unset (default 100)', max_records)
            out = False
        return out

    def set_configuration(self, config):
        """
        expect configuration to have:
        - parser module/class
        - directory, wildcard to find data files
        - optional timestamp of last granule
        - optional poll rate
        - publish info
        """
        log.warn("DRIVER: set_configuration")
        log.error("Log level: %s", log.getEffectiveLevel())
        log.debug('using configuration: %s', config)
        self.config = config
        # NOTE(review): get_safe is not imported in this module's visible
        # imports -- confirm the runtime provides it (pyon.util.containers).
        self.max_records = get_safe(config, 'max_records', 100)
        self.stream_config = self.CFG.get('stream_config', {})
        # The original len==1 and len>1 branches were identical (both take an
        # arbitrary first stream config); collapsed into one. An empty
        # stream_config still fails on the lookup below, exactly as before.
        if self.stream_config:
            stream_cfg = self.stream_config.values()[0]
        stream_id = stream_cfg['stream_id']
        stream_route = IonObject(OT.StreamRoute, routing_key=stream_cfg['routing_key'], exchange_point=stream_cfg['exchange_point'])
        param_dict = stream_cfg['stream_def_dict']['parameter_dictionary']
        self.publisher = StandaloneStreamPublisher(stream_id=stream_id, stream_route=stream_route)
        self.parameter_dictionary = ParameterDictionary.load(param_dict)
        self.time_field = self.parameter_dictionary.get_temporal_context()
        self.latest_granule_time = get_safe(config, 'last_time', 0)

    def _create_plugin(self, config, args=None, kwargs=None):
        """Instantiate a poller/parser plugin from the egg described by config."""
        args, kwargs = args or [], kwargs or {}
        uri = config['uri']
        # uri is either an http URL to an egg (name after the last '/') or a
        # bare local egg name with no repo
        egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
        egg_repo = uri[0:len(uri) - len(egg_name) - 1] if uri.startswith('http') else None
        module_name = config['module']
        class_name = config['class']
        return EGG_CACHE.get_object(class_name, module_name, egg_name, egg_repo, args, kwargs)

    def start_sampling(self):
        """Create and start the poller plugin, resuming from any saved memento."""
        if self._poller:
            raise InstStateError('already polling')
        memento = self._get_state('poller_state')
        config = self.config['poller']
        log.trace('poller config: %r', config)
        self._poller = self._create_plugin(config, args=[config['config'], memento, self.poller_callback, self.exception_callback])
        self._poller.start()

    def poller_callback(self, file_like_object, state_memento):
        """Poller found new data: parse and publish it, then persist the memento."""
        log.debug('poller found data to parse')
        try:
            config = self.config['parser']
            parser = self._create_plugin(config, kwargs=dict(open_file=file_like_object, parse_after=self.latest_granule_time))
            records = parser.get_records(max_count=self.max_records)
            log.trace('have %d records', len(records))
            while records:
                self._asp.on_sample_mult(records)
                records = parser.get_records(max_count=self.max_records)
            # only persist the poller position once parsing/publishing succeeded,
            # so a crash here reprocesses the same data instead of skipping it
            self._set_state('poller_state', state_memento)
        except Exception:
            log.error('error handling data', exc_info=True)

    def exception_callback(self, exception):
        """Poller reported a failure: record it and go idle."""
        log.error('error in poller', exc_info=True)
        self._polling_exception = exception
        self.stop_sampling()

    def stop_sampling(self):
        """Shut down the poller if one is running; safe to call when already idle."""
        log.debug('stop_sampling')
        # clear _poller first so start_sampling can be called again; the
        # original crashed when idle and could never restart after a stop
        poller, self._poller = self._poller, None
        if poller is not None:
            poller.shutdown()
|
|
import hashlib
import os.path
import urllib
from contextlib import contextmanager
from mimetypes import guess_type
from django.conf import settings
from django.core.validators import FileExtensionValidator
from django.db import models
from django.dispatch import Signal
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from taggit.managers import TaggableManager
from wagtail.admin.models import get_object_usage
from wagtail.core.models import CollectionMember
from wagtail.search import index
from wagtail.search.queryset import SearchableQuerySetMixin
class DocumentQuerySet(SearchableQuerySetMixin, models.QuerySet):
    # Plain queryset; search support comes entirely from SearchableQuerySetMixin.
    pass
class AbstractDocument(CollectionMember, index.Indexed, models.Model):
    """Base document model: a titled, taggable file stored in a collection."""

    title = models.CharField(max_length=255, verbose_name=_("title"))
    file = models.FileField(upload_to="documents", verbose_name=_("file"))
    created_at = models.DateTimeField(verbose_name=_("created at"), auto_now_add=True)
    # Nullable so documents survive deletion of the uploading user.
    uploaded_by_user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_("uploaded by user"),
        null=True,
        blank=True,
        editable=False,
        on_delete=models.SET_NULL,
    )
    tags = TaggableManager(help_text=None, blank=True, verbose_name=_("tags"))
    # Size in bytes; populated lazily by get_file_size().
    file_size = models.PositiveIntegerField(null=True, editable=False)
    # A SHA-1 hash of the file contents
    file_hash = models.CharField(max_length=40, blank=True, editable=False)

    objects = DocumentQuerySet.as_manager()

    search_fields = CollectionMember.search_fields + [
        index.SearchField("title", partial_match=True, boost=10),
        index.AutocompleteField("title"),
        index.FilterField("title"),
        index.RelatedFields(
            "tags",
            [
                index.SearchField("name", partial_match=True, boost=10),
                index.AutocompleteField("name"),
            ],
        ),
        index.FilterField("uploaded_by_user"),
    ]

    def clean(self):
        """
        Checks for WAGTAILDOCS_EXTENSIONS and validates the uploaded file
        based on allowed extensions that were specified.
        Warning : This doesn't always ensure that the uploaded file is valid
        as files can be renamed to have an extension no matter what
        data they contain.
        More info : https://docs.djangoproject.com/en/3.1/ref/validators/#fileextensionvalidator
        """
        allowed_extensions = getattr(settings, "WAGTAILDOCS_EXTENSIONS", None)
        if allowed_extensions:
            validate = FileExtensionValidator(allowed_extensions)
            validate(self.file)

    def is_stored_locally(self):
        """
        Returns True if the image is hosted on the local filesystem
        """
        # Remote storage backends raise NotImplementedError from .path
        try:
            self.file.path
            return True
        except NotImplementedError:
            return False

    @contextmanager
    def open_file(self):
        """Context manager yielding the file opened in binary mode, rewound to the start."""
        # Open file if it is closed
        close_file = False
        f = self.file
        if f.closed:
            # Reopen the file
            if self.is_stored_locally():
                f.open("rb")
            else:
                # Some external storage backends don't allow reopening
                # the file. Get a fresh file instance. #1397
                storage = self._meta.get_field("file").storage
                f = storage.open(f.name, "rb")
            close_file = True
        # Seek to beginning
        f.seek(0)
        try:
            yield f
        finally:
            # Only close files this method itself (re)opened.
            if close_file:
                f.close()

    def get_file_size(self):
        """Return the cached file size in bytes, computing and saving it on first use."""
        if self.file_size is None:
            try:
                self.file_size = self.file.size
            except Exception:
                # File doesn't exist
                return
            self.save(update_fields=["file_size"])
        return self.file_size

    def _set_file_hash(self, file_contents):
        """Store the SHA-1 digest of *file_contents* on the instance (not saved)."""
        self.file_hash = hashlib.sha1(file_contents).hexdigest()

    def get_file_hash(self):
        """Return the cached SHA-1 of the file contents, computing and saving it on first use."""
        if self.file_hash == "":
            with self.open_file() as f:
                self._set_file_hash(f.read())
            self.save(update_fields=["file_hash"])
        return self.file_hash

    def __str__(self):
        return self.title

    @property
    def filename(self):
        """Basename of the stored file, e.g. 'report.pdf'."""
        return os.path.basename(self.file.name)

    @property
    def file_extension(self):
        """File extension without the leading dot, e.g. 'pdf'."""
        return os.path.splitext(self.filename)[1][1:]

    @property
    def url(self):
        """URL for downloading the document; direct storage URL or the serve view."""
        if getattr(settings, "WAGTAILDOCS_SERVE_METHOD", None) == "direct":
            try:
                return self.file.url
            except NotImplementedError:
                # backend does not provide a url, so fall back on the serve view
                pass
        return reverse("wagtaildocs_serve", args=[self.id, self.filename])

    def get_usage(self):
        """Return the objects referencing this document."""
        return get_object_usage(self)

    @property
    def usage_url(self):
        """Admin URL listing this document's usage."""
        return reverse("wagtaildocs:document_usage", args=(self.id,))

    def is_editable_by_user(self, user):
        """Return True if *user* may change this document per the permission policy."""
        from wagtail.documents.permissions import permission_policy

        return permission_policy.user_has_permission_for_instance(user, "change", self)

    @property
    def content_type(self):
        """MIME type: settings override first, then guessed, then octet-stream."""
        content_types_lookup = getattr(settings, "WAGTAILDOCS_CONTENT_TYPES", {})
        return (
            content_types_lookup.get(self.file_extension.lower())
            or guess_type(self.filename)[0]
            or "application/octet-stream"
        )

    @property
    def content_disposition(self):
        """Content-Disposition header value: inline for configured types, else attachment."""
        inline_content_types = getattr(
            settings, "WAGTAILDOCS_INLINE_CONTENT_TYPES", ["application/pdf"]
        )
        if self.content_type in inline_content_types:
            return "inline"
        else:
            return "attachment; filename={0}; filename*=UTF-8''{0}".format(
                urllib.parse.quote(self.filename)
            )

    class Meta:
        abstract = True
        verbose_name = _("document")
        verbose_name_plural = _("documents")
class Document(AbstractDocument):
    # Fields exposed on the Wagtail admin add/edit document form.
    admin_form_fields = ("title", "file", "collection", "tags")

    class Meta(AbstractDocument.Meta):
        permissions = [
            ("choose_document", "Can choose document"),
        ]
# Signal sent when a document is served to a client.
# provides args: request
document_served = Signal()
class UploadedDocument(models.Model):
    """
    Temporary storage for documents uploaded through the multiple doc uploader, when validation
    rules (e.g. required metadata fields) prevent creating a Document object from the document file
    alone. In this case, the document file is stored against this model, to be turned into a
    Document object once the full form has been filled in.
    """
    file = models.FileField(upload_to="uploaded_documents", max_length=200)
    # Nullable so pending uploads survive deletion of the uploading user.
    uploaded_by_user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        verbose_name=_("uploaded by user"),
        null=True,
        blank=True,
        editable=False,
        on_delete=models.SET_NULL,
    )
|
|
import weakref
from warnings import warn
from fontTools.misc import bezierTools
from defcon.objects.base import BaseObject
from defcon.tools import bezierMath
from defcon.tools.representations import contourBoundsRepresentationFactory,\
contourControlPointBoundsRepresentationFactory, contourClockwiseRepresentationFactory
class Contour(BaseObject):
"""
This object represents a contour and it contains a list of points.
**This object posts the following notifications:**
===============================
Name
===============================
Contour.Changed
Contour.WindingDirectionChanged
Contour.PointsChanged
Contour.IdentifierChanged
===============================
The Contour object has list like behavior. This behavior allows you to interact
with point data directly. For example, to get a particular point::
point = contour[0]
To iterate over all points::
for point in contour:
To get the number of points::
pointCount = len(contour)
To interact with components or anchors in a similar way,
use the ``components`` and ``anchors`` attributes.
"""
# Notification posted for any change to the contour.
changeNotificationName = "Contour.Changed"

# Cached-representation factories and the notifications that invalidate them.
representationFactories = {
    "defcon.contour.bounds" : dict(
        factory=contourBoundsRepresentationFactory,
        # Trailing comma matters: a bare ("...") is just a parenthesized
        # string, and code iterating destructiveNotifications would then
        # see individual characters instead of notification names.
        destructiveNotifications=("Contour.PointsChanged",)
    ),
    "defcon.contour.controlPointBounds" : dict(
        factory=contourControlPointBoundsRepresentationFactory,
        destructiveNotifications=("Contour.PointsChanged",)
    ),
    "defcon.contour.clockwise" : dict(
        factory=contourClockwiseRepresentationFactory,
        destructiveNotifications=("Contour.PointsChanged", "Contour.WindingDirectionChanged")
    ),
}
def __init__(self, glyph=None, pointClass=None):
    # Parent references are cached lazily as weakrefs by the property
    # getters defined below.
    self._font = None
    self._layerSet = None
    self._layer = None
    self._glyph = None
    self.glyph = glyph
    super(Contour, self).__init__()
    self.beginSelfNotificationObservation()
    # Internal point storage; exposed through the list-like dunder methods.
    self._points = []
    if pointClass is None:
        from .point import Point
        pointClass = Point
    self._pointClass = pointClass
    self._identifier = None

def __del__(self):
    super(Contour, self).__del__()
    # Drop point references to help break reference cycles at teardown.
    self._points = None
# --------------
# Parent Objects
# --------------

def getParent(self):
    # Legacy accessor kept for compatibility; prefer the ``glyph`` property.
    return self.glyph

def _get_font(self):
    """Resolve (and cache as a weakref) the font this contour belongs to."""
    font = None
    if self._font is None:
        glyph = self.glyph
        if glyph is not None:
            font = glyph.font
            if font is not None:
                self._font = weakref.ref(font)
    else:
        font = self._font()
    return font

font = property(_get_font, doc="The :class:`Font` that this contour belongs to.")

def _get_layerSet(self):
    """Resolve (and cache as a weakref) the layer set this contour belongs to."""
    layerSet = None
    if self._layerSet is None:
        glyph = self.glyph
        if glyph is not None:
            layerSet = glyph.layerSet
            if layerSet is not None:
                self._layerSet = weakref.ref(layerSet)
    else:
        layerSet = self._layerSet()
    return layerSet

layerSet = property(_get_layerSet, doc="The :class:`LayerSet` that this contour belongs to.")

def _get_layer(self):
    """Resolve (and cache as a weakref) the layer this contour belongs to."""
    layer = None
    if self._layer is None:
        glyph = self.glyph
        if glyph is not None:
            layer = glyph.layer
            if layer is not None:
                self._layer = weakref.ref(layer)
    else:
        layer = self._layer()
    return layer

layer = property(_get_layer, doc="The :class:`Layer` that this contour belongs to.")

def _get_glyph(self):
    # self._glyph holds a weakref (or None); dereference on access.
    if self._glyph is None:
        return None
    return self._glyph()

def _set_glyph(self, glyph):
    # May only be set once; setting it resets the cached parent lookups.
    assert self._glyph is None
    if glyph is not None:
        glyph = weakref.ref(glyph)
    self._font = None
    self._layerSet = None
    self._layer = None
    self._glyph = glyph

glyph = property(_get_glyph, _set_glyph, doc="The :class:`Glyph` that this contour belongs to. This should not be set externally.")
# ------
# Points
# ------

def _get_pointClass(self):
    return self._pointClass

pointClass = property(_get_pointClass, doc="The class used for point.")

def _get_onCurvePoints(self):
    # Off-curve points have a segmentType of None (falsy) and are skipped.
    return [point for point in self._points if point.segmentType]

onCurvePoints = property(_get_onCurvePoints, doc="A list of all on curve points in the contour.")

def appendPoint(self, point):
    """
    Append **point** to the glyph. The point must be a defcon
    :class:`Point` object or a subclass of that object. An error
    will be raised if the point's identifier conflicts with any of
    the identifiers within the glyph.
    This will post *Contour.PointsChanged* and *Contour.Changed* notifications.
    """
    assert point not in self._points
    self.insertPoint(len(self._points), point)
def insertPoint(self, index, point):
    """
    Insert **point** into the contour at index. The point
    must be a defcon :class:`Point` object or a subclass
    of that object. An error will be raised if the point's
    identifier conflicts with any of the identifiers within
    the glyph.
    This will post *Contour.PointsChanged* and *Contour.Changed* notifications.
    """
    assert point not in self._points
    if point.identifier is not None:
        # reserve the identifier in the shared identifier set
        # (the original re-tested point.identifier inside this branch,
        # which was redundant)
        identifiers = self.identifiers
        assert point.identifier not in identifiers
        identifiers.add(point.identifier)
    self._points.insert(index, point)
    self.postNotification("Contour.PointsChanged")
    self.dirty = True
def removePoint(self, point):
    """
    Remove **point** from the contour.
    This will post *Contour.PointsChanged* and *Contour.Changed* notifications.
    """
    self._points.remove(point)
    self.postNotification("Contour.PointsChanged")
    self.dirty = True

def setStartPoint(self, index):
    """
    Set the point at **index** as the first point in the contour.
    This point must be an on-curve point.
    This will post *Contour.PointsChanged* and *Contour.Changed* notifications.
    """
    onCurvePoints = self.onCurvePoints
    # nothing to rotate with fewer than two on-curve points
    if len(onCurvePoints) < 2:
        return
    # open contours keep their start point
    if self.open:
        return
    point = self._points[index]
    assert point.segmentType is not None, "index must represent an on curve point"
    # rotate the point list so that ``index`` comes first
    before = self._points[:index]
    self._points = self._points[index:] + before
    self.postNotification("Contour.PointsChanged")
    self.dirty = True
# -------------
# List Behavior
# -------------

def __len__(self):
    # number of points in the contour
    return len(self._points)
def __getitem__(self, index):
    """Return the point at *index*, or a list of points for a slice."""
    if isinstance(index, slice):
        # slice.start / slice.stop may be None (e.g. contour[:3]); the
        # previous code compared None against an int, which raises
        # TypeError on Python 3. Normalize before bounds-checking.
        start = 0 if index.start is None else index.start
        if start > len(self._points):
            raise IndexError
        if index.stop is None:
            stop = len(self._points)
        elif index.stop > len(self._points):
            raise IndexError
        else:
            stop = index.stop
        return self._points[start:stop]
    if index > len(self._points):
        raise IndexError
    return self._points[index]
def __iter__(self):
    """Yield the contour's points in order."""
    # Snapshot the length once; access goes through __getitem__ so
    # subclasses that customize indexing still participate.
    for position in range(len(self)):
        yield self[position]
def clear(self):
    """
    Clear the contents of the contour.
    This posts *Contour.PointsChanged* and *Contour.Changed* notifications.
    """
    self._clear()

def _clear(self, postNotification=True):
    # clear the internal storage
    self._points = []
    # reset the clockwise cache
    # post a dirty notification
    # (suppressed during internal rebuilds such as reverse())
    if postNotification:
        self.postNotification("Contour.PointsChanged")
        self.dirty = True

def index(self, point):
    """
    Get the index for **point**.
    """
    return self._points.index(point)
def reverse(self):
    """
    Reverse the direction of the contour. It's important to note
    that the actual points stored in this object will be completely
    replaced by new points.
    This will post *Contour.WindingDirectionChanged*,
    *Contour.PointsChanged* and *Contour.Changed* notifications.
    """
    from robofab.pens.reverseContourPointPen import ReverseContourPointPen
    oldDirection = self.clockwise
    # put the current points in another contour
    otherContour = self.__class__(glyph=None, pointClass=self.pointClass)
    # draw the points in this contour through
    # the reversing pen.
    reversePen = ReverseContourPointPen(otherContour)
    self.drawPoints(reversePen)
    # clear the points in this contour
    self._clear(postNotification=False)
    # draw the points back into this contour
    # (notifications suppressed so only the explicit posts below fire once)
    self.disableNotifications()
    otherContour.drawPoints(self)
    self.enableNotifications()
    # post a notification
    self.postNotification("Contour.WindingDirectionChanged", data=dict(oldValue=oldDirection, newValue=self.clockwise))
    self.postNotification("Contour.PointsChanged")
    self.dirty = True
# --------
# Segments
# --------

def _get_segments(self):
    """Group the raw point list into segments, each ending with an on-curve point."""
    if not len(self._points):
        return []
    segments = [[]]
    lastWasOffCurve = False
    for point in self._points:
        segments[-1].append(point)
        if point.segmentType is not None:
            # an on-curve point closes the current segment
            segments.append([])
        lastWasOffCurve = point.segmentType is None
    if len(segments[-1]) == 0:
        del segments[-1]
    if lastWasOffCurve:
        # trailing off-curves belong to a segment that wraps around to the
        # contour's first on-curve point
        segment = segments.pop(-1)
        assert len(segments[0]) == 1
        segment.append(segments[0][0])
        del segments[0]
        segments.append(segment)
    elif segments[0][-1].segmentType != "move":
        # closed contour: rotate so the first segment moves to the end
        segment = segments.pop(0)
        segments.append(segment)
    return segments

segments = property(_get_segments, doc="A list of all points in the contour organized into segments.")
def removeSegment(self, segmentIndex, preserveCurve=False):
    """
    Remove the segment at **segmentIndex**. If
    **preserveCurve** is True, the contour will
    try to preserve the overall curve shape.
    """
    segments = self.segments
    # neighbor indices wrap around the closed contour
    nextIndex = segmentIndex + 1
    if nextIndex == len(segments):
        nextIndex = 0
    previousIndex = segmentIndex - 1
    if previousIndex < 0:
        previousIndex = len(segments) + previousIndex
    nextSegment = segments[nextIndex]
    segment = segments[segmentIndex]
    previousSegment = segments[previousIndex]
    # if preserveCurve is off
    # or if all are lines, handle it
    if not preserveCurve or (previousSegment[-1].segmentType == "line"\
        and segment[-1].segmentType == "line"\
        and nextSegment[-1].segmentType == "line"):
        for point in segment:
            self._points.remove(point)
    # if have a curve, do the preservation
    else:
        # gather the needed points
        previousOnCurveX = previousSegment[-1].x
        previousOnCurveY = previousSegment[-1].y
        onCurveX = segment[-1].x
        onCurveY = segment[-1].y
        nextOnCurveX = nextSegment[-1].x
        nextOnCurveY = nextSegment[-1].y
        if segment[-1].segmentType == "curve":
            offCurve1X = segment[0].x
            offCurve1Y = segment[0].y
            offCurve2X = segment[-2].x
            offCurve2Y = segment[-2].y
        elif segment[-1].segmentType == "line":
            # treat the line as a degenerate cubic with handles on the anchors
            offCurve1X = previousOnCurveX
            offCurve1Y = previousOnCurveY
            offCurve2X = onCurveX
            offCurve2Y = onCurveY
        else:
            # XXX could be a quad. in that case, we can't handle it.
            raise NotImplementedError("unknown segment type: %s" % segment[-1].segmentType)
        if nextSegment[-1].segmentType == "curve":
            nextOffCurve1X = nextSegment[0].x
            nextOffCurve1Y = nextSegment[0].y
            nextOffCurve2X = nextSegment[-2].x
            nextOffCurve2Y = nextSegment[-2].y
        elif nextSegment[-1].segmentType == "line":
            nextOffCurve1X = onCurveX
            nextOffCurve1Y = onCurveY
            nextOffCurve2X = nextOnCurveX
            nextOffCurve2Y = nextOnCurveY
        else:
            # XXX could be a quad. in that case, we can't handle it.
            raise NotImplementedError("unknown segment type: %s" % nextSegment[-1].segmentType)
        # now do the math
        result = bezierMath.joinSegments((previousOnCurveX, previousOnCurveY),
            (offCurve1X, offCurve1Y), (offCurve2X, offCurve2Y), (onCurveX, onCurveY),
            (nextOffCurve1X, nextOffCurve1Y), (nextOffCurve2X, nextOffCurve2Y), (nextOnCurveX, nextOnCurveY))
        # remove the segment
        for point in segment:
            self._points.remove(point)
        # if the next segment type isn't a curve, make it one
        if not nextSegment[-1].segmentType == "curve":
            nextSegment[-1].segmentType = "curve"
            pointIndex = self._points.index(nextSegment[-1])
            # insert the two new off-curve handles produced by joinSegments
            newPoints = [self._pointClass((result[0][0], result[0][1])), self._pointClass((result[1][0], result[1][1]))]
            if pointIndex == 0:
                self._points.extend(newPoints)
            else:
                self._points = self._points[:pointIndex] + newPoints + self._points[pointIndex:]
        # otherwise, set the point positions
        else:
            nextSegment[0].x = result[0][0]
            nextSegment[0].y = result[0][1]
            nextSegment[1].x = result[1][0]
            nextSegment[1].y = result[1][1]
    # mark the contour as dirty
    self.dirty = True
# ----------------
# Basic Attributes
# ----------------

# clockwise

def _get_clockwise(self):
    # computed lazily and cached by the representation machinery
    return self.getRepresentation("defcon.contour.clockwise")

def _set_clockwise(self, value):
    if self.clockwise != value:
        self.reverse()
        # NOTE(review): _clockwiseCache appears vestigial -- it is not read
        # anywhere in this class; confirm before removing.
        self._clockwiseCache = None

clockwise = property(_get_clockwise, _set_clockwise, doc="A boolean representing if the contour has a clockwise direction. Setting this posts *Contour.WindingDirectionChanged* and *Contour.Changed* notifications.")
# open

def _get_open(self):
    """Whether the contour is open (starts with a 'move' point) or empty."""
    first = self._points[0] if self._points else None
    if first is None:
        # an empty contour is treated as open
        return True
    return first.segmentType == 'move'

open = property(_get_open, doc="A boolean indicating if the contour is open or not.")
# ------
# Bounds
# ------

def _get_bounds(self):
    # computed lazily and cached by the representation machinery
    return self.getRepresentation("defcon.contour.bounds")

bounds = property(_get_bounds, doc="The bounds of the contour's outline expressed as a tuple of form (xMin, yMin, xMax, yMax).")

def _get_controlPointBounds(self):
    return self.getRepresentation("defcon.contour.controlPointBounds")

controlPointBounds = property(_get_controlPointBounds, doc="The control bounds of all points in the contour. This only measures the point positions, it does not measure curves. So, curves without points at the extrema will not be properly measured.")
# ----
# Move
# ----

def move(self, xxx_todo_changeme):
    """
    Move all points in the contour by **(x, y)**.
    This will post *Contour.PointsChanged* and *Contour.Changed* notifications.
    """
    # parameter name is a 2to3 artifact; it carries the (x, y) offset tuple
    (x, y) = xxx_todo_changeme
    for point in self._points:
        point.move((x, y))
    # update the representations
    # XXX this is strictly against the rules.
    # XXX subclasses should never, ever do
    # XXX anything like this. this is a *very*
    # XXX special case.
    # Translation shifts cached bounds by (x, y) without recomputation.
    if "defcon.contour.bounds" in self._representations:
        bounds = self._representations["defcon.contour.bounds"][None]
        if bounds is not None:
            xMin, yMin, xMax, yMax = bounds
            xMin += x
            yMin += y
            xMax += x
            yMax += y
            bounds = (xMin, yMin, xMax, yMax)
            self._representations["defcon.contour.bounds"][None] = bounds
    if "defcon.contour.controlPointBounds" in self._representations:
        bounds = self._representations["defcon.contour.controlPointBounds"][None]
        if bounds is not None:
            xMin, yMin, xMax, yMax = bounds
            xMin += x
            yMin += y
            xMax += x
            yMax += y
            bounds = (xMin, yMin, xMax, yMax)
            self._representations["defcon.contour.controlPointBounds"][None] = bounds
    # notify others but not ourselves, so the hand-patched caches survive
    self.disableNotifications(observer=self)
    self.postNotification("Contour.PointsChanged")
    self.enableNotifications(observer=self)
    self.dirty = True
# ------------
# Point Inside
# ------------

def pointInside(self, xxx_todo_changeme1, evenOdd=False):
    """
    Returns a boolean indicating if **(x, y)** is in the
    "black" area of the contour.
    """
    # parameter name is a 2to3 artifact; it carries the (x, y) test point
    (x, y) = xxx_todo_changeme1
    from fontTools.pens.pointInsidePen import PointInsidePen
    pen = PointInsidePen(glyphSet=None, testPoint=(x, y), evenOdd=evenOdd)
    self.draw(pen)
    return pen.getResult()
# ---------
# Splitting
# ---------
def positionForProspectivePointInsertionAtSegmentAndT(self, segmentIndex, t):
    """
    Get the precise coordinates and a boolean indicating
    if the point will be smooth for the given **segmentIndex**
    and **t**.
    """
    # insert=False: compute the split position without mutating the contour.
    return self._splitAndInsertAtSegmentAndT(segmentIndex, t, False)
def splitAndInsertPointAtSegmentAndT(self, segmentIndex, t):
    """
    Insert a point into the contour for the given
    **segmentIndex** and **t**.

    This posts a *Contour.Changed* notification.
    """
    # insert=True: actually splice the new point(s) into the contour.
    self._splitAndInsertAtSegmentAndT(segmentIndex, t, True)
def _splitAndInsertAtSegmentAndT(self, segmentIndex, t, insert):
    """Shared implementation for the two public split methods.

    Computes the on-curve position at interpolation factor *t* within
    segment *segmentIndex*. When *insert* is true, the new point (plus any
    required off-curve points) is also spliced into ``self._points``.

    Returns ``(insertionPoint, pointWillBeSmooth)``.
    """
    segments = self.segments
    segment = segments[segmentIndex]
    # Prepend the previous segment's final on-curve point so the segment
    # carries its own start point.
    segment.insert(0, segments[segmentIndex-1][-1])
    firstPoint = segment[0]
    lastPoint = segment[-1]
    segmentType = lastPoint.segmentType
    segment = [(point.x, point.y) for point in segment]
    if segmentType == "line":
        # Linear interpolation between the two end points.
        (x1, y1), (x2, y2) = segment
        x = x1 + (x2 - x1) * t
        y = y1 + (y2 - y1) * t
        pointsToInsert = [((x, y), "line", False)]
        insertionPoint = (x, y)
        pointWillBeSmooth = False
    elif segmentType == "curve":
        # Split the cubic into two cubics meeting at t; the join point
        # (pt4) becomes a smooth on-curve point.
        pt1, pt2, pt3, pt4 = segment
        (pt1, pt2, pt3, pt4), (pt5, pt6, pt7, pt8) = bezierTools.splitCubicAtT(pt1, pt2, pt3, pt4, t)
        pointsToInsert = [(pt2, None, False), (pt3, None, False), (pt4, "curve", True), (pt6, None, False), (pt7, None, False)]
        insertionPoint = tuple(pt4)
        pointWillBeSmooth = True
    else:
        # XXX could be a quad. in that case, we could handle it.
        raise NotImplementedError("unknown segment type: %s" % segmentType)
    if insert:
        # Splice the new points between the segment's start and end points.
        firstPointIndex = self._points.index(firstPoint)
        lastPointIndex = self._points.index(lastPoint)
        firstPoints = self._points[:firstPointIndex + 1]
        if firstPointIndex == len(self._points) - 1:
            # The segment wraps around the start of the point list.
            firstPoints = firstPoints[lastPointIndex:]
            lastPoints = []
        elif lastPointIndex == 0:
            lastPoints = []
        else:
            lastPoints = self._points[lastPointIndex:]
        newPoints = [self._pointClass(pos, segmentType=segmentType, smooth=smooth) for pos, segmentType, smooth in pointsToInsert]
        self._points = firstPoints + newPoints + lastPoints
        self.dirty = True
    return insertionPoint, pointWillBeSmooth
# -----------
# Pen methods
# -----------
def beginPath(self, identifier=None):
    """
    Standard point pen *beginPath* method.
    This should not be used externally.
    """
    # Note: this assignment goes through the ``identifier`` property setter
    # defined below (duplicate check + notifications).
    self.identifier = identifier
def endPath(self):
    """
    Standard point pen *endPath* method.
    This should not be used externally.
    """
    # Nothing to finalize; points are appended incrementally by addPoint.
    pass
def addPoint(self, coordinates, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
    """
    Standard point pen *addPoint* method.
    This should not be used externally.
    """
    # ``coordinates`` replaces the 2to3 placeholder ``xxx_todo_changeme2``;
    # the Python 2 signature was ``def addPoint(self, (x, y), ...)``.
    (x, y) = coordinates
    point = self._pointClass((x, y), segmentType=segmentType, smooth=smooth, name=name, identifier=identifier)
    # Append at the end of the point list.
    self.insertPoint(len(self._points), point)
def draw(self, pen):
    """
    Draw the contour with **pen**.
    """
    # Adapt the segment pen to the point pen protocol and reuse drawPoints.
    from robofab.pens.adapterPens import PointToSegmentPen
    pointPen = PointToSegmentPen(pen)
    self.drawPoints(pointPen)
def drawPoints(self, pointPen):
    """
    Draw the contour with **pointPen**.
    """
    # Older point pens may not accept the ``identifier`` kwarg; fall back
    # to the legacy signature and warn that the identifier is dropped.
    try:
        pointPen.beginPath(identifier=self.identifier)
    except TypeError:
        pointPen.beginPath()
        warn("The beginPath method needs an identifier kwarg. The contour's identifier value has been discarded.", DeprecationWarning)
    for point in self._points:
        try:
            pointPen.addPoint((point.x, point.y), segmentType=point.segmentType, smooth=point.smooth, name=point.name, identifier=point.identifier)
        except TypeError:
            pointPen.addPoint((point.x, point.y), segmentType=point.segmentType, smooth=point.smooth, name=point.name)
            warn("The addPoint method needs an identifier kwarg. The point's identifier value has been discarded.", DeprecationWarning)
    pointPen.endPath()
# ----------
# Identifier
# ----------
def _get_identifiers(self):
    # The identifier set lives on the owning glyph; an orphan contour
    # gets a throwaway empty set so duplicate checks still work.
    identifiers = None
    glyph = self.glyph
    if glyph is not None:
        identifiers = glyph.identifiers
    if identifiers is None:
        identifiers = set()
    return identifiers

identifiers = property(_get_identifiers, doc="Set of identifiers for the glyph that this contour belongs to. This is primarily for internal use.")
def _get_identifier(self):
    return self._identifier

def _set_identifier(self, value):
    oldIdentifier = self.identifier
    if value == oldIdentifier:
        # No-op; avoid posting spurious notifications.
        return
    # don't allow a duplicate
    # NOTE: this deliberately uses ``assert``; the doctests below rely on
    # AssertionError being raised for duplicate identifiers.
    identifiers = self.identifiers
    assert value not in identifiers
    # free the old identifier
    if oldIdentifier in identifiers:
        identifiers.remove(oldIdentifier)
    # store (None clears the identifier without registering anything)
    self._identifier = value
    if value is not None:
        identifiers.add(value)
    # post notifications
    self.postNotification("Contour.IdentifierChanged", data=dict(oldValue=oldIdentifier, newValue=value))
    self.dirty = True

identifier = property(_get_identifier, _set_identifier, doc="The identifier. Setting this will post *Contour.IdentifierChanged* and *Contour.Changed* notifications.")
def generateIdentifier(self):
    """
    Create a new, unique identifier for and assign it to the contour.
    This will post *Contour.IdentifierChanged* and *Contour.Changed* notifications.
    """
    # Uniqueness is guaranteed against the glyph-wide identifier set.
    identifier = makeRandomIdentifier(existing=self.identifiers)
    self.identifier = identifier
def generateIdentifierForPoint(self, point):
    """
    Create a new, unique identifier and assign it to **point**.
    This will post a *Contour.Changed* notification (via ``dirty``).
    """
    # Note: this assigns directly to the point, bypassing the contour's
    # identifier property setter, so no duplicate bookkeeping happens here.
    identifier = makeRandomIdentifier(existing=self.identifiers)
    point.identifier = identifier
    self.dirty = True
# ------------------------
# Notification Observation
# ------------------------
def endSelfNotificationObservation(self):
    # Drop back references to the containing objects so the contour does
    # not keep the font hierarchy alive after observation ends.
    super(Contour, self).endSelfNotificationObservation()
    self._font = None
    self._layerSet = None
    self._layer = None
    self._glyph = None
# -----------------------------
# Serialization/Deserialization
# -----------------------------
def getDataForSerialization(self, **kwargs):
    """Return a serializable snapshot of this contour.

    The point data (including identifiers) is captured as a recorded
    sequence of point-pen protocol calls under the 'pen' key.
    """
    def get_points(key):
        # Record the point pen protocol calls; this captures the
        # identifier along with all of the point data.
        recorded_calls = []
        self.drawPoints(Recorder(recorded_calls))
        return recorded_calls

    getters = [('pen', get_points)]
    return self._serialize(getters, **kwargs)
def setDataFromSerialization(self, data):
    """Restore the contour from data made by getDataForSerialization."""
    self.clear()
    self.identifier = None
    if 'pen' in data:
        # Replay the recorded point-pen calls onto ourself.
        Recorder(data['pen'])._play(self)
class Recorder(object):
    """
    Records all method calls it receives in a list of tuples in the form of
    [(:str:command, :list:args, :dict:kwargs)]

    Method calls to be recorded must not start with an underscore.

    This class defines a public method with a private(!) attribute name:
    "Recorder._play(self, target)" because that way calls to all methods
    that don't start with underscores can be recorded.

    This is useful to record the commands of both pen protocols and it may
    become useful for other things as well, like recording undo commands.

    Example session, PointPen:

        data_glyphA = []
        recorderPointPen = Recorder(data_glyphA)
        glyphA.drawPoints(recorderPointPen)
        # The point data of the glyph is now stored within data_glyphA.
        # We can either replay it immediately or take it away and use it
        # to replay it later:
        stored_data = pickle.dumps(data_glyphA)
        restored_data_glyphA = pickle.loads(stored_data)
        player = Recorder(restored_data_glyphA)
        # The recorder behaves like glyphA.drawPoints:
        player._play(glyphB)

    Example session, SegmentPen:

        data_glyphA = []
        recorderPen = Recorder(data_glyphA)
        glyphA.draw(recorderPen)
        # Reuse it immediately; the recorder behaves like glyphA.draw:
        recorderPen._play(glyphB)
    """

    def __init__(self, data=None):
        # Write via self.__dict__ because __setattr__ is disabled below.
        self.__dict__['_data'] = data if data is not None else []

    def _play(self, target):
        """Replay all method calls recorded by this Recorder onto *target*.

        Public method(!) despite the underscore; see the class docstring.
        """
        for cmd, args, kwds in self._data:
            getattr(target, cmd)(*args, **kwds)

    def __setattr__(self, name, value):
        # Attribute assignment is blocked so that arbitrary method names
        # can be recorded without colliding with real attributes.
        raise AttributeError('It\'s not allowed to set attributes here.', name)

    def __getattr__(self, name):
        if name.startswith('_'):
            raise AttributeError(name)
        def command(*args, **kwds):
            self._data.append((name, args, kwds))
        # cache the method; don't use __setattr__ (it raises)
        self.__dict__[name] = command
        return command
# -----
# Tests
# -----
# -----------------------------------------------------------------------
# Doctest containers: each _test* function below exists only for its
# docstring, which doctest.testmod() collects when this module is run
# as a script.
# -----------------------------------------------------------------------

def _testIdentifier():
    """
    >>> from defcon import Glyph
    >>> glyph = Glyph()
    >>> contour = Contour()
    >>> glyph.appendContour(contour)
    >>> contour.identifier = "contour 1"
    >>> contour.identifier
    'contour 1'
    >>> list(sorted(glyph.identifiers))
    ['contour 1']
    >>> contour = Contour()
    >>> glyph.appendContour(contour)
    >>> contour.identifier = "contour 1"
    Traceback (most recent call last):
        ...
    AssertionError
    >>> contour.identifier = "contour 2"
    >>> list(sorted(glyph.identifiers))
    ['contour 1', 'contour 2']
    >>> contour.identifier = "not contour 2 anymore"
    >>> contour.identifier
    'not contour 2 anymore'
    >>> list(sorted(glyph.identifiers))
    ['contour 1', 'not contour 2 anymore']
    >>> contour.identifier = None
    >>> contour.identifier
    >>> list(sorted(glyph.identifiers))
    ['contour 1']
    """

def _testBounds():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.bounds
    (0, 0, 700, 700)
    """

def _testControlPointBounds():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.controlPointBounds
    (0, 0, 700, 700)
    """

def _testClockwise():
    """
    # get
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.clockwise
    False
    >>> contour = font['A'][1]
    >>> contour.clockwise
    True
    >>> contour._clockwiseCache = None
    >>> contour.clockwise = False
    >>> contour.clockwise
    False
    >>> contour._clockwiseCache = None
    >>> contour.clockwise = True
    >>> contour.clockwise
    True

    # set
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.clockwise = False
    >>> contour.clockwise
    False
    >>> contour._clockwiseCache = None
    >>> contour.clockwise = True
    >>> contour.clockwise
    True
    """

def _testOpen():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath('TestOpenContour.ufo'))
    >>> glyph = font['A']
    >>> glyph[0].open
    True
    >>> glyph[1].open
    False
    >>> glyph[2].open
    True
    >>> glyph[3].open
    False
    """

def _testOnCurvePoints():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> glyph = font['A']
    >>> contour = glyph[0]
    >>> len(contour.onCurvePoints)
    4
    >>> [(point.x, point.y) for point in contour.onCurvePoints]
    [(0, 0), (700, 0), (700, 700), (0, 700)]

    >>> glyph = font['B']
    >>> contour = glyph[0]
    >>> len(contour.onCurvePoints)
    4
    >>> [(point.x, point.y) for point in contour.onCurvePoints]
    [(0, 350), (350, 0), (700, 350), (350, 700)]
    """

def _testSegments():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> def simpleSegment(segment):
    ...     return [(i.x, i.y, i.segmentType) for i in segment]
    >>> font = Font(getTestFontPath())
    >>> glyph = font['A']
    >>> contour = glyph[0]
    >>> [simpleSegment(segment) for segment in contour.segments]
    [[(700, 0, 'line')], [(700, 700, 'line')], [(0, 700, 'line')], [(0, 0, 'line')]]
    >>> glyph = font['B']
    >>> contour = glyph[0]
    >>> [simpleSegment(segment) for segment in contour.segments]
    [[(0, 157, None), (157, 0, None), (350, 0, 'curve')], [(543, 0, None), (700, 157, None), (700, 350, 'curve')], [(700, 543, None), (543, 700, None), (350, 700, 'curve')], [(157, 700, None), (0, 543, None), (0, 350, 'curve')]]
    """

def _testLen():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> len(contour)
    4

    >>> contour = font['B'][0]
    >>> len(contour)
    12
    """

def _testIter():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> [(point.x, point.y) for point in contour]
    [(0, 0), (700, 0), (700, 700), (0, 700)]
    """

def _testReverse():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.reverse()
    >>> [(point.x, point.y) for point in contour._points]
    [(0, 0), (0, 700), (700, 700), (700, 0)]
    >>> contour.reverse()
    >>> [(point.x, point.y) for point in contour._points]
    [(0, 0), (700, 0), (700, 700), (0, 700)]
    """

def _testMove():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.move((100, 100))
    >>> contour.bounds
    (100, 100, 800, 800)
    >>> contour.dirty = True
    """

def _testPointInside():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.pointInside((100, 100))
    True
    >>> contour.pointInside((0, 0))
    False
    >>> contour.pointInside((-100, -100))
    False
    """

def _testIndex():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['B'][0]
    >>> 2 == contour.index(contour[2])
    True
    """

def _testSetStartPoint():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['B'][0]
    >>> start = [(point.segmentType, point.x, point.y) for point in contour]
    >>> contour.setStartPoint(6)
    >>> contour.dirty
    True
    >>> contour.setStartPoint(6)
    >>> end = [(point.segmentType, point.x, point.y) for point in contour]
    >>> start == end
    True
    >>> contour = font['A'][0]
    >>> start = [(point.segmentType, point.x, point.y) for point in contour]
    >>> contour.setStartPoint(2)
    >>> contour.setStartPoint(2)
    >>> end = [(point.segmentType, point.x, point.y) for point in contour]
    >>> start == end
    True
    >>> contour = font['B'][0]
    >>> start = [(point.segmentType, point.x, point.y) for point in contour]
    >>> contour.setStartPoint(3)
    >>> contour.setStartPoint(9)
    >>> end = [(point.segmentType, point.x, point.y) for point in contour]
    >>> start == end
    True
    """

def _testPositionForProspectivePointInsertionAtSegmentAndT():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.positionForProspectivePointInsertionAtSegmentAndT(0, .5)
    ((350.0, 0.0), False)
    >>> contour = font['B'][0]
    >>> contour.positionForProspectivePointInsertionAtSegmentAndT(0, .5)
    ((102.625, 102.625), True)
    >>> contour = font['B'][1]
    >>> contour.positionForProspectivePointInsertionAtSegmentAndT(0, .5)
    ((226.125, 473.5), True)
    """

def _testSplitAndInsertPointAtSegmentAndT():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.splitAndInsertPointAtSegmentAndT(0, .5)
    >>> [(point.x, point.y, point.segmentType) for point in contour]
    [(0, 0, 'line'), (350.0, 0.0, 'line'), (700, 0, 'line'), (700, 700, 'line'), (0, 700, 'line')]
    >>> contour = font['B'][0]
    >>> contour.splitAndInsertPointAtSegmentAndT(0, .5)
    >>> [(point.x, point.y, point.segmentType) for point in contour]
    [(0, 350, 'curve'), (0.0, 253.5, None), (39.25, 166.0, None), (102.625, 102.625, 'curve'), (166.0, 39.25, None), (253.5, 0.0, None), (350, 0, 'curve'), (543, 0, None), (700, 157, None), (700, 350, 'curve'), (700, 543, None), (543, 700, None), (350, 700, 'curve'), (157, 700, None), (0, 543, None)]
    """
def _testRemoveSegment():
"""
>>> print "need removeSegment tests!"
"""
if __name__ == "__main__":
    # Run the doctests defined in the _test* functions above.
    import doctest
    doctest.testmod()
|
|
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
                          SupervisedFloatMixin,
                          RegressorMixin):
    """Regression based on k-nearest neighbors.

    The target is predicted by local interpolation of the targets
    associated of the nearest neighbors in the training set.

    Read more in the :ref:`User Guide <regression>`.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
        Doesn't affect :meth:`fit` method.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> neigh = KNeighborsRegressor(n_neighbors=2)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    RadiusNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
       training data.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, n_jobs=1,
                 **kwargs):
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs, **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            # Treat single-output y as an (n_samples, 1) column internally.
            _y = _y.reshape((-1, 1))

        if weights is None:
            # Uniform weights: plain mean over the k neighbors.
            y_pred = np.mean(_y[neigh_ind], axis=1)
        else:
            # Weighted mean, computed one output column at a time.
            y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
            denom = np.sum(weights, axis=1)
            for j in range(_y.shape[1]):
                num = np.sum(_y[neigh_ind, j] * weights, axis=1)
                y_pred[:, j] = num / denom

        if self._y.ndim == 1:
            # Restore the 1-D shape for single-output problems.
            y_pred = y_pred.ravel()

        return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
                               SupervisedFloatMixin,
                               RegressorMixin):
    """Regression based on neighbors within a fixed radius.

    The target is predicted by local interpolation of the targets
    associated of the nearest neighbors in the training set.

    Read more in the :ref:`User Guide <regression>`.

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for
        :meth:`radius_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsRegressor
    >>> neigh = RadiusNeighborsRegressor(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    KNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          p=p, metric=metric, metric_params=metric_params,
                          **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.radius_neighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            # Treat single-output y as an (n_samples, 1) column internally.
            _y = _y.reshape((-1, 1))

        # Neighborhood sizes vary per query, so aggregate per sample.
        # NOTE(review): a query point with an empty neighborhood makes
        # np.mean/np.average operate on an empty slice -- confirm callers
        # guarantee at least one neighbor within the radius.
        if weights is None:
            y_pred = np.array([np.mean(_y[ind, :], axis=0)
                               for ind in neigh_ind])
        else:
            y_pred = np.array([(np.average(_y[ind, :], axis=0,
                                           weights=weights[i]))
                               for (i, ind) in enumerate(neigh_ind)])

        if self._y.ndim == 1:
            # Restore the 1-D shape for single-output problems.
            y_pred = y_pred.ravel()

        return y_pred
|
|
# -*- python -*-
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import optparse
import os
import re
import socket
import subprocess
import sys
import unittest
# Expected returncode of a process we forcibly kill.
if sys.platform == 'win32':
    RETURNCODE_KILL = -9
else:
    # -9 & 0xff == 247: the SIGKILL status as it appears when propagated
    # through an 8-bit exit-status byte.
    RETURNCODE_KILL = -9 & 0xff
def AssertEquals(x, y):
    """Raise AssertionError with a descriptive message when *x* != *y*."""
    if x != y:
        message = '%r != %r' % (x, y)
        raise AssertionError(message)
def ParseNumber(number):
    """Parse a decimal or '0x'-prefixed hexadecimal string into an int."""
    if number.startswith('0x'):
        return int(number[len('0x'):], 16)
    return int(number)
def FilenameToUnix(path):
    """Convert backslashes in *path* to forward slashes.

    GDB uses bash-like escaping, so Windows-style backslash paths must be
    normalized before being embedded in GDB commands.  (The parameter was
    previously named ``str``, shadowing the builtin.)
    """
    return path.replace('\\', '/')
def MakeOutFileName(output_dir, name, ext):
    """Build an output-file path for *name* with extension *ext*.

    File name should be consistent with the .out file name from nacl.scons.
    """
    basename = 'gdb_%s%s' % (name, ext)
    return os.path.join(output_dir, basename)
def KillProcess(process):
    """Forcibly terminate *process* and reap it, tolerating already-dead processes."""
    try:
        process.kill()
    except OSError:
        # If process is already terminated, kill() throws
        # "WindowsError: [Error 5] Access is denied" on Windows.
        pass
    # Reap the process to avoid leaving a zombie.
    process.wait()
# Address of the GDB remote-serial-protocol stub; matches the
# 'target remote :4014' command issued in Gdb.Reconnect().
SEL_LDR_RSP_SOCKET_ADDR = ('localhost', 4014)


def EnsurePortIsAvailable(addr=SEL_LDR_RSP_SOCKET_ADDR):
    # As a sanity check, check that the TCP port is available by binding
    # to it ourselves (and then unbinding).  Otherwise, we could end up
    # talking to an old instance of sel_ldr that is still hanging
    # around, or to some unrelated service that uses the same port
    # number.  Of course, there is still a race condition because an
    # unrelated process could bind the port after we unbind.
    sock = socket.socket()
    # SO_REUSEADDR lets the bind succeed even if a previous socket on
    # this port is lingering in TIME_WAIT.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    sock.bind(addr)
    sock.close()
def DecodeNexeArgsForSubprocess(arg_list):
    """Split the comma-separated --nexe_args option value into an argv list.

    Returns [] for both None and the empty string.  (Previously
    ``''.split(',')`` yielded ``['']``, which would pass one bogus empty
    argument to the nexe.)
    """
    if not arg_list:
        return []
    return arg_list.split(',')
def LaunchSelLdr(sel_ldr_command, options, name):
    """Start sel_ldr for the nexe under test and return the Popen handle.

    NOTE(review): '-g' presumably enables the GDB debug stub (the tests
    later connect to port 4014) and '-a' relaxes sandbox restrictions --
    confirm against sel_ldr's option documentation.
    """
    args = sel_ldr_command + ['-g', '-a']
    if options.irt is not None:
        args += ['-B', options.irt]
    if options.ld_so is not None:
        # Dynamic linking: sel_ldr runs the dynamic linker, which then
        # loads the nexe using --library-path.
        args += ['--', options.ld_so,
                 '--library-path', options.library_path]
    args += ([FilenameToUnix(options.nexe)] +
             DecodeNexeArgsForSubprocess(options.nexe_args) +
             [name])
    # Sanity-check that the debug stub port is not already taken.
    EnsurePortIsAvailable()
    return subprocess.Popen(args)
def GenerateManifest(output_dir, nexe, runnable_ld, name):
    """Write a .nmf manifest for dynamic linking and return its filename.

    The dynamic linker is listed as the program for both architectures and
    the real nexe is exposed under the "main.nexe" key, with URLs relative
    to the manifest's directory.
    """
    manifest_filename = MakeOutFileName(output_dir, name, '.nmf')
    manifest_dir = os.path.dirname(manifest_filename)

    def relative_url(filename):
        # Manifest URLs are relative to the manifest file itself.
        return {'url': os.path.relpath(filename, manifest_dir)}

    runnable_ld_url = relative_url(runnable_ld)
    nexe_url = relative_url(nexe)
    manifest = {
        'program': {
            'x86-32': runnable_ld_url,
            'x86-64': runnable_ld_url,
        },
        'files': {
            'main.nexe': {
                'x86-32': nexe_url,
                'x86-64': nexe_url,
            },
        },
    }
    with open(manifest_filename, 'w') as manifest_file:
        json.dump(manifest, manifest_file)
    return manifest_filename
class RecordParser(object):
    """Parser for a single GDB/MI output record.

    A record looks like ``^done,key="value",other={a="1",b=[...]}``: a
    status word followed by optional comma-separated key=value pairs,
    where each value is a quoted string, a {dict} or a [list].
    """

    # Status word: everything up to the first comma.
    STATUS_RE = re.compile('[^,]+')
    # A key followed by '='; keys cannot contain '"', '{', '[' or '='.
    KEY_RE = re.compile('([^"{\[=]+)=')
    # A value starts with '"', '{' or '['.
    VALUE_PREFIX_RE = re.compile('"|{|\[')
    # String value body up to the closing quote.
    # NOTE(review): escaped quotes are not handled -- presumably the MI
    # output consumed here never contains them; confirm if that changes.
    STRING_VALUE_RE = re.compile('([^"]*)"')

    def __init__(self, line):
        self.line = line
        # Current parse position within self.line.
        self.pos = 0

    def Skip(self, c):
        # Consume the literal string c at the current position, if present.
        if self.line.startswith(c, self.pos):
            self.pos += len(c)
            return True
        return False

    def Match(self, r):
        # Match regex r at the current position, advancing past the match.
        # Returns the match object, or None on no match.
        match = r.match(self.line, self.pos)
        if match is not None:
            self.pos = match.end()
        return match

    def ParseString(self):
        # The opening quote has already been consumed by ParseValue.
        string_value_match = self.Match(self.STRING_VALUE_RE)
        assert string_value_match is not None
        return string_value_match.group(1)

    def ParseValue(self):
        # Dispatch on the value's opening character.
        value_prefix_match = self.Match(self.VALUE_PREFIX_RE)
        assert value_prefix_match is not None
        if value_prefix_match.group(0) == '"':
            return self.ParseString()
        elif value_prefix_match.group(0) == '{':
            return self.ParseDict()
        else:
            return self.ParseList()

    def ParseListMembers(self):
        result = []
        while True:
            # List syntax:
            #   [foo, bar]
            #   [foo=x, bar=y] - we parse this as [{foo=x}, {bar=y}]
            key_match = self.Match(self.KEY_RE)
            value = self.ParseValue()
            if key_match is not None:
                result.append({key_match.group(1): value})
            else:
                result.append(value)
            if not self.Skip(','):
                break
        return result

    def ParseList(self):
        # The opening '[' has already been consumed by ParseValue.
        if self.Skip(']'):
            return []
        result = self.ParseListMembers()
        assert self.Skip(']')
        return result

    def ParseDictMembers(self):
        result = {}
        while True:
            key_match = self.Match(self.KEY_RE)
            assert key_match is not None
            result[key_match.group(1)] = self.ParseValue()
            if not self.Skip(','):
                break
        return result

    def ParseDict(self):
        # The opening '{' has already been consumed by ParseValue.
        if self.Skip('}'):
            return {}
        result = self.ParseDictMembers()
        assert self.Skip('}')
        return result

    def Parse(self):
        """Parse the whole record; returns (status_word, results_dict)."""
        status_match = self.Match(self.STATUS_RE)
        assert status_match is not None
        result = {}
        if self.Skip(','):
            result = self.ParseDictMembers()
        # The entire line must have been consumed.
        AssertEquals(self.pos, len(self.line))
        return (status_match.group(0), result)
class Gdb(object):
def __init__(self, options, name):
self._options = options
self._name = name
args = [options.gdb, '--interpreter=mi']
self._log = sys.stderr
self._gdb = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
self._expected_success = True
def Wait(self):
# Require a graceful exit from gdb.
self._gdb.communicate()
AssertEquals(self._gdb.returncode == 0, self._expected_success)
def _SendRequest(self, request):
self._log.write('To GDB: %s\n' % request)
self._gdb.stdin.write(request)
self._gdb.stdin.write('\n')
return self._GetResponse()
def _GetResponse(self):
results = []
while True:
line = self._gdb.stdout.readline().rstrip()
if line == '':
return results
self._log.write('From GDB: %s\n' % line)
if line == '(gdb)':
return results
results.append(line)
def _GetResultRecord(self, result):
for line in result:
if line.startswith('^'):
return RecordParser(line).Parse()
raise AssertionError('No result record found in %r' % result)
def _GetLastExecAsyncRecord(self, result):
for line in reversed(result):
if line.startswith('*'):
return RecordParser(line).Parse()
raise AssertionError('No asynchronous execute status record found in %r'
% result)
def Command(self, command):
status, items = self._GetResultRecord(self._SendRequest(command))
AssertEquals(status, '^done')
return items
def ExpectToFailCommand(self, command):
status, items = self._GetResultRecord(self._SendRequest(command))
AssertEquals(status, '^error')
def ResumeCommand(self, command):
status, items = self._GetResultRecord(self._SendRequest(command))
AssertEquals(status, '^running')
status, items = self._GetLastExecAsyncRecord(self._GetResponse())
AssertEquals(status, '*stopped')
return items
def ResumeAndExpectStop(self, resume_command, expected_stop_reason):
stop_info = self.ResumeCommand(resume_command)
if 'reason' not in stop_info or stop_info['reason'] != expected_stop_reason:
raise AssertionError(
'GDB reported stop reason %r but we expected %r (full info is %r)'
% (stop_info.get('reason'), expected_stop_reason, stop_info))
def Quit(self):
status, items = self._GetResultRecord(self._SendRequest('-gdb-exit'))
AssertEquals(status, '^exit')
def Disconnect(self):
status, items = self._GetResultRecord(self._SendRequest('disconnect'))
AssertEquals(status, '^done')
def Detach(self):
status, items = self._GetResultRecord(self._SendRequest('detach'))
AssertEquals(status, '^done')
def Kill(self):
status, items = self._GetResultRecord(self._SendRequest('kill'))
AssertEquals(status, '^done')
def KillProcess(self):
self._expected_success = False
KillProcess(self._gdb)
def Eval(self, expression):
return self.Command('-data-evaluate-expression ' + expression)['value']
def GetPC(self):
return ParseNumber(self.Eval('$pc')) & ((1 << 32) - 1)
def LoadManifestFile(self):
assert self._manifest_file is not None
# gdb uses bash-like escaping which removes slashes from Windows paths.
self.Command('nacl-manifest ' + FilenameToUnix(self._manifest_file))
  def Connect(self):
    """Consume gdb's startup output, then connect to the target stub."""
    # Drain the initial banner/prompt output before issuing any commands.
    self._GetResponse()
    self.Reconnect()
def Reconnect(self):
if self._options.irt is not None:
self.Command('nacl-irt ' + FilenameToUnix(self._options.irt))
if self._options.ld_so is not None:
self._manifest_file = GenerateManifest(self._options.output_dir,
self._options.nexe,
self._options.ld_so,
self._name)
self.LoadManifestFile()
self.Command('set breakpoint pending on')
else:
self.Command('file ' + FilenameToUnix(self._options.nexe))
self.Command('target remote :4014')
def FetchMainNexe(self):
nexe_filename = MakeOutFileName(
self._options.output_dir, 'remote', '.nexe')
self.Command('remote get nexe ' + FilenameToUnix(nexe_filename))
return nexe_filename
  def FetchIrtNexe(self):
    """Download the IRT nexe from the target via gdb's remote protocol.

    Returns the local filename the nexe was written to.
    """
    # NOTE(review): this uses the same local filename as FetchMainNexe(),
    # so fetching both would overwrite one file with the other -- confirm
    # whether a distinct name (e.g. 'remote_irt') was intended.
    nexe_filename = MakeOutFileName(
        self._options.output_dir, 'remote', '.nexe')
    self.Command('remote get irt ' + FilenameToUnix(nexe_filename))
    return nexe_filename
def GetMainNexe(self):
if self._options.ld_so is not None:
return self._options.ld_so
return self._options.nexe
  def GetIrtNexe(self):
    """Return the local path of the IRT nexe (None when no --irt given)."""
    return self._options.irt
def DecodeOptions():
parser = optparse.OptionParser()
parser.add_option('--output_dir', help='Output directory for log files')
parser.add_option('--gdb', help='Filename of GDB')
parser.add_option('--irt', help='Filename of irt.nexe (optional)')
parser.add_option('--ld_so', help='Filename of dynamic linker (optional)')
parser.add_option('--library_path',
help='Directory containing dynamic libraries, '
'if using dynamic linking (optional)')
parser.add_option('--nexe', help='Filename of main NaCl executable')
parser.add_option('--nexe_args', help='Comma-separated list of arguments')
return parser.parse_args()
def Main():
  """Entry point: decode options into globals, then run unittest.main()."""
  global g_options
  global g_sel_ldr_command
  g_options, g_sel_ldr_command = DecodeOptions()
  # Strip our own options so unittest.main() does not try to parse them.
  sys.argv = [sys.argv[0]]
  unittest.main()
class GdbTest(unittest.TestCase):
  """Base class for tests of gdb, assumes a single sel_ldr + gdb."""
  def GetTestName(self):
    """Return the short test name, e.g. 'foo' for test method 'test_foo'."""
    parts = self.id().split('.')
    return parts[-1][len('test_'):]
  def AssertSelLdrExits(self, expected_returncode=RETURNCODE_KILL):
    """Wait for sel_ldr to exit and check its return code."""
    self.sel_ldr.wait()
    self.assertEqual(expected_returncode, self.sel_ldr.returncode)
  def LaunchSelLdr(self):
    """Start sel_ldr using the globally-decoded command line and options."""
    self.sel_ldr = LaunchSelLdr(
        g_sel_ldr_command, g_options, self.GetTestName())
  def LaunchGdb(self):
    """Start gdb and connect it to sel_ldr; kill sel_ldr on startup failure."""
    try:
      self.gdb = Gdb(g_options, self.GetTestName())
      self.gdb.Connect()
    except:
      # Don't leave sel_ldr running if gdb failed to come up.
      KillProcess(self.sel_ldr)
      raise
  def setUp(self):
    self.LaunchSelLdr()
    self.LaunchGdb()
  def tearDown(self):
    try:
      # Attempt a clean shutdown first, then verify sel_ldr's exit code.
      if self.gdb:
        self.gdb.Quit()
        self.gdb.Wait()
      self.AssertSelLdrExits()
    finally:
      # Always reap both processes, even if the clean shutdown failed.
      if self.gdb:
        self.gdb.KillProcess()
      KillProcess(self.sel_ldr)
|
|
import os
import random
import time
import hashlib
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from six.moves.urllib.parse import urlparse
from six import BytesIO
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.pipelines.files import FilesPipeline, FSFilesStore, S3FilesStore, GCSFilesStore
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.utils.python import to_bytes
from scrapy.utils.test import assert_aws_environ, get_s3_content_and_delete
from scrapy.utils.test import assert_gcs_environ, get_gcs_content_and_delete
from scrapy.utils.boto import is_botocore
from tests import mock
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class FilesPipelineTestCase(unittest.TestCase):
    """Tests FilesPipeline path generation, filesystem storage and expiry."""
    def setUp(self):
        # Each test gets a fresh pipeline writing into a temporary FILES_STORE.
        self.tempdir = mkdtemp()
        self.pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
        self.pipeline.download_func = _mocked_download_func
        self.pipeline.open_spider(None)
    def tearDown(self):
        rmtree(self.tempdir)
    def test_file_path(self):
        """file_path() should derive a stable SHA1-based store path from the URL."""
        file_path = self.pipeline.file_path
        self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
                         'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
        self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt")),
                         'full/4ce274dd83db0368bafd7e406f382ae088e39219.txt')
        self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")),
                         'full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc')
        self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
                         'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
        # URLs without a file extension produce a path with no suffix.
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
                         'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2')
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
                         'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
        # Passing response/info must not change the computed path.
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
                                   response=Response("http://www.dorma.co.uk/images/product_details/2532"),
                                   info=object()),
                         'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
    def test_fs_store(self):
        """The default store is FSFilesStore rooted at FILES_STORE."""
        assert isinstance(self.pipeline.store, FSFilesStore)
        self.assertEqual(self.pipeline.store.basedir, self.tempdir)
        path = 'some/image/key.jpg'
        fullpath = os.path.join(self.tempdir, 'some', 'image', 'key.jpg')
        self.assertEqual(self.pipeline.store._get_filesystem_path(path), fullpath)
    @defer.inlineCallbacks
    def test_file_not_expired(self):
        """A freshly-modified stored file is reused; its checksum is reported."""
        item_url = "http://example.com/file.pdf"
        item = _create_item_with_files(item_url)
        # Patchers are started/stopped manually so the patched behavior spans
        # the whole inlineCallbacks body, including the yielded deferred.
        patchers = [
            mock.patch.object(FilesPipeline, 'inc_stats', return_value=True),
            mock.patch.object(FSFilesStore, 'stat_file', return_value={
                'checksum': 'abc', 'last_modified': time.time()}),
            mock.patch.object(FilesPipeline, 'get_media_requests',
                              return_value=[_prepare_request_object(item_url)])
        ]
        for p in patchers:
            p.start()
        result = yield self.pipeline.process_item(item, None)
        self.assertEqual(result['files'][0]['checksum'], 'abc')
        for p in patchers:
            p.stop()
    @defer.inlineCallbacks
    def test_file_expired(self):
        """A file older than the expiry window is re-downloaded."""
        item_url = "http://example.com/file2.pdf"
        item = _create_item_with_files(item_url)
        patchers = [
            # last_modified is set to twice the expiry age to force a refetch.
            mock.patch.object(FSFilesStore, 'stat_file', return_value={
                'checksum': 'abc',
                'last_modified': time.time() - (self.pipeline.expires * 60 * 60 * 24 * 2)}),
            mock.patch.object(FilesPipeline, 'get_media_requests',
                              return_value=[_prepare_request_object(item_url)]),
            mock.patch.object(FilesPipeline, 'inc_stats', return_value=True)
        ]
        for p in patchers:
            p.start()
        result = yield self.pipeline.process_item(item, None)
        self.assertNotEqual(result['files'][0]['checksum'], 'abc')
        for p in patchers:
            p.stop()
class DeprecatedFilesPipeline(FilesPipeline):
    """Pipeline overriding the deprecated file_key() hook, for shim tests."""
    def file_key(self, url):
        """Return a store path under 'empty/' derived from the URL's SHA1."""
        digest = hashlib.sha1(to_bytes(url)).hexdigest()
        extension = os.path.splitext(url)[1]
        return 'empty/%s%s' % (digest, extension)
class DeprecatedFilesPipelineTestCase(unittest.TestCase):
    """Checks the deprecation shim around the legacy file_key() API."""
    def setUp(self):
        self.tempdir = mkdtemp()
    def tearDown(self):
        rmtree(self.tempdir)
    def init_pipeline(self, pipeline_class):
        """Build *pipeline_class* against the temporary store and open it."""
        settings = Settings({'FILES_STORE': self.tempdir})
        self.pipeline = pipeline_class.from_settings(settings)
        self.pipeline.download_func = _mocked_download_func
        self.pipeline.open_spider(None)
    def test_default_file_key_method(self):
        """Calling file_key() on the base pipeline warns but still works."""
        self.init_pipeline(FilesPipeline)
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            key = self.pipeline.file_key("https://dev.mydeco.com/mydeco.pdf")
            self.assertEqual(key,
                             'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
            self.assertEqual(len(caught), 1)
            self.assertTrue('file_key(url) method is deprecated' in str(caught[-1].message))
    def test_overridden_file_key_method(self):
        """file_path() should delegate to an overridden file_key(), warning once."""
        self.init_pipeline(DeprecatedFilesPipeline)
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            path = self.pipeline.file_path(Request("https://dev.mydeco.com/mydeco.pdf"))
            self.assertEqual(path,
                             'empty/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
            self.assertEqual(len(caught), 1)
            self.assertTrue('file_key(url) method is deprecated' in str(caught[-1].message))
class FilesPipelineTestCaseFields(unittest.TestCase):
    """Verifies which item fields the pipeline reads from and writes to."""
    def test_item_fields_default(self):
        """With default settings the pipeline uses 'file_urls' and 'files'."""
        class TestItem(Item):
            name = Field()
            file_urls = Field()
            files = Field()
        url = 'http://www.example.com/files/1.txt'
        # Both Item subclasses and plain dicts must be supported.
        for item_cls in TestItem, dict:
            item = item_cls({'name': 'item1', 'file_urls': [url]})
            pipeline = FilesPipeline.from_settings(
                Settings({'FILES_STORE': 's3://example/files/'}))
            media_requests = list(pipeline.get_media_requests(item, None))
            self.assertEqual(media_requests[0].url, url)
            results = [(True, {'url': url})]
            pipeline.item_completed(results, item, None)
            self.assertEqual(item['files'], [results[0][1]])
    def test_item_fields_override_settings(self):
        """FILES_URLS_FIELD / FILES_RESULT_FIELD settings redirect the fields."""
        class TestItem(Item):
            name = Field()
            files = Field()
            stored_file = Field()
        url = 'http://www.example.com/files/1.txt'
        for item_cls in TestItem, dict:
            item = item_cls({'name': 'item1', 'files': [url]})
            pipeline = FilesPipeline.from_settings(Settings({
                'FILES_STORE': 's3://example/files/',
                'FILES_URLS_FIELD': 'files',
                'FILES_RESULT_FIELD': 'stored_file'
            }))
            media_requests = list(pipeline.get_media_requests(item, None))
            self.assertEqual(media_requests[0].url, url)
            results = [(True, {'url': url})]
            pipeline.item_completed(results, item, None)
            self.assertEqual(item['stored_file'], [results[0][1]])
class FilesPipelineTestCaseCustomSettings(unittest.TestCase):
default_cls_settings = {
"EXPIRES": 90,
"FILES_URLS_FIELD": "file_urls",
"FILES_RESULT_FIELD": "files"
}
file_cls_attr_settings_map = {
("EXPIRES", "FILES_EXPIRES", "expires"),
("FILES_URLS_FIELD", "FILES_URLS_FIELD", "files_urls_field"),
("FILES_RESULT_FIELD", "FILES_RESULT_FIELD", "files_result_field")
}
def setUp(self):
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
def _generate_fake_settings(self, prefix=None):
def random_string():
return "".join([chr(random.randint(97, 123)) for _ in range(10)])
settings = {
"FILES_EXPIRES": random.randint(100, 1000),
"FILES_URLS_FIELD": random_string(),
"FILES_RESULT_FIELD": random_string(),
"FILES_STORE": self.tempdir
}
if not prefix:
return settings
return {prefix.upper() + "_" + k if k != "FILES_STORE" else k: v for k, v in settings.items()}
def _generate_fake_pipeline(self):
class UserDefinedFilePipeline(FilesPipeline):
EXPIRES = 1001
FILES_URLS_FIELD = "alfa"
FILES_RESULT_FIELD = "beta"
return UserDefinedFilePipeline
def test_different_settings_for_different_instances(self):
"""
If there are different instances with different settings they should keep
different settings.
"""
custom_settings = self._generate_fake_settings()
another_pipeline = FilesPipeline.from_settings(Settings(custom_settings))
one_pipeline = FilesPipeline(self.tempdir)
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
default_value = self.default_cls_settings[pipe_attr]
self.assertEqual(getattr(one_pipeline, pipe_attr), default_value)
custom_value = custom_settings[settings_attr]
self.assertNotEqual(default_value, custom_value)
self.assertEqual(getattr(another_pipeline, pipe_ins_attr), custom_value)
def test_subclass_attributes_preserved_if_no_settings(self):
"""
If subclasses override class attributes and there are no special settings those values should be kept.
"""
pipe_cls = self._generate_fake_pipeline()
pipe = pipe_cls.from_settings(Settings({"FILES_STORE": self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
custom_value = getattr(pipe, pipe_ins_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
self.assertEqual(getattr(pipe, pipe_ins_attr), getattr(pipe, pipe_attr))
def test_subclass_attrs_preserved_custom_settings(self):
"""
If file settings are defined but they are not defined for subclass
settings should be preserved.
"""
pipeline_cls = self._generate_fake_pipeline()
settings = self._generate_fake_settings()
pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
value = getattr(pipeline, pipe_ins_attr)
setting_value = settings.get(settings_attr)
self.assertNotEqual(value, self.default_cls_settings[pipe_attr])
self.assertEqual(value, setting_value)
def test_no_custom_settings_for_subclasses(self):
"""
If there are no settings for subclass and no subclass attributes, pipeline should use
attributes of base class.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = self.default_cls_settings.get(pipe_attr.upper())
self.assertEqual(getattr(user_pipeline, pipe_ins_attr), custom_value)
def test_custom_settings_for_subclasses(self):
"""
If there are custom settings for subclass and NO class attributes, pipeline should use custom
settings.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
prefix = UserDefinedFilesPipeline.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = settings.get(prefix + "_" + settings_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
def test_custom_settings_and_class_attrs_for_subclasses(self):
"""
If there are custom settings for subclass AND class attributes
setting keys are preferred and override attributes.
"""
pipeline_cls = self._generate_fake_pipeline()
prefix = pipeline_cls.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_cls_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
custom_value = settings.get(prefix + "_" + settings_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_cls_attr])
self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
def test_cls_attrs_with_DEFAULT_prefix(self):
class UserDefinedFilesPipeline(FilesPipeline):
DEFAULT_FILES_RESULT_FIELD = "this"
DEFAULT_FILES_URLS_FIELD = "that"
pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
self.assertEqual(pipeline.files_result_field, "this")
self.assertEqual(pipeline.files_urls_field, "that")
def test_user_defined_subclass_default_key_names(self):
"""Test situation when user defines subclass of FilesPipeline,
but uses attribute names for default pipeline (without prefixing
them with pipeline class name).
"""
settings = self._generate_fake_settings()
class UserPipe(FilesPipeline):
pass
pipeline_cls = UserPipe.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
expected_value = settings.get(settings_attr)
self.assertEqual(getattr(pipeline_cls, pipe_inst_attr),
expected_value)
class TestS3FilesStore(unittest.TestCase):
    """Integration test for S3FilesStore (requires AWS credentials)."""
    @defer.inlineCallbacks
    def test_persist(self):
        """Round-trip a small payload through S3 and verify its metadata."""
        assert_aws_environ()
        uri = os.environ.get('S3_TEST_FILE_URI')
        if not uri:
            raise unittest.SkipTest("No S3 URI available for testing")
        payload = b"TestS3FilesStore: \xe2\x98\x83"
        buf = BytesIO(payload)
        meta = {'foo': 'bar'}
        path = ''
        store = S3FilesStore(uri)
        yield store.persist_file(
            path, buf, info=None, meta=meta,
            headers={'Content-Type': 'image/png'})
        stats = yield store.stat_file(path, info=None)
        self.assertIn('last_modified', stats)
        self.assertIn('checksum', stats)
        self.assertEqual(stats['checksum'], '3187896a9657a28163abb31667df64c8')
        parsed = urlparse(uri)
        content, key = get_s3_content_and_delete(
            parsed.hostname, parsed.path[1:], with_key=True)
        self.assertEqual(content, payload)
        if is_botocore():
            # botocore describes the object as a plain dict.
            self.assertEqual(key['Metadata'], {'foo': 'bar'})
            self.assertEqual(
                key['CacheControl'], S3FilesStore.HEADERS['Cache-Control'])
            self.assertEqual(key['ContentType'], 'image/png')
        else:
            # legacy boto exposes a Key object with attribute access.
            self.assertEqual(key.metadata, {'foo': 'bar'})
            self.assertEqual(
                key.cache_control, S3FilesStore.HEADERS['Cache-Control'])
            self.assertEqual(key.content_type, 'image/png')
class TestGCSFilesStore(unittest.TestCase):
    """Integration test for GCSFilesStore (requires GCS credentials)."""
    @defer.inlineCallbacks
    def test_persist(self):
        """Round-trip a small payload through GCS, verifying metadata and ACL."""
        assert_gcs_environ()
        uri = os.environ.get('GCS_TEST_FILE_URI')
        if not uri:
            raise unittest.SkipTest("No GCS URI available for testing")
        payload = b"TestGCSFilesStore: \xe2\x98\x83"
        buf = BytesIO(payload)
        meta = {'foo': 'bar'}
        path = 'full/filename'
        store = GCSFilesStore(uri)
        store.POLICY = 'authenticatedRead'
        expected_policy = {'role': 'READER', 'entity': 'allAuthenticatedUsers'}
        yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
        stats = yield store.stat_file(path, info=None)
        self.assertIn('last_modified', stats)
        self.assertIn('checksum', stats)
        self.assertEqual(stats['checksum'], 'zc2oVgXkbQr2EQdSdw3OPA==')
        parsed = urlparse(uri)
        content, acl, blob = get_gcs_content_and_delete(parsed.hostname,
                                                        parsed.path[1:] + path)
        self.assertEqual(content, payload)
        self.assertEqual(blob.metadata, {'foo': 'bar'})
        self.assertEqual(blob.cache_control, GCSFilesStore.CACHE_CONTROL)
        self.assertEqual(blob.content_type, 'application/octet-stream')
        self.assertIn(expected_policy, acl)
class ItemWithFiles(Item):
    """Minimal item exposing the default FilesPipeline fields."""
    # Input field: list of URLs for the pipeline to download.
    file_urls = Field()
    # Output field: download results populated by the pipeline.
    files = Field()
def _create_item_with_files(*files):
    """Return an ItemWithFiles whose 'file_urls' field holds the given URLs."""
    new_item = ItemWithFiles()
    new_item['file_urls'] = files
    return new_item
def _prepare_request_object(item_url):
    """Build a Request carrying a canned 200 response in its meta dict."""
    canned_response = Response(item_url, status=200, body=b'data')
    return Request(item_url, meta={'response': canned_response})
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible metadata describing the maturity and support level of this module.
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_routedomain
short_description: Manage route domains on a BIG-IP
description:
- Manage route domains on a BIG-IP
version_added: "2.2"
options:
bwc_policy:
description:
- The bandwidth controller for the route domain.
connection_limit:
description:
- The maximum number of concurrent connections allowed for the
route domain. Setting this to C(0) turns off connection limits.
description:
description:
- Specifies descriptive text that identifies the route domain.
flow_eviction_policy:
description:
- The eviction policy to use with this route domain. Apply an eviction
policy to provide customized responses to flow overflows and slow
flows on the route domain.
id:
description:
- The unique identifying integer representing the route domain.
required: true
parent:
description:
      - Specifies the route domain the system searches when it cannot
        find a route in the configured domain.
required: false
partition:
description:
- Partition to create the route domain on. Partitions cannot be updated
once they are created.
required: false
default: Common
routing_protocol:
description:
- Dynamic routing protocols for the system to use in the route domain.
choices:
- BFD
- BGP
- IS-IS
- OSPFv2
- OSPFv3
- PIM
- RIP
- RIPng
service_policy:
description:
- Service policy to associate with the route domain.
state:
description:
- Whether the route domain should exist or not.
required: false
default: present
choices:
- present
- absent
strict:
description:
- Specifies whether the system enforces cross-routing restrictions
or not.
choices:
- enabled
- disabled
vlans:
description:
- VLANs for the system to use in the route domain
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create a route domain
bigip_routedomain:
id: "1234"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Set VLANs on the route domain
bigip_routedomain:
id: "1234"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
vlans:
- net1
- foo
delegate_to: localhost
'''
RETURN = '''
id:
description: The ID of the route domain that was changed
returned: changed
type: int
sample: 2
description:
description: The description of the route domain
returned: changed
type: string
sample: "route domain foo"
strict:
description: The new strict isolation setting
returned: changed
type: string
sample: "enabled"
parent:
description: The new parent route domain
returned: changed
type: int
sample: 0
vlans:
description: List of new VLANs the route domain is applied to
returned: changed
type: list
sample: ['/Common/http-tunnel', '/Common/socks-tunnel']
routing_protocol:
description: List of routing protocols applied to the route domain
returned: changed
type: list
sample: ['bfd', 'bgp']
bwc_policy:
description: The new bandwidth controller
returned: changed
type: string
sample: /Common/foo
connection_limit:
description: The new connection limit for the route domain
returned: changed
type: integer
sample: 100
flow_eviction_policy:
description: The new eviction policy to use with this route domain
returned: changed
type: string
sample: /Common/default-eviction-policy
service_policy:
description: The new service policy to use with this route domain
returned: changed
type: string
  sample: /Common/my-service-policy
'''
# Import the F5 SDK if available; a missing SDK is reported as a clean
# module error at runtime (see BigIpRouteDomain.__init__) rather than an
# import-time traceback.
try:
    from f5.bigip import ManagementRoot
    from icontrol.session import iControlUnexpectedHTTPError
    HAS_F5SDK = True
except ImportError:
    HAS_F5SDK = False
# Dynamic routing protocols accepted by the 'routing_protocol' option.
PROTOCOLS = [
    'BFD', 'BGP', 'IS-IS', 'OSPFv2', 'OSPFv3', 'PIM', 'RIP', 'RIPng'
]
# Valid values for the 'strict' option.
STRICTS = ['enabled', 'disabled']
class BigIpRouteDomain(object):
    """Manages a single BIG-IP route domain through the F5 REST SDK.

    ``self.params`` holds the module arguments; ``self.cparams`` accumulates
    the parameters that were actually changed so they can be reported back.
    """

    def __init__(self, *args, **kwargs):
        if not HAS_F5SDK:
            raise F5ModuleError("The python f5-sdk module is required")

        # The params that change in the module
        self.cparams = dict()

        # Route domains are keyed by name on the device; the name is the
        # stringified route domain id.
        kwargs['name'] = str(kwargs['id'])

        # Stores the params that are sent to the module
        self.params = kwargs
        self.api = ManagementRoot(kwargs['server'],
                                  kwargs['user'],
                                  kwargs['password'],
                                  port=kwargs['server_port'],
                                  token=True)

    def absent(self):
        """Delete the route domain if present. Returns True when changed."""
        if not self.exists():
            return False

        if self.params['check_mode']:
            return True

        rd = self.api.tm.net.route_domains.route_domain.load(
            name=self.params['name'],
            partition=self.params['partition']
        )
        rd.delete()

        if self.exists():
            raise F5ModuleError("Failed to delete the route domain")
        else:
            return True

    def present(self):
        """Create or update the route domain. Returns True when changed."""
        if self.exists():
            return self.update()
        else:
            if self.params['check_mode']:
                return True
            return self.create()

    def read(self):
        """Read information and transform it

        The values that are returned by BIG-IP in the f5-sdk can have encoding
        attached to them as well as be completely missing in some cases.
        Therefore, this method will transform the data from the BIG-IP into a
        format that is more easily consumable by the rest of the class and the
        parameters that are supported by the module.
        """
        p = dict()
        r = self.api.tm.net.route_domains.route_domain.load(
            name=self.params['name'],
            partition=self.params['partition']
        )

        p['id'] = int(r.id)
        p['name'] = str(r.name)

        # Optional attributes are only present on the SDK object when they
        # are set on the device, hence the hasattr() guards.
        if hasattr(r, 'connectionLimit'):
            p['connection_limit'] = int(r.connectionLimit)
        if hasattr(r, 'description'):
            p['description'] = str(r.description)
        if hasattr(r, 'strict'):
            p['strict'] = str(r.strict)
        if hasattr(r, 'parent'):
            p['parent'] = r.parent
        if hasattr(r, 'vlans'):
            p['vlans'] = list(set([str(x) for x in r.vlans]))
        if hasattr(r, 'routingProtocol'):
            p['routing_protocol'] = list(set([str(x) for x in r.routingProtocol]))
        if hasattr(r, 'flowEvictionPolicy'):
            p['flow_eviction_policy'] = str(r.flowEvictionPolicy)
        if hasattr(r, 'bwcPolicy'):
            p['bwc_policy'] = str(r.bwcPolicy)
        if hasattr(r, 'servicePolicy'):
            p['service_policy'] = str(r.servicePolicy)
        return p

    def domains(self):
        """Return the fully-qualified names of all route domains."""
        result = []
        domains = self.api.tm.net.route_domains.get_collection()
        for domain in domains:
            # Just checking for the addition of the partition here for
            # different versions of BIG-IP
            if '/' + self.params['partition'] + '/' in domain.name:
                result.append(domain.name)
            else:
                full_name = '/%s/%s' % (self.params['partition'], domain.name)
                result.append(full_name)
        return result

    def create(self):
        """Create the route domain from the module params.

        Returns True on success; raises F5ModuleError on validation errors
        or if the domain does not exist after creation.
        """
        params = dict()
        params['id'] = self.params['id']
        params['name'] = self.params['name']
        params['partition'] = self.params['partition']

        partition = self.params['partition']
        description = self.params['description']
        strict = self.params['strict']
        parent = self.params['parent']
        bwc_policy = self.params['bwc_policy']
        vlans = self.params['vlans']
        routing_protocol = self.params['routing_protocol']
        connection_limit = self.params['connection_limit']
        flow_eviction_policy = self.params['flow_eviction_policy']
        service_policy = self.params['service_policy']

        if description is not None:
            params['description'] = description

        if strict is not None:
            params['strict'] = strict

        if parent is not None:
            # The parent must already exist on the device.
            parent = '/%s/%s' % (partition, parent)
            if parent in self.domains():
                params['parent'] = parent
            else:
                raise F5ModuleError(
                    "The parent route domain was not found"
                )

        if bwc_policy is not None:
            policy = '/%s/%s' % (partition, bwc_policy)
            params['bwcPolicy'] = policy

        if vlans is not None:
            params['vlans'] = []
            for vlan in vlans:
                vname = '/%s/%s' % (partition, vlan)
                params['vlans'].append(vname)

        if routing_protocol is not None:
            params['routingProtocol'] = []
            for protocol in routing_protocol:
                if protocol in PROTOCOLS:
                    params['routingProtocol'].append(protocol)
                else:
                    raise F5ModuleError(
                        "routing_protocol must be one of: %s" % (PROTOCOLS)
                    )

        if connection_limit is not None:
            params['connectionLimit'] = connection_limit

        if flow_eviction_policy is not None:
            policy = '/%s/%s' % (partition, flow_eviction_policy)
            params['flowEvictionPolicy'] = policy

        if service_policy is not None:
            policy = '/%s/%s' % (partition, service_policy)
            params['servicePolicy'] = policy

        self.api.tm.net.route_domains.route_domain.create(**params)
        exists = self.api.tm.net.route_domains.route_domain.exists(
            name=self.params['name'],
            partition=self.params['partition']
        )
        if exists:
            return True
        else:
            raise F5ModuleError(
                "An error occurred while creating the route domain"
            )

    def update(self):
        """Apply any differing parameters to the existing route domain.

        Returns True if anything changed, False otherwise. In check mode the
        change set is computed but not sent to the device.
        """
        changed = False
        params = dict()
        current = self.read()

        check_mode = self.params['check_mode']
        partition = self.params['partition']
        description = self.params['description']
        strict = self.params['strict']
        parent = self.params['parent']
        bwc_policy = self.params['bwc_policy']
        vlans = self.params['vlans']
        routing_protocol = self.params['routing_protocol']
        connection_limit = self.params['connection_limit']
        flow_eviction_policy = self.params['flow_eviction_policy']
        service_policy = self.params['service_policy']

        if description is not None:
            if 'description' in current:
                if description != current['description']:
                    params['description'] = description
            else:
                params['description'] = description

        if strict is not None:
            if strict != current['strict']:
                params['strict'] = strict

        if parent is not None:
            parent = '/%s/%s' % (partition, parent)
            if 'parent' in current:
                if parent != current['parent']:
                    params['parent'] = parent
            else:
                params['parent'] = parent

        if bwc_policy is not None:
            policy = '/%s/%s' % (partition, bwc_policy)
            if 'bwc_policy' in current:
                if policy != current['bwc_policy']:
                    params['bwcPolicy'] = policy
            else:
                params['bwcPolicy'] = policy

        if vlans is not None:
            tmp = set()
            for vlan in vlans:
                vname = '/%s/%s' % (partition, vlan)
                tmp.add(vname)
            tmp = list(tmp)
            if 'vlans' in current:
                if tmp != current['vlans']:
                    params['vlans'] = tmp
            else:
                params['vlans'] = tmp

        if routing_protocol is not None:
            tmp = set()
            for protocol in routing_protocol:
                if protocol in PROTOCOLS:
                    tmp.add(protocol)
                else:
                    raise F5ModuleError(
                        "routing_protocol must be one of: %s" % (PROTOCOLS)
                    )
            tmp = list(tmp)
            if 'routing_protocol' in current:
                if tmp != current['routing_protocol']:
                    params['routingProtocol'] = tmp
            else:
                params['routingProtocol'] = tmp

        if connection_limit is not None:
            if connection_limit != current['connection_limit']:
                params['connectionLimit'] = connection_limit

        if flow_eviction_policy is not None:
            policy = '/%s/%s' % (partition, flow_eviction_policy)
            if 'flow_eviction_policy' in current:
                if policy != current['flow_eviction_policy']:
                    params['flowEvictionPolicy'] = policy
            else:
                params['flowEvictionPolicy'] = policy

        if service_policy is not None:
            policy = '/%s/%s' % (partition, service_policy)
            if 'service_policy' in current:
                if policy != current['service_policy']:
                    params['servicePolicy'] = policy
            else:
                params['servicePolicy'] = policy

        if params:
            changed = True
            self.cparams = camel_dict_to_snake_dict(params)
            # BUG FIX: previously both branches of this conditional returned
            # here, which made the REST update below unreachable -- the
            # module reported "changed" without ever applying the change.
            # Only check mode may return early.
            if check_mode:
                return changed
        else:
            return changed

        try:
            rd = self.api.tm.net.route_domains.route_domain.load(
                name=self.params['name'],
                partition=self.params['partition']
            )
            rd.update(**params)
            rd.refresh()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(e)

        return True

    def exists(self):
        """Return True if the route domain exists on the device."""
        return self.api.tm.net.route_domains.route_domain.exists(
            name=self.params['name'],
            partition=self.params['partition']
        )

    def flush(self):
        """Run the requested state change and return the result dict."""
        result = dict()
        state = self.params['state']

        if state == "present":
            changed = self.present()
            # NOTE(review): in check mode for a not-yet-existing domain this
            # read() queries the device for an object that was never created;
            # confirm whether check mode should skip it.
            current = self.read()
            result.update(current)
        elif state == "absent":
            changed = self.absent()

        result.update(dict(changed=changed))
        return result
def main():
    """Module entry point: build the argument spec and apply the state."""
    argument_spec = f5_argument_spec()

    module_args = dict(
        id=dict(required=True, type='int'),
        description=dict(required=False, default=None),
        strict=dict(required=False, default=None, choices=STRICTS),
        parent=dict(required=False, type='int', default=None),
        partition=dict(required=False, type='str', default='Common'),
        vlans=dict(required=False, default=None, type='list'),
        routing_protocol=dict(required=False, default=None, type='list'),
        bwc_policy=dict(required=False, type='str', default=None),
        connection_limit=dict(required=False, type='int', default=None),
        flow_eviction_policy=dict(required=False, type='str', default=None),
        service_policy=dict(required=False, type='str', default=None)
    )
    argument_spec.update(module_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    try:
        route_domain = BigIpRouteDomain(check_mode=module.check_mode,
                                        **module.params)
        module.exit_json(**route_domain.flush())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
|
|
import sqlalchemy
from raggregate.models import DBSession
from raggregate.models.vote import Vote
from raggregate.models.submission import Submission
from raggregate.models.comment import Comment
from raggregate.models.epistle import Epistle
from raggregate.models.section import Section
from raggregate.queries import users
from raggregate.queries import submission
from raggregate.queries import section as section_queries
from raggregate.queries import subscribe as sub_queries
from raggregate.queries import motd as motd_queries
from raggregate.queries import notify as notify_queries
from raggregate.queries import general
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
import re
import slugify
@view_config(renderer='new_page.mak', route_name='new_page')
@view_config(renderer='new_post.mak', route_name='new_post')
def submit(request):
    """Handle the "new post" / "new page" submission form.

    GET renders the form (pre-filling url/title when arriving from a share
    button, and redirecting to an existing discussion for the same URL).
    POST validates the input, creates the Submission and the submitter's
    initial upvote, optionally indexes it in solr, then redirects home.
    """
    s = request.session
    p = request.session['safe_post']
    r = request
    qs = s['safe_get']
    s['message'] = "Post a story."
    dbsession = DBSession()
    stories = None
    sections = section_queries.get_sections()
    new_url_text = ''
    new_title_text = ''
    route_name = r.matched_route.name
    if route_name == 'new_page':
        # require admin to load a new page form
        if 'logged_in_admin' not in s or s['logged_in_admin'] == False:
            return HTTPNotFound()
    # if user came in with a share button, redirect to the existing
    # discussion if there is one
    if 'from' in qs and qs['from'] == 'button':
        existing_post = submission.get_story_by_url_oldest(qs['url'])
        if existing_post:
            return HTTPFound(r.route_url('full', sub_id=existing_post.id))
        new_url_text = qs['url']
        if 'title' in qs:
            new_title_text = qs['title']
        if 'logged_in' not in s:
            s['message'] = 'Sorry, you must <a href="{0}">log in</a> before you can share a link.'.format(r.route_url('login'))
            return {'stories': [], 'success': False, 'code': 'ENOLOGIN'}
    if p and 'title' in p:
        if 'logged_in' not in s:
            s['message'] = 'Sorry, please log in first'
            return {'stories': [], 'success': False, 'code': 'ENOLOGIN'}
        if 'section_id' not in p or p['section_id'] == '':
            return {'stories': [], 'success': False, 'code': 'ENOSECTION'}
        if 'url' in p and p['url'] != '' and p['url'] is not None:
            p['url'] = general.strip_all_html(p['url'])
            # default to http:// when no scheme was supplied
            if not re.match(r'http[s]*:\/\/', p['url']):
                p['url'] = 'http://' + p['url']
        else:
            # set to None so that NULL goes into the database
            p['url'] = None
        if route_name == 'new_page':
            render_type = p['render_type']
            slug = p['slug']
            # if we can find this slug already, kill the submission here.
            try:
                dbsession.query(Submission).filter(Submission.slug == slug).one()
            except sqlalchemy.orm.exc.NoResultFound:
                pass
            else:
                # Bug fix: the old code shadowed the session variable ``s``
                # with the query result (so ``s['message']`` would have
                # failed) and then fell through and created a duplicate page
                # anyway; report the conflict and abort instead.
                s['message'] = 'This slug is already taken.'
                return {'stories': [], 'success': False, 'code': 'EDUPSLUG'}
        else:
            slug = ''
            render_type = 'story_md'
        if 'section_id' in p:
            sub = Submission(p['title'][:100], p['description'], p['url'], s['users.id'], section = p['section_id'])
        else:
            sub = Submission(p['title'][:100], p['description'], p['url'], s['users.id'])
        sub.render_type = render_type
        # slug octet no longer derived from story's actual id
        if slug == '':
            slug = u"{title}-{uuid_first_octet}".format(
                title = slugify.slugify(unicode(p['title'][:100])),
                uuid_first_octet = str(general.gen_uuid())[:8])
        sub.slug = slug
        dbsession.add(sub)
        dbsession.flush()
        # subscribe the submitter to replies when their default allows it
        if general.check_notify_default(s['users.id'], r):
            notify_queries.create_notify(s['users.id'], sub.id, s['users.id'])
        # record the submitter's own initial upvote
        v = Vote(sub.id, s['users.id'], 1, "submission", None)
        v.direction = 1
        dbsession.add(v)
        s['message'] = "Added."
        try:
            if request.registry.solr_conn:
                # we flush here to ensure we have a valid id object when added to solr
                # we use this if statement so that the exception will be raised before
                # dbsession is flushed, hence avoiding an unnecessary flush if the site
                # is not using solr.
                dbsession.flush()
                request.registry.solr_conn.add({'id': sub.id, 'title': sub.title, 'description': sub.description})
                request.registry.solr_conn.commit()
        except AttributeError:
            # solr is not configured for this connection
            pass
        return HTTPFound(r.route_url('home'))
    return {'stories': stories, 'success': True, 'code': 0,
            'new_url_text': new_url_text, 'new_title_text': new_title_text,
            'sections': sections}
@view_config(renderer='list.mak', route_name='post')
@view_config(renderer='list.mak', route_name='list')
@view_config(renderer='list.mak', route_name='home')
def list(request):
    """Render the front-page story listing.

    Handles optional admin delete/hide operations, sort selection,
    pagination, section filtering and section (un)subscription, then
    returns the template context for list.mak.
    """
    # NOTE(review): this view shadows the builtin ``list``; renaming would
    # also require touching the route configuration, so it is left as-is.
    from raggregate.queries import user_preference as up
    s = request.session
    p = request.session['safe_post']
    r = request
    qs = s['safe_get']
    s['message'] = "Post a story."
    dbsession = DBSession()
    stories = None
    filtered_section = None
    section_found = False
    sections = section_queries.get_sections()
    direct_link = False
    # honor the user's "link directly to story" preference when logged in
    if s.get('users.id', None):
        direct_link = True if up.get_user_prefs(s['users.id']).get('link_to_story', 'off') == 'on' else False
    # admin operations arrive via query params (op=del|hide plus sub_id)
    if r.params and 'op' in r.params:
        sub_id = r.params['sub_id']
        if r.params['op'] == 'del' or r.params['op'] == 'hide':
            try:
                story_to_del = submission.get_story_by_id(sub_id)
            except sqlalchemy.orm.exc.NoResultFound:
                story_to_del = None
            if story_to_del:
                if users.is_user_allowed_admin_action(s['users.id'], str(story_to_del.id), ):
                    if r.params['op'] == 'del':
                        # soft-delete: blank the content but keep the row
                        story_to_del.description = "[deleted]"
                        story_to_del.url = "#"
                        story_to_del.title = "[deleted]"
                        story_to_del.deleted = True
                        dbsession.add(story_to_del)
                        dbsession.flush()
                    # NOTE(review): op == 'hide' currently performs no action.
                else:
                    print("Illegal deletion attempted on {0}".format(story_to_del.submitter.id))
    if 'sort.default_order' in r.registry.settings:
        sort = r.registry.settings['sort.default_order']
    else:
        # default to new sort order if server-specific setting doesn't exist
        # this should only be the case on old clones; do NOT remove default_order
        # from the ini just because you want new by default.
        sort = 'new'
    page_num = 1
    per_page = 30
    next_page = None
    prev_page = None
    # only pass through approved sort options
    if 'sort' in qs:
        if qs['sort'] == 'top':
            sort = 'top'
        if qs['sort'] == 'hot':
            sort = 'hot'
        if qs['sort'] == 'contro':
            sort = 'contro'
        if qs['sort'] == 'new':
            sort = 'new'
    if 'page_num' in qs:
        try:
            page_num = int(qs['page_num'])
        except:
            page_num = 1
    if 'section' in qs and qs['section'] == 'all':
        section = 'all'
    else:
        section = None
    if 'section' in qs and qs['section'] != 'all' and qs['section'] != '':
        section = qs['section']
        # resolve the section by name first, then fall back to lookup by id
        try:
            section = section_queries.get_section_by_name(section)
            section_found = True
        except sqlalchemy.orm.exc.NoResultFound:
            try:
                section = section_queries.get_section_by_id(section)
                section_found = True
            except:
                # abort the failed lookup's transaction before continuing
                from pyramid_tm import transaction
                transaction.abort()
                pass
        # reset section variable to None if we couldn't find the named section
        if section_found == False:
            section = None
        else:
            #if we did find something, set filtered_section so that we can
            #reference the filtered section in the template.
            filtered_section = section
    if 'subscribe' in qs and isinstance(section, Section) and 'logged_in' in s:
        if qs['subscribe'] == 'y':
            sub_way = True
        elif qs['subscribe'] == 'n':
            sub_way = False
        sub = sub_queries.create_subscription(s['users.id'], section.id, sub_way)
        s['message'] = 'Subscription to section {0} updated'.format(section.name)
    # @FIXME: make per_page configurable in a safe location
    # it is probably unwise to allow this to be set in the query string
    # because then a malicious user could say per_page = 10000000000
    # and easily launch a DoS via that mechanism.
    # if 'per_page' in qs:
    #     per_page = qs['per_page']
    stories = submission.get_story_list(page_num = page_num, per_page = per_page, sort = sort, request = request, section = section)
    max_stories = stories['max_stories']
    stories = stories['stories']
    # this should be split into its own def under queries.py
    # as it is currently used in at least one other place
    if max_stories > (page_num * per_page):
        next_page = page_num + 1
    if page_num > 1:
        prev_page = page_num - 1
    vote_dict = {}
    subscribed_to_list = []
    if 'logged_in' in s:
        vote_dict = users.get_user_votes(s['users.id'], "on_all_submissions")
        subscribed_to_list = sub_queries.get_subscribed_by_user_id(s['users.id'])
    for story in stories:
        #@TODO: Remember to not tally on every load once a real site deploys
        story.tally_votes()
        story.tally_comments()
    print "\n\nsubscribed list: {0}\n\n".format(subscribed_to_list)
    # Get message of the day
    motd = motd_queries.get_random_message()
    # NOTE(review): the template receives the raw ``section`` under the
    # 'filtered_section' key; the ``filtered_section`` local is unused here.
    return {'stories': stories, 'success': True, 'code': 0, 'vote_dict': vote_dict,
            'max_stories': max_stories, 'prev_page': prev_page, 'next_page': next_page,
            'sections': sections,
            'filtered_section': section, 'motd': motd,
            'subscribed_to_list': subscribed_to_list,
            'direct_link': direct_link}
@view_config(renderer='vote.mak', route_name='vote')
def vote(request):
    """Record an up or down vote on a submission or a comment.

    The direction comes from the ``way`` matchdict entry ('up'/'down') and
    the target from the POSTed ``sub_id``/``target_type``. Re-voting in the
    same direction is rejected; voting the opposite direction replaces the
    prior vote. Redirects back to the POSTed ``jump_to`` URL on success.
    """
    s = request.session
    p = request.session['safe_post']
    dbsession = DBSession()
    if 'logged_in' not in s:
        return {'message': 'Sorry, you are not logged in.', 'code': 'ENOLOGIN', 'success': False}
    way = request.matchdict['way']
    if way == 'up':
        points = 1
    elif way == 'down':
        points = -1
    else:
        # Bug fix: any other value of ``way`` previously left ``points``
        # unbound and crashed with UnboundLocalError below.
        return {'message': 'Unknown vote direction.', 'code': 'EBADWAY', 'success': False}
    comment_id = None
    if 'target_type' in p and p['target_type'] == 'comment':
        # the post comes in with the comment id in the sub_id spot;
        # here, we make sub_id the real parent story id
        sub_id = submission.get_comment_parent_story(p['sub_id'])[0]
        comment_id = p['sub_id']
        vote_list = dbsession.query(Vote).filter(Vote.comment_id == comment_id).filter(Vote.user_id == s['users.id']).all()
    else:
        sub_id = p['sub_id']
        vote_list = dbsession.query(Vote).filter(Vote.submission_id == p['sub_id']).filter(Vote.comment_id == None).filter(Vote.user_id == s['users.id']).all()
    # find out if the user has already voted on this submission
    if len(vote_list) > 0:
        if vote_list[0].direction == points:
            return {'message': 'You have already voted on this submission.', 'code': 'EOLDVOTE', 'success': False}
        else:
            # opposite direction: drop the old vote before adding the new one
            dbsession.delete(vote_list[0])
    v = Vote(sub_id, s['users.id'], points, p['target_type'], comment_id)
    v.direction = points
    # Removed a redundant second DBSession() instantiation here; DBSession
    # is the app's scoped session factory, so it returned the same session
    # (TODO confirm against raggregate.models).
    dbsession.add(v)
    # NOTE(review): the redirect target comes straight from the POST body;
    # consider validating jump_to to avoid an open redirect.
    return HTTPFound(p['jump_to'])
@view_config(renderer='full.mak', route_name='full')
def full(request):
    """Render a single story with its paginated, sorted comment tree.

    For logged-in users this view also processes, before rendering:
    comment deletion/editing (op=del|edit), story description edits,
    and new comment posts (including notification fan-out).
    """
    message = ''
    #@TODO: Change this to use slugs instead of literal guids
    sub_id = request.matchdict['sub_id']
    sub_id = submission.get_story_id_from_slug(sub_id)
    dbsession = DBSession()
    p = request.session['safe_post']
    prm = request.session['safe_params']
    s = request.session
    logged_in = False
    if 'logged_in' in s:
        #return {'message': 'Sorry, please log in first.', 'story': {}, 'comments': {}, 'success': False, 'code': 'ENOLOGIN'}
        logged_in = True
    # comment deletion (permission checked via is_user_allowed_admin_action)
    if 'op' in prm and prm['op'] == 'del' and logged_in:
        if 'comment_id' in prm:
            c = submission.get_comment_by_id(prm['comment_id'])
            if users.is_user_allowed_admin_action(s['users.id'], str(c.id), ):
                c.deleted = True
                dbsession.add(c)
                s['message'] = 'Comment deleted.'
    # comment editing; the else branch below handles description edits and
    # brand-new comments instead
    if 'op' in prm and prm['op'] == 'edit' and logged_in:
        if 'comment_id' in prm:
            c = submission.get_comment_by_id(prm['comment_id'])
            if users.is_user_allowed_admin_action(s['users.id'], str(c.id), ):
                c.body = prm['body']
                dbsession.add(c)
                s['message'] = 'Comment updated.'
    else:
        # story description edit
        if 'description-textarea' in request.session['safe_post'] and logged_in:
            sub = submission.get_story_by_id(sub_id)
            if users.is_user_allowed_admin_action(s['users.id'], str(sub.id)):
                sub.description = prm['description-textarea']
                dbsession.add(sub)
                s['message'] = 'Description updated.'
        # record a new comment
        if 'body' in request.session['safe_post'] and logged_in:
            # figure out whom this comment replies to
            if p['parent_type'] == 'story':
                in_reply_to = submission.get_story_by_id(p['comment_parent']).submitter.id
            elif p['parent_type'] == 'comment':
                c = submission.get_comment_by_id(p['comment_parent'])
                in_reply_to = c.user_id
            c = Comment(sub_id, s['users.id'], p['comment_parent'], prm['body'], in_reply_to = in_reply_to)
            dbsession.add(c)
            dbsession.flush()
            # if enabled default, subscribe user to own comment.
            # @TODO: make a preference for users to toggle this
            if general.check_notify_default(s['users.id'], request):
                notify_queries.create_notify(s['users.id'], c.id, s['users.id'])
            # author's automatic upvote on their own comment
            v = Vote(sub_id, s['users.id'], 1, "comment", c.id)
            v.direction = 1
            dbsession.add(v)
            notify_queries.fire_to_listeners(p['comment_parent'], s['users.id'], c.id, request)
            s['message'] = 'Comment added.'
    #@TODO: Stop using SA queries in views, move them to individual models
    story = submission.get_story_by_id(sub_id)
    story.tally_votes()
    story_vote_dict = {}
    comment_vote_dict = {}
    if logged_in:
        # see queries.py; these two should not be separate. #@FIXME
        story_vote_dict = users.get_user_votes(s['users.id'], "on_submission", sub_id)
        comment_vote_dict = users.get_user_votes(s['users.id'], "on_submissions_comments", sub_id)
    page_num = 1
    per_page = 30
    if 'sort.comment_default_order' in request.registry.settings:
        sort = request.registry.settings['sort.comment_default_order']
    else:
        # do NOT change the hardcoded default, change in the ini as above
        sort = 'top'
    next_page = None
    prev_page = None
    if 'comment_sort' in prm:
        sort = prm['comment_sort']
    if 'page_num' in prm:
        try:
            page_num = int(prm['page_num'])
        except:
            page_num = 1
    # comments returns a dict; see queries.py
    if 'comment_perma' not in prm:
        comments = submission.get_comments(sub_id, organize_parentage=True, page_num = page_num, per_page = per_page, sort = sort)
    else:
        # permalink view: focus the listing on one comment
        comments = submission.get_comments(sub_id, organize_parentage=True, page_num = page_num, per_page = per_page, sort = sort, target = 'comment', target_id = prm['comment_perma'])
    for c in comments['comments']:
        #@TODO: Don't do this on every load on a real deployment
        c.tally_votes()
        if c.deleted:
            c.body = '[deleted]'
    if page_num > 1:
        prev_page = page_num - 1
    if comments['max_comments'] > (page_num * per_page):
        next_page = page_num + 1
    return {'story': story, 'comments': comments, 'success': True, 'code': 0, 'story_vote_dict': story_vote_dict,
            'comment_vote_dict': comment_vote_dict, 'next_page': next_page, 'prev_page': prev_page,
            'render_type': story.render_type, }
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - cluster subcommand processing dispatching
"""
from __future__ import absolute_import
import sys
import os
import bigml.api
import bigmler.utils as u
import bigmler.resources as r
import bigmler.pre_model_steps as pms
import bigmler.processing.args as a
import bigmler.processing.clusters as pc
import bigmler.processing.sources as ps
import bigmler.processing.datasets as pd
from bigmler.defaults import DEFAULTS_FILE
from bigmler.centroid import centroid, remote_centroid
from bigmler.reports import clear_reports, upload_reports
from bigmler.command import get_stored_command
from bigmler.dispatcher import (SESSIONS_LOG, command_handling,
clear_log_files, get_test_dataset)
# File that stores the last issued bigmler cluster command (for --resume).
COMMAND_LOG = u".bigmler_cluster"
# Stack of output directories used by previous commands.
DIRS_LOG = u".bigmler_cluster_dir_stack"
LOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]
# Query string asking BigML for a minimal (summary-only) cluster resource.
MINIMUM_MODEL = "full=false"
DEFAULT_OUTPUT = u"centroids.csv"
def cluster_dispatcher(args=sys.argv[1:]):
    """Parses the command line and calls the different processing functions.

    Handles --clear-logs and --resume, prepares the output directory and
    session log, snapshots the defaults file into the output directory, and
    finally calls compute_output when there is training/test work to do.

    NOTE: ``args`` defaults to sys.argv[1:] captured at import time (kept
    for interface compatibility); it is only read, never mutated.
    """
    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)
    command = command_handling(args, COMMAND_LOG)
    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        # restore the stored command's context (directories, session file)
        command_args, session_file, output_dir = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
        if command_args.predictions is None:
            command_args.predictions = os.path.join(output_dir,
                                                    DEFAULT_OUTPUT)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.predictions is None:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    DEFAULT_OUTPUT)
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    command_args.predictions)
        directory = u.check_dir(command_args.predictions)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        # Snapshot the defaults file into the output directory so the run
        # is reproducible. Bug fix: use context managers so both handles
        # are closed even if read()/write() raises (they leaked before).
        try:
            with open(DEFAULTS_FILE, 'r') as defaults_file:
                contents = defaults_file.read()
            # buffering=0 (unbuffered) kept from the original behavior
            with open(os.path.join(directory, DEFAULTS_FILE),
                      'w', 0) as defaults_copy:
                defaults_copy.write(contents)
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)
    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    # Selects the action to perform
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.cluster_datasets is not None):
        output_args = a.get_output_args(api, command_args, resume)
        a.transform_args(command_args, command.flags, api,
                         command.user_defaults)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
def compute_output(api, args):
    """Create or retrieve a cluster and use it to compute centroids.

    Creates one cluster from the `training_set` (or retrieves previously
    created BigML clusters / a local cluster JSON file), optionally updates
    its shared/public state, computes centroids for the `test_set` (locally
    or as a remote batch centroid), and optionally generates per-centroid
    datasets and models.
    """
    cluster = None
    clusters = None
    # no multi-label support at present
    # variables from command-line options
    resume = args.resume_
    cluster_ids = args.cluster_ids_
    output = args.predictions
    # there's only one cluster to be generated at present
    args.max_parallel_clusters = 1
    # clusters cannot be published yet.
    args.public_cluster = False
    # It is compulsory to have a description to publish either datasets or
    # clusters
    if (not args.description_ and (args.public_cluster or
            args.public_dataset)):
        sys.exit("You should provide a description to publish.")
    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")
    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])
    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if args.cluster_file:
        # cluster is retrieved from the contents of the given local JSON file
        cluster, csv_properties, fields = u.read_local_resource(
            args.cluster_file,
            csv_properties=csv_properties)
        clusters = [cluster]
        cluster_ids = [cluster['resource']]
    else:
        # cluster is retrieved from the remote object
        clusters, cluster_ids, resume = pc.clusters_processing(
            datasets, clusters, cluster_ids, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        if clusters:
            cluster = clusters[0]
    # We update the cluster's public state if needed
    if cluster:
        # a bare id string means the full resource still has to be fetched
        # (``basestring`` keeps this module Python 2)
        if isinstance(cluster, basestring):
            if args.cluster_datasets is None and not a.has_test(args):
                query_string = MINIMUM_MODEL
            else:
                query_string = ''
            cluster = u.check_resource(cluster, api.get_cluster,
                                       query_string=query_string)
        clusters[0] = cluster
        if (args.public_cluster or
                (args.shared_flag and r.shared_changed(args.shared, cluster))):
            cluster_args = {}
            if args.shared_flag and r.shared_changed(args.shared, cluster):
                cluster_args.update(shared=args.shared)
            if args.public_cluster:
                cluster_args.update(r.set_publish_cluster_args(args))
            if cluster_args:
                cluster = r.update_cluster(cluster, cluster_args, args,
                                           api=api, path=path,
                                           session_file=session_file)
                clusters[0] = cluster
    # We get the fields of the cluster if we haven't got
    # them yet and need them
    if cluster and args.test_set:
        fields = pc.get_cluster_fields(cluster, csv_properties, args)
    # If predicting
    if clusters and (a.has_test(args) or (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)
        # Remote centroids: centroids are computed as batch centroids
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
                test_fields = pd.get_fields_structure(test_dataset,
                                                      csv_properties)
            batch_centroid_args = r.set_batch_centroid_args(
                args, fields=fields,
                dataset_fields=test_fields)
            remote_centroid(cluster, test_dataset, batch_centroid_args, args,
                            api, resume, prediction_file=output,
                            session_file=session_file, path=path, log=log)
        else:
            # local per-row centroid computation
            centroid(clusters, fields, args, session_file=session_file)
    # Optionally materialize one dataset per centroid
    if cluster and args.cluster_datasets is not None:
        cluster = api.check_resource(cluster)
        centroids_info = cluster['object']['clusters']['clusters']
        centroids = {centroid['name']: centroid['id']
                     for centroid in centroids_info}
        datasets = cluster['object']['cluster_datasets']
        if args.cluster_datasets == '':
            centroid_ids = centroids.values()
        else:
            # only centroids named by the user that lack a dataset already
            centroid_ids = [centroids[cluster_name] for cluster_name in
                            args.cluster_datasets_
                            if datasets.get(centroids[cluster_name], '') == '']
        for centroid_id in centroid_ids:
            dataset_args = {'centroid': centroid_id}
            r.create_dataset(cluster, dataset_args, args, api=api, path=path,
                             session_file=session_file, log=log,
                             dataset_type='cluster')
    # Optionally build one model per centroid
    if cluster and args.cluster_models is not None:
        cluster = api.check_resource(cluster)
        centroids_info = cluster['object']['clusters']['clusters']
        centroids = {centroid['name']: centroid['id']
                     for centroid in centroids_info}
        models = cluster['object']['cluster_models']
        if args.cluster_models == '':
            centroid_ids = centroids.values()
        else:
            # only centroids named by the user that lack a model already
            centroid_ids = [centroids[cluster_name] for cluster_name in
                            args.cluster_models_
                            if models.get(centroids[cluster_name], '') == '']
        for centroid_id in centroid_ids:
            model_args = {'centroid': centroid_id}
            r.create_model(cluster, model_args, args, api=api, path=path,
                           session_file=session_file, log=log,
                           model_type='cluster')
    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
|
|
# Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import netaddr
import six
from oslo_log import log
from oslo_vmware import exceptions as vmware_exceptions
from networking_vsphere._i18n import _LI, _LW
from networking_vsphere.common import constants as dvs_const
from networking_vsphere.utils import dvs_util
from networking_vsphere.utils import spec_builder
LOG = log.getLogger(__name__)
# Keys of a neutron security-group rule dict that determine the rule's
# identity for caching purposes (see _get_rule_hash).
HASHED_RULE_INFO_KEYS = [
    'source_ip_prefix',
    'dest_ip_prefix',
    'protocol',
    'direction',
    'ethertype',
    'port_range_min',
    'port_range_max',
    'source_port_range_min',
    'source_port_range_max'
]
class PortConfigSpecBuilder(spec_builder.SpecBuilder):
    """Spec builder that can additionally mint DVS traffic-rule specs."""

    def __init__(self, spec_factory):
        super(PortConfigSpecBuilder, self).__init__(spec_factory)
        # Template rule object; traffic_rule() hands out shallow copies of it.
        self.rule_obj = self.factory.create('ns0:DvsTrafficRule')

    def traffic_rule(self):
        """Return a fresh (shallow-copied) DvsTrafficRule spec."""
        return copy.copy(self.rule_obj)

    def create_spec(self, spec_type):
        """Create an arbitrary spec object of the given *spec_type*."""
        return self.factory.create(spec_type)
@six.add_metaclass(abc.ABCMeta)
class TrafficRuleBuilder(object):
    """Base builder for a single DVS traffic-rule spec.

    Subclasses fix the packet ``direction`` and decide whether the
    ``cidr``/port setters map to the source or the destination side of the
    IP qualifier. ``reverse()`` produces the complementary rule so that the
    stateless DVS filter also admits return traffic.
    """
    # action spec type; DropAllRule overrides this with a drop action
    action = 'ns0:DvsAcceptNetworkRuleAction'
    direction = 'both'
    # subclasses assign the class used to build the reversed rule
    reverse_class = None
    _backward_port_range = (None, None)
    _port_range = (None, None)
    def __init__(self, spec_builder, ethertype, protocol, name=None):
        self.spec_builder = spec_builder
        self.rule = spec_builder.traffic_rule()
        self.rule.action = self.spec_builder.create_spec(self.action)
        self.ip_qualifier = self.spec_builder.create_spec(
            'ns0:DvsIpNetworkRuleQualifier')
        self.ethertype = ethertype
        if ethertype:
            # default to match-any addresses for the given IP version
            any_ip = '0.0.0.0/0' if ethertype == 'IPv4' else '::/0'
            self.ip_qualifier.sourceAddress = self._cidr_spec(any_ip)
            self.ip_qualifier.destinationAddress = self._cidr_spec(any_ip)
        self.protocol = protocol
        if protocol:
            # translate the protocol name to its number when known
            int_exp = self.spec_builder.create_spec('ns0:IntExpression')
            int_exp.value = dvs_const.PROTOCOL.get(protocol, protocol)
            int_exp.negate = 'false'
            self.ip_qualifier.protocol = int_exp
        self.name = name
    def reverse(self, cidr_bool):
        """Returns reversed rule"""
        name = 'reversed' + ' ' + (self.name or '')
        rule = self.reverse_class(self.spec_builder, self.ethertype,
                                  self.protocol, name=name.strip())
        if cidr_bool:
            # NOTE(review): ``self.type`` is only assigned for icmp rules
            # (see _create_rule); this branch relies on protocol ==
            # 'ipv6-icmp' guaranteeing it is set — confirm.
            if (self.ethertype == 'IPv6' and self.protocol == 'ipv6-icmp' and
                self.type == 134):
                LOG.error(str(self.type))
                rule.cidr = 'FF02::2/128'
            else:
                rule.cidr = self.cidr
        else:
            rule.cidr = '0.0.0.0/0'
        # swap forward/backward port ranges for the return direction
        rule.port_range = self.backward_port_range
        rule.backward_port_range = self.port_range
        return rule
    def build(self, sequence):
        """Finalize and return the underlying rule spec at *sequence*."""
        self.rule.qualifier = [self.ip_qualifier]
        self.rule.direction = self.direction
        self.rule.sequence = sequence
        # prefix the name with the sequence number for readable descriptions
        self.name = str(sequence) + '. ' + (self.name or '')
        self.name = self.name.strip()
        self.rule.description = self.name.strip()
        return self.rule
    @property
    def port_range(self):
        # forward (rule-side) port range; set by subclasses
        return self._port_range
    @property
    def backward_port_range(self):
        # reverse-direction port range; set by subclasses
        return self._backward_port_range
    @property
    def cidr(self):
        return self._cidr
    def _port_range_spec(self, begin, end):
        # single port and range use different spec types
        if begin == end:
            result = self.spec_builder.create_spec('ns0:DvsSingleIpPort')
            result.portNumber = begin
        else:
            result = self.spec_builder.create_spec('ns0:DvsIpPortRange')
            result.startPortNumber = begin
            result.endPortNumber = end
        return result
    def _cidr_spec(self, cidr):
        # split "a.b.c.d/len" into address prefix + prefix length
        cidr = netaddr.IPNetwork(cidr)
        result = self.spec_builder.create_spec('ns0:IpRange')
        result.addressPrefix = str(cidr.ip)
        result.prefixLength = str(cidr.prefixlen)
        return result
    def _has_port(self, min_port):
        """Return True when *min_port* should produce a port qualifier."""
        if min_port:
            if self.protocol == 'icmp' or self.protocol == 'ipv6-icmp':
                LOG.info(_LI('Vmware dvs driver does not support '
                             '"type" and "code" for ICMP/ipv6-icmp protocol.'))
                return False
            else:
                return True
        else:
            return False
class IngressRule(TrafficRuleBuilder):
    """Rule matching packets flowing into the port; reversed as egress."""

    direction = 'incomingPackets'

    def __init__(self, spec_builder, ethertype, protocol, name=None):
        super(IngressRule, self).__init__(
            spec_builder, ethertype, protocol, name)
        self.reverse_class = EgressRule

    @TrafficRuleBuilder.port_range.setter
    def port_range(self, range_):
        # Forward range constrains the destination port(s).
        self._port_range = range_
        begin, end = range_
        if begin:
            self.ip_qualifier.destinationIpPort = self._port_range_spec(
                begin, end)

    @TrafficRuleBuilder.backward_port_range.setter
    def backward_port_range(self, range_):
        # Backward range constrains the source port(s).
        self._backward_port_range = range_
        begin, end = range_
        if begin:
            self.ip_qualifier.sourceIpPort = self._port_range_spec(begin, end)

    @TrafficRuleBuilder.cidr.setter
    def cidr(self, cidr):
        # Ingress rules constrain the source address.
        self._cidr = cidr
        if cidr:
            self.ip_qualifier.sourceAddress = self._cidr_spec(cidr)
class EgressRule(TrafficRuleBuilder):
    """Rule matching packets leaving the port; reversed as ingress."""

    direction = 'outgoingPackets'

    def __init__(self, spec_builder, ethertype, protocol, name=None):
        super(EgressRule, self).__init__(
            spec_builder, ethertype, protocol, name)
        self.reverse_class = IngressRule

    @TrafficRuleBuilder.port_range.setter
    def port_range(self, range_):
        # Forward range constrains the destination port(s).
        self._port_range = range_
        begin, end = range_
        if begin:
            self.ip_qualifier.destinationIpPort = self._port_range_spec(
                begin, end)

    @TrafficRuleBuilder.backward_port_range.setter
    def backward_port_range(self, range_):
        # Backward range constrains the source port(s).
        self._backward_port_range = range_
        begin, end = range_
        if begin:
            self.ip_qualifier.sourceIpPort = self._port_range_spec(begin, end)

    @TrafficRuleBuilder.cidr.setter
    def cidr(self, cidr):
        # Egress rules constrain the destination address.
        self._cidr = cidr
        if cidr:
            self.ip_qualifier.destinationAddress = self._cidr_spec(cidr)
class DropAllRule(TrafficRuleBuilder):
    """Catch-all rule: base qualifier (match-any) with a drop action."""
    action = 'ns0:DvsDropNetworkRuleAction'
@dvs_util.wrap_retry
def update_port_rules(dvs, ports):
    """Reconfigure security-group rules on the DVS ports backing *ports*.

    Ports without a known dvs_port_key in their vif details are skipped.
    "Object not found" errors from vCenter are ignored (the port is gone);
    any other VIM error is wrapped and re-raised.
    """
    # Bug fix: the error path referenced a bare ``exceptions`` name that was
    # never imported, raising NameError instead of the intended wrapped
    # exception; import the package's exceptions module here.
    from networking_vsphere.common import exceptions
    try:
        builder = PortConfigSpecBuilder(dvs.connection.vim.client.factory)
        port_config_list = []
        # shared across ports so identical rule sets are only built once
        hashed_rules = {}
        for port in ports:
            key = port.get('binding:vif_details', {}).get('dvs_port_key')
            if key:
                port_config = port_configuration(
                    builder, key, port['security_group_rules'], hashed_rules)
                port_config_list.append(port_config)
        if port_config_list:
            task = dvs.connection.invoke_api(
                dvs.connection.vim,
                'ReconfigureDVPort_Task',
                dvs._dvs,
                port=port_config_list
            )
            dvs.connection.wait_for_task(task)
    except vmware_exceptions.VimException as e:
        if 'The object or item referred to could not be found' in str(e):
            pass
        else:
            raise exceptions.wrap_wmvare_vim_exception(e)
def port_configuration(builder, port_key, sg_rules, hashed_rules):
    """Build a DVPort config spec enforcing *sg_rules* on port *port_key*.

    ``hashed_rules`` is a cross-port cache mapping a rule's identity hash
    (see _get_rule_hash) to its built (rule, reversed rule) pair, so that
    identical rules are only constructed once per reconfiguration batch;
    cache hits are shallow-copied and given fresh sequence numbers and
    descriptions. The spec ends with drop-all rules for IPv4 and IPv6.
    """
    rules = []
    seq = 0
    # reversed rules are numbered after the whole block of regular rules
    reverse_seq = len(sg_rules) * 10
    for rule_info in sg_rules:
        rule_hash = _get_rule_hash(rule_info)
        if rule_hash in hashed_rules:
            rule, reverse_rule = hashed_rules[rule_hash]
            built_rule = copy.copy(rule)
            built_reverse_rule = copy.copy(reverse_rule)
            built_rule.description = str(seq) + '. regular'
            built_rule.sequence = seq
            built_reverse_rule.description = '%s. reversed %s' % (
                str(reverse_seq), built_rule.description)
            built_reverse_rule.sequence = reverse_seq
        else:
            rule = _create_rule(builder, rule_info, name='regular')
            built_rule = rule.build(seq)
            # DHCP-client rules keep a match-any reverse CIDR (see
            # _rule_excepted); everything else reverses the rule's CIDR
            cidr_revert = not _rule_excepted(rule)
            reverse_rule = rule.reverse(cidr_revert)
            built_reverse_rule = reverse_rule.build(reverse_seq)
            hashed_rules[rule_hash] = (built_rule, built_reverse_rule)
        rules.extend([built_rule, built_reverse_rule])
        seq += 10
        reverse_seq += 10
    # finish with explicit drop-all rules for both IP versions
    seq = len(rules) * 10
    rules.append(DropAllRule(builder, 'IPv4', None,
                             name='drop all').build(seq))
    seq += 10
    rules.append(DropAllRule(builder, 'IPv6', None,
                             name='drop all').build(seq))
    filter_policy = builder.filter_policy(rules)
    setting = builder.port_setting()
    setting.filterPolicy = filter_policy
    spec = builder.port_config_spec(setting=setting)
    spec.key = port_key
    return spec
def _rule_excepted(rule):
if rule.direction == 'incomingPackets' and rule.protocol == 'udp':
if (rule.ethertype == 'IPv4' and rule.port_range == (68, 68) and
rule.backward_port_range == (67, 67)):
return True
if (rule.ethertype == 'IPv6' and rule.port_range == (546, 546) and
rule.backward_port_range == (547, 547)):
return True
return False
def _get_rule_hash(rule):
    """Return a deterministic identity string for a security-group rule.

    Only the keys listed in HASHED_RULE_INFO_KEYS participate; keys are
    visited in sorted order so equal rules always map to the same string.
    """
    return ','.join('%s:%s' % (key, rule[key])
                    for key in sorted(rule)
                    if key in HASHED_RULE_INFO_KEYS)
def _create_rule(builder, rule_info, ip=None, name=None):
    """Instantiate an Ingress/Egress rule builder from a neutron SG rule.

    *ip*, when given, overrides the rule's own CIDR prefix.
    """
    protocol = rule_info.get('protocol')
    if rule_info['direction'] == 'ingress':
        rule_class = IngressRule
        cidr = rule_info.get('source_ip_prefix')
    else:
        rule_class = EgressRule
        cidr = rule_info.get('dest_ip_prefix')
    rule = rule_class(
        spec_builder=builder,
        ethertype=rule_info['ethertype'],
        protocol=protocol,
        name=name
    )
    rule.cidr = ip or cidr
    if protocol in ('tcp', 'udp'):
        # Forward range comes from the rule itself; the reverse (source)
        # range defaults to the ephemeral port window.
        rule.port_range = (rule_info.get('port_range_min'),
                           rule_info.get('port_range_max'))
        rule.backward_port_range = (
            rule_info.get(
                'source_port_range_min') or dvs_const.MIN_EPHEMERAL_PORT,
            rule_info.get(
                'source_port_range_max') or dvs_const.MAX_EPHEMERAL_PORT)
    if protocol in ('ipv6-icmp', 'icmp'):
        # For ICMP rules neutron carries the ICMP "type" in
        # source_port_range_min.
        rule.type = rule_info.get('source_port_range_min')
    return rule
|
|
# -*- coding: utf-8 -*-
'''
Create adjacency matrices and analyse terms dynamically
'''
# Progress banner emitted when the script starts.
print('Create dynamic adjacency matrices and ESOMs')
#--------------------------------------------
# run create_Info_Files.py before running this script
#--------------------------------------------
import pickle, time, igraph, glob, os, somoclu, collections
import itertools, codecs, seaborn, math, pprint, random, re
from matplotlib import rc
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import interactive
from scipy.spatial import distance
from matplotlib.pyplot import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.colors as colors
import seaborn as sns
import sklearn.cluster as clusterAlgs
#--------------------------------------------
print(time.asctime( time.localtime(time.time()) ))
t = time.time()
# Input/output directory layout for the dynamic (per-timeslot) analysis.
edgeReadPath = './data/artworks_edges/dynamic'
adjMatWritePath = './data/artworks_adjacencyMats/dynamic'
distMatWritePath = './data/artworks_distanceMats/dynamic'
potentMatWritePath = './data/artworks_potentialMats/dynamic'
gravMatWritePath = './data/artworks_gravityMats/dynamic'
umatrixWritePath = './data/artworks_UMX/dynamic'
figWritePath = './data/artworks_figs/dynamic'
greenwichFigWritePath = figWritePath+'/greenwich'
greenwichUmatrixWritePath = umatrixWritePath+'/greenwich'
gephiWritePath = './data/artworks_gephi/dynamic'
statsWritePath = './data/artworks_stats'
# Create every output directory that does not exist yet.
if not os.path.exists('./data/artworks_tmp'):
    os.makedirs('./data/artworks_tmp')
if not os.path.exists(adjMatWritePath):
    os.makedirs(adjMatWritePath)
if not os.path.exists(distMatWritePath):
    os.makedirs(distMatWritePath)
if not os.path.exists(potentMatWritePath):
    os.makedirs(potentMatWritePath)
# BUG FIX: this makedirs was unconditional and raised FileExistsError on
# every rerun once the directory existed; guard it like its siblings.
if not os.path.exists(gravMatWritePath):
    os.makedirs(gravMatWritePath)
if not os.path.exists(umatrixWritePath):
    os.makedirs(umatrixWritePath)
if not os.path.exists(figWritePath):
    os.makedirs(figWritePath)
if not os.path.exists(gephiWritePath):
    os.makedirs(gephiWritePath)
if not os.path.exists(greenwichFigWritePath):
    os.makedirs(greenwichFigWritePath)
if not os.path.exists(greenwichUmatrixWritePath):
    os.makedirs(greenwichUmatrixWritePath)
# Analysis configuration: taxonomy levels, fonts, and year periods.
# The commented alternatives show the full sweep; only lvlA/2000s is active.
LVLs = ['lvlA']#['lvl1','lvl2','lvl3','lvlA'] #'lvl1','lvl2','lvl3',
heatmapFonts = [4]#[12,7,6,4]#12,7,6,
yearPeriods = ['2000s'] #['1800s','2000s']
trueYearsIni = [1964]#[1800,1964]
# Mapping from term identifiers to Wmatrix codes/labels (built elsewhere).
termLabelDict = pickle.load(open('./data/artworks_verification_labels/WmatrixLabelDict.pck','rb'))
def recRank(mylist):
    """Reciprocal Rank Fusion score for a list of 0-based rank positions.

    Each rank r contributes 1/(k + r + 1) with damping constant k = 20;
    the fused score is the sum of the contributions.
    """
    return sum(1.0 / (21 + rank) for rank in mylist)
def toroidDistance(myarray, width, height):
    """All-pairs Euclidean distances on a width x height torus.

    Each axis difference wraps around the grid edge; the condensed
    pairwise distances are returned expanded to a square matrix via
    scipy's squareform.
    """
    def _wrap(delta, span):
        # Shortest separation along one axis of the torus.
        return min(abs(delta), span - abs(delta))

    condensed = []
    for idx, pt in enumerate(myarray[:-1]):
        for other in myarray[idx + 1:]:
            dx = _wrap(pt[0] - other[0], width)
            dy = _wrap(pt[1] - other[1], height)
            condensed.append(np.sqrt(dx ** 2 + dy ** 2))
    return distance.squareform(np.array(condensed))
def toroidDistanceSingle(coords1, coords2, width, height):
    """Euclidean distance between two points on a width x height torus."""
    dx = abs(coords1[0] - coords2[0])
    dy = abs(coords1[1] - coords2[1])
    dx = min(dx, width - dx)
    dy = min(dy, height - dy)
    return np.sqrt(dx ** 2 + dy ** 2)
def toroidCoordinateFinder(coorx, distx, coory, disty, w, h):
    """Shift (coorx, coory) by (distx, disty), wrapping on a w x h torus."""
    def _shift(coord, dist, span):
        moved = coord + dist
        if moved >= span:
            return moved - span
        if moved < 0:
            return span + moved
        return moved

    return (_shift(coorx, distx, w), _shift(coory, disty, h))
# Main sweep: one pass per taxonomy level and year period. Each pass loads
# (or rebuilds) the per-timeslot edge lists, builds an undirected weighted
# term graph per timeslot, extracts centrality measures, writes adjacency
# matrices, and trains a toroid SOM incrementally across timeslots.
for lIdx,lvl in enumerate(LVLs):
    heatMapFont = heatmapFonts[lIdx]
    for idy,years in enumerate(yearPeriods):
        files = glob.glob(edgeReadPath+'/'+years+lvl+'_*.csv')
        files.sort(key=lambda x: os.path.getmtime(x))
        # Cached edge dictionary: rebuilt from the CSVs only when the pickle
        # is missing/unreadable. NOTE(review): the bare except also swallows
        # unrelated errors (e.g. a corrupt pickle) silently.
        try:
            edgeDict = pickle.load(open('./data/artworks_tmp/edgeDictDynamic'+years+lvl+'.pck','rb'))
        except:
            edgeDict = {'uniquePersistentTerms':[]}
            termsYears = []
            for filename in files:
                # periodIdx is the timeslot id embedded in the filename
                # (text between '<lvl>_' and '.csv').
                periodIdx = filename[filename.index(lvl)+5:-4]
                tmpTerms = []
                edgeDict[periodIdx] = {}
                with codecs.open(filename, 'r','utf8') as f:
                    # print(filename)
                    adjList = []
                    next(f)
                    for line in f:
                        line = line.split(',')
                        tripletuple = line[:2]
                        tmpTerms.extend(tripletuple)
                        tripletuple.append(int(line[2].strip()))
                        adjList.append(tuple(tripletuple))
                edgeDict[periodIdx]['adjList'] = adjList
                termsYears.append(list(set(tmpTerms)))
                print('There are %s unique nodes for period %s' %(len(termsYears[-1]),periodIdx))
            # Terms present in every timeslot form the persistent vocabulary.
            repetitiveTerms = collections.Counter(list(itertools.chain.from_iterable(termsYears)))
            edgeDict['allTerms'] = list(repetitiveTerms.keys())
            edgeDict['uniquePersistentTerms'] = [x for x,v in repetitiveTerms.items() if v == len(files)]
            edgeDict['uniquePersistentTerms'].sort()
            pass
        with open(statsWritePath+'/'+years+lvl+'_unique_persistent_terms.txt','w') as f:
            for word in edgeDict['uniquePersistentTerms']:
                f.write(word+'\n')
        statement = ('For %s in the %s there are %s unique persistent terms globally out of %s unique terms' %(lvl,years,len(edgeDict['uniquePersistentTerms']),len(edgeDict['allTerms'])))
        time.sleep(5)
        print(statement)
        '''set up SOM'''#--------------------------------------------------------------------
##        n_columns, n_rows = 200, 120
##        lablshift = 1
        # SOM grid size and the label-offset unit scale with the level.
        if lvl == 'lvl1':
            n_columns, n_rows = 20, 12
            lablshift = .2
        elif lvl == 'lvl2':
            n_columns, n_rows = 40, 24
            lablshift = .3
        elif lvl == 'lvl3':
            n_columns, n_rows = 50, 30
            lablshift = .4
        elif lvl == 'lvlA':
            n_columns, n_rows = 60, 40
            lablshift = .5
        epochs2 = 3
        som = somoclu.Somoclu(n_columns, n_rows, maptype="toroid", initialization="pca")
        savefig = True
        SOMdimensionsString = 'x'.join([str(x) for x in [n_columns,n_rows]])
        #--------------------------------------------------------------------------------
        yearList = []
        count = 0
        # Per-term lists of rank positions, one entry per timeslot; fused
        # with recRank() after the timeslot loop.
        termPrRanks, termAuthRanks, termHubRanks, termBetweenRanks = {}, {}, {}, {}
        for filename in files:
            periodIdx = filename[filename.index(lvl)+5:-4]
##            if periodIdx != '7':
##                continue
            yearList.append(periodIdx)
            print(periodIdx)
            # try:
            #     gUndirected = edgeDict[periodIdx]['graph']
            # except:
            gUndirected=igraph.Graph.Full(0, directed = False)
            gUndirected.es['weight'] = 1
            '''ReRanking the nodes based on their reciprocal rank between timeslots'''
            # Vertex order: a previous run's fused PageRank ranking when the
            # cached dict has it, else the alphabetical persistent-term list.
            try:
                gUndirected.add_vertices(edgeDict['topTermsByPR'])
                print('used top Terms By PageRank')
                # print(edgeDict['topTermsByPR'][:5])
            except:
                gUndirected.add_vertices(edgeDict['uniquePersistentTerms'])
                print('used alphabetically ranked terms')
                pass
            myEdges,myWeights = [], []
            nodesWithEdges = []
            # Keep only edges whose endpoints are both persistent terms.
            for x in edgeDict[periodIdx]['adjList']:
                if x[0] in edgeDict['uniquePersistentTerms'] and x[1] in edgeDict['uniquePersistentTerms']:
                    myEdges.append((x[0],x[1]))
                    myWeights.append(x[2])
                    nodesWithEdges.extend(x[:2])
            print('Full No of edges: %s and pruned No of edges %s' %(len(edgeDict[periodIdx]['adjList']),len(myEdges)))
            gUndirected.add_edges(myEdges)
            gUndirected.es["weight"] = myWeights
            edgeDict[periodIdx]['graph'] = gUndirected
            gUndirected.vs['label'] = gUndirected.vs['name']
            nodes = gUndirected.vs['name']
            # print(nodes[:5])
            #--------------------------------------------------------------------------------
            '''Extract centrality measures'''#-----------------------------------------------
            #--------------------------------------------------------------------------------
            edgeDict[periodIdx]['term'] = {'degree':{},'pageRank':{},'maxnormPageRank':{}, 'minnormPageRank':{}, 'authority':{}, 'hub':{}, 'betweenness':{}}
            pageRank = gUndirected.pagerank(weights = 'weight', directed=False)
            authority = gUndirected.authority_score(weights = 'weight') #HITS authority score
            hub = gUndirected.hub_score(weights = 'weight')#HITS hub score
            betweenness = gUndirected.betweenness(weights = 'weight', directed = False)
            # print('extracted pagerank')
            maxPR = max(pageRank)
            maxnormPageRank = [x/maxPR for x in pageRank]
            minPR = min(pageRank)
            minnormPageRank = [x/minPR for x in pageRank]
            maxminPr = max(minnormPageRank)
            minmaxPRdiff = maxPR-minPR
            # Min-max rescaling of PageRank into [1, 4]; used for marker areas.
            # NOTE(review): divides by zero if all PageRanks are equal.
            minmaxnormPageRank = [1+3*((x-minPR)/minmaxPRdiff) for x in pageRank]
            for x in nodes:
                edgeDict[periodIdx]['term']['pageRank'][x] = pageRank[nodes.index(x)]
                edgeDict[periodIdx]['term']['maxnormPageRank'][x] = maxnormPageRank[nodes.index(x)]
                edgeDict[periodIdx]['term']['minnormPageRank'][x] = minnormPageRank[nodes.index(x)]
                edgeDict[periodIdx]['term']['degree'][x] = gUndirected.degree(x)
                edgeDict[periodIdx]['term']['authority'][x] = authority[nodes.index(x)]
                edgeDict[periodIdx]['term']['hub'][x] = hub[nodes.index(x)]
                edgeDict[periodIdx]['term']['betweenness'][x] = betweenness[nodes.index(x)]
            # Per-timeslot rank positions (ties broken by degree, then name),
            # appended per term for the reciprocal-rank fusion below.
            tmpPRrank = sorted(edgeDict[periodIdx]['term']['pageRank'], key=lambda k: [edgeDict[periodIdx]['term']['pageRank'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
            for x in nodes:
                if x not in termPrRanks:
                    termPrRanks[x] = [tmpPRrank.index(x)]
                else:
                    termPrRanks[x].append(tmpPRrank.index(x))
            tmpAuthrank = sorted(edgeDict[periodIdx]['term']['authority'], key=lambda k: [edgeDict[periodIdx]['term']['authority'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
            for x in nodes:
                if x not in termAuthRanks:
                    termAuthRanks[x] = [tmpAuthrank.index(x)]
                else:
                    termAuthRanks[x].append(tmpAuthrank.index(x))
            tmpHubrank = sorted(edgeDict[periodIdx]['term']['hub'], key=lambda k: [edgeDict[periodIdx]['term']['hub'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
            for x in nodes:
                if x not in termHubRanks:
                    termHubRanks[x] = [tmpHubrank.index(x)]
                else:
                    termHubRanks[x].append(tmpHubrank.index(x))
            tmpBetweenrank = sorted(edgeDict[periodIdx]['term']['betweenness'], key=lambda k: [edgeDict[periodIdx]['term']['betweenness'][k],edgeDict[periodIdx]['term']['degree'][k],k],reverse =True)
            for x in nodes:
                if x not in termBetweenRanks:
                    termBetweenRanks[x] = [tmpBetweenrank.index(x)]
                else:
                    termBetweenRanks[x].append(tmpBetweenrank.index(x))
            # -----------------------------------------------------------------------------------------------
            #-----------------------------------------------------------------------------------------------
            '''creating undirected adjacency mat'''#--------------------------------------------------------
            #-----------------------------------------------------------------------------------------------
            if not os.path.exists(adjMatWritePath):
                os.makedirs(adjMatWritePath)
            print('creating adjacency matrix')
            adjMat = gUndirected.get_adjacency(attribute='weight')
            adjMat = np.array(adjMat.data)
            print('writing undirected adjacency matrix to file')
            with open(adjMatWritePath+'/AdjMat'+years+lvl+'_'+periodIdx+'.txt', 'w') as d:
                d.write('Term\t'+'\t'.join(nodes)+'\n')
                for s in nodes:
                    distLine = [str(x) for x in adjMat[nodes.index(s)].tolist()]
                    d.write(s+'\t'+'\t'.join(distLine)+'\n')
            #-----------------------------------------------------------------------------------------------
            # ------------------------------------------------------------------------------------------------------------------------
            '''SOM data extraction from here on------------------------------------------------------------------------------------'''
            # ------------------------------------------------------------------------------------------------------------------------
            '''Extract Self Organizing Maps of undirected weighted adj mats'''#change filename depending on labeled or numbered terms
            nummedOrNot = ''#'nummed' are the labels numbers or text (leave blank)?
            labelFormat = 'code' #switch terms by Wmatrix code or label?
            df = pd.read_table(adjMatWritePath+'/'+nummedOrNot+'AdjMat'+years+lvl+'_'+periodIdx+'.txt', sep="\t", header=0,index_col=0)
            # Column-wise max-normalization; zero columns are left untouched.
            dfmax = df.max()
            dfmax[dfmax == 0] = 1
            df = df / dfmax
            originallabels = df.index.tolist()
            # print(originallabels[:5])
            labels = originallabels # labels = [termLabelDict[nodes[x]][labelFormat] for x in originallabels] #switch terms by Wmatrix code or label?
            som.update_data(df.values)
            # NOTE(review): the SVD result (U, s, V) is never used below.
            U, s, V = np.linalg.svd(df.values, full_matrices=False)
            # First timeslot trains from scratch; later timeslots fine-tune
            # the previous map (smaller radius/scale, fewer epochs).
            if periodIdx == yearList[0]:
                epochs = 10
                radius0 = 0
                scale0 = 0.1
            else:
                radius0 = n_rows//5
                scale0 = 0.03
                epochs = epochs2
            #-------clustering params---------------
            # algorithm = clusterAlgs.SpectralClustering()
            clusterAlgLabel = 'KMeans8'# KMeans8 , SpectralClustering
            #---------------------------------------
            if savefig:
                if not os.path.exists(figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)):
                    os.makedirs(figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2))
                SOMfilename = figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/SOM_'+nummedOrNot+'AdjMat'+years+lvl+'_'+periodIdx+'.png'
                SOMfilenameNoLabels = figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/noLabelsSOM_AdjMat'+years+lvl+'_'+periodIdx+'.png'
                # SOMfilenameNoBMUs = figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/noBMUsSOM_AdjMat'+years+lvl+'_'+periodIdx+'.png'
            else:
                SOMfilename = None
            som.train(epochs=epochs, radius0=radius0, scale0=scale0)
            #----------------------clustering-----------
            # NOTE(review): `algorithm` is only defined in the commented-out
            # line above, so the NameError always triggers the K-means fallback.
            try:
                som.cluster(algorithm=algorithm)
                print('Clustering algorithm employed: %s' %clusterAlgLabel)
            except:
                som.cluster()
                print('Clustering algorithm employed: K-means with 8 centroids')
                pass
            #----------------------clustering-----------
            rc('font', **{'size': 11}); figsize = (20, 20/float(n_columns/n_rows))
            som.view_umatrix(figsize = figsize, colormap="Spectral_r", bestmatches=True, labels=labels,filename=SOMfilename)
            plt.close()
            som.view_umatrix(figsize = figsize, colormap="Spectral_r", bestmatches=True, filename=SOMfilenameNoLabels)
            plt.close()
            # som.view_umatrix(figsize = figsize, colormap="Spectral_r", filename=SOMfilenameNoBMUs)
            # plt.close()
            edgeDict[periodIdx]['somCoords'] = {SOMdimensionsString:som.bmus}
            # One cluster id per term, read off the trained map at its BMU.
            colors = []
            for bm in som.bmus:
                colors.append(som.clusters[bm[1], bm[0]])
            # areas = [200]*len(som.bmus)
            areas = [x*70 for x in minmaxnormPageRank]
            #-----------------------------------------------------------------------------------------------
            #-----------------------------------------------------------------------------------------------
            '''write and show the umatrix (umx)'''#---------------------------------------------------------
            #-----------------------------------------------------------------------------------------------
##            somUmatrix = edgeDict[periodIdx]['somUmatrix'][SOMdimensionsString]
##            print('writing umatrix to file')
##            np.savetxt(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.umx',somUmatrix,delimiter='\t', newline='\n',header='% '+ '%s %s'%(n_rows,n_columns))
##
##            print('writing BMU coords to file')
##            with open(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.bm','w') as f:
##                with open(umatrixWritePath+'/umx'+years+lvl+'_'+periodIdx+'.names','w') as fn:
##                    f.write('% '+'%s %s\n' %(n_rows,n_columns))
##                    fn.write('% '+str(len(nodes))+'\n')
##                    for idx,coos in enumerate(edgeDict[periodIdx]['somCoords'][SOMdimensionsString]):
##                        f.write('%s %s %s\n' %(idx,coos[1],coos[0]))
##                        fn.write('%s %s %s\n' %(idx,nodes[idx],nodes[idx]))
##
##            print('plotting umatrix 3D surface')
##            fig = plt.figure()
##            ax = fig.gca(projection='3d')
##            X = np.arange(0, n_columns, 1)
##            Y = np.arange(0, n_rows, 1)
##            X, Y = np.meshgrid(X, Y)
##            N=somUmatrix/somUmatrix.max()
##            surf = ax.plot_surface(X, Y, somUmatrix, facecolors=cm.jet(N),rstride=1, cstride=1)#,facecolors=cm.jet(somUmatrix) cmap=cm.coolwarm, linewidth=0, antialiased=False)
##            m = cm.ScalarMappable(cmap=cm.jet)
##            m.set_array(somUmatrix)
##            plt.colorbar(m, shrink=0.5, aspect=5)
##            plt.title('SOM umatrix 3D surface vizualization (Level '+lvl+' terms | 5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
##            mng = plt.get_current_fig_manager()
##            mng.window.state('zoomed')
##            interactive(True)
##            plt.show()
##            fig.savefig(figWritePath+'/SOM Umatrices/umxSurf'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
##            plt.close()
##            interactive(False)
            #-----------------------------------------------------------------------------------------------
            '''Plotting BMU coordinates with labels'''#-----------------------------------------------------
            #-----------------------------------------------------------------------------------------------
##            labelFormat = 'code'
##            fig, ax = plt.subplots()
##            xDimension = [x[0] for x in edgeDict[periodIdx]['somCoords'][SOMdimensionsString]]#[:10]]
##            yDimension = [x[1] for x in edgeDict[periodIdx]['somCoords'][SOMdimensionsString]]#[:10]]
##            plt.scatter(xDimension,yDimension, c=colors, s = areas, alpha = 0.7)
##            labels = [str(colors[x])+'_'+termLabelDict[" ".join(re.findall("[a-zA-Z]+", nodes[x]))][labelFormat] for x in range(len(xDimension))]
##            doneLabs = set([''])
##            for label, x, y in zip(labels, xDimension, yDimension):
##                lblshiftRatio = 2
##                labFinshift = ''
##                while labFinshift in doneLabs:
##                    potentialPositions = [(x, y+lablshift), (x+lblshiftRatio*lablshift, y), (x-lblshiftRatio*lablshift, y), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
##                    (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y-lblshiftRatio*lablshift), (x+lblshiftRatio*lablshift, y+lblshiftRatio*lablshift),
##                    (x-lblshiftRatio*lablshift, y+lblshiftRatio*lablshift)]
##                    for pP in potentialPositions:
##                        labFinshift = pP
##                        if labFinshift not in doneLabs:
##                            break
##                    lblshiftRatio+=1
##                doneLabs.add(labFinshift)
##                plt.annotate(label, xy = (x, y), xytext = labFinshift, textcoords = 'data', ha = 'center', va = 'center',bbox = dict(boxstyle = 'round,pad=0.1', fc = 'white', alpha = 0.4))
##                lIdx+=1
##
##            xCc = [x[1] for x in som.centroidBMcoords]
##            yCc = [x[0] for x in som.centroidBMcoords]
##            plt.scatter(xCc,yCc, c= range(len(som.centroidBMcoords)), s= [1000]*len(som.centroidBMcoords), alpha = 0.4)
##
##            plt.xlim(0,n_columns)
##            plt.ylim(0,n_rows)
##            ax.invert_yaxis()
##            plt.title('Labeled SOM. Level '+lvl+' terms, timeslot '+periodIdx+' (5 year period prior to '+str(int(periodIdx)*5+trueYearsIni[idy])+')')
##            mng = plt.get_current_fig_manager()
##            mng.window.state('zoomed')
##            interactive(True)
##            plt.show()
##            fig.savefig(figWritePath+'/Clusters/'+clusterAlgLabel+'/SOMs/'+SOMdimensionsString+'_epochs'+str(epochs2)+'/SOM_Wmatrix'+labelFormat+'LabeledAdjMat'+years+lvl+'_'+periodIdx+'.png',bbox_inches='tight')
##            plt.close()
##            interactive(False)
            #-----------------------------------------------------------------------------------------------
            '''pageRank and HITS term fluctuation'''
            numOfPlots = [5, 10, 20]
            marker, color = ['*', '+', 'o','d','h','p','s','v','^','d'], ['g','r','m','c','y','k']#line, ["-","--","-.",":"] #list(colors.cnames.keys())
            marker.sort()
            color.sort()
            asmarker = itertools.cycle(marker)
            ascolor = itertools.cycle(color)
            # asline = itertools.cycle(line)
            # NOTE(review): the HITS and Betweenness directories are only
            # created when the PageRank one is missing — if PageRank exists
            # but the others do not, later writes there would fail.
            if not os.path.exists(figWritePath+'/centrality fluctuations over time/PageRank'):
                os.makedirs(figWritePath+'/centrality fluctuations over time/PageRank')
                os.makedirs(figWritePath+'/centrality fluctuations over time/HITS')
                os.makedirs(figWritePath+'/centrality fluctuations over time/Betweenness')
            # Timeslot keys are whatever remains after removing the
            # bookkeeping entries.
            allPeriods = list(edgeDict.keys())
            allPeriods.remove('uniquePersistentTerms')
            allPeriods.remove('allTerms')
            try:
                allPeriods.remove('topTermsByPR')
            except:
                pass
            allPeriods.sort()
            termPRRankDict = {}
            termPRSequences = {}
            termAuthRankDict = {}
            termAuthSequences = {}
            termHubRankDict = {}
            termHubSequences = {}
            termBetweenRankDict = {}
            termBetweenSequences = {}
            # Fuse each term's per-timeslot ranks into a single RRF score.
            for x in nodes:
                prSequence, authSequence, hubSequence, betweenSequence = [], [] ,[], []
                for p in allPeriods:
                    prSequence.append(edgeDict[p]['term']['pageRank'][x])
                    authSequence.append(edgeDict[p]['term']['authority'][x])
                    hubSequence.append(edgeDict[p]['term']['hub'][x])
                    betweenSequence.append(edgeDict[p]['term']['betweenness'][x])
                termPRSequences[x] = prSequence
                termPRRankDict[x] = recRank(termPrRanks[x])
                termAuthSequences[x] = authSequence
                termAuthRankDict[x] = recRank(termAuthRanks[x])
                termHubSequences[x] = hubSequence
                termHubRankDict[x] = recRank(termHubRanks[x])
                termBetweenSequences[x] = betweenSequence
                termBetweenRankDict[x] = recRank(termBetweenRanks[x])
            termPRRanked = sorted(termPRRankDict, key=termPRRankDict.get, reverse=True)
            termAuthRanked = sorted(termAuthRankDict, key=termAuthRankDict.get, reverse=True)
            termHubRanked = sorted(termHubRankDict, key=termHubRankDict.get, reverse=True)
            termBetweenRanked = sorted(termBetweenRankDict, key=termBetweenRankDict.get, reverse=True)
        # Persist the fused PageRank ordering so the next run can use it as
        # the vertex order (see the try/except around add_vertices above).
        edgeDict['topTermsByPR'] = termPRRanked
        pickle.dump(edgeDict,open('./data/artworks_tmp/edgeDictDynamic'+years+lvl+'.pck','wb'), protocol = 2)
elapsed = time.time() - t
print('Total time Elapsed: %.2f seconds' % elapsed)
|
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/with_padding_2s/')
from data_padding_hshv_2s import Fmat_original_hshv
from data_padding_hslv_2s import Fmat_original_hslv
from data_padding_lshv_2s import Fmat_original_lshv
from data_padding_lslv_2s import Fmat_original_lslv
# Scaling function
def scaling(mat):
    """Standardize the first and third 201-row segments of a feature matrix.

    The input stacks three 201-row segments per column:
    rows 0:201 (segment a), 201:402 (segment b), 402:603 (segment c).
    Segments a and c are z-scored (zero mean, unit std over the whole
    segment); segment b is passed through unchanged.

    NOTE(review): segment b's standardization was commented out in the
    original code — possibly unintentional, since a and c are both
    standardized. Confirm before re-enabling.

    The original also computed (and never used) per-segment max/min and
    segment-b statistics; that dead code is removed here.
    """
    Fvec_a = mat[0:201, 0:]
    Fvec_b = mat[201:402, 0:]
    Fvec_c = mat[402:603, 0:]
    # z-score segment a
    Fvec_a = (Fvec_a - np.mean(Fvec_a)) / np.std(Fvec_a)
    # segment b deliberately left unscaled (see NOTE in docstring)
    # z-score segment c
    Fvec_c = (Fvec_c - np.mean(Fvec_c)) / np.std(Fvec_c)
    # np.vstack replaces the deprecated np.row_stack alias.
    Fvec = np.vstack([Fvec_a, Fvec_b, Fvec_c])
    return Fvec
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
# Returns mu,sigma for 20 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
    # Split the two feature matrices (force and motion) into 20 equal
    # row-chunks — one per HMM hidden state — and compute per-state means
    # and a 2x2 covariance for the (force, motion) emission pair.
    # Python 2 code: integer division and print statements below.
    index = 0
    m,n = np.shape(fvec1)
    #print m,n
    mu_1 = np.zeros((20,1))
    mu_2 = np.zeros((20,1))
    cov = np.zeros((20,2,2))
    # Rows per state; Python 2 integer division (would be float in Py3).
    DIVS = m/20
    while (index < 20):
        m_init = index*DIVS
        temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
        temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
        # Flatten each chunk to a 1-D vector of DIVS*n samples.
        temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
        temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
        mu_1[index] = np.mean(temp_fvec1)
        mu_2[index] = np.mean(temp_fvec2)
        # NOTE(review): concatenate along axis 0 yields a single 1-D vector,
        # so np.cov returns a scalar variance that is broadcast into the
        # whole 2x2 slot — force/motion cross-covariance is NOT computed.
        # np.vstack (or np.cov(temp_fvec1, temp_fvec2)) may have been
        # intended; confirm before changing.
        cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
        if index == 0:
            # Debug output comparing the chunked stats to direct slices.
            print 'mean = ', mu_2[index]
            print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
            print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
            print cov[index,:,:]
            print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
            print scp.std(temp_fvec2)
        index = index+1
    return mu_1,mu_2,cov
if __name__ == '__main__':
# Scaling wrt all data
Fmat_rf = scaling(np.column_stack((Fmat_original_hshv[:,0:15],Fmat_original_hslv[:,0:15],Fmat_original_lshv[:,0:15],Fmat_original_lslv[:,0:15])))
Fmat_rm = scaling(np.column_stack((Fmat_original_hshv[:,15:30],Fmat_original_hslv[:,15:30],Fmat_original_lshv[:,15:30],Fmat_original_lslv[:,15:30])))
Fmat_sf = scaling(np.column_stack((Fmat_original_hshv[:,30:45],Fmat_original_hslv[:,30:45],Fmat_original_lshv[:,30:45],Fmat_original_lslv[:,30:45])))
Fmat_sm = scaling(np.column_stack((Fmat_original_hshv[:,45:60],Fmat_original_hslv[:,45:60],Fmat_original_lshv[:,45:60],Fmat_original_lslv[:,45:60])))
Fmat_hshv = np.matrix(np.column_stack((Fmat_rf[:,0:15],Fmat_rm[:,0:15],Fmat_sf[:,0:15],Fmat_sm[:,0:15])))
Fmat_hslv = np.matrix(np.column_stack((Fmat_rf[:,15:30],Fmat_rm[:,15:30],Fmat_sf[:,15:30],Fmat_sm[:,15:30])))
Fmat_lshv = np.matrix(np.column_stack((Fmat_rf[:,30:45],Fmat_rm[:,30:45],Fmat_sf[:,30:45],Fmat_sm[:,30:45])))
Fmat_lslv = np.matrix(np.column_stack((Fmat_rf[:,45:60],Fmat_rm[:,45:60],Fmat_sf[:,45:60],Fmat_sm[:,45:60])))
#Fmat_rf_hshv = scaling(Fmat_original_hshv[:,0:15])
#Fmat_rm_hshv = scaling(Fmat_original_hshv[:,15:30])
#Fmat_sf_hshv = scaling(Fmat_original_hshv[:,30:45])
#Fmat_sm_hshv = scaling(Fmat_original_hshv[:,45:60])
#Fmat_hshv = np.matrix(np.column_stack((Fmat_rf_hshv,Fmat_rm_hshv,Fmat_sf_hshv,Fmat_sm_hshv)))
#Fmat_rf_hslv = scaling(Fmat_original_hslv[:,0:15])
#Fmat_rm_hslv = scaling(Fmat_original_hslv[:,15:30])
#Fmat_sf_hslv = scaling(Fmat_original_hslv[:,30:45])
#Fmat_sm_hslv = scaling(Fmat_original_hslv[:,45:60])
#Fmat_hslv = np.matrix(np.column_stack((Fmat_rf_hslv,Fmat_rm_hslv,Fmat_sf_hslv,Fmat_sm_hslv)))
#Fmat_rf_lshv = scaling(Fmat_original_lshv[:,0:15])
#Fmat_rm_lshv = scaling(Fmat_original_lshv[:,15:30])
#Fmat_sf_lshv = scaling(Fmat_original_lshv[:,30:45])
#Fmat_sm_lshv = scaling(Fmat_original_lshv[:,45:60])
#Fmat_lshv = np.matrix(np.column_stack((Fmat_rf_lshv,Fmat_rm_lshv,Fmat_sf_lshv,Fmat_sm_lshv)))
#Fmat_rf_lslv = scaling(Fmat_original_lslv[:,0:15])
#Fmat_rm_lslv = scaling(Fmat_original_lslv[:,15:30])
#Fmat_sf_lslv = scaling(Fmat_original_lslv[:,30:45])
#Fmat_sm_lslv = scaling(Fmat_original_lslv[:,45:60])
#Fmat_lslv = np.matrix(np.column_stack((Fmat_rf_lslv,Fmat_rm_lslv,Fmat_sf_lslv,Fmat_sm_lslv)))
#Fmat = np.matrix(np.column_stack((Fmat_hshv,Fmat_hslv,Fmat_lshv,Fmat_lslv)))
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.05] * 20
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_force_hshv,mu_rf_motion_hshv,cov_rf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))))
mu_rm_force_hshv,mu_rm_motion_hshv,cov_rm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,15:30], Fmat_lshv[0:201,15:30], Fmat_lslv[0:201,15:30])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,15:30], Fmat_lshv[402:603,15:30], Fmat_lslv[402:603,15:30])))))
mu_sf_force_hshv,mu_sf_motion_hshv,cov_sf_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,30:45], Fmat_lshv[0:201,30:45], Fmat_lslv[0:201,30:45])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,30:45], Fmat_lshv[402:603,30:45], Fmat_lslv[402:603,30:45])))))
mu_sm_force_hshv,mu_sm_motion_hshv,cov_sm_hshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hslv[0:201,45:60], Fmat_lshv[0:201,45:60], Fmat_lslv[0:201,45:60])))), (np.matrix(np.column_stack((Fmat_hslv[402:603,45:60], Fmat_lshv[402:603,45:60], Fmat_lslv[402:603,45:60])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = [0.0]*20
B_rm_hshv = [0.0]*20
B_sf_hshv = [0.0]*20
B_sm_hshv = [0.0]*20
for num_states in range(20):
B_rf_hshv[num_states] = [[mu_rf_force_hshv[num_states][0],mu_rf_motion_hshv[num_states][0]],[cov_rf_hshv[num_states][0][0],cov_rf_hshv[num_states][0][1],cov_rf_hshv[num_states][1][0],cov_rf_hshv[num_states][1][1]]]
B_rm_hshv[num_states] = [[mu_rm_force_hshv[num_states][0],mu_rm_motion_hshv[num_states][0]],[cov_rm_hshv[num_states][0][0],cov_rm_hshv[num_states][0][1],cov_rm_hshv[num_states][1][0],cov_rm_hshv[num_states][1][1]]]
B_sf_hshv[num_states] = [[mu_sf_force_hshv[num_states][0],mu_sf_motion_hshv[num_states][0]],[cov_sf_hshv[num_states][0][0],cov_sf_hshv[num_states][0][1],cov_sf_hshv[num_states][1][0],cov_sf_hshv[num_states][1][1]]]
B_sm_hshv[num_states] = [[mu_sm_force_hshv[num_states][0],mu_sm_motion_hshv[num_states][0]],[cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]]]
print cov_sm_hshv[num_states][0][0],cov_sm_hshv[num_states][0][1],cov_sm_hshv[num_states][1][0],cov_sm_hshv[num_states][1][1]
print "----"
#print B_sm_hshv
#print mu_sm_motion_hshv
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
# --- HSHV held out: train the four class models (RF/RM/SF/SM = rigid/soft x
# fixed/movable) on the HSLV+LSHV+LSLV trials, then test on HSHV. ---
# NOTE(review): assumes each Fmat_* holds the force channel in rows 0:201 and
# the motion channel in rows 402:603, with 15 trial columns per class -- TODO confirm.
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))
total_seq_rm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,15:30], Fmat_lshv[0:201,15:30], Fmat_lslv[0:201,15:30])))
total_seq_sf_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,30:45], Fmat_lshv[0:201,30:45], Fmat_lslv[0:201,30:45])))
total_seq_sm_force_hshv = np.matrix(np.column_stack((Fmat_hslv[0:201,45:60], Fmat_lshv[0:201,45:60], Fmat_lslv[0:201,45:60])))
total_seq_rf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))
total_seq_rm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,15:30], Fmat_lshv[402:603,15:30], Fmat_lslv[402:603,15:30])))
total_seq_sf_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,30:45], Fmat_lshv[402:603,30:45], Fmat_lslv[402:603,30:45])))
total_seq_sm_motion_hshv = np.matrix(np.column_stack((Fmat_hslv[402:603,45:60], Fmat_lshv[402:603,45:60], Fmat_lslv[402:603,45:60])))
total_seq_rf_hshv = np.zeros((402,45))
total_seq_rm_hshv = np.zeros((402,45))
total_seq_sf_hshv = np.zeros((402,45))
total_seq_sm_hshv = np.zeros((402,45))
i = 0
j = 0
# Interleave force/motion rows so each trial column becomes a 2-channel
# observation sequence: even row index = force sample, odd = motion sample.
while i < 402:
    total_seq_rf_hshv[i] = total_seq_rf_force_hshv[j]
    total_seq_rf_hshv[i+1] = total_seq_rf_motion_hshv[j]
    total_seq_rm_hshv[i] = total_seq_rm_force_hshv[j]
    total_seq_rm_hshv[i+1] = total_seq_rm_motion_hshv[j]
    total_seq_sf_hshv[i] = total_seq_sf_force_hshv[j]
    total_seq_sf_hshv[i+1] = total_seq_sf_motion_hshv[j]
    total_seq_sm_hshv[i] = total_seq_sm_force_hshv[j]
    total_seq_sm_hshv[i+1] = total_seq_sm_motion_hshv[j]
    j=j+1
    i=i+2
# Transpose so each training sequence is one trial (one list per column).
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
# Baum-Welch re-estimation of each class model on its own training set.
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
# Build the interleaved force/motion test sequences from the held-out HSHV set
# (all 60 columns: 15 trials for each of the four classes, in RF,RM,SF,SM order).
total_seq_obj_hshv = np.zeros((402,60))
total_seq_obj_force_hshv = Fmat_hshv[0:201,:]
total_seq_obj_motion_hshv = Fmat_hshv[402:603,:]
i = 0
j = 0
while i < 402:
    total_seq_obj_hshv[i] = total_seq_obj_force_hshv[j]
    total_seq_obj_hshv[i+1] = total_seq_obj_motion_hshv[j]
    j=j+1
    i=i+2
# One-hot prediction indicator per test trial, one row vector per class.
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
    test_seq_obj_hshv = (np.array(total_seq_obj_hshv[:,k]).T).tolist()
    new_test_seq_obj_hshv = np.array(test_seq_obj_hshv)
    #print new_test_seq_obj_hshv
    ts_obj_hshv = new_test_seq_obj_hshv
    #print np.shape(ts_obj_hshv)
    final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
    # Find Viterbi Path
    path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
    path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
    path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
    path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
    # Classify by the maximum Viterbi log-likelihood (element [1] of each path).
    obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
    if obj_hshv == path_rf_obj_hshv[1]:
        rf_hshv[0,k] = 1
    elif obj_hshv == path_rm_obj_hshv[1]:
        rm_hshv[0,k] = 1
    elif obj_hshv == path_sf_obj_hshv[1]:
        sf_hshv[0,k] = 1
    else:
        sm_hshv[0,k] = 1
    k = k+1
#print rf_hshv.T
# Accumulate confusion-matrix counts: cmat[row][col] with row = predicted
# class (RF,RM,SF,SM) and col = true class (15 trial columns per class).
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,45:60])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
# Same pipeline as the HSHV fold above: estimate per-state (mu, cov) emission
# parameters from the training folds, build and train the four class HMMs,
# then classify each held-out HSLV trial by maximum Viterbi log-likelihood.
mu_rf_force_hslv,mu_rf_motion_hslv,cov_rf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))))
mu_rm_force_hslv,mu_rm_motion_hslv,cov_rm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:30], Fmat_lshv[0:201,15:30], Fmat_lslv[0:201,15:30])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:30], Fmat_lshv[402:603,15:30], Fmat_lslv[402:603,15:30])))))
mu_sf_force_hslv,mu_sf_motion_hslv,cov_sf_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,30:45], Fmat_lshv[0:201,30:45], Fmat_lslv[0:201,30:45])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,30:45], Fmat_lshv[402:603,30:45], Fmat_lslv[402:603,30:45])))))
mu_sm_force_hslv,mu_sm_motion_hslv,cov_sm_hslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,45:60], Fmat_lshv[0:201,45:60], Fmat_lslv[0:201,45:60])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,45:60], Fmat_lshv[402:603,45:60], Fmat_lslv[402:603,45:60])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = [0.0]*20
B_rm_hslv = [0.0]*20
B_sf_hslv = [0.0]*20
B_sm_hslv = [0.0]*20
# Per state: [[mu_force, mu_motion], [flattened 2x2 covariance]] (20 states).
for num_states in range(20):
    B_rf_hslv[num_states] = [[mu_rf_force_hslv[num_states][0],mu_rf_motion_hslv[num_states][0]],[cov_rf_hslv[num_states][0][0],cov_rf_hslv[num_states][0][1],cov_rf_hslv[num_states][1][0],cov_rf_hslv[num_states][1][1]]]
    B_rm_hslv[num_states] = [[mu_rm_force_hslv[num_states][0],mu_rm_motion_hslv[num_states][0]],[cov_rm_hslv[num_states][0][0],cov_rm_hslv[num_states][0][1],cov_rm_hslv[num_states][1][0],cov_rm_hslv[num_states][1][1]]]
    B_sf_hslv[num_states] = [[mu_sf_force_hslv[num_states][0],mu_sf_motion_hslv[num_states][0]],[cov_sf_hslv[num_states][0][0],cov_sf_hslv[num_states][0][1],cov_sf_hslv[num_states][1][0],cov_sf_hslv[num_states][1][1]]]
    B_sm_hslv[num_states] = [[mu_sm_force_hslv[num_states][0],mu_sm_motion_hslv[num_states][0]],[cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]]]
    print cov_sm_hslv[num_states][0][0],cov_sm_hslv[num_states][0][1],cov_sm_hslv[num_states][1][0],cov_sm_hslv[num_states][1][1]
print "----"
#print B_sm_hslv
#print mu_sm_motion_hslv
# generate RF, RM, SF, SM models from parameters
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_lshv[0:201,0:15], Fmat_lslv[0:201,0:15])))
total_seq_rm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:30], Fmat_lshv[0:201,15:30], Fmat_lslv[0:201,15:30])))
total_seq_sf_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,30:45], Fmat_lshv[0:201,30:45], Fmat_lslv[0:201,30:45])))
total_seq_sm_force_hslv = np.matrix(np.column_stack((Fmat_hshv[0:201,45:60], Fmat_lshv[0:201,45:60], Fmat_lslv[0:201,45:60])))
total_seq_rf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_lshv[402:603,0:15], Fmat_lslv[402:603,0:15])))
total_seq_rm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:30], Fmat_lshv[402:603,15:30], Fmat_lslv[402:603,15:30])))
total_seq_sf_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,30:45], Fmat_lshv[402:603,30:45], Fmat_lslv[402:603,30:45])))
total_seq_sm_motion_hslv = np.matrix(np.column_stack((Fmat_hshv[402:603,45:60], Fmat_lshv[402:603,45:60], Fmat_lslv[402:603,45:60])))
total_seq_rf_hslv = np.zeros((402,45))
total_seq_rm_hslv = np.zeros((402,45))
total_seq_sf_hslv = np.zeros((402,45))
total_seq_sm_hslv = np.zeros((402,45))
i = 0
j = 0
# Interleave force (even rows) and motion (odd rows) samples per trial.
while i < 402:
    total_seq_rf_hslv[i] = total_seq_rf_force_hslv[j]
    total_seq_rf_hslv[i+1] = total_seq_rf_motion_hslv[j]
    total_seq_rm_hslv[i] = total_seq_rm_force_hslv[j]
    total_seq_rm_hslv[i+1] = total_seq_rm_motion_hslv[j]
    total_seq_sf_hslv[i] = total_seq_sf_force_hslv[j]
    total_seq_sf_hslv[i+1] = total_seq_sf_motion_hslv[j]
    total_seq_sm_hslv[i] = total_seq_sm_force_hslv[j]
    total_seq_sm_hslv[i+1] = total_seq_sm_motion_hslv[j]
    j=j+1
    i=i+2
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = np.zeros((402,60))
total_seq_obj_force_hslv = Fmat_hslv[0:201,:]
total_seq_obj_motion_hslv = Fmat_hslv[402:603,:]
i = 0
j = 0
while i < 402:
    total_seq_obj_hslv[i] = total_seq_obj_force_hslv[j]
    total_seq_obj_hslv[i+1] = total_seq_obj_motion_hslv[j]
    j=j+1
    i=i+2
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
    test_seq_obj_hslv = (np.array(total_seq_obj_hslv[:,k]).T).tolist()
    new_test_seq_obj_hslv = np.array(test_seq_obj_hslv)
    #print new_test_seq_obj_hslv
    ts_obj_hslv = new_test_seq_obj_hslv
    #print np.shape(ts_obj_hslv)
    final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
    # Find Viterbi Path
    path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
    path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
    path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
    path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
    # Pick the class whose model gives the highest Viterbi log-likelihood.
    obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
    if obj_hslv == path_rf_obj_hslv[1]:
        rf_hslv[0,k] = 1
    elif obj_hslv == path_rm_obj_hslv[1]:
        rm_hslv[0,k] = 1
    elif obj_hslv == path_sf_obj_hslv[1]:
        sf_hslv[0,k] = 1
    else:
        sm_hslv[0,k] = 1
    k = k+1
#print rf_hslv.T
# Accumulate this fold's counts into the shared confusion matrix.
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:60])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
# Same leave-one-out pipeline: train the four class HMMs on HSHV+HSLV+LSLV,
# test on the held-out LSHV trials, and accumulate into cmat.
mu_rf_force_lshv,mu_rf_motion_lshv,cov_rf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lslv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lslv[402:603,0:15])))))
mu_rm_force_lshv,mu_rm_motion_lshv,cov_rm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:30], Fmat_hslv[0:201,15:30], Fmat_lslv[0:201,15:30])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:30], Fmat_hslv[402:603,15:30], Fmat_lslv[402:603,15:30])))))
mu_sf_force_lshv,mu_sf_motion_lshv,cov_sf_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,30:45], Fmat_hslv[0:201,30:45], Fmat_lslv[0:201,30:45])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,30:45], Fmat_hslv[402:603,30:45], Fmat_lslv[402:603,30:45])))))
mu_sm_force_lshv,mu_sm_motion_lshv,cov_sm_lshv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,45:60], Fmat_hslv[0:201,45:60], Fmat_lslv[0:201,45:60])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,45:60], Fmat_hslv[402:603,45:60], Fmat_lslv[402:603,45:60])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = [0.0]*20
B_rm_lshv = [0.0]*20
B_sf_lshv = [0.0]*20
B_sm_lshv = [0.0]*20
# Per state: [[mu_force, mu_motion], [flattened 2x2 covariance]] (20 states).
for num_states in range(20):
    B_rf_lshv[num_states] = [[mu_rf_force_lshv[num_states][0],mu_rf_motion_lshv[num_states][0]],[cov_rf_lshv[num_states][0][0],cov_rf_lshv[num_states][0][1],cov_rf_lshv[num_states][1][0],cov_rf_lshv[num_states][1][1]]]
    B_rm_lshv[num_states] = [[mu_rm_force_lshv[num_states][0],mu_rm_motion_lshv[num_states][0]],[cov_rm_lshv[num_states][0][0],cov_rm_lshv[num_states][0][1],cov_rm_lshv[num_states][1][0],cov_rm_lshv[num_states][1][1]]]
    B_sf_lshv[num_states] = [[mu_sf_force_lshv[num_states][0],mu_sf_motion_lshv[num_states][0]],[cov_sf_lshv[num_states][0][0],cov_sf_lshv[num_states][0][1],cov_sf_lshv[num_states][1][0],cov_sf_lshv[num_states][1][1]]]
    B_sm_lshv[num_states] = [[mu_sm_force_lshv[num_states][0],mu_sm_motion_lshv[num_states][0]],[cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]]]
    print cov_sm_lshv[num_states][0][0],cov_sm_lshv[num_states][0][1],cov_sm_lshv[num_states][1][0],cov_sm_lshv[num_states][1][1]
print "----"
#print B_sm_lshv
#print mu_sm_motion_lshv
# generate RF, RM, SF, SM models from parameters
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lslv[0:201,0:15])))
total_seq_rm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:30], Fmat_hslv[0:201,15:30], Fmat_lslv[0:201,15:30])))
total_seq_sf_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,30:45], Fmat_hslv[0:201,30:45], Fmat_lslv[0:201,30:45])))
total_seq_sm_force_lshv = np.matrix(np.column_stack((Fmat_hshv[0:201,45:60], Fmat_hslv[0:201,45:60], Fmat_lslv[0:201,45:60])))
total_seq_rf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lslv[402:603,0:15])))
total_seq_rm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:30], Fmat_hslv[402:603,15:30], Fmat_lslv[402:603,15:30])))
total_seq_sf_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,30:45], Fmat_hslv[402:603,30:45], Fmat_lslv[402:603,30:45])))
total_seq_sm_motion_lshv = np.matrix(np.column_stack((Fmat_hshv[402:603,45:60], Fmat_hslv[402:603,45:60], Fmat_lslv[402:603,45:60])))
total_seq_rf_lshv = np.zeros((402,45))
total_seq_rm_lshv = np.zeros((402,45))
total_seq_sf_lshv = np.zeros((402,45))
total_seq_sm_lshv = np.zeros((402,45))
i = 0
j = 0
# Interleave force (even rows) and motion (odd rows) samples per trial.
while i < 402:
    total_seq_rf_lshv[i] = total_seq_rf_force_lshv[j]
    total_seq_rf_lshv[i+1] = total_seq_rf_motion_lshv[j]
    total_seq_rm_lshv[i] = total_seq_rm_force_lshv[j]
    total_seq_rm_lshv[i+1] = total_seq_rm_motion_lshv[j]
    total_seq_sf_lshv[i] = total_seq_sf_force_lshv[j]
    total_seq_sf_lshv[i+1] = total_seq_sf_motion_lshv[j]
    total_seq_sm_lshv[i] = total_seq_sm_force_lshv[j]
    total_seq_sm_lshv[i+1] = total_seq_sm_motion_lshv[j]
    j=j+1
    i=i+2
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
total_seq_obj_lshv = np.zeros((402,60))
total_seq_obj_force_lshv = Fmat_lshv[0:201,:]
total_seq_obj_motion_lshv = Fmat_lshv[402:603,:]
i = 0
j = 0
while i < 402:
    total_seq_obj_lshv[i] = total_seq_obj_force_lshv[j]
    total_seq_obj_lshv[i+1] = total_seq_obj_motion_lshv[j]
    j=j+1
    i=i+2
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
    test_seq_obj_lshv = (np.array(total_seq_obj_lshv[:,k]).T).tolist()
    new_test_seq_obj_lshv = np.array(test_seq_obj_lshv)
    #print new_test_seq_obj_lshv
    ts_obj_lshv = new_test_seq_obj_lshv
    #print np.shape(ts_obj_lshv)
    final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
    # Find Viterbi Path
    path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
    path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
    path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
    path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
    # Pick the class whose model gives the highest Viterbi log-likelihood.
    obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
    if obj_lshv == path_rf_obj_lshv[1]:
        rf_lshv[0,k] = 1
    elif obj_lshv == path_rm_obj_lshv[1]:
        rm_lshv[0,k] = 1
    elif obj_lshv == path_sf_obj_lshv[1]:
        sf_lshv[0,k] = 1
    else:
        sm_lshv[0,k] = 1
    k = k+1
#print rf_lshv.T
# Accumulate this fold's counts into the shared confusion matrix.
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,45:60])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
# Final fold of the leave-one-out loop: train the four class HMMs on
# HSHV+HSLV+LSHV, test on the held-out LSLV trials, and accumulate into cmat.
mu_rf_force_lslv,mu_rf_motion_lslv,cov_rf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15])))))
mu_rm_force_lslv,mu_rm_motion_lslv,cov_rm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,15:30], Fmat_hslv[0:201,15:30], Fmat_lshv[0:201,15:30])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,15:30], Fmat_hslv[402:603,15:30], Fmat_lshv[402:603,15:30])))))
mu_sf_force_lslv,mu_sf_motion_lslv,cov_sf_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,30:45], Fmat_hslv[0:201,30:45], Fmat_lshv[0:201,30:45])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,30:45], Fmat_hslv[402:603,30:45], Fmat_lshv[402:603,30:45])))))
mu_sm_force_lslv,mu_sm_motion_lslv,cov_sm_lslv = feature_to_mu_cov((np.matrix(np.column_stack((Fmat_hshv[0:201,45:60], Fmat_hslv[0:201,45:60], Fmat_lshv[0:201,45:60])))), (np.matrix(np.column_stack((Fmat_hshv[402:603,45:60], Fmat_hslv[402:603,45:60], Fmat_lshv[402:603,45:60])))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = [0.0]*20
B_rm_lslv = [0.0]*20
B_sf_lslv = [0.0]*20
B_sm_lslv = [0.0]*20
# Per state: [[mu_force, mu_motion], [flattened 2x2 covariance]] (20 states).
for num_states in range(20):
    B_rf_lslv[num_states] = [[mu_rf_force_lslv[num_states][0],mu_rf_motion_lslv[num_states][0]],[cov_rf_lslv[num_states][0][0],cov_rf_lslv[num_states][0][1],cov_rf_lslv[num_states][1][0],cov_rf_lslv[num_states][1][1]]]
    B_rm_lslv[num_states] = [[mu_rm_force_lslv[num_states][0],mu_rm_motion_lslv[num_states][0]],[cov_rm_lslv[num_states][0][0],cov_rm_lslv[num_states][0][1],cov_rm_lslv[num_states][1][0],cov_rm_lslv[num_states][1][1]]]
    B_sf_lslv[num_states] = [[mu_sf_force_lslv[num_states][0],mu_sf_motion_lslv[num_states][0]],[cov_sf_lslv[num_states][0][0],cov_sf_lslv[num_states][0][1],cov_sf_lslv[num_states][1][0],cov_sf_lslv[num_states][1][1]]]
    B_sm_lslv[num_states] = [[mu_sm_force_lslv[num_states][0],mu_sm_motion_lslv[num_states][0]],[cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]]]
    print cov_sm_lslv[num_states][0][0],cov_sm_lslv[num_states][0][1],cov_sm_lslv[num_states][1][0],cov_sm_lslv[num_states][1][1]
print "----"
#print B_sm_lslv
#print mu_sm_motion_lslv
# generate RF, RM, SF, SM models from parameters
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,0:15], Fmat_hslv[0:201,0:15], Fmat_lshv[0:201,0:15])))
total_seq_rm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,15:30], Fmat_hslv[0:201,15:30], Fmat_lshv[0:201,15:30])))
total_seq_sf_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,30:45], Fmat_hslv[0:201,30:45], Fmat_lshv[0:201,30:45])))
total_seq_sm_force_lslv = np.matrix(np.column_stack((Fmat_hshv[0:201,45:60], Fmat_hslv[0:201,45:60], Fmat_lshv[0:201,45:60])))
total_seq_rf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,0:15], Fmat_hslv[402:603,0:15], Fmat_lshv[402:603,0:15])))
total_seq_rm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,15:30], Fmat_hslv[402:603,15:30], Fmat_lshv[402:603,15:30])))
total_seq_sf_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,30:45], Fmat_hslv[402:603,30:45], Fmat_lshv[402:603,30:45])))
total_seq_sm_motion_lslv = np.matrix(np.column_stack((Fmat_hshv[402:603,45:60], Fmat_hslv[402:603,45:60], Fmat_lshv[402:603,45:60])))
total_seq_rf_lslv = np.zeros((402,45))
total_seq_rm_lslv = np.zeros((402,45))
total_seq_sf_lslv = np.zeros((402,45))
total_seq_sm_lslv = np.zeros((402,45))
i = 0
j = 0
# Interleave force (even rows) and motion (odd rows) samples per trial.
while i < 402:
    total_seq_rf_lslv[i] = total_seq_rf_force_lslv[j]
    total_seq_rf_lslv[i+1] = total_seq_rf_motion_lslv[j]
    total_seq_rm_lslv[i] = total_seq_rm_force_lslv[j]
    total_seq_rm_lslv[i+1] = total_seq_rm_motion_lslv[j]
    total_seq_sf_lslv[i] = total_seq_sf_force_lslv[j]
    total_seq_sf_lslv[i+1] = total_seq_sf_motion_lslv[j]
    total_seq_sm_lslv[i] = total_seq_sm_force_lslv[j]
    total_seq_sm_lslv[i+1] = total_seq_sm_motion_lslv[j]
    j=j+1
    i=i+2
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
total_seq_obj_lslv = np.zeros((402,60))
total_seq_obj_force_lslv = Fmat_lslv[0:201,:]
total_seq_obj_motion_lslv = Fmat_lslv[402:603,:]
i = 0
j = 0
while i < 402:
    total_seq_obj_lslv[i] = total_seq_obj_force_lslv[j]
    total_seq_obj_lslv[i+1] = total_seq_obj_motion_lslv[j]
    j=j+1
    i=i+2
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
    test_seq_obj_lslv = (np.array(total_seq_obj_lslv[:,k]).T).tolist()
    new_test_seq_obj_lslv = np.array(test_seq_obj_lslv)
    #print new_test_seq_obj_lslv
    ts_obj_lslv = new_test_seq_obj_lslv
    #print np.shape(ts_obj_lslv)
    # Find Viterbi Path
    final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
    path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
    path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
    path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
    path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
    # Pick the class whose model gives the highest Viterbi log-likelihood.
    obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
    if obj_lslv == path_rf_obj_lslv[1]:
        rf_lslv[0,k] = 1
    elif obj_lslv == path_rm_obj_lslv[1]:
        rm_lslv[0,k] = 1
    elif obj_lslv == path_sf_obj_lslv[1]:
        sf_lslv[0,k] = 1
    else:
        sm_lslv[0,k] = 1
    k = k+1
#print rf_lslv.T
# Accumulate this fold's counts into the shared confusion matrix.
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,45:60])
#print cmat
############################################################################################################################################
# Render the accumulated confusion matrix as a heat map with numeric labels:
# x axis = true class ("Targets"), y axis = predicted class ("Predictions").
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation='nearest', origin='upper',
                     extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
# Center the tick labels on the cells; y ticks run top-to-bottom so row 0
# (Rigid-Fixed predictions) appears at the top of the plot.
ax.set_xticks([0.5, 1.5, 2.5, 3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5, 2.5, 1.5, 0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
# Overlay each cell's raw count at the cell center.
for i in range(4):
    for j in range(4):
        pp.text(j + 0.5, 3.5 - i, cmat[i][j])
pp.savefig('results_force_motion_20_states.png')
pp.show()
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import priority_group
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as lib_const
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as lib_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.db import servicetype_db as st_db
from neutron.services import provider_configuration
from neutron.services import service_base
LOG = logging.getLogger(__name__)
@registry.has_registry_receivers
class DriverController(object):
"""Driver controller for the L3 service plugin.
This component is responsible for dispatching router requests to L3
service providers and for performing the bookkeeping about which
driver is associated with a given router.
This is not intended to be accessed by the drivers or the l3 plugin.
All of the methods are marked as private to reflect this.
"""
def __init__(self, l3_plugin):
self.l3_plugin = l3_plugin
self._stm = st_db.ServiceTypeManager.get_instance()
self._stm.add_provider_configuration(
plugin_constants.L3, _LegacyPlusProviderConfiguration())
self._load_drivers()
def _load_drivers(self):
self.drivers, self.default_provider = (
service_base.load_drivers(plugin_constants.L3, self.l3_plugin))
# store the provider name on each driver to make finding inverse easy
for provider_name, driver in self.drivers.items():
setattr(driver, 'name', provider_name)
@property
def _flavor_plugin(self):
if not hasattr(self, '_flavor_plugin_ref'):
self._flavor_plugin_ref = directory.get_plugin(
plugin_constants.FLAVORS)
return self._flavor_plugin_ref
@registry.receives(resources.ROUTER, [events.BEFORE_CREATE],
priority_group.PRIORITY_ROUTER_CONTROLLER)
def _check_router_request(self, resource, event, trigger, payload):
"""Validates that API request is sane (flags compat with flavor)."""
context = payload.context
router = payload.latest_state
drv = self._get_provider_for_create(context, router)
_ensure_driver_supports_request(drv, router)
@registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE],
priority_group.PRIORITY_ROUTER_CONTROLLER)
def _set_router_provider(self, resource, event, trigger, payload):
"""Associates a router with a service provider.
Association is done by flavor_id if it's specified, otherwise it will
fallback to determining which loaded driver supports the ha/distributed
attributes associated with the router.
"""
context = payload.context
router = payload.latest_state
router_db = payload.metadata['router_db']
router_id = payload.resource_id
if _flavor_specified(router):
router_db.flavor_id = router['flavor_id']
drv = self._get_provider_for_create(context, router)
self._stm.add_resource_association(context, plugin_constants.L3,
drv.name, router_id)
registry.publish(
resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION,
trigger, payload=events.DBEventPayload(
context, request_body=router, states=(router_db,),
metadata={'old_driver': None, 'new_driver': drv},
resource_id=router_db.get('id')))
@registry.receives(resources.ROUTER, [events.PRECOMMIT_DELETE],
priority_group.PRIORITY_ROUTER_CONTROLLER)
def _clear_router_provider(self, resource, event, trigger, payload):
"""Remove the association between a router and a service provider."""
context = payload.context
router_id = payload.resource_id
drv = self.get_provider_for_router(context, router_id)
registry.publish(
resources.ROUTER_CONTROLLER, events.PRECOMMIT_DELETE_ASSOCIATIONS,
trigger, payload=events.DBEventPayload(
context,
metadata={'old_driver': drv, 'new_driver': None},
resource_id=router_id))
self._stm.del_resource_associations(context, [router_id])
    @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE],
                       priority_group.PRIORITY_ROUTER_CONTROLLER)
    def _update_router_provider(self, resource, event, trigger, payload=None):
        """Handle transition between providers.
        The provider can currently be changed only by the caller updating
        'ha' and/or 'distributed' attributes. If we allow updates of flavor_id
        directly in the future those requests will also land here.
        """
        drv = self.get_provider_for_router(payload.context,
                                           payload.resource_id)
        new_drv = None
        if _flavor_specified(payload.request_body):
            if (payload.request_body['flavor_id'] !=
                    payload.states[0]['flavor_id']):
                # TODO(kevinbenton): this is currently disallowed by the API
                # so we shouldn't hit it but this is a placeholder to add
                # support later.
                raise NotImplementedError()
        # the following is to support updating the 'ha' and 'distributed'
        # attributes via the API.
        try:
            _ensure_driver_supports_request(drv, payload.request_body)
        except lib_exc.InvalidInput:
            # the current driver does not support this request, we need to
            # migrate to a new provider. populate the distributed and ha
            # flags from the previous state if not in the update so we can
            # determine the target provider appropriately.
            # NOTE(kevinbenton): if the router is associated with a flavor
            # we bail because changing the provider without changing
            # the flavor will make things inconsistent. We can probably
            # update the flavor automatically in the future.
            if payload.states[0]['flavor_id']:
                raise lib_exc.InvalidInput(error_message=_(
                    "Changing the 'ha' and 'distributed' attributes on a "
                    "router associated with a flavor is not supported"))
            if 'distributed' not in payload.request_body:
                payload.request_body['distributed'] = (payload.states[0]
                                                       ['distributed'])
            if 'ha' not in payload.request_body:
                payload.request_body['ha'] = payload.states[0]['ha']
            LOG.debug("Get a provider driver handle based on the ha flag: "
                      "%(ha_flag)s and distributed flag: %(distributed_flag)s",
                      {'ha_flag': payload.request_body['ha'],
                       'distributed_flag':
                           payload.request_body['distributed']})
            new_drv = self._attrs_to_driver(payload.request_body)
        if new_drv:
            LOG.debug("Router %(id)s migrating from %(old)s provider to "
                      "%(new)s provider.", {'id': payload.resource_id,
                                            'old': drv,
                                            'new': new_drv})
            # the new provider must accept the full (merged) request body
            _ensure_driver_supports_request(new_drv, payload.request_body)
            # TODO(kevinbenton): notify old driver explicitly of driver change
            # swap the association atomically: delete old, add new, with
            # listener notifications on each side of the swap
            with db_api.CONTEXT_WRITER.using(payload.context):
                registry.publish(
                    resources.ROUTER_CONTROLLER,
                    events.PRECOMMIT_DELETE_ASSOCIATIONS,
                    trigger, payload=payload)
                self._stm.del_resource_associations(
                    payload.context, [payload.resource_id])
                self._stm.add_resource_association(
                    payload.context, plugin_constants.L3,
                    new_drv.name, payload.resource_id, expire_session=False)
                registry.publish(
                    resources.ROUTER_CONTROLLER,
                    events.PRECOMMIT_ADD_ASSOCIATION,
                    trigger, payload=payload)
    def get_provider_for_router(self, context, router_id):
        """Return the provider driver handle for a router id.

        If the router predates provider tracking, a provider is derived
        from its ha/distributed attributes and the association is written
        back (with the usual PRECOMMIT_ADD_ASSOCIATION notification) so
        subsequent lookups hit the fast path.
        """
        driver_name = self._stm.get_provider_names_by_resource_ids(
            context, [router_id]).get(router_id)
        if not driver_name:
            # this is an old router that hasn't been mapped to a provider
            # yet so we do this now
            router = self.l3_plugin.get_router(context, router_id)
            driver = self._attrs_to_driver(router)
            driver_name = driver.name
            with db_api.CONTEXT_WRITER.using(context):
                self._stm.add_resource_association(
                    context, plugin_constants.L3,
                    driver_name, router_id)
                registry.publish(
                    resources.ROUTER_CONTROLLER,
                    events.PRECOMMIT_ADD_ASSOCIATION,
                    self, payload=events.DBEventPayload(
                        context, states=(router,),
                        metadata={'old_driver': None, 'new_driver': driver},
                        resource_id=router_id))
        return self.drivers[driver_name]
def _get_provider_for_create(self, context, router):
"""Get provider based on flavor or ha/distributed flags."""
if not _flavor_specified(router):
return self._attrs_to_driver(router)
return self._get_l3_driver_by_flavor(context, router['flavor_id'])
def _get_l3_driver_by_flavor(self, context, flavor_id):
"""Get a provider driver handle for a given flavor_id."""
flavor = self._flavor_plugin.get_flavor(context, flavor_id)
provider = self._flavor_plugin.get_flavor_next_provider(
context, flavor['id'])[0]
# TODO(kevinbenton): the callback framework suppresses the nice errors
# these generate when they fail to lookup. carry them through
driver = self.drivers[provider['provider']]
return driver
def _attrs_to_driver(self, router):
"""Get a provider driver handle based on the ha/distributed flags."""
distributed = _is_distributed(
router.get('distributed', lib_const.ATTR_NOT_SPECIFIED))
ha = _is_ha(router.get('ha', lib_const.ATTR_NOT_SPECIFIED))
drivers = list(self.drivers.values())
# make sure default is tried before the rest if defined
if self.default_provider:
drivers.insert(0, self.drivers[self.default_provider])
for driver in drivers:
if _is_driver_compatible(distributed, ha, driver):
return driver
raise NotImplementedError(
_("Could not find a service provider that supports "
"distributed=%(d)s and ha=%(h)s") % {'d': distributed, 'h': ha}
)
def uses_scheduler(self, context, router_id):
"""Returns True if the integrated L3 scheduler should be used."""
return (self.get_provider_for_router(context, router_id).
use_integrated_agent_scheduler)
class _LegacyPlusProviderConfiguration(
        provider_configuration.ProviderConfiguration):
    """Provider configuration pre-seeded with the built-in L3 drivers."""

    def __init__(self):
        # loads up ha, dvr, and single_node service providers automatically.
        # If an operator has setup explicit values that conflict with these,
        # the operator defined values will take priority.
        super(_LegacyPlusProviderConfiguration, self).__init__(
            svc_type=plugin_constants.L3)
        for name, driver in (('dvrha', 'dvrha.DvrHaDriver'),
                             ('dvr', 'dvr.DvrDriver'), ('ha', 'ha.HaDriver'),
                             ('single_node', 'single_node.SingleNodeDriver')):
            path = 'neutron.services.l3_router.service_providers.%s' % driver
            try:
                self.add_provider({'service_type': plugin_constants.L3,
                                   'name': name, 'driver': path,
                                   'default': False})
            except lib_exc.Invalid:
                # an operator already defined this provider explicitly;
                # their configuration wins
                LOG.debug("Could not add L3 provider '%s', it may have "
                          "already been explicitly defined.", name)
def _is_driver_compatible(distributed, ha, driver):
if not driver.distributed_support.is_compatible(distributed):
return False
if not driver.ha_support.is_compatible(ha):
return False
return True
def _is_distributed(distributed_attr):
if distributed_attr is False:
return False
if distributed_attr == lib_const.ATTR_NOT_SPECIFIED:
return cfg.CONF.router_distributed
return True
def _is_ha(ha_attr):
if ha_attr is False:
return False
if ha_attr == lib_const.ATTR_NOT_SPECIFIED:
return cfg.CONF.l3_ha
return True
def _flavor_specified(router):
return ('flavor_id' in router and
router['flavor_id'] != lib_const.ATTR_NOT_SPECIFIED)
def _ensure_driver_supports_request(drv, router_body):
r = router_body
for key, attr in (('distributed', 'distributed_support'),
('ha', 'ha_support')):
flag = r.get(key)
if flag not in [True, False]:
continue # not specified in body
if not getattr(drv, attr).is_compatible(flag):
raise lib_exc.InvalidInput(error_message=(
_("Provider %(name)s does not support %(key)s=%(flag)s")
% dict(name=drv.name, key=key, flag=flag)))
|
|
#!/usr/bin/python -B
'''
TDM HDB - File Hash Calc
By: TDM HDB Team
HDB Url: https://tdm.io/hdb
GitHub: https://github.com/TheDoxMedia/HDB-Hash-Calc
License: MIT
'''
import sys, time, json
from PySide import QtCore, QtGui
from gui.main_window import Ui_main_window
from gui.login_window import Ui_login_window
from core import hash, windowPosition, hdbConn
windowPos = windowPosition.windowPos
class mainWindow(QtGui.QWidget):
    """Main HDB hash-calculator window.

    Lets the user pick a file, toggle which digests to compute, and runs
    each hash on its own QThread so the UI stays responsive.
    """
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self)
        self.ui = Ui_main_window()
        self.ui.setupUi(self)
        # Set window pos
        windowPos(self, 'center')
        # Other windows
        self.loginWindow = loginWindow(self)
        self.filename = None
        # computed digest per algorithm ('' == not computed yet)
        self.val = {
            'md5': '',
            'sha1': '',
            'sha256': '',
            'sha512': ''
        }
        # which algorithm checkboxes are currently ticked
        self.checkbox = {
            'md5': False,
            'sha1': False,
            'sha256': False,
            'sha512': False
        }
        # UI Connects
        self.ui.select_btn.clicked.connect(self.selFile)
        self.ui.clear_btn.clicked.connect(self.resetCalc)
        self.ui.md5_check.clicked[bool].connect(self.toggleCheck)
        self.ui.sha1_check.clicked[bool].connect(self.toggleCheck)
        self.ui.sha256_check.clicked[bool].connect(self.toggleCheck)
        self.ui.sha512_check.clicked[bool].connect(self.toggleCheck)
        self.ui.user_btn.clicked.connect(self.openLoginWindow)
        # Threading for Hashing (one worker thread per algorithm)
        self.hashWorkerThread = {}
        # User Vars
        self.username = None
        self.loggedIn = False
        # Setup hdbConn
        self.hdb = hdbConn.conn()
        # Check for .hdbapp_settings (persisted login state).
        # BUG FIX: the settings file was previously opened and never closed,
        # and a bare except silently hid every failure. Use a context
        # manager and catch only the expected failure modes (file missing,
        # unreadable, malformed JSON, or missing keys).
        try:
            with open('.hdbapp_settings', 'r') as hdbappSettings_raw:
                self.hdbappSettings = json.load(hdbappSettings_raw)
            if (self.hdbappSettings['hdb_loggedin']):
                self.ui.user_btn.setText(self.hdbappSettings['username'])
                # UPDATE User Vars
                self.username = self.hdbappSettings['username']
                self.loggedIn = True
        except (IOError, OSError, ValueError, KeyError):
            pass
    def closeEvent(self, event):
        # NOTE(review): sys.exit() here tears down the whole process on
        # window close, skipping Qt cleanup — presumably intentional for
        # this small tool; confirm.
        sys.exit()
    def selFile(self):
        """Open a file chooser and feed the selection to updateSelFile."""
        self.fileDialog = QtGui.QFileDialog(self)
        self.updateSelFile(self.fileDialog.getOpenFileName())
    def updateSelFile(self, val):
        """Record the chosen file path and kick off any pending hashes."""
        # Set vars (getOpenFileName returns a (path, filter) tuple)
        self.filename = str(val[0])
        # Update UI
        self.ui.file_txt.setText(self.filename)
        self.workItems()
    def updateAlgText(self, alg, val):
        """Store a finished digest and display it in the matching text box."""
        self.val[alg] = val
        objToUpdate = getattr(self.ui, str(alg+'_txt'))
        objToUpdate.setText(str(val))
    def toggleCheck(self, status):
        """React to an algorithm checkbox toggle.

        Checking starts a hash (if a file is selected and no digest is
        cached); unchecking clears the cached digest and its text box.
        """
        # Get source of toggle; checkbox label text doubles as the alg name
        source = self.sender()
        alg = str(source.text()).lower()
        # Update checkbox arr
        self.checkbox[alg] = status
        # Hash individual item
        if (self.filename != None):
            # Check to make sure the hash has not already been performed
            if (self.val[alg] == ''):
                # Hash
                self.calcHash(alg, self.filename)
        # IF unchecking a checkbox clear the value if its set
        if (self.checkbox[alg] == False):
            self.val[alg] = ''
            objToUpdate = getattr(self.ui, str(alg+'_txt'))
            objToUpdate.setText('')
    def workItems(self):
        """Hash every checked algorithm that has no cached digest yet."""
        for alg in self.checkbox:
            # IF the checkbox is checked AND the hash val is empty
            if (self.checkbox[alg] == True and (self.val[alg] == '')):
                # Hash
                self.calcHash(alg, self.filename)
    def calcHash(self, alg, filename):
        """Spawn a worker thread computing *alg* over *filename*."""
        self.hashWorkerThread[alg] = hashWorker({
            'filename': filename,
            'alg': alg
        })
        self.connect(self.hashWorkerThread[alg], QtCore.SIGNAL('threadDone(QString, QString)'), self.updateAlgText, QtCore.Qt.DirectConnection)
        self.hashWorkerThread[alg].start()
    def resetCalc(self):
        """Clear the selected file, all cached digests, and all checkboxes."""
        # Reset var vals
        self.val = {
            'md5': '',
            'sha1': '',
            'sha256': '',
            'sha512': ''
        }
        # Reset checkbox vals
        self.checkbox = {
            'md5': False,
            'sha1': False,
            'sha256': False,
            'sha512': False
        }
        # Clear file
        self.filename = None
        self.ui.file_txt.setText('')
        # Clear alg txt
        for alg in self.val:
            objToReset = getattr(self.ui, str(alg+'_txt'))
            objToReset.setText('')
        # Clear checkboxes
        for alg in self.checkbox:
            objToReset = getattr(self.ui, str(alg+'_check'))
            objToReset.setCheckState(QtCore.Qt.Unchecked)
    # Other windows
    def openLoginWindow(self):
        """Show the login dialog, or offer logout when already logged in."""
        if (self.loggedIn):
            logoutQuestionBox = QtGui.QMessageBox.question(self, 'Logout?','Would you like to logout?', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
            if (logoutQuestionBox == QtGui.QMessageBox.Yes):
                resp = self.hdb.logout()
                if (resp):
                    self.loggedIn = False
                    self.username = None
                    self.ui.user_btn.setText('User')
        else:
            self.loginWindow.show()
class loginWindow(QtGui.QWidget):
    """Modal-style login dialog that authenticates against HDB in a thread."""
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self)
        self.ui = Ui_login_window()
        self.ui.setupUi(self)
        self.loggedInStatus = False
        # Set screen pos
        windowPos(self, 'center')
        # UI Connects
        self.ui.login_btn.clicked.connect(self.login)
    def login(self):
        """Start a background login attempt with the entered credentials."""
        if (not self.loggedInStatus):
            # Disable gui elements while the worker runs
            self.ui.username_txt.setReadOnly(True)
            self.ui.appkey_txt.setReadOnly(True)
            # Get values from window
            username = self.ui.username_txt.text()
            appKey = self.ui.appkey_txt.text()
            # Update status text
            self.ui.status_lbl.setText('Attempting login...')
            self.loginWorkerThread = loginWorker({
                'username': username,
                'appKey': appKey
            })
            self.connect(self.loginWorkerThread, QtCore.SIGNAL('loginThreadDone(bool, QString)'), self.updateLoginStatusText, QtCore.Qt.DirectConnection)
            self.loginWorkerThread.start()
    def updateLoginStatusText(self, resp, txt):
        """Show the login result; re-enable inputs on failure.

        :param resp: True when the login succeeded
        :param txt:  status message to display
        """
        self.ui.status_lbl.setText(txt)
        if (not resp):
            self.ui.username_txt.setReadOnly(False)
            self.ui.appkey_txt.setReadOnly(False)
        else:
            self.loggedInStatus = True
            self.ui.login_btn.setText('Logout')
class hashWorker(QtCore.QThread):
    """Worker thread computing one digest of one file.

    Emits threadDone(alg, digest) when finished so the UI thread can
    display the result.
    """
    def __init__(self, arr):
        QtCore.QThread.__init__(self)
        self.filename = arr['filename']
        self.alg = arr['alg']
    def run(self):
        # hash.process presumably returns the digest as a string — confirm
        hashVal = hash.process(self.alg, self.filename)
        self.emit(QtCore.SIGNAL('threadDone(QString, QString)'), self.alg, hashVal)
class loginWorker(QtCore.QThread):
    """Worker thread performing an HDB login without blocking the UI.

    Emits loginThreadDone(success, status) when the request completes.
    """
    def __init__(self, arr):
        QtCore.QThread.__init__(self)
        self.username = arr['username']
        self.appKey = arr['appKey']
        # Setup hdbConn
        self.hdb = hdbConn.conn()
    # BUG FIX: removed the dead class-level attribute ``resp = {}`` — it was
    # never read anywhere and run() always binds its own local ``resp``.
    def run(self):
        # resp is expected to be a dict with 'success' and 'status' keys
        resp = self.hdb.login(self.username, self.appKey)
        self.emit(QtCore.SIGNAL('loginThreadDone(bool, QString)'), resp['success'], resp['status'])
if __name__ == "__main__":
    # Boot the Qt application, show the main window, and hand control to
    # the event loop; exit with the loop's return code.
    application = QtGui.QApplication(sys.argv)
    window = mainWindow()
    window.show()
    sys.exit(application.exec_())
|
|
# pylint:disable=missing-class-docstring
class AngrError(Exception):
    """Base class for all angr-specific errors."""
    pass
class AngrValueError(AngrError, ValueError):
    pass
class AngrLifterError(AngrError):
    pass
class AngrExitError(AngrError):
    pass
class AngrPathError(AngrError):
    pass
class AngrVaultError(AngrError):
    pass
class PathUnreachableError(AngrPathError):
    pass
class SimulationManagerError(AngrError):
    pass
class AngrInvalidArgumentError(AngrError):
    pass
class AngrSurveyorError(AngrError):
    pass
class AngrAnalysisError(AngrError):
    """Base class for errors raised by analyses."""
    pass
class AngrBladeError(AngrError):
    pass
class AngrBladeSimProcError(AngrBladeError):
    pass
class AngrAnnotatedCFGError(AngrError):
    pass
class AngrBackwardSlicingError(AngrError):
    pass
class AngrGirlScoutError(AngrError):
    pass
class AngrCallableError(AngrSurveyorError):
    pass
class AngrCallableMultistateError(AngrCallableError):
    pass
class AngrSyscallError(AngrError):
    pass
class AngrSimOSError(AngrError):
    pass
class AngrAssemblyError(AngrError):
    pass
# Congruency check failure
class AngrIncongruencyError(AngrAnalysisError):
    pass
#
# ForwardAnalysis errors
#
# The *Notice classes below are used as control-flow signals inside
# ForwardAnalysis rather than as genuine failures.
class AngrForwardAnalysisError(AngrError):
    pass
class AngrSkipJobNotice(AngrForwardAnalysisError):
    """Signal: skip the current job."""
    pass
class AngrDelayJobNotice(AngrForwardAnalysisError):
    """Signal: process the current job later."""
    pass
class AngrJobMergingFailureNotice(AngrForwardAnalysisError):
    pass
class AngrJobWideningFailureNotice(AngrForwardAnalysisError):
    pass
#
# CFG errors
#
class AngrCFGError(AngrError):
    pass
#
# VFG Errors and notices
#
class AngrVFGError(AngrError):
    pass
class AngrVFGRestartAnalysisNotice(AngrVFGError):
    """Signal: the VFG analysis should be restarted."""
    pass
#
# Data graph errors
#
class AngrDataGraphError(AngrAnalysisError):
    # TODO: deprecated
    pass
class AngrDDGError(AngrAnalysisError):
    pass
#
# Loop analysis
#
class AngrLoopAnalysisError(AngrAnalysisError):
    pass
#
# Exploration techniques
#
class AngrExplorationTechniqueError(AngrError):
    """Base class for errors raised by exploration techniques."""
    pass
class AngrExplorerError(AngrExplorationTechniqueError):
    pass
class AngrDirectorError(AngrExplorationTechniqueError):
    pass
class AngrTracerError(AngrExplorationTechniqueError):
    pass
#
# VariableRecovery errors
#
class AngrVariableRecoveryError(AngrAnalysisError):
    pass
#
# AngrDB errors
#
class AngrDBError(AngrError):
    pass
class AngrCorruptDBError(AngrDBError):
    pass
class AngrIncompatibleDBError(AngrDBError):
    pass
#
# Tracer
#
class TracerEnvironmentError(AngrError):
    pass
#
# Simulation errors
#
class SimError(Exception):
    """Base class for errors raised during simulated execution.

    Optionally carries source-location metadata captured from a state so
    the failure site can be reported after the fact.
    """
    bbl_addr = None
    stmt_idx = None
    ins_addr = None
    executed_instruction_count = None
    guard = None
    def record_state(self, state):
        """Snapshot location info from *state*; returns self for chaining."""
        scratch = state.scratch
        self.bbl_addr = scratch.bbl_addr
        self.stmt_idx = scratch.stmt_idx
        self.ins_addr = scratch.ins_addr
        self.executed_instruction_count = state.history.recent_instruction_count
        self.guard = scratch.guard
        return self
#
# State-related errors
#
class SimStateError(SimError):
    pass
class SimMergeError(SimStateError):
    pass
class SimMemoryError(SimStateError):
    pass
class SimMemoryMissingError(SimMemoryError):
    """Raised when requested bytes are absent from a memory backend."""
    def __init__(self, missing_addr, missing_size, *args):
        # keep addr/size in args too so str() and pickling include them
        super().__init__(missing_addr, missing_size, *args)
        self.missing_addr = missing_addr
        self.missing_size = missing_size
class SimAbstractMemoryError(SimMemoryError):
    pass
class SimRegionMapError(SimMemoryError):
    pass
class SimMemoryLimitError(SimMemoryError):
    pass
class SimMemoryAddressError(SimMemoryError):
    pass
class SimFastMemoryError(SimMemoryError):
    pass
class SimEventError(SimStateError):
    pass
class SimPosixError(SimStateError):
    pass
class SimFilesystemError(SimError):
    pass
class SimSymbolicFilesystemError(SimFilesystemError):
    pass
class SimFileError(SimMemoryError, SimFilesystemError):
    pass
class SimHeapError(SimStateError):
    pass
#
# Error class during VEX parsing
#
class SimUnsupportedError(SimError):
    pass
#
# Solver-related errors
#
class SimSolverError(SimError):
    pass
class SimSolverModeError(SimSolverError):
    pass
class SimSolverOptionError(SimSolverError):
    pass
class SimValueError(SimSolverError):
    pass
class SimUnsatError(SimValueError):
    """The constraint set has no satisfying assignment."""
    pass
#
# SimIROp errors
#
class SimOperationError(SimError):
    pass
class UnsupportedIROpError(SimOperationError, SimUnsupportedError):
    pass
#
# SimIRExpr errors
#
class SimExpressionError(SimError):
    pass
class UnsupportedIRExprError(SimExpressionError, SimUnsupportedError):
    pass
class SimCCallError(SimExpressionError):
    pass
class UnsupportedCCallError(SimCCallError, SimUnsupportedError):
    pass
class SimUninitializedAccessError(SimExpressionError):
    """An IR expression was used before any value was written to it."""
    def __init__(self, expr_type, expr):
        SimExpressionError.__init__(self)
        # expr_type: what the expression was used as; expr: the expression
        self.expr_type = expr_type
        self.expr = expr
    def __repr__(self):
        return "SimUninitializedAccessError (expr %s is used as %s)" % (self.expr, self.expr_type)
    def __reduce__(self):
        # re-create from the two constructor args so pickling works
        return (SimUninitializedAccessError, (self.expr_type, self.expr))
#
# SimIRStmt errors
#
class SimStatementError(SimError):
    pass
class UnsupportedIRStmtError(SimStatementError, SimUnsupportedError):
    pass
class UnsupportedDirtyError(UnsupportedIRStmtError, SimUnsupportedError):
    pass
class SimMissingTempError(SimValueError, IndexError):
    pass
#
# Engine-related errors
#
class SimEngineError(SimError):
    pass
class SimIRSBError(SimEngineError):
    pass
class SimTranslationError(SimEngineError):
    pass
class SimProcedureError(SimEngineError):
    pass
class SimProcedureArgumentError(SimProcedureError):
    pass
class SimShadowStackError(SimProcedureError):
    pass
class SimFastPathError(SimEngineError):
    pass
class SimIRSBNoDecodeError(SimIRSBError):
    pass
class AngrUnsupportedSyscallError(AngrSyscallError, SimProcedureError, SimUnsupportedError):
    pass
# backwards-compatible alias for the old name
UnsupportedSyscallError = AngrUnsupportedSyscallError
class SimReliftException(SimEngineError):
    """Signal that the current block should be lifted again.

    Carries the state at which re-lifting should happen.
    """
    def __init__(self, state):
        super(SimReliftException, self).__init__()
        self.state = state
#
# SimSlicer errors
#
class SimSlicerError(SimError):
    pass
#
# SimAction errors
#
class SimActionError(SimError):
    pass
#
# SimCC errors
#
class SimCCError(SimError):
    pass
#
# UCManager errors
#
class SimUCManagerError(SimError):
    pass
class SimUCManagerAllocationError(SimUCManagerError):
    pass
#
# SimUnicorn errors
#
class SimUnicornUnsupport(SimError):
    pass
class SimUnicornError(SimError):
    pass
class SimUnicornSymbolic(SimError):
    pass
#
# Call-stack Errors
#
class SimEmptyCallStackError(SimError):
    pass
#
# SimStateOptions Errors
#
class SimStateOptionsError(SimError):
    pass
#
# Errors that may be handled by exception handling
#
class SimException(SimError):
    """Base class for guest-level exceptions (catchable in the guest)."""
    pass
class SimSegfaultException(SimException, SimMemoryError):
    """A memory access violated the state's permission model.

    :param addr:          the faulting address
    :param reason:        short human-readable cause (e.g. 'non-writable')
    :param original_addr: the pre-concretization address, if any
    """
    def __init__(self, addr, reason, original_addr=None):
        self.addr = addr
        self.reason = reason
        self.original_addr = original_addr
        # BUG FIX: this previously called super(SimSegfaultError, self) —
        # an alias that is only bound *after* this class body executes,
        # which is fragile and a latent NameError. Reference our own class.
        super(SimSegfaultException, self).__init__('%#x (%s)' % (addr, reason))
    def __repr__(self):
        # BUG FIX: the format string was missing its closing parenthesis.
        # NOTE(review): assumes original_addr's __repr__ accepts a
        # max_depth keyword (claripy AST) — confirm.
        return 'SimSegfaultException(%#x (%s%s))' % (
            self.addr,
            self.reason,
            (', original %s' % self.original_addr.__repr__(max_depth=3)) if self.original_addr is not None else ''
        )
    def __reduce__(self):
        # rebuild from constructor args so the exception pickles cleanly
        return (SimSegfaultException, (self.addr, self.reason, self.original_addr))
SimSegfaultError = SimSegfaultException
class SimZeroDivisionException(SimException, SimOperationError):
    """The guest performed a division by zero."""
    pass
class AngrNoPluginError(AngrError):
    pass
#
# Concrete Targets Execution errors
#
class SimConcreteMemoryError(AngrError):
    pass
class SimConcreteRegisterError(AngrError):
    pass
class SimConcreteBreakpointError(AngrError):
    pass
#
# Decompiler errors
#
class UnsupportedNodeTypeError(AngrError, NotImplementedError):
    pass
|
|
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.wsgi"""
from __future__ import with_statement
import errno
import logging
import mimetools
import socket
import unittest
import os
import pickle
from textwrap import dedent
from gzip import GzipFile
from StringIO import StringIO
from collections import defaultdict
from contextlib import closing
from urllib import quote
from eventlet import listen
import swift
from swift.common.swob import Request
from swift.common import wsgi, utils, ring
from test.unit import temptree
from mock import patch
def _fake_rings(tmpdir):
    """Write minimal account/container/object ring files under *tmpdir*.

    Each ring has two devices on 127.0.0.1; only the ports differ between
    the three ring types.
    """
    replica2part2dev = [[0, 1, 0, 1], [1, 0, 1, 0]]
    # (ring name, port for dev 0, port for dev 1)
    for name, port0, port1 in (('account', 6012, 6022),
                               ('container', 6011, 6021),
                               ('object', 6010, 6020)):
        devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                 'port': port0},
                {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                 'port': port1}]
        ring_path = os.path.join(tmpdir, '%s.ring.gz' % name)
        with closing(GzipFile(ring_path, 'wb')) as f:
            pickle.dump(ring.RingData(replica2part2dev, devs, 30), f)
class TestWSGI(unittest.TestCase):
"""Tests for swift.common.wsgi"""
    def setUp(self):
        # deterministic hashing for ring lookups; save the original
        # mimetools parser so monkey-patching tests can be undone
        utils.HASH_PATH_PREFIX = 'startcap'
        self._orig_parsetype = mimetools.Message.parsetype
    def tearDown(self):
        # undo any monkey-patching of mimetools done during the test
        mimetools.Message.parsetype = self._orig_parsetype
    def test_monkey_patch_mimetools(self):
        """After patching, messages without a Content-Type report None
        instead of mimetools' default text/plain guess; messages with an
        explicit Content-Type are unaffected."""
        # unpatched defaults: text/plain is assumed
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).type, 'text/plain')
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).plisttext, '')
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).maintype, 'text')
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).subtype, 'plain')
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).type, 'text/html')
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).plisttext,
                          '; charset=ISO-8859-4')
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).maintype, 'text')
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).subtype, 'html')
        wsgi.monkey_patch_mimetools()
        # patched: no Content-Type means no type at all
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).type, None)
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).plisttext, '')
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).maintype, None)
        sio = StringIO('blah')
        self.assertEquals(mimetools.Message(sio).subtype, None)
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).type, 'text/html')
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).plisttext,
                          '; charset=ISO-8859-4')
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).maintype, 'text')
        sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
        self.assertEquals(mimetools.Message(sio).subtype, 'html')
    def test_init_request_processor(self):
        """init_request_processor builds the configured pipeline, applies
        app-section settings, and returns (app, conf, logger, log_name)."""
        config = """
        [DEFAULT]
        swift_dir = TEMPDIR
        [pipeline:main]
        pipeline = catch_errors proxy-server
        [app:proxy-server]
        use = egg:swift#proxy
        conn_timeout = 0.2
        [filter:catch_errors]
        use = egg:swift#catch_errors
        """
        contents = dedent(config)
        with temptree(['proxy-server.conf']) as t:
            conf_file = os.path.join(t, 'proxy-server.conf')
            with open(conf_file, 'w') as f:
                f.write(contents.replace('TEMPDIR', t))
            _fake_rings(t)
            app, conf, logger, log_name = wsgi.init_request_processor(
                conf_file, 'proxy-server')
        # verify pipeline is catch_errors -> proxy-servery
        expected = swift.common.middleware.catch_errors.CatchErrorMiddleware
        self.assert_(isinstance(app, expected))
        self.assert_(isinstance(app.app, swift.proxy.server.Application))
        # config settings applied to app instance
        self.assertEquals(0.2, app.app.conn_timeout)
        # appconfig returns values from 'proxy-server' section
        expected = {
            '__file__': conf_file,
            'here': os.path.dirname(conf_file),
            'conn_timeout': '0.2',
            'swift_dir': t,
        }
        self.assertEquals(expected, conf)
        # logger works
        logger.info('testing')
        self.assertEquals('proxy-server', log_name)
    def test_init_request_processor_from_conf_dir(self):
        """Same as test_init_request_processor, but the configuration is
        split across a *.conf.d directory instead of a single file."""
        config_dir = {
            'proxy-server.conf.d/pipeline.conf': """
            [pipeline:main]
            pipeline = catch_errors proxy-server
            """,
            'proxy-server.conf.d/app.conf': """
            [app:proxy-server]
            use = egg:swift#proxy
            conn_timeout = 0.2
            """,
            'proxy-server.conf.d/catch-errors.conf': """
            [filter:catch_errors]
            use = egg:swift#catch_errors
            """
        }
        # strip indent from test config contents
        config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
        with temptree(*zip(*config_dir.items())) as conf_root:
            conf_dir = os.path.join(conf_root, 'proxy-server.conf.d')
            with open(os.path.join(conf_dir, 'swift.conf'), 'w') as f:
                f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
            _fake_rings(conf_root)
            app, conf, logger, log_name = wsgi.init_request_processor(
                conf_dir, 'proxy-server')
        # verify pipeline is catch_errors -> proxy-servery
        expected = swift.common.middleware.catch_errors.CatchErrorMiddleware
        self.assert_(isinstance(app, expected))
        self.assert_(isinstance(app.app, swift.proxy.server.Application))
        # config settings applied to app instance
        self.assertEquals(0.2, app.app.conn_timeout)
        # appconfig returns values from 'proxy-server' section
        expected = {
            '__file__': conf_dir,
            'here': conf_dir,
            'conn_timeout': '0.2',
            'swift_dir': conf_root,
        }
        self.assertEquals(expected, conf)
        # logger works
        logger.info('testing')
        self.assertEquals('proxy-server', log_name)
    def test_get_socket(self):
        """get_socket sets keepalive/reuse socket options and wraps the
        socket with SSL when cert/key files are configured."""
        # stubs
        conf = {}
        ssl_conf = {
            'cert_file': '',
            'key_file': '',
        }
        # mocks
        class MockSocket():
            # records every setsockopt call by (level, optname)
            def __init__(self):
                self.opts = defaultdict(dict)
            def setsockopt(self, level, optname, value):
                self.opts[level][optname] = value
        def mock_listen(*args, **kwargs):
            return MockSocket()
        class MockSsl():
            # records wrap_socket kwargs and passes the socket through
            def __init__(self):
                self.wrap_socket_called = []
            def wrap_socket(self, sock, **kwargs):
                self.wrap_socket_called.append(kwargs)
                return sock
        # patch
        old_listen = wsgi.listen
        old_ssl = wsgi.ssl
        try:
            wsgi.listen = mock_listen
            wsgi.ssl = MockSsl()
            # test
            sock = wsgi.get_socket(conf)
            # assert
            self.assert_(isinstance(sock, MockSocket))
            expected_socket_opts = {
                socket.SOL_SOCKET: {
                    socket.SO_REUSEADDR: 1,
                    socket.SO_KEEPALIVE: 1,
                },
            }
            # TCP_KEEPIDLE is platform dependent (not on all OSes)
            if hasattr(socket, 'TCP_KEEPIDLE'):
                expected_socket_opts[socket.IPPROTO_TCP] = {
                    socket.TCP_KEEPIDLE: 600,
                }
            self.assertEquals(sock.opts, expected_socket_opts)
            # test ssl
            sock = wsgi.get_socket(ssl_conf)
            expected_kwargs = {
                'certfile': '',
                'keyfile': '',
            }
            self.assertEquals(wsgi.ssl.wrap_socket_called, [expected_kwargs])
        finally:
            wsgi.listen = old_listen
            wsgi.ssl = old_ssl
    def test_address_in_use(self):
        """get_socket keeps retrying on EADDRINUSE until the (mocked,
        fast-forwarded) clock runs out, then raises; any other error is
        propagated immediately."""
        # stubs
        conf = {}
        # mocks
        def mock_listen(*args, **kwargs):
            raise socket.error(errno.EADDRINUSE)
        def value_error_listen(*args, **kwargs):
            raise ValueError('fake')
        def mock_sleep(*args):
            pass
        class MockTime():
            """Fast clock advances 10 seconds after every call to time
            """
            def __init__(self):
                self.current_time = old_time.time()
            def time(self, *args, **kwargs):
                rv = self.current_time
                # advance for next call
                self.current_time += 10
                return rv
        old_listen = wsgi.listen
        old_sleep = wsgi.sleep
        old_time = wsgi.time
        try:
            wsgi.listen = mock_listen
            wsgi.sleep = mock_sleep
            wsgi.time = MockTime()
            # test error
            self.assertRaises(Exception, wsgi.get_socket, conf)
            # different error
            wsgi.listen = value_error_listen
            self.assertRaises(ValueError, wsgi.get_socket, conf)
        finally:
            wsgi.listen = old_listen
            wsgi.sleep = old_sleep
            wsgi.time = old_time
    def test_run_server(self):
        """run_server wires eventlet/wsgi correctly: HTTP/1.0 protocol,
        write timeout from client_timeout, hub/monkey-patch setup, and the
        proxy app served with a bounded pool sized by max_clients."""
        config = """
        [DEFAULT]
        eventlet_debug = yes
        client_timeout = 30
        max_clients = 1000
        swift_dir = TEMPDIR
        [pipeline:main]
        pipeline = proxy-server
        [app:proxy-server]
        use = egg:swift#proxy
        # while "set" values normally override default
        set client_timeout = 20
        # this section is not in conf during run_server
        set max_clients = 10
        """
        contents = dedent(config)
        with temptree(['proxy-server.conf']) as t:
            conf_file = os.path.join(t, 'proxy-server.conf')
            with open(conf_file, 'w') as f:
                f.write(contents.replace('TEMPDIR', t))
            _fake_rings(t)
            with patch('swift.common.wsgi.wsgi') as _wsgi:
                with patch('swift.common.wsgi.eventlet') as _eventlet:
                    conf = wsgi.appconfig(conf_file)
                    logger = logging.getLogger('test')
                    sock = listen(('localhost', 0))
                    wsgi.run_server(conf, logger, sock)
        self.assertEquals('HTTP/1.0',
                          _wsgi.HttpProtocol.default_request_version)
        self.assertEquals(30, _wsgi.WRITE_TIMEOUT)
        _eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
        _eventlet.patcher.monkey_patch.assert_called_with(all=False,
                                                          socket=True)
        _eventlet.debug.hub_exceptions.assert_called_with(True)
        _wsgi.server.assert_called()
        args, kwargs = _wsgi.server.call_args
        server_sock, server_app, server_logger = args
        self.assertEquals(sock, server_sock)
        self.assert_(isinstance(server_app, swift.proxy.server.Application))
        # app-section "set client_timeout" overrides the DEFAULT value
        self.assertEquals(20, server_app.client_timeout)
        self.assert_(isinstance(server_logger, wsgi.NullLogger))
        self.assert_('custom_pool' in kwargs)
        self.assertEquals(1000, kwargs['custom_pool'].size)
def test_run_server_conf_dir(self):
config_dir = {
'proxy-server.conf.d/pipeline.conf': """
[pipeline:main]
pipeline = proxy-server
""",
'proxy-server.conf.d/app.conf': """
[app:proxy-server]
use = egg:swift#proxy
""",
'proxy-server.conf.d/default.conf': """
[DEFAULT]
eventlet_debug = yes
client_timeout = 30
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as conf_root:
conf_dir = os.path.join(conf_root, 'proxy-server.conf.d')
with open(os.path.join(conf_dir, 'swift.conf'), 'w') as f:
f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
_fake_rings(conf_root)
with patch('swift.common.wsgi.wsgi') as _wsgi:
with patch('swift.common.wsgi.eventlet') as _eventlet:
with patch.dict('os.environ', {'TZ': ''}):
conf = wsgi.appconfig(conf_dir)
logger = logging.getLogger('test')
sock = listen(('localhost', 0))
wsgi.run_server(conf, logger, sock)
self.assert_(os.environ['TZ'] is not '')
self.assertEquals('HTTP/1.0',
_wsgi.HttpProtocol.default_request_version)
self.assertEquals(30, _wsgi.WRITE_TIMEOUT)
_eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
_eventlet.patcher.monkey_patch.assert_called_with(all=False,
socket=True)
_eventlet.debug.hub_exceptions.assert_called_with(True)
_wsgi.server.assert_called()
args, kwargs = _wsgi.server.call_args
server_sock, server_app, server_logger = args
self.assertEquals(sock, server_sock)
self.assert_(isinstance(server_app, swift.proxy.server.Application))
self.assert_(isinstance(server_logger, wsgi.NullLogger))
self.assert_('custom_pool' in kwargs)
def test_appconfig_dir_ignores_hidden_files(self):
    """Dotfiles (e.g. editor swap files) in a conf.d dir must be skipped."""
    config_dir = {
        'server.conf.d/01.conf': """
        [app:main]
        use = egg:swift#proxy
        port = 8080
        """,
        'server.conf.d/.01.conf.swp': """
        [app:main]
        use = egg:swift#proxy
        port = 8081
        """,
    }
    # strip indent from test config contents
    config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
    with temptree(*zip(*config_dir.items())) as root:
        conf_dir = os.path.join(root, 'server.conf.d')
        conf = wsgi.appconfig(conf_dir)
        # Only the visible 01.conf should have been loaded (port 8080),
        # never the hidden .swp variant (port 8081).
        self.assertEquals(conf, {
            '__file__': conf_dir,
            'here': conf_dir,
            'port': '8080',
        })
def test_pre_auth_wsgi_input(self):
    """make_pre_authed_env must always supply a fresh, empty wsgi.input."""
    # No wsgi.input in the source env: one is created.
    env = wsgi.make_pre_authed_env({})
    self.assertTrue('wsgi.input' in env)
    self.assertEquals(env['wsgi.input'].read(), '')
    # An existing wsgi.input is replaced, not propagated.
    env = wsgi.make_pre_authed_env(
        {'wsgi.input': StringIO('original wsgi.input')})
    self.assertTrue('wsgi.input' in env)
    self.assertEquals(env['wsgi.input'].read(), '')
    # swift.source is carried over unless explicitly overridden.
    env = wsgi.make_pre_authed_env({'swift.source': 'UT'})
    self.assertEquals(env['swift.source'], 'UT')
    env = wsgi.make_pre_authed_env({'swift.source': 'UT'}, swift_source='SA')
    self.assertEquals(env['swift.source'], 'SA')
def test_pre_auth_req(self):
    """make_pre_authed_request must install swift.authorize and drop the
    inbound transaction id, with or without a body argument."""

    class FakeReq(object):
        @classmethod
        def fake_blank(cls, path, environ={}, body='', headers={}):
            # The pre-authed env always authorizes and never leaks the
            # caller's X-Trans-Id into the new request.
            self.assertEquals(environ['swift.authorize']('test'), None)
            self.assertFalse('HTTP_X_TRANS_ID' in environ)

    was_blank = Request.blank
    Request.blank = FakeReq.fake_blank
    # BUG FIX: restore Request.blank in a finally block; previously a
    # failing assertion inside fake_blank left the global Request.blank
    # monkeypatched, contaminating every subsequent test in the run.
    try:
        wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
                                     'PUT', '/', body='tester', headers={})
        wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
                                     'PUT', '/', headers={})
    finally:
        Request.blank = was_blank
def test_pre_auth_req_with_quoted_path(self):
    """A pre-quoted path argument is preserved verbatim."""
    quoted_path = quote('/a space')
    req = wsgi.make_pre_authed_request(
        {'HTTP_X_TRANS_ID': '1234'}, 'PUT', path=quoted_path,
        body='tester', headers={})
    self.assertEquals(req.path, quoted_path)
def test_pre_auth_req_drops_query(self):
    """A query string in the path argument wins over env QUERY_STRING."""
    for path, expected_query in (('path', 'original'),
                                 ('path?replacement', 'replacement'),
                                 ('path?', '')):
        req = wsgi.make_pre_authed_request(
            {'QUERY_STRING': 'original'}, 'GET', path)
        self.assertEquals(req.query_string, expected_query)
def test_pre_auth_req_with_body(self):
    """A positional body argument becomes the request body."""
    req = wsgi.make_pre_authed_request(
        {'QUERY_STRING': 'original'}, 'GET', 'path', 'the body')
    self.assertEquals(req.body, 'the body')
def test_pre_auth_creates_script_name(self):
    """SCRIPT_NAME is always present in a pre-authed env."""
    env = wsgi.make_pre_authed_env({})
    self.assertTrue('SCRIPT_NAME' in env)
def test_pre_auth_copies_script_name(self):
    """An existing SCRIPT_NAME is carried over unchanged."""
    env = wsgi.make_pre_authed_env({'SCRIPT_NAME': '/script_name'})
    self.assertEquals(env['SCRIPT_NAME'], '/script_name')
def test_pre_auth_copies_script_name_unless_path_overridden(self):
    """An explicit path resets SCRIPT_NAME and becomes PATH_INFO."""
    env = wsgi.make_pre_authed_env({'SCRIPT_NAME': '/script_name'},
                                   path='/override')
    self.assertEquals(env['SCRIPT_NAME'], '')
    self.assertEquals(env['PATH_INFO'], '/override')
def test_pre_auth_req_swift_source(self):
    """swift_source is recorded in the environ alongside the body."""
    req = wsgi.make_pre_authed_request(
        {'QUERY_STRING': 'original'}, 'GET', 'path', 'the body',
        swift_source='UT')
    self.assertEquals(req.body, 'the body')
    self.assertEquals(req.environ['swift.source'], 'UT')
def test_pre_auth_req_with_empty_env_no_path(self):
    """With no env path info at all, an empty path is synthesized."""
    req = wsgi.make_pre_authed_request({}, 'GET')
    self.assertEquals(req.path, quote(''))
    self.assertTrue('SCRIPT_NAME' in req.environ)
    self.assertTrue('PATH_INFO' in req.environ)
def test_pre_auth_req_with_env_path(self):
    """PATH_INFO from the env is quoted into the request path."""
    unquoted = '/unquoted path with %20'
    req = wsgi.make_pre_authed_request({'PATH_INFO': unquoted}, 'GET')
    self.assertEquals(req.path, quote(unquoted))
    self.assertEquals(req.environ['SCRIPT_NAME'], '')
def test_pre_auth_req_with_env_script(self):
    """SCRIPT_NAME alone supplies the request path."""
    req = wsgi.make_pre_authed_request({'SCRIPT_NAME': '/hello'}, 'GET')
    self.assertEquals(req.path, quote('/hello'))
def test_pre_auth_req_with_env_path_and_script(self):
    """SCRIPT_NAME and PATH_INFO from the env are joined (and quoted)."""
    cases = (
        ('/unquoted path with %20', '/script'),
        ('', '/script'),
        ('/path', ''),
        ('', ''),
    )
    for path_info, script_name in cases:
        env = {'PATH_INFO': path_info, 'SCRIPT_NAME': script_name}
        req = wsgi.make_pre_authed_request(env, 'GET')
        # quote() is the identity on the already-safe combinations, so a
        # single expected expression covers every case.
        self.assertEquals(req.path, quote(script_name + path_info))
def test_pre_auth_req_path_overrides_env(self):
    """An explicit path wins over both PATH_INFO and SCRIPT_NAME."""
    env = {'PATH_INFO': '/path', 'SCRIPT_NAME': '/script'}
    req = wsgi.make_pre_authed_request(env, 'GET', '/override')
    self.assertEquals(req.path, '/override')
    self.assertEquals(req.environ['SCRIPT_NAME'], '')
    self.assertEquals(req.environ['PATH_INFO'], '/override')
class TestWSGIContext(unittest.TestCase):
    """Tests for wsgi.WSGIContext._app_call."""

    def test_app_call(self):
        remaining_statuses = ['200 Ok', '404 Not Found']

        def fake_app(env, start_response):
            # Each call consumes the next canned status.
            start_response(remaining_statuses.pop(0),
                           [('Content-Length', '3')])
            yield 'Ok\n'

        context = wsgi.WSGIContext(fake_app)
        for expected_status in ('200 Ok', '404 Not Found'):
            req = Request.blank('/')
            body_iter = context._app_call(req.environ)
            # _app_call must have captured the status before returning.
            self.assertEquals(context._response_status, expected_status)
            self.assertEquals(''.join(body_iter), 'Ok\n')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
|
from typing import Any, Dict, Optional, Union
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.utils.translation import gettext as _
from django.views.decorators.http import require_safe
from confirmation.models import Confirmation, ConfirmationKeyException, get_object_from_key
from zerver.decorator import require_realm_admin, require_realm_owner
from zerver.forms import check_subdomain_available as check_subdomain
from zerver.lib.actions import (
do_deactivate_realm,
do_reactivate_realm,
do_set_realm_authentication_methods,
do_set_realm_message_editing,
do_set_realm_notifications_stream,
do_set_realm_property,
do_set_realm_signup_notifications_stream,
)
from zerver.lib.exceptions import OrganizationOwnerRequired
from zerver.lib.i18n import get_available_language_codes
from zerver.lib.request import REQ, JsonableError, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.retention import parse_message_retention_days
from zerver.lib.streams import access_stream_by_id
from zerver.lib.validator import (
check_bool,
check_capped_string,
check_dict,
check_int,
check_int_in,
check_string_or_int,
to_non_negative_int,
)
from zerver.models import Realm, UserProfile
@require_realm_admin
@has_request_variables
def update_realm(
    request: HttpRequest,
    user_profile: UserProfile,
    name: Optional[str] = REQ(
        str_validator=check_capped_string(Realm.MAX_REALM_NAME_LENGTH), default=None
    ),
    description: Optional[str] = REQ(
        str_validator=check_capped_string(Realm.MAX_REALM_DESCRIPTION_LENGTH), default=None
    ),
    emails_restricted_to_domains: Optional[bool] = REQ(json_validator=check_bool, default=None),
    disallow_disposable_email_addresses: Optional[bool] = REQ(
        json_validator=check_bool, default=None
    ),
    invite_required: Optional[bool] = REQ(json_validator=check_bool, default=None),
    invite_to_realm_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
    ),
    name_changes_disabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
    email_changes_disabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
    avatar_changes_disabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
    inline_image_preview: Optional[bool] = REQ(json_validator=check_bool, default=None),
    inline_url_embed_preview: Optional[bool] = REQ(json_validator=check_bool, default=None),
    add_emoji_by_admins_only: Optional[bool] = REQ(json_validator=check_bool, default=None),
    allow_message_deleting: Optional[bool] = REQ(json_validator=check_bool, default=None),
    message_content_delete_limit_seconds: Optional[int] = REQ(
        converter=to_non_negative_int, default=None
    ),
    allow_message_editing: Optional[bool] = REQ(json_validator=check_bool, default=None),
    edit_topic_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.COMMON_MESSAGE_POLICY_TYPES), default=None
    ),
    mandatory_topics: Optional[bool] = REQ(json_validator=check_bool, default=None),
    message_content_edit_limit_seconds: Optional[int] = REQ(
        converter=to_non_negative_int, default=None
    ),
    allow_edit_history: Optional[bool] = REQ(json_validator=check_bool, default=None),
    default_language: Optional[str] = REQ(default=None),
    waiting_period_threshold: Optional[int] = REQ(converter=to_non_negative_int, default=None),
    authentication_methods: Optional[Dict[str, Any]] = REQ(
        json_validator=check_dict([]), default=None
    ),
    notifications_stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
    signup_notifications_stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
    message_retention_days_raw: Optional[Union[int, str]] = REQ(
        "message_retention_days", json_validator=check_string_or_int, default=None
    ),
    send_welcome_emails: Optional[bool] = REQ(json_validator=check_bool, default=None),
    digest_emails_enabled: Optional[bool] = REQ(json_validator=check_bool, default=None),
    message_content_allowed_in_email_notifications: Optional[bool] = REQ(
        json_validator=check_bool, default=None
    ),
    bot_creation_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.BOT_CREATION_POLICY_TYPES), default=None
    ),
    create_stream_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
    ),
    invite_to_stream_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
    ),
    move_messages_between_streams_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
    ),
    user_group_edit_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.COMMON_POLICY_TYPES), default=None
    ),
    private_message_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.PRIVATE_MESSAGE_POLICY_TYPES), default=None
    ),
    wildcard_mention_policy: Optional[int] = REQ(
        json_validator=check_int_in(Realm.WILDCARD_MENTION_POLICY_TYPES), default=None
    ),
    email_address_visibility: Optional[int] = REQ(
        json_validator=check_int_in(Realm.EMAIL_ADDRESS_VISIBILITY_TYPES), default=None
    ),
    default_twenty_four_hour_time: Optional[bool] = REQ(json_validator=check_bool, default=None),
    video_chat_provider: Optional[int] = REQ(json_validator=check_int, default=None),
    giphy_rating: Optional[int] = REQ(json_validator=check_int, default=None),
    default_code_block_language: Optional[str] = REQ(default=None),
    digest_weekday: Optional[int] = REQ(
        json_validator=check_int_in(Realm.DIGEST_WEEKDAY_VALUES), default=None
    ),
) -> HttpResponse:
    """Update organization-level (realm) settings for an administrator.

    Returns a JSON dict mapping each changed setting name to its new value
    ("updated" for string-valued settings).

    NOTE(review): the parameter names in this signature MUST exactly match
    the attribute names in ``Realm.property_types`` -- the body uses
    ``locals()`` to discover which simple settings were supplied, so
    renaming a parameter (or a local variable) silently breaks handling.
    """
    realm = user_profile.realm

    # Additional validation/error checking beyond types go here, so
    # the entire request can succeed or fail atomically.
    if default_language is not None and default_language not in get_available_language_codes():
        raise JsonableError(_("Invalid language '{}'").format(default_language))
    if authentication_methods is not None:
        # Changing auth methods is owner-only, and at least one method
        # must remain enabled or nobody could log in.
        if not user_profile.is_realm_owner:
            raise OrganizationOwnerRequired()
        if True not in list(authentication_methods.values()):
            raise JsonableError(_("At least one authentication method must be enabled."))
    if video_chat_provider is not None and video_chat_provider not in {
        p["id"] for p in Realm.VIDEO_CHAT_PROVIDERS.values()
    }:
        raise JsonableError(_("Invalid video_chat_provider {}").format(video_chat_provider))
    if giphy_rating is not None and giphy_rating not in {
        p["id"] for p in Realm.GIPHY_RATING_OPTIONS.values()
    }:
        raise JsonableError(_("Invalid giphy_rating {}").format(giphy_rating))

    message_retention_days: Optional[int] = None
    if message_retention_days_raw is not None:
        # Message retention is owner-only and gated on the billing plan.
        if not user_profile.is_realm_owner:
            raise OrganizationOwnerRequired()
        realm.ensure_not_on_limited_plan()
        message_retention_days = parse_message_retention_days(
            message_retention_days_raw, Realm.MESSAGE_RETENTION_SPECIAL_VALUES_MAP
        )

    # The user of `locals()` here is a bit of a code smell, but it's
    # restricted to the elements present in realm.property_types.
    #
    # TODO: It should be possible to deduplicate this function up
    # further by some more advanced usage of the
    # `REQ/has_request_variables` extraction.
    req_vars = {k: v for k, v in list(locals().items()) if k in realm.property_types}
    data: Dict[str, Any] = {}

    for k, v in list(req_vars.items()):
        if v is not None and getattr(realm, k) != v:
            do_set_realm_property(realm, k, v, acting_user=user_profile)
            if isinstance(v, str):
                data[k] = "updated"
            else:
                data[k] = v

    # The following realm properties do not fit the pattern above
    # authentication_methods is not supported by the do_set_realm_property
    # framework because of its bitfield.
    if authentication_methods is not None and (
        realm.authentication_methods_dict() != authentication_methods
    ):
        do_set_realm_authentication_methods(realm, authentication_methods, acting_user=user_profile)
        data["authentication_methods"] = authentication_methods
    # The message_editing settings are coupled to each other, and thus don't fit
    # into the do_set_realm_property framework.
    if (
        (allow_message_editing is not None and realm.allow_message_editing != allow_message_editing)
        or (
            message_content_edit_limit_seconds is not None
            and realm.message_content_edit_limit_seconds != message_content_edit_limit_seconds
        )
        or (edit_topic_policy is not None and realm.edit_topic_policy != edit_topic_policy)
    ):
        # Backfill unspecified members of the coupled triple from the
        # realm's current state before applying them together.
        if allow_message_editing is None:
            allow_message_editing = realm.allow_message_editing
        if message_content_edit_limit_seconds is None:
            message_content_edit_limit_seconds = realm.message_content_edit_limit_seconds
        if edit_topic_policy is None:
            edit_topic_policy = realm.edit_topic_policy
        do_set_realm_message_editing(
            realm,
            allow_message_editing,
            message_content_edit_limit_seconds,
            edit_topic_policy,
            acting_user=user_profile,
        )
        data["allow_message_editing"] = allow_message_editing
        data["message_content_edit_limit_seconds"] = message_content_edit_limit_seconds
        data["edit_topic_policy"] = edit_topic_policy

    # Realm.notifications_stream and Realm.signup_notifications_stream are not boolean,
    # str or integer field, and thus doesn't fit into the do_set_realm_property framework.
    if notifications_stream_id is not None:
        if realm.notifications_stream is None or (
            realm.notifications_stream.id != notifications_stream_id
        ):
            new_notifications_stream = None
            # A negative id means "disable the notifications stream".
            if notifications_stream_id >= 0:
                (new_notifications_stream, sub) = access_stream_by_id(
                    user_profile, notifications_stream_id
                )
            do_set_realm_notifications_stream(
                realm, new_notifications_stream, notifications_stream_id, acting_user=user_profile
            )
            data["notifications_stream_id"] = notifications_stream_id

    if signup_notifications_stream_id is not None:
        if realm.signup_notifications_stream is None or (
            realm.signup_notifications_stream.id != signup_notifications_stream_id
        ):
            new_signup_notifications_stream = None
            # A negative id means "disable the signup notifications stream".
            if signup_notifications_stream_id >= 0:
                (new_signup_notifications_stream, sub) = access_stream_by_id(
                    user_profile, signup_notifications_stream_id
                )
            do_set_realm_signup_notifications_stream(
                realm,
                new_signup_notifications_stream,
                signup_notifications_stream_id,
                acting_user=user_profile,
            )
            data["signup_notifications_stream_id"] = signup_notifications_stream_id

    if default_code_block_language is not None:
        # Migrate '', used in the API to encode the default/None behavior of this feature.
        if default_code_block_language == "":
            data["default_code_block_language"] = None
        else:
            data["default_code_block_language"] = default_code_block_language

    return json_success(data)
@require_realm_owner
@has_request_variables
def deactivate_realm(request: HttpRequest, user: UserProfile) -> HttpResponse:
    """Deactivate the acting owner's own organization (realm)."""
    do_deactivate_realm(user.realm, acting_user=user)
    return json_success()
@require_safe
def check_subdomain_available(request: HttpRequest, subdomain: str) -> HttpResponse:
    """Report whether *subdomain* may still be claimed for a new realm."""
    try:
        check_subdomain(subdomain)
    except ValidationError as err:
        # Unavailable/invalid: still a 200, with the reason in `msg`.
        return json_success({"msg": err.message})
    return json_success({"msg": "available"})
def realm_reactivation(request: HttpRequest, confirmation_key: str) -> HttpResponse:
    """Reactivate a deactivated realm via an emailed confirmation link."""
    try:
        realm = get_object_from_key(confirmation_key, Confirmation.REALM_REACTIVATION)
    except ConfirmationKeyException:
        # Invalid or expired key: show an explanatory error page.
        return render(request, "zerver/realm_reactivation_link_error.html")
    do_reactivate_realm(realm)
    return render(request, "zerver/realm_reactivation.html", {"realm": realm})
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
"""Lookup operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_lookup_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
# TODO(yleon): Remove this function.
@deprecated("2017-03-02", "Use `tf.tables_initializer` instead.")
def initialize_all_tables(name="init_all_tables"):
  """Returns an Op that initializes all tables of the default graph.

  Deprecated thin alias for `tables_initializer`.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes all tables.  Note that if there are
    no tables the returned Op is a NoOp.
  """
  return tables_initializer(name)
def tables_initializer(name="init_all_tables"):
  """Returns an Op that initializes all tables of the default graph.

  Args:
    name: Optional name for the initialization op.

  Returns:
    An Op that initializes all tables.  Note that if there are
    no tables the returned Op is a NoOp.
  """
  table_init_ops = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
  if not table_init_ops:
    # Nothing registered: return a NoOp so callers can always run it.
    return control_flow_ops.no_op(name=name)
  return control_flow_ops.group(*table_init_ops, name=name)
def _check_table_dtypes(table, key_dtype, value_dtype):
"""Check that the given key_dtype and value_dtype matches the table dtypes.
Args:
table: The table to check types against to.
key_dtype: The key data type to check.
value_dtype: The value data type to check.
Raises:
TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data
types.
"""
if key_dtype != table.key_dtype:
raise TypeError("Invalid key dtype, expected %s but got %s." %
(table.key_dtype, key_dtype))
if value_dtype != table.value_dtype:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(table.value_dtype, value_dtype))
class LookupInterface(object):
  """Base interface for lookup tables that persist across different steps."""

  def __init__(self, key_dtype, value_dtype, name):
    """Construct a lookup table interface.

    Args:
      key_dtype: The table key type.
      value_dtype: The table value type.
      name: A name for the operation (optional).
    """
    # Normalize both dtypes up front so the properties always return
    # DType objects regardless of what the caller passed in.
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
    self._name = name

  @property
  def name(self):
    """The name of the table."""
    return self._name

  @property
  def key_dtype(self):
    """The table key dtype."""
    return self._key_dtype

  @property
  def value_dtype(self):
    """The table value dtype."""
    return self._value_dtype

  @property
  def init(self):
    """The table initialization op."""
    raise NotImplementedError

  def size(self, name=None):
    """Compute the number of elements in this table."""
    raise NotImplementedError

  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values."""
    raise NotImplementedError
class InitializableLookupTableBase(LookupInterface):
  """Initializable lookup table interface.

  An initializable lookup table persists across different steps.
  """

  def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.

    It requires a table initializer object (subclass of
    `TableInitializerBase`).  It provides the table key and value types, as
    well as the op to initialize the table. The caller is responsible to
    execute the initialization op.

    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    # The table's display name is the last component of the op name.
    super(InitializableLookupTableBase,
          self).__init__(initializer.key_dtype, initializer.value_dtype,
                         table_ref.op.name.split("/")[-1])
    self._table_ref = table_ref
    # Coerce the default to the value dtype; it must be a scalar
    # (merge_with raises otherwise).
    self._default_value = ops.convert_to_tensor(
        default_value, dtype=self._value_dtype)
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    # Build the initialization op eagerly; exposed via the `init` property.
    self._init = initializer.initialize(self)

  @property
  def table_ref(self):
    """Get the underlying table reference."""
    return self._table_ref

  @property
  def default_value(self):
    """The default value of the table."""
    return self._default_value

  @property
  def init(self):
    """The table initialization op."""
    return self._init

  def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
      name: A name for the operation (optional).

    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as scope:
      # pylint: disable=protected-access
      return gen_lookup_ops._lookup_table_size_v2(self._table_ref, name=scope)
      # pylint: enable=protected-access

  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values.

    The `default_value` is used for keys not present in the table.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: A name for the operation (optional).

    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` or `default_value` doesn't match the table data
        types.
    """
    # For sparse input, only the values are looked up; indices/shape are
    # carried through unchanged.
    key_tensor = keys
    if isinstance(keys, sparse_tensor.SparseTensor):
      key_tensor = keys.values

    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))

    with ops.name_scope(name, "%s_Lookup" % self._name,
                        (self._table_ref, key_tensor,
                         self._default_value)) as scope:
      # pylint: disable=protected-access
      values = gen_lookup_ops._lookup_table_find_v2(
          self._table_ref, key_tensor, self._default_value, name=scope)
      # pylint: enable=protected-access

    # The lookup preserves the input's shape element-wise.
    values.set_shape(key_tensor.get_shape())
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)
    else:
      return values
class HashTable(InitializableLookupTableBase):
  """A generic, immutable-after-init hash table implementation.

  Example usage:

  ```python
  table = tf.contrib.lookup.HashTable(
      tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)
  out = table.lookup(input_tensor).
  table.init.run()
  print(out.eval())
  ```
  """

  def __init__(self, initializer, default_value, shared_name=None, name=None):
    """Creates a non-initialized `HashTable` object.

    Creates a table, the type of its keys and values are specified by the
    initializer.
    Before using the table you will have to initialize it. After initialization
    the table will be immutable.

    Args:
      initializer: The table initializer to use. See `HashTable` kernel for
        supported key and value types.
      default_value: The value to use if a key is missing in the table.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).

    Returns:
      A `HashTable` object.
    """
    with ops.name_scope(
        name, "hash_table", (initializer, default_value)) as table_scope:
      # Key/value dtypes come from the initializer; the base-class
      # constructor runs inside the scope so the initialization ops it
      # builds share this name scope.
      # pylint: disable=protected-access
      table_ref = gen_lookup_ops._hash_table_v2(
          shared_name=shared_name,
          key_dtype=initializer.key_dtype,
          value_dtype=initializer.value_dtype,
          name=table_scope)
      # pylint: enable=protected-access

      super(HashTable, self).__init__(table_ref, default_value, initializer)
class TableInitializerBase(object):
  """Base class for lookup table initializers."""

  def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.

    Args:
      key_dtype: Type of the table keys.
      value_dtype: Type of the table values.
    """
    # Normalize to DType objects so the properties are uniform.
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)

  @property
  def value_dtype(self):
    """The expected table value dtype."""
    return self._value_dtype

  @property
  def key_dtype(self):
    """The expected table key dtype."""
    return self._key_dtype

  def initialize(self, table):
    """Returns the table initialization op."""
    raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
  """Table initializers given `keys` and `values` tensors."""

  def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
    """Constructs a table initializer object based on keys and values tensors.

    Args:
      keys: The tensor for the keys.
      values: The tensor for the values.
      key_dtype: The `keys` data type. Used when `keys` is a python array.
      value_dtype: The `values` data type. Used when `values` is a python array.
      name: A name for the operation (optional).
    """
    # Convert inside a name scope so the constant ops are grouped under it;
    # the resolved scope string is stored and reused by `initialize`.
    with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
      self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
      self._values = ops.convert_to_tensor(
          values, dtype=value_dtype, name="values")
      self._name = scope
    # Table dtypes are inferred from the converted tensors.
    super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
                                                    self._values.dtype)

  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.

    Args:
      table: The table to initialize.

    Returns:
      The operation that initializes the table.

    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.table_ref, self._keys,
                            self._values)) as scope:
      # pylint: disable=protected-access
      init_op = gen_lookup_ops._initialize_table_v2(
          table.table_ref, self._keys, self._values, name=scope)
      # pylint: enable=protected-access
    # Register the op so `tf.tables_initializer()` picks it up.
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
class TextFileIndex(object):
  """Sentinel column indices for `TextFileInitializer` key/value selection."""
  # Use the entire line content (expects dtype string, or integer for keys).
  WHOLE_LINE = -2
  # Use the zero-based line number (expects dtype int64).
  LINE_NUMBER = -1
class TextFileInitializer(TableInitializerBase):
"""Table initializers from a text file.
This initializer assigns one entry in the table for each line in the file.
The key and value type of the table to initialize is given by `key_dtype` and
`value_dtype`.
The key and value content to get from each line is specified by
the `key_index` and `value_index`.
* `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
expects data type int64.
* `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
type string.
* A value `>=0` means use the index (starting at zero) of the split line based
on `delimiter`.
For example if we have a file with the following content:
```
emerson 10
lake 20
palmer 30
```
The following snippet initializes a table with the first column as keys and
second column as values:
* `emerson -> 10`
* `lake -> 20`
* `palmer -> 30`
```python
table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
"test.txt", tf.string, 0, tf.int64, 1, delimiter=" "), -1)
...
table.init.run()
```
Similarly to initialize the whole line as keys and the line number as values.
* `emerson 10 -> 0`
* `lake 20 -> 1`
* `palmer 30 -> 2`
```python
table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
"test.txt", tf.string, tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
tf.int64, tf.contrib.lookup.TextFileIndex.LINE_NUMBER, delimiter=" "), -1)
...
table.init.run()
```
"""
def __init__(self,
filename,
key_dtype,
key_index,
value_dtype,
value_index,
vocab_size=None,
delimiter="\t",
name=None):
"""Constructs a table initializer object to populate from a text file.
It generates one key-value pair per line. The type of table key and
value are specified by `key_dtype` and `value_dtype`, respectively.
Similarly the content of the key and value are specified by the key_index
and value_index.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_dtype: The `key` data type.
key_index: the index that represents information of a line to get the
table 'key' values from.
value_dtype: The `value` data type.
value_index: the index that represents information of a line to get the
table 'value' values from.'
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: A name for the operation (optional).
Raises:
ValueError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
if not isinstance(filename, ops.Tensor) and not filename:
raise ValueError("Filename required for %s." % name)
key_dtype = dtypes.as_dtype(key_dtype)
value_dtype = dtypes.as_dtype(value_dtype)
if key_index < -2:
raise ValueError("Invalid key index %s." % (key_index))
if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Keys must be dtype %s, got %s." %
(dtypes.int64, key_dtype))
if ((key_index == TextFileIndex.WHOLE_LINE) and
(not key_dtype.is_integer) and (key_dtype != dtypes.string)):
raise ValueError(
"Signature mismatch. Keys must be integer or string, got %s." %
key_dtype)
if value_index < -2:
raise ValueError("Invalid value index %s." % (value_index))
if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.int64, value_dtype))
if value_index == TextFileIndex.WHOLE_LINE and value_dtype != dtypes.string:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.string, value_dtype))
if (vocab_size is not None) and (vocab_size <= 0):
raise ValueError("Invalid vocab_size %s." % vocab_size)
self._filename = filename
self._key_index = key_index
self._value_index = value_index
self._vocab_size = vocab_size
self._delimiter = delimiter
self._name = name
super(TextFileInitializer, self).__init__(key_dtype, value_dtype)
def initialize(self, table):
  """Initializes the table from a text file.

  Args:
    table: The table to be initialized.

  Returns:
    The operation that initializes the table.

  Raises:
    TypeError: when the keys and values data types do not match the table
    key and value data types.
  """
  # Fail fast if this initializer's dtypes do not match the table's.
  _check_table_dtypes(table, self.key_dtype, self.value_dtype)
  with ops.name_scope(self._name, "text_file_init",
                      (table.table_ref,)) as scope:
    filename = ops.convert_to_tensor(
        self._filename, dtypes.string, name="asset_filepath")
    # -1 tells the kernel that the vocabulary size is unknown and should be
    # derived from the file contents.
    # pylint: disable=protected-access
    init_op = gen_lookup_ops._initialize_table_from_text_file_v2(
        table.table_ref,
        filename,
        self._key_index,
        self._value_index,
        -1 if self._vocab_size is None else self._vocab_size,
        self._delimiter,
        name=scope)
    # pylint: enable=protected-access
  # Register the op so that `tf.tables_initializer()` will run it.
  ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
  # If the filename tensor is anything other than a string constant (e.g., if
  # it is a placeholder) then it does not make sense to track it as an asset.
  if constant_op.is_constant(filename):
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
  return init_op
class TextFileStringTableInitializer(TextFileInitializer):
  """Table initializer for `int64` IDs to string tables from a text file."""

  def __init__(self,
               filename,
               key_column_index=TextFileIndex.LINE_NUMBER,
               value_column_index=TextFileIndex.WHOLE_LINE,
               vocab_size=None,
               delimiter="\t",
               name="text_file_string_table_init"):
    """Constructs an initializer for an id-to-string table from a text file.

    It populates a table that its key and value types are int64 and string,
    respectively. It generates one key-value pair per line.
    The content of the key and value are specified by `key_column_index`
    and `value_column_index`.

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line based
      on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the keys
        from. The default is to use the line number, starting from zero.
      value_column_index: The column index from the text file to get the
        values from. The default is to use the whole line content.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.

    Raises:
      ValueError: when the filename is empty, or when the table key and value
      data types do not match the expected data types.
    """
    super(TextFileStringTableInitializer, self).__init__(
        filename,
        dtypes.int64,
        key_column_index,
        dtypes.string,
        value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class TextFileIdTableInitializer(TextFileInitializer):
  """Table initializer for string to `int64` IDs tables from a text file."""

  def __init__(self,
               filename,
               key_column_index=TextFileIndex.WHOLE_LINE,
               value_column_index=TextFileIndex.LINE_NUMBER,
               vocab_size=None,
               delimiter="\t",
               name="text_file_id_table_init",
               key_dtype=dtypes.string):
    """Constructs an initializer for a string-to-id table from a text file.

    It populates a table that its key and value types are string and int64,
    respectively. It generates one key-value pair per line.
    The content of the key and value are specified by the key_index
    and value_index.

    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line based
      on `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: The column index from the text file to get the `key`
        values from. The default is to use the whole line content.
      value_column_index: The column index from the text file to get the `value`
        values from. The default is to use the line number, starting from zero.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.
      key_dtype: The `key` data type.

    Raises:
      ValueError: when the filename is empty, or when the table key and value
      data types do not match the expected data types.
    """
    super(TextFileIdTableInitializer, self).__init__(
        filename,
        key_dtype,
        key_column_index,
        dtypes.int64,
        value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class HasherSpec(collections.namedtuple("HasherSpec", ["hasher", "key"])):
  """A structure for the spec of the hashing function to use for hash buckets.

  `hasher` is the name of the hashing function to use (eg. "fasthash",
  "stronghash").
  `key` is optional and specifies the key to use for the hash function if
  supported, currently only used by a strong hash.

  Fields:
    hasher: The hasher name to use.
    key: The key to be used by the hashing function, if required.
  """
  # Empty __slots__ keeps instances as lightweight as the underlying tuple
  # (no per-instance __dict__).
  __slots__ = ()
# Default hasher spec: non-keyed fast hash (no `key` required).
FastHashSpec = HasherSpec("fasthash", None)  # pylint: disable=invalid-name
class StrongHashSpec(HasherSpec):
  """A structure to specify a key of the strong keyed hash spec.

  The strong hash requires a `key`, which is a list of 2 unsigned integer
  numbers. These should be non-zero; random numbers generated from random.org
  would be a fine choice.

  Fields:
    key: The key to be used by the keyed hashing function.
  """
  __slots__ = ()

  def __new__(cls, key):
    """Creates a "stronghash" spec after validating the two-element `key`.

    Args:
      key: A sequence of exactly 2 unsigned integers.

    Raises:
      ValueError: if `key` does not have exactly 2 elements.
      TypeError: if either element of `key` is not an integral type.
    """
    if len(key) != 2:
      raise ValueError("key must have size 2, got %s." % len(key))

    if not isinstance(key[0], compat.integral_types) or not isinstance(
        key[1], compat.integral_types):
      raise TypeError("Invalid key %s. Must be unsigned integer values." % key)

    # `super(type, cls)` requires the second argument to be a subclass of the
    # first.  The previous `super(cls, StrongHashSpec)` had the arguments
    # reversed, which raised a TypeError whenever `cls` was a proper subclass
    # of StrongHashSpec; for StrongHashSpec itself the behavior is identical.
    return super(StrongHashSpec, cls).__new__(cls, "stronghash", key)
def _as_string(tensor):
  """Returns `tensor` unchanged if it is already a string tensor, otherwise
  converts it to its string representation via `string_ops.as_string`."""
  if tensor.dtype.base_dtype == dtypes.string:
    return tensor
  return string_ops.as_string(tensor)
class IdTableWithHashBuckets(LookupInterface):
  """String to Id table wrapper that assigns out-of-vocabulary keys to buckets.

  For example, if an instance of `IdTableWithHashBuckets` is initialized with a
  string-to-id table that maps:
  - emerson -> 0
  - lake -> 1
  - palmer -> 2
  The `IdTableWithHashBuckets` object will perform the following mapping:
  - emerson -> 0
  - lake -> 1
  - palmer -> 2
  - <other term> -> bucket id between 3 and 3 + num_oov_buckets - 1, calculated
    by: hash(<term>) % num_oov_buckets + vocab_size

  If input_tensor is ["emerson", "lake", "palmer", "king", "crimson"],
  the lookup result is [0, 1, 2, 4, 7]

  If `table` is None, only out-of-vocabulary buckets are used.

  Example usage:

  ```python
  num_oov_buckets = 3
  input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimson"])
  table = tf.IdTableWithHashBuckets(
      tf.HashTable(tf.TextFileIdTableInitializer(filename), default_value),
      num_oov_buckets)
  out = table.lookup(input_tensor)
  table.init.run()
  print(out.eval())
  ```

  The hash function used for generating out-of-vocabulary buckets ID is handled
  by `hasher_spec`.
  """

  def __init__(self,
               table,
               num_oov_buckets,
               hasher_spec=FastHashSpec,
               name=None,
               key_dtype=None):
    """Construct a `IdTableWithHashBuckets` object.

    Args:
      table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
      num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
      hasher_spec: A `HasherSpec` to specify the hash function to use for
        assignation of out-of-vocabulary buckets  (optional).
      name: A name for the operation (optional).
      key_dtype: Data type of keys passed to `lookup`. Defaults to
        `table.key_dtype` if `table` is specified, otherwise `tf.string`.
        Must be string or integer, and must be castable to `table.key_dtype`.

    Raises:
      ValueError: when `table` in None and `num_oov_buckets` is not positive.
      TypeError: when `hasher_spec` is invalid.
    """
    # If a name ends with a '/' it is a "name scope", remove all trailing '/'
    # characters to use as table name.
    if name:
      name = name.rstrip("/")
    if table:
      if key_dtype is None:
        key_dtype = table.key_dtype
      supported_table_key_dtypes = (dtypes.int64, dtypes.string)
      if table.key_dtype not in supported_table_key_dtypes:
        # Report `table.key_dtype` (the value being validated); the previous
        # message printed `key_dtype`, which may be a different, perfectly
        # valid dtype and made the error misleading.
        raise TypeError("Invalid key dtype, expected one of %s, but got %s." %
                        (supported_table_key_dtypes, table.key_dtype))
      if table.key_dtype.is_integer != key_dtype.is_integer:
        raise TypeError("Invalid key dtype, expected %s but got %s." %
                        ("integer" if key_dtype.is_integer else "non-integer",
                         table.key_dtype))
      if table.value_dtype != dtypes.int64:
        raise TypeError("Invalid value dtype, expected %s but got %s." %
                        (dtypes.int64, table.value_dtype))
      self._table = table
      name = name or self._table.name
    else:
      if num_oov_buckets <= 0:
        raise ValueError("oov_buckets must be > 0 if no table is supplied.")
      key_dtype = dtypes.string if key_dtype is None else key_dtype
      self._table = None
      name = name or "hash_bucket"
    if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
      raise TypeError(
          "Invalid key_dtype, expected integer or string, got %s." % key_dtype)
    self._num_oov_buckets = num_oov_buckets

    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError(
          "hasher_spec must be of type HasherSpec, got %s" % hasher_spec)
    self._hasher_spec = hasher_spec
    super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64,
                                                 name.split("/")[-1])

  @property
  def init(self):
    """The table initialization op."""
    if self._table:
      return self._table.init
    # No underlying table: there is nothing to initialize.
    with ops.name_scope(None, "init"):
      return control_flow_ops.no_op()

  def size(self, name=None):
    """Compute the number of elements in this table."""
    with ops.name_scope(name, "%s_Size" % self.name) as scope:
      if self._table:
        tsize = self._table.size(scope)
      else:
        tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
      # OOV buckets always count toward the total size.
      return tsize + self._num_oov_buckets

  def _get_string_to_hash_bucket_fn(self, hasher_spec):
    """Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError("hasher_spec must be of type HasherSpec %s" % hasher_spec)
    if hasher_spec.hasher == "fasthash":
      return string_ops.string_to_hash_bucket_fast
    if hasher_spec.hasher == "legacy":
      return string_ops.string_to_hash_bucket
    if hasher_spec.hasher == "stronghash":
      # The strong hash needs its 2-element key bound as a keyword argument.
      return functools.partial(
          string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
    raise ValueError("Unknown hasher %s" % hasher_spec.hasher)

  def lookup(self, keys, name=None):
    """Looks up `keys` in the table, outputs the corresponding values.

    It assigns out-of-vocabulary keys to buckets based in their hashes.

    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: Optional name for the op.

    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.

    Raises:
      TypeError: when `keys` doesn't match the table key data type.
    """
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    values = keys
    if isinstance(keys, sparse_tensor.SparseTensor):
      values = keys.values
    if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
      values = math_ops.to_int64(values)

    if self._num_oov_buckets == 0:
      ids = self._table.lookup(values, name=name)
    else:
      # TODO(yleon): Consider moving this functionality to its own kernel.
      with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
        str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
            self._hasher_spec)
        buckets = str_to_hash_bucket(
            _as_string(values),
            num_buckets=self._num_oov_buckets,
            name="hash_bucket")
        if self._table:
          ids = self._table.lookup(values)
          # Shift bucket ids past the vocabulary so they do not collide with
          # in-vocabulary ids, then use the hashed bucket only where the table
          # lookup missed (returned the default value).
          buckets = math_ops.add(buckets, self._table.size())
          is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
          ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
        else:
          ids = buckets
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
    return ids
def index_table_from_file(vocabulary_file=None,
                          num_oov_buckets=0,
                          vocab_size=None,
                          default_value=-1,
                          hasher_spec=FastHashSpec,
                          key_dtype=dtypes.string,
                          name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the key and the zero-based line
  number is the ID.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  table = tf.contrib.lookup.index_table_from_file(
      vocabulary_file="test.txt", num_oov_buckets=1)
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()

  ids.eval()  ==> [0, 1, 3, 2]  # where 3 is the out-of-vocabulary bucket
  ```

  Args:
    vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignation of out-of-vocabulary buckets.
    key_dtype: The `key` data type.
    name: A name for this op (optional).

  Returns:
    The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_file` is not set.
    ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
      than zero.
  """
  if vocabulary_file is None or (
      isinstance(vocabulary_file, str) and not vocabulary_file):
    raise ValueError("vocabulary_file must be specified and must not be empty.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if vocab_size is not None and vocab_size < 1:
    raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
  if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")

  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    table = None
    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      # The shared_name lets graphs that build the same vocabulary table more
      # than once share a single underlying table resource.
      if vocab_size:
        # Keep the shared_name:
        # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                  TextFileIndex.WHOLE_LINE,
                                                  TextFileIndex.LINE_NUMBER)
      else:
        # Keep the shared_name
        # <table_type>_<filename>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
                                               TextFileIndex.WHOLE_LINE,
                                               TextFileIndex.LINE_NUMBER)
      # Integer keys are always stored as int64 in the underlying table.
      init = TextFileIdTableInitializer(
          vocabulary_file,
          vocab_size=vocab_size,
          key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
          name="table_init")

      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    if num_oov_buckets:
      # Wrap the table so OOV keys hash into
      # [vocab_size, vocab_size + num_oov_buckets).
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=key_dtype)

    return table
def index_table_from_tensor(vocabulary_list,
                            num_oov_buckets=0,
                            default_value=-1,
                            hasher_spec=FastHashSpec,
                            dtype=dtypes.string,
                            name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a string `vocabulary_list` 1-D
  tensor where each element is a key and corresponding index within the tensor
  is the value.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when executing
  the table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  table = tf.contrib.lookup.index_table_from_tensor(
      vocabulary_list, num_oov_buckets=1, default_value=-1)
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()

  ids.eval()  ==> [0, 1, 4, 2]
  ```

  Args:
    vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to
      indices. The type of this object must be castable to `dtype`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignment of out-of-vocabulary buckets.
    dtype: The type of values passed to `lookup`. Only string and integers are
      supported.
    name: A name for this op (optional).

  Returns:
    The lookup table to map an input `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_list` is invalid.
    ValueError: If `num_oov_buckets` is negative.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")

  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    keys = ops.convert_to_tensor(vocabulary_list)
    if keys.dtype.is_integer != dtype.is_integer:
      raise ValueError("Expected %s, got %s." %
                       ("integer"
                        if dtype.is_integer else "non-integer", keys.dtype))
    if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
      raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
    # Values are simply the positions 0..len(keys)-1 of each key.
    num_elements = array_ops.size(keys)
    values = math_ops.to_int64(math_ops.range(num_elements))

    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      # Integer keys are always stored as int64 in the underlying table.
      table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
      init = KeyValueTensorInitializer(
          table_keys,
          values,
          table_keys.dtype.base_dtype,
          dtypes.int64,
          name="table_init")
      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    if num_oov_buckets:
      # Wrap the table so OOV keys hash into the extra bucket range.
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=dtype)

    return table
def index_to_string_table_from_file(vocabulary_file,
                                    vocab_size=None,
                                    default_value="UNK",
                                    name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The table is initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the value and the
  zero-based line number is the index.

  Any input which does not have a corresponding index in the vocabulary file
  (an out-of-vocabulary entry) is assigned the `default_value`

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_file(
      vocabulary_file="test.txt", default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()

  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_file: The vocabulary filename.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table to map a string values associated to a given index `int64`
    `Tensors`.

  Raises:
    ValueError: when `vocabulary_file` is empty.
    ValueError: when `vocab_size` is invalid.
  """
  if not vocabulary_file:
    raise ValueError("vocabulary_file must be specified.")
  if vocab_size is not None and vocab_size < 1:
    raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)

  with ops.name_scope(name, "index_to_string") as scope:
    # The shared_name lets graphs that build the same vocabulary table more
    # than once share a single underlying table resource.
    shared_name = ""
    if vocab_size:
      # Keep a shared_name
      # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                TextFileIndex.LINE_NUMBER,
                                                TextFileIndex.WHOLE_LINE)
    else:
      # Keep a shared_name <table_type>_<filename>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
                                             TextFileIndex.LINE_NUMBER,
                                             TextFileIndex.WHOLE_LINE)
    init = TextFileStringTableInitializer(
        vocabulary_file, vocab_size=vocab_size, name="table_init")

    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
def index_to_string_table_from_tensor(vocabulary_list,
                                      default_value="UNK",
                                      name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `vocabulary_list` 1-D
  `Tensor` where each element is a value and the corresponding index within the
  tensor is the key.

  Any input which does not have a corresponding index in `vocabulary_list`
  (an out-of-vocabulary entry) is assigned the `default_value`

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when executing
  the table initializer op, it will throw a `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_tensor(
      vocabulary_list, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()

  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
      from indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table to map a string values associated to a given index `int64`
    `Tensors`.

  Raises:
    ValueError: when `vocabulary_list` is not set.
  """

  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")

  with ops.name_scope(name, "index_to_string") as scope:
    vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
    # Keys are simply the positions 0..len(vocabulary_list)-1 of each value.
    num_elements = array_ops.size(vocabulary_list)
    keys = math_ops.to_int64(math_ops.range(num_elements))

    shared_name = ""
    init = KeyValueTensorInitializer(
        keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
# Lookup-table ops manipulate table resources and produce discrete ids/values;
# none of them has a meaningful gradient, so register them all as
# non-differentiable (both V1 and V2 op variants).
ops.NotDifferentiable("LookupTableFind")
ops.NotDifferentiable("LookupTableFindV2")
ops.NotDifferentiable("LookupTableInsert")
ops.NotDifferentiable("LookupTableInsertV2")
ops.NotDifferentiable("LookupTableSize")
ops.NotDifferentiable("LookupTableSizeV2")
ops.NotDifferentiable("HashTable")
ops.NotDifferentiable("HashTableV2")
ops.NotDifferentiable("InitializeTable")
ops.NotDifferentiable("InitializeTableV2")
ops.NotDifferentiable("InitializeTableFromTextFile")
ops.NotDifferentiable("InitializeTableFromTextFileV2")
ops.NotDifferentiable("MutableDenseHashTable")
ops.NotDifferentiable("MutableDenseHashTableV2")
ops.NotDifferentiable("MutableHashTable")
ops.NotDifferentiable("MutableHashTableV2")
ops.NotDifferentiable("MutableHashTableOfTensors")
ops.NotDifferentiable("MutableHashTableOfTensorsV2")
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for generating the version string for Astropy (or an affiliated
package) and the version.py module, which contains version info for the
package.
Within the generated astropy.version module, the `major`, `minor`, and `bugfix`
variables hold the respective parts of the version number (bugfix is '0' if
absent). The `release` variable is True if this is a release, and False if this
is a development version of astropy. For the actual version string, use::
from astropy.version import version
or::
from astropy import __version__
"""
from __future__ import division
import datetime
import os
import pkgutil
import sys
import time
import warnings
from distutils import log
from configparser import ConfigParser
import pkg_resources
from . import git_helpers
from .distutils_helpers import is_distutils_display_option
from .git_helpers import get_git_devstr
from .utils import AstropyDeprecationWarning, import_file
# Public API of this module.
__all__ = ['generate_version_py']
def _version_split(version):
    """
    Split a version string into major, minor, and bugfix numbers. If any of
    those numbers are missing the default is zero. Any pre/post release
    modifiers are ignored.

    Examples
    ========
    >>> _version_split('1.2.3')
    (1, 2, 3)
    >>> _version_split('1.2')
    (1, 2, 0)
    >>> _version_split('1.2rc1')
    (1, 2, 0)
    >>> _version_split('1')
    (1, 0, 0)
    >>> _version_split('')
    (0, 0, 0)
    """
    parsed = pkg_resources.parse_version(version)

    if hasattr(parsed, 'base_version'):
        # New version parsing for setuptools >= 8.0
        base = parsed.base_version
        parts = [int(piece) for piece in base.split('.')] if base else []
    else:
        parts = []
        for piece in parsed:
            if piece.startswith('*'):
                # Ignore any .dev, a, b, rc, etc.
                break
            parts.append(int(piece))

    # Pad out to three components; a non-positive multiplier yields an empty
    # list, so versions that already have three or more parts are unaffected.
    parts += [0] * (3 - len(parts))

    # In principle a version could have more parts (like 1.2.3.4) but we only
    # support <major>.<minor>.<micro>
    return tuple(parts[:3])
# This is used by setup.py to create a new version.py - see that file for
# details. Note that the imports have to be absolute, since this is also used
# by affiliated packages.
_FROZEN_VERSION_PY_TEMPLATE = """
# Autogenerated by {packagetitle}'s setup.py on {timestamp!s} UTC
from __future__ import unicode_literals
import datetime
{header}
major = {major}
minor = {minor}
bugfix = {bugfix}
version_info = (major, minor, bugfix)
release = {rel}
timestamp = {timestamp!r}
debug = {debug}
astropy_helpers_version = "{ahver}"
"""[1:]
_FROZEN_VERSION_PY_WITH_GIT_HEADER = """
{git_helpers}
_packagename = "{packagename}"
_last_generated_version = "{verstr}"
_last_githash = "{githash}"
# Determine where the source code for this module
# lives. If __file__ is not a filesystem path then
# it is assumed not to live in a git repo at all.
if _get_repo_path(__file__, levels=len(_packagename.split('.'))):
version = update_git_devstr(_last_generated_version, path=__file__)
githash = get_git_devstr(sha=True, show_warning=False,
path=__file__) or _last_githash
else:
# The file does not appear to live in a git repo so don't bother
# invoking git
version = _last_generated_version
githash = _last_githash
"""[1:]
_FROZEN_VERSION_PY_STATIC_HEADER = """
version = "{verstr}"
githash = "{githash}"
"""[1:]
def _get_version_py_str(packagename, version, githash, release, debug,
                        uses_git=True):
    """
    Render the source of a ``version.py`` module for *packagename* as a
    string, filling in the frozen templates defined above.
    """
    try:
        from astropy_helpers import __version__ as ahver
    except ImportError:
        ahver = "unknown"

    # Honor SOURCE_DATE_EPOCH (reproducible builds); fall back to "now".
    epoch = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
    timestamp = datetime.datetime.utcfromtimestamp(epoch)

    major, minor, bugfix = _version_split(version)

    if packagename.lower() == 'astropy':
        packagetitle = 'Astropy'
    else:
        packagetitle = 'Astropy-affiliated package ' + packagename

    header = ''

    if uses_git:
        header = _generate_git_header(packagename, version, githash)
    elif not githash:
        # _generate_git_header will already generate a new git hash for us, but
        # for creating a new version.py for a release (even if uses_git=False)
        # we still need to get the githash to include in the version.py
        # See https://github.com/astropy/astropy-helpers/issues/141
        githash = git_helpers.get_git_devstr(sha=True, show_warning=True)

    if not header:  # If _generate_git_header fails it returns an empty string
        header = _FROZEN_VERSION_PY_STATIC_HEADER.format(verstr=version,
                                                         githash=githash)

    return _FROZEN_VERSION_PY_TEMPLATE.format(packagetitle=packagetitle,
                                              timestamp=timestamp,
                                              header=header,
                                              major=major,
                                              minor=minor,
                                              bugfix=bugfix,
                                              ahver=ahver,
                                              rel=release, debug=debug)
def _generate_git_header(packagename, version, githash):
    """
    Generates a header to the version.py module that includes utilities for
    probing the git repository for updates (to the current git hash, etc.)
    These utilities should only be available in development versions, and not
    in release builds.

    If this fails for any reason an empty string is returned.
    """
    loader = pkgutil.get_loader(git_helpers)
    source = loader.get_source(git_helpers.__name__) or ''
    source_lines = source.splitlines()
    if not source_lines:
        log.warn('Cannot get source code for astropy_helpers.git_helpers; '
                 'git support disabled.')
        return ''

    # Only the portion of git_helpers after the '# BEGIN' marker is embedded
    # into the generated version.py (the preamble is skipped).
    idx = 0
    for idx, line in enumerate(source_lines):
        if line.startswith('# BEGIN'):
            break
    git_helpers_py = '\n'.join(source_lines[idx + 1:])

    verstr = version

    # Prefer the hash of the current checkout over the supplied one, when git
    # is available.
    new_githash = git_helpers.get_git_devstr(sha=True, show_warning=False)

    if new_githash:
        githash = new_githash

    return _FROZEN_VERSION_PY_WITH_GIT_HEADER.format(
        git_helpers=git_helpers_py, packagename=packagename,
        verstr=verstr, githash=githash)
def generate_version_py(packagename=None, version=None, release=None, debug=None,
                        uses_git=None, srcdir='.'):
    """
    Generate a version.py file in the package with version information, and
    update developer version strings.
    This function should normally be called without any arguments. In this case
    the package name and version is read in from the ``setup.cfg`` file (from
    the ``name`` or ``package_name`` entry and the ``version`` entry in the
    ``[metadata]`` section).
    If the version is a developer version (of the form ``3.2.dev``), the
    version string will automatically be expanded to include a sequential
    number as a suffix (e.g. ``3.2.dev13312``), and the updated version string
    will be returned by this function.
    Based on this updated version string, a ``version.py`` file will be
    generated inside the package, containing the version string as well as more
    detailed information (for example the major, minor, and bugfix version
    numbers, a ``release`` flag indicating whether the current version is a
    stable or developer version, and so on.

    Returns the (possibly expanded) version string.  Exits the process with
    status 1 when neither setup.cfg nor the deprecated arguments provide a
    package name/version.
    """
    # All three explicit arguments are deprecated; setup.cfg is the intended
    # source of truth.  Warn, but still honor them as fallbacks below.
    if packagename is not None:
        warnings.warn('The packagename argument to generate_version_py has '
                      'been deprecated and will be removed in future. Specify '
                      'the package name in setup.cfg instead', AstropyDeprecationWarning)

    if version is not None:
        warnings.warn('The version argument to generate_version_py has '
                      'been deprecated and will be removed in future. Specify '
                      'the version number in setup.cfg instead', AstropyDeprecationWarning)

    if release is not None:
        warnings.warn('The release argument to generate_version_py has '
                      'been deprecated and will be removed in future. We now '
                      'use the presence of the "dev" string in the version to '
                      'determine whether this is a release', AstropyDeprecationWarning)

    # We use ConfigParser instead of read_configuration here because the latter
    # only reads in keys recognized by setuptools, but we need to access
    # package_name below.
    conf = ConfigParser()
    conf.read('setup.cfg')

    # Package name: prefer [metadata] name, then the legacy package_name key,
    # then the deprecated argument; otherwise abort.
    if conf.has_option('metadata', 'name'):
        packagename = conf.get('metadata', 'name')
    elif conf.has_option('metadata', 'package_name'):
        # The package-template used package_name instead of name for a while
        warnings.warn('Specifying the package name using the "package_name" '
                      'option in setup.cfg is deprecated - use the "name" '
                      'option instead.', AstropyDeprecationWarning)
        packagename = conf.get('metadata', 'package_name')
    elif packagename is not None:  # deprecated
        pass
    else:
        print('ERROR: Could not read package name from setup.cfg', file=sys.stderr)
        sys.exit(1)

    # Version: only a setup.cfg-sourced version gets the git devstr suffix;
    # a version passed via the deprecated argument is used verbatim.
    if conf.has_option('metadata', 'version'):
        version = conf.get('metadata', 'version')
        add_git_devstr = True
    elif version is not None:  # deprecated
        add_git_devstr = False
    else:
        print('ERROR: Could not read package version from setup.cfg', file=sys.stderr)
        sys.exit(1)

    # Release status is inferred from the version string unless explicitly
    # (and deprecatedly) overridden by the caller.
    if release is None:
        release = 'dev' not in version

    if not release and add_git_devstr:
        version += get_git_devstr(False)

    if uses_git is None:
        uses_git = not release

    # In some cases, packages have a - but this is a _ in the module. Since we
    # are only interested in the module here, we replace - by _
    packagename = packagename.replace('-', '_')

    # Probe any previously generated version.py so we can avoid rewriting the
    # file when nothing changed.  The _last_* attributes are preferred; older
    # generated modules only expose version/githash.
    try:
        version_module = get_pkg_version_module(packagename)

        try:
            last_generated_version = version_module._last_generated_version
        except AttributeError:
            last_generated_version = version_module.version

        try:
            last_githash = version_module._last_githash
        except AttributeError:
            last_githash = version_module.githash

        current_release = version_module.release
        current_debug = version_module.debug
    except ImportError:
        version_module = None
        last_generated_version = None
        last_githash = None
        current_release = None
        current_debug = None

    # NOTE(review): `release` was unconditionally assigned above (it is never
    # None here), so this branch looks unreachable — confirm before relying on
    # the "keep current value" behavior it describes.
    if release is None:
        # Keep whatever the current value is, if it exists
        release = bool(current_release)

    if debug is None:
        # Likewise, keep whatever the current value is, if it exists
        debug = bool(current_debug)

    package_srcdir = os.path.join(srcdir, *packagename.split('.'))
    version_py = os.path.join(package_srcdir, 'version.py')

    # Only rewrite version.py when the version string or flags changed.
    if (last_generated_version != version or current_release != release or
            current_debug != debug):
        if '-q' not in sys.argv and '--quiet' not in sys.argv:
            log.set_threshold(log.INFO)

        if is_distutils_display_option():
            # Always silence unnecessary log messages when display options are
            # being used
            log.set_threshold(log.WARN)

        log.info('Freezing version number to {0}'.format(version_py))

        with open(version_py, 'w') as f:
            # This overwrites the actual version.py
            f.write(_get_version_py_str(packagename, version, last_githash,
                                        release, debug, uses_git=uses_git))

    return version
def get_pkg_version_module(packagename, fromlist=None):
    """Returns the package's .version module generated by
    `astropy_helpers.version_helpers.generate_version_py`. Raises an
    ImportError if the version module is not found.
    If ``fromlist`` is an iterable, return a tuple of the members of the
    version module corresponding to the member names given in ``fromlist``.
    Raises an `AttributeError` if any of these module members are not found.
    """
    module = import_file(os.path.join(packagename, 'version.py'),
                         name='version')
    if not fromlist:
        return module
    return tuple(getattr(module, member_name) for member_name in fromlist)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import tensorflow.python.platform
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import config_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
# Registering None as the shape function for 'ConstructionFails' —
# presumably so graph construction of this intentionally-broken test op is
# not rejected for lacking a shape function; confirm against RegisterShape.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
  """Session(graph=g) runs ops from an explicitly supplied graph."""
  with ops.Graph().as_default() as g, ops.device('/cpu:0'):
    a = constant_op.constant(6.0, shape=[1, 1])
    b = constant_op.constant(7.0, shape=[1, 1])
    c = math_ops.matmul(a, b, name='matmul')
  with session.Session(graph=g):
    result = c.eval()
    self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
  """Session() picks up the thread's current default graph."""
  with ops.Graph().as_default(), ops.device('/cpu:0'):
    a = constant_op.constant(6.0, shape=[1, 1])
    b = constant_op.constant(7.0, shape=[1, 1])
    c = math_ops.matmul(a, b, name='matmul')
    with session.Session():
      result = c.eval()
      self.assertAllEqual(result, [[42.0]])
def testCreate(self):
  """Tensor.eval works both with a feed dict (by tensor name) and without."""
  with session.Session():
    inp = constant_op.constant(10.0, name='W1')
    copy = array_ops.identity(inp)
    # Test with feed.
    # TODO(mrry): Investigate why order='F' didn't work.
    arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
    copy_val = copy.eval({'W1:0': arr})
    self.assertAllEqual(arr, copy_val)
    # Test without feed.
    copy_val = copy.eval()
    self.assertAllEqual(np.asarray(10.0, dtype=np.float32), copy_val)
def testManyCPUs(self):
  """A session configured with multiple CPU devices still evaluates ops."""
  # TODO(keveman): Implement ListDevices and test for the number of
  # devices returned by ListDevices.
  with session.Session(
      config=config_pb2.ConfigProto(device_count={'CPU': 2})):
    inp = constant_op.constant(10.0, name='W1')
    self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
  """A session with use_per_session_threads=True still evaluates ops."""
  # TODO(keveman): Implement ListDevices and test for the number of
  # devices returned by ListDevices.
  with session.Session(
      config=config_pb2.ConfigProto(use_per_session_threads=True)):
    inp = constant_op.constant(10.0, name='W1')
    self.assertAllEqual(inp.eval(), 10.0)
def testErrorsReported(self):
  """Running a nonexistent tensor name raises ValueError."""
  with session.Session() as s:
    constant_op.constant(10.0, name='W1')
    with self.assertRaises(ValueError):
      s.run('foo:0')
def testErrorPayload(self):
  """An unfed placeholder's eval error carries the offending op."""
  with session.Session():
    a = array_ops.placeholder(dtypes.float32)
    with self.assertRaisesOpError(lambda e: e.op == a.op):
      a.eval()
def testOpConstructionErrorPayload(self):
  """Errors from an unregistered op carry the op and INVALID_ARGUMENT code."""
  with session.Session():
    failing_op = ops.get_default_graph().create_op(
        'ConstructionFails', [], [], name='f')

    def exc_predicate(e):
      return (e.op == failing_op
              and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
    with self.assertRaisesOpError(exc_predicate):
      failing_op.run()
def testErrorBasedOn(self):
  """Op errors expose the _original_op chain set at graph-construction time."""
  with session.Session() as sess:
    a = constant_op.constant(0.0, shape=[2, 3])
    # NOTE(mrry): The original_op is nonsense, but used here to test that the
    # errors are reported correctly.
    # pylint: disable=protected-access
    with sess.graph._original_op(a.op):
      b = array_ops.identity(a, name='id')
      with sess.graph._original_op(b.op):
        c = array_ops.placeholder(dtypes.float32)
    # pylint: enable=protected-access

    def exc_predicate(e):
      return (e.op == c.op
              and e.op._original_op == b.op
              and e.op._original_op._original_op == a.op)
    with self.assertRaisesOpError(exc_predicate):
      c.eval()
def testFetchTensorObject(self):
  """Tensors can be fetched via list, single fetch, eval, and multi-fetch."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    results_with_list = s.run([c])
    self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
    results_with_single = s.run(c)
    self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
    results_with_get = c.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
    a_val, b_val = s.run([a, b])  # Test multiple fetches.
    self.assertAllEqual([[1.0, 1.0]], a_val)
    self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
def testFetchScalar(self):
  """Fetching a scalar returns the matching numpy scalar type and value."""
  with session.Session() as s:
    for scalar in np.int32, np.int64, np.float32, np.float64:
      x = scalar(7)
      y = scalar(8)
      tf_x = constant_op.constant(x, shape=[])
      tf_y = constant_op.constant(y)
      tf_xy = math_ops.add(tf_x, tf_y)
      # Single fetch
      xy = s.run(tf_xy)
      self.assertEqual(scalar, type(xy))
      self.assertEqual(x + y, xy)
      # List fetch
      xy, = s.run([tf_xy])
      self.assertEqual(scalar, type(xy))
      self.assertEqual(x + y, xy)
def testFetchOperationObject(self):
  """A Variable object can be fetched after running its initializer."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    v = variables.Variable(a, name='testFetchOperationObject_v')
    s.run(v.initializer)
    v_val = s.run(v)
    self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
  """A SparseTensor fetch works as tuple or SparseTensorValue, singly or in a list."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    sp = ops.SparseTensor(
        constant_op.constant(indices),
        constant_op.constant(values),
        constant_op.constant(shape))
    # Single fetch, use as tuple
    sp_out = s.run(sp)
    indices_out, values_out, shape_out = sp_out
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Single fetch, use as SparseTensorValue
    sp_out = s.run(sp)
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.shape, shape)
    # Tuple fetch, use as tuple
    indices_out, values_out, shape_out = s.run(sp)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # List fetch, use as tuple
    (indices_out, values_out, shape_out), = s.run([sp])
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # List fetch, use as SparseTensorValue
    sp_out, = s.run([sp])
    self.assertAllEqual(sp_out.indices, indices)
    self.assertAllEqual(sp_out.values, values)
    self.assertAllEqual(sp_out.shape, shape)
def testFeedSparseTensor(self):
  """A SparseTensor can be fed as a plain tuple or a SparseTensorValue."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    shape = np.array([7, 9, 2]).astype(np.int64)
    sp = ops.SparseTensor(
        array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
        array_ops.placeholder(dtype=np.float32, shape=(2,)),
        array_ops.placeholder(dtype=np.int64, shape=(3,)),)
    sp_indices = array_ops.identity(sp.indices)
    sp_values = array_ops.identity(sp.values)
    sp_shape = array_ops.identity(sp.shape)
    sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
    # Feed with tuple
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue
    indices_out, values_out, shape_out = s.run(
        [sp_indices, sp_values, sp_shape],
        {sp: ops.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(shape_out, shape)
    # Feed with SparseTensorValue, fetch SparseTensorValue
    sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
    self.assertAllEqual(sp2_out.indices, indices)
    self.assertAllEqual(sp2_out.values, values)
    self.assertAllEqual(sp2_out.shape, shape)
def testFetchIndexedSlices(self):
  """IndexedSlices fetches work as tuple or IndexedSlicesValue, singly or in a list."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    dense_shape = np.array([7, 9, 2]).astype(np.int64)
    ind = ops.IndexedSlices(
        constant_op.constant(values), constant_op.constant(indices),
        constant_op.constant(dense_shape))
    # Single fetch, use as tuple
    ind_out = s.run(ind)
    values_out, indices_out, dense_shape_out = ind_out
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Single fetch, use as IndexedSlicesValue
    ind_out = s.run(ind)
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
    # Tuple fetch, use as tuple
    values_out, indices_out, dense_shape_out = s.run(ind)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as tuple
    (values_out, indices_out, dense_shape_out), = s.run([ind])
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as IndexedSlicesValue
    ind_out, = s.run([ind])
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
  """IndexedSlices can be fed as a plain tuple or an IndexedSlicesValue."""
  with session.Session() as s:
    values = np.array([1.0, 2.0]).astype(np.float32)
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    dense_shape = np.array([7, 9, 2]).astype(np.int64)
    ind = ops.IndexedSlices(
        array_ops.placeholder(dtype=np.float32,
                              shape=(2,)),
        array_ops.placeholder(dtype=np.int64,
                              shape=(2, 3)),
        array_ops.placeholder(dtype=np.int64,
                              shape=(3,)),)
    ind_values = array_ops.identity(ind.values)
    ind_indices = array_ops.identity(ind.indices)
    ind_dense_shape = array_ops.identity(ind.dense_shape)
    ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
    # Feed with tuple
    values_out, indices_out, dense_shape_out = s.run(
        [ind_values, ind_indices, ind_dense_shape],
        {ind: (values, indices, dense_shape)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Feed with IndexedSlicesValue
    values_out, indices_out, dense_shape_out = s.run(
        [ind_values, ind_indices, ind_dense_shape],
        {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
    ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                        dense_shape)})
    self.assertAllEqual(ind2_out.values, values)
    self.assertAllEqual(ind2_out.indices, indices)
    self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
  """Fetching IndexedSlices with dense_shape=None yields None for the shape."""
  with session.Session() as s:
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    values = np.array([1.0, 2.0]).astype(np.float32)
    dense_shape = None
    ind = ops.IndexedSlices(
        constant_op.constant(values), constant_op.constant(indices), None)
    # Single fetch, use as tuple
    ind_out = s.run(ind)
    values_out, indices_out, dense_shape_out = ind_out
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # Single fetch, use as IndexedSlicesValue
    ind_out = s.run(ind)
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
    # Tuple fetch, use as tuple
    values_out, indices_out, dense_shape_out = s.run(ind)
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as tuple
    (values_out, indices_out, dense_shape_out), = s.run([ind])
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    self.assertAllEqual(dense_shape_out, dense_shape)
    # List fetch, use as IndexedSlicesValue
    ind_out, = s.run([ind])
    self.assertAllEqual(ind_out.values, values)
    self.assertAllEqual(ind_out.indices, indices)
    self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
  """Feeding IndexedSlices built without a dense_shape component works."""
  with session.Session() as s:
    values = np.array([1.0, 2.0]).astype(np.float32)
    indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
    dense_shape = None
    ind = ops.IndexedSlices(
        array_ops.placeholder(dtype=np.float32,
                              shape=(2,)),
        array_ops.placeholder(dtype=np.int64,
                              shape=(2, 3)),
        None)
    ind_values = array_ops.identity(ind.values)
    ind_indices = array_ops.identity(ind.indices)
    ind2 = ops.IndexedSlices(ind_values, ind_indices)
    # Feed with tuple
    values_out, indices_out = s.run(
        [ind_values, ind_indices], {ind: (values, indices)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    # Feed with IndexedSlicesValue
    values_out, indices_out = s.run(
        [ind_values, ind_indices],
        {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
    self.assertAllEqual(values_out, values)
    self.assertAllEqual(indices_out, indices)
    # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
    ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                        dense_shape)})
    self.assertAllEqual(ind2_out.values, values)
    self.assertAllEqual(ind2_out.indices, indices)
    self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
  """Ops added after a run() are picked up by a later run() (graph Extend)."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    c_val = s.run(c)
    self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
    d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
    e = math_ops.matmul(c, d)
    # Extend will happen here.
    e_val = s.run(e)
    self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
  """Extending the graph with stateful ops preserves existing variable state."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
    v.initializer.run()
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    d = constant_op.constant(3.0, shape=[2, 3])
    e = math_ops.matmul(a, d)
    assign_e_to_v = state_ops.assign(v, e)
    # Extend will happen here.
    e_val = e.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
    # v is unchanged until the assign op actually runs.
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    s.run(assign_e_to_v)
    v_val = v.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
  """A group() spanning ops created before and after an Extend still runs."""
  with session.Session() as s:
    a = constant_op.constant(1.0, shape=[1, 2])
    p = variables.Variable(a, name='testExtendWithGroupBy_p')
    a_val = a.eval()  # Force an Extend after this op.
    self.assertAllEqual([[1.0, 1.0]], a_val)
    b = constant_op.constant(2.0, shape=[1, 2])
    q = variables.Variable(b, name='testExtendWithGroupBy_q')
    # Extend will happen here.
    init = control_flow_ops.group(p.initializer, q.initializer)
    s.run(init)
    p_val, q_val = s.run([p, q])
    self.assertAllEqual([[1.0, 1.0]], p_val)
    self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
  """Tensor.eval supports both plain evaluation and feed_dict overrides."""
  with session.Session():
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    c_val = c.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
    fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
    self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
  """Operation.eval/run executes assigns, with and without a feed_dict."""
  with session.Session():
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[1, 2], name='b')
    v = variables.Variable(a, a.dtype)
    assign_a_to_v = state_ops.assign(v, a)
    assign_a_to_v.eval()
    v_val = v.eval()
    self.assertAllEqual([[1.0, 1.0]], v_val)
    assign_b_to_v = state_ops.assign(v, b)
    assign_b_to_v.eval()
    v_val = v.eval()
    self.assertAllEqual([[2.0, 2.0]], v_val)
    # Feeding 'b:0' overrides the assigned value for this run only.
    assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
    v_val = v.eval()
    self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
  """Entering a session installs its graph as the default graph."""
  with session.Session() as s:
    self.assertEqual(ops.get_default_graph(), s.graph)
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    self.assertEqual(ops.get_default_graph(), a.graph)
    self.assertEqual(ops.get_default_graph(), b.graph)
    c = math_ops.matmul(a, b)
    v = variables.Variable(c, name='testDefaultGraph_v')
    v.initializer.run()
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    d = constant_op.constant(3.0, shape=[2, 3])
    e = math_ops.matmul(a, d)
    assign_e_to_v = state_ops.assign(v, e)
    e_val = e.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    s.run(assign_e_to_v)
    v_val = v.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
    self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
  """Worker for testDefaultGraphWithThreads: builds and runs a per-thread graph.

  Signals `constructed_event` once its graph is built, then blocks on
  `continue_event` so all threads construct before any of them runs.
  """
  with session.Session() as s:
    self.assertEqual(ops.get_default_graph(), s.graph)
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    v = variables.Variable(c, name='var_%d' % i)

    # Block here until all threads have constructed their graph.
    constructed_event.set()
    continue_event.wait()

    assign_c_to_v = state_ops.assign(v, c)
    v.initializer.run()
    assign_c_to_v.eval()
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    d = constant_op.constant(3.0, shape=[2, 3])
    e = math_ops.matmul(a, d)
    assign_e_to_v = state_ops.assign(v, e)
    e_val = e.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
    v_val = v.eval()
    self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
    s.run(assign_e_to_v)
    v_val = v.eval()
    self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
    self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
  """Each thread gets its own thread-local default graph."""
  # Fork ten threads that use their thread-local default graph.
  threads = []
  constructed_events = [threading.Event() for _ in range(10)]
  continue_event = threading.Event()
  for i, constructed_event in enumerate(constructed_events):
    t = self.checkedThread(target=self._testDefaultGraphInThread,
                           args=(constructed_event, continue_event, i))
    threads.append(t)
  for t in threads:
    t.start()
  # Wait for every thread to finish graph construction, then release them all.
  for constructed_event in constructed_events:
    constructed_event.wait()
  continue_event.set()
  for t in threads:
    t.join()
def testParallelRun(self):
  """Many threads can call run() on the same session concurrently."""
  with session.Session() as sess:
    c = constant_op.constant(5.0)
    ev = threading.Event()

    def run_step():
      ev.wait()
      val = c.eval(session=sess)
      self.assertEqual(val, 5.0)
    threads = [self.checkedThread(target=run_step) for _ in range(100)]
    for t in threads:
      t.start()
    # Release all threads at once to maximize contention.
    ev.set()
    for t in threads:
      t.join()
def testRunFeedDict(self):
  """feed_dict accepts Tensor keys, name keys, and plain Python lists."""
  with session.Session() as s:
    x = array_ops.zeros([2])
    y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
    self.assertAllEqual(y, 2 * np.ones(2))
    y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
    self.assertAllEqual(y, 2 * np.ones(2))
    y = s.run(2 * x, feed_dict={x: [1, 1]})
    assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
  """graph_def records version info and grows as ops are added."""
  with session.Session() as sess:
    self.assertProtoEquals(
        'versions { producer: %d min_consumer: %d }' % (
            versions.GRAPH_DEF_VERSION,
            versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
        sess.graph_def)
    c = constant_op.constant(5.0, name='c')
    # assertEqual (not the deprecated assertEquals alias, removed in
    # Python 3.12): node count tracks each constant as it is created.
    self.assertEqual(len(sess.graph_def.node), 1)
    d = constant_op.constant(6.0, name='d')
    self.assertEqual(len(sess.graph_def.node), 2)
    self.assertAllEqual(c.eval(), 5.0)
    self.assertAllEqual(d.eval(), 6.0)
    e = constant_op.constant(7.0, name='e')
    self.assertEqual(len(sess.graph_def.node), 3)
    self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
  """run() after the session's `with` block exits raises RuntimeError."""
  with session.Session() as sess:
    c = constant_op.constant(5.0)
    self.assertAllEqual(sess.run(c), 5.0)
  # The context manager closed the session; further runs must fail.
  with self.assertRaisesWithPredicateMatch(
      RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
    sess.run(c)
def testUseAfterCloseConcurrent(self):
  """Closing a session while another thread runs it raises in that thread."""
  with session.Session() as sess:
    c = constant_op.constant(5.0)
    self.assertAllEqual(sess.run(c), 5.0)

    def update_thread():
      # Loop until close() in the main thread makes run() raise.
      with self.assertRaisesWithPredicateMatch(
          RuntimeError,
          lambda e: 'Attempted to use a closed Session.' in str(e)):
        while True:
          sess.run(c)
    t = threading.Thread(target=update_thread)
    t.start()
    time.sleep(0.1)
    sess.close()
    t.join()
def testUseEmptyGraph(self):
  """Running against a session whose graph has no ops raises RuntimeError."""
  with session.Session() as sess:
    with self.assertRaisesWithPredicateMatch(
        RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
      sess.run([])
def testNotEntered(self):
  """A session not entered via `with` is never the default session."""
  # pylint: disable=protected-access
  self.assertEqual(ops._default_session_stack.get_default(), None)
  # pylint: enable=protected-access
  with ops.device('/cpu:0'):
    sess = session.Session()
    c_1 = constant_op.constant(5.0)
    with sess.graph.as_default():
      c_2 = constant_op.constant(5.0)
    self.assertEqual(c_1.graph, c_2.graph)
    self.assertEqual(sess.run(c_2), 5.0)
    # eval() needs a default session, which was never installed.
    with self.assertRaisesWithPredicateMatch(
        ValueError, lambda e: 'No default session is registered.' in str(e)):
      c_2.eval()
def testInteractive(self):
  """InteractiveSession installs itself as default without a `with` block."""
  with ops.device('/cpu:0'):
    sess = session.InteractiveSession()
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
    self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
    d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
    e = math_ops.matmul(c, d)
    self.assertAllEqual([[24.0]], e.eval())
    sess.close()
def testSharedGraph(self):
  """Two sessions sharing one graph compute identical results."""
  with ops.Graph().as_default() as g, ops.device('/cpu:0'):
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[2, 3])
    c = math_ops.matmul(a, b)
  with session.Session(graph=g) as sess1:
    with session.Session(graph=g) as sess2:
      self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
  """The same tensor may appear more than once in a fetch list."""
  with session.Session() as sess:
    a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(2.0, shape=[1, 3])
    a_val, b_val, a2_val = sess.run([a, b, a])
    self.assertAllEqual(a_val, [[1.0, 1.0]])
    self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
    self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
  """Feeding then fetching round-trips values across dtypes and shapes
  (including zero-sized shapes)."""
  with session.Session():
    for dtype in [dtypes.float32,
                  dtypes.float64,
                  dtypes.int32,
                  dtypes.uint8,
                  dtypes.int16,
                  dtypes.int8,
                  dtypes.int64,
                  dtypes.bool,
                  dtypes.complex64]:
      for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
        np_dtype = dtype.as_numpy_dtype
        feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
        out_t = array_ops.identity(feed_t)
        np_array = np.random.randint(-10, 10, shape)
        if dtype == dtypes.bool:
          np_array = np_array > 0
        elif dtype == dtypes.complex64:
          np_array = np.sqrt(np_array.astype(np_dtype))
        else:
          np_array = np_array.astype(np_dtype)
        self.assertAllEqual(np_array,
                            out_t.eval(feed_dict={feed_t: np_array}))
def testFeedError(self):
  """Feeding a tf.Tensor as a feed value raises TypeError on every run path."""
  with session.Session() as sess:
    feed_t = array_ops.placeholder(dtype=dtypes.float32)
    out_t = array_ops.identity(feed_t)
    feed_val = constant_op.constant(5.0)
    with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
      sess.run(out_t, feed_dict={feed_t: feed_val})
    with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
      out_t.eval(feed_dict={feed_t: feed_val})
    with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
      out_t.op.run(feed_dict={feed_t: feed_val})
def testStringFetch(self):
  """Fetching string constants of various shapes returns the original bytes."""
  with session.Session():
    for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
      size = 1
      for s in shape:
        size *= s
      # `object` rather than the removed `np.object` alias (NumPy >= 1.24);
      # the two were always the same type, so behavior is unchanged.
      c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                        dtype=object).reshape(shape) if size > 0 else []
      c = constant_op.constant(c_list)
      self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
  """Feeding string arrays of various shapes round-trips the values."""
  with session.Session():
    for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
      size = 1
      for s in shape:
        size *= s
      # `object` rather than the removed `np.object` alias (NumPy >= 1.24);
      # the two were always the same type, so behavior is unchanged.
      c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                        dtype=object).reshape(shape)
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
      c = array_ops.identity(feed_t)
      self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
  """Byte strings containing NUL characters survive a feed/fetch round trip."""
  with session.Session():
    c_list = [b'\n\x01\x00', b'\n\x00\x01']
    feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
    c = array_ops.identity(feed_t)
    out = c.eval(feed_dict={feed_t: c_list})
    self.assertEqual(c_list[0], out[0])
    self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
  """Unicode feed values come back as UTF-8 bytes, from lists and arrays."""
  with session.Session():
    c_list = [u'\n\x01\x00', u'\n\x00\x01']
    feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
    c = array_ops.identity(feed_t)

    out = c.eval(feed_dict={feed_t: c_list})
    self.assertEqual(c_list[0], out[0].decode('utf-8'))
    self.assertEqual(c_list[1], out[1].decode('utf-8'))

    # `object` rather than the removed `np.object` alias (NumPy >= 1.24);
    # the two were always the same type, so behavior is unchanged.
    out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=object)})
    self.assertEqual(c_list[0], out[0].decode('utf-8'))
    self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
  """Constructing a session with an unparseable target raises RuntimeError."""
  with self.assertRaises(RuntimeError):
    session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
  """Fetch names may be str, unicode, bytes, or raw strings interchangeably."""
  with session.Session() as sess:
    c = constant_op.constant(42.0, name='c')
    d = constant_op.constant(43.0, name=u'd')
    e = constant_op.constant(44.0, name=b'e')
    f = constant_op.constant(45.0, name=r'f')
    self.assertTrue(isinstance(c.name, six.text_type))
    self.assertTrue(isinstance(d.name, six.text_type))
    self.assertTrue(isinstance(e.name, six.text_type))
    self.assertTrue(isinstance(f.name, six.text_type))
    self.assertEqual(42.0, sess.run('c:0'))
    self.assertEqual(42.0, sess.run(u'c:0'))
    self.assertEqual(42.0, sess.run(b'c:0'))
    self.assertEqual(42.0, sess.run(r'c:0'))
    self.assertEqual(43.0, sess.run('d:0'))
    self.assertEqual(43.0, sess.run(u'd:0'))
    self.assertEqual(43.0, sess.run(b'd:0'))
    self.assertEqual(43.0, sess.run(r'd:0'))
    self.assertEqual(44.0, sess.run('e:0'))
    self.assertEqual(44.0, sess.run(u'e:0'))
    self.assertEqual(44.0, sess.run(b'e:0'))
    self.assertEqual(44.0, sess.run(r'e:0'))
    self.assertEqual(45.0, sess.run('f:0'))
    self.assertEqual(45.0, sess.run(u'f:0'))
    self.assertEqual(45.0, sess.run(b'f:0'))
    self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
    """Running a tensor in a session bound to a different graph raises."""
    with ops.Graph().as_default() as g_1:
        c_1 = constant_op.constant(1.0, name='c')
    with ops.Graph().as_default() as g_2:
        c_2 = constant_op.constant(2.0, name='c')

    # Same op name in both graphs -- only the owning graph disambiguates.
    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)

    with session.Session(graph=g_1) as sess_1:
        self.assertEqual(1.0, sess_1.run(c_1))
        for bad_fetch in (c_2, c_2.op):
            with self.assertRaises(ValueError):
                sess_1.run(bad_fetch)

    with session.Session(graph=g_2) as sess_2:
        for bad_fetch in (c_1, c_1.op):
            with self.assertRaises(ValueError):
                sess_2.run(bad_fetch)
        self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
    """A partial-run handle can feed inputs incrementally across calls."""
    with session.Session() as sess:
        in_a = array_ops.placeholder(dtypes.float32, shape=[])
        in_b = array_ops.placeholder(dtypes.float32, shape=[])
        in_c = array_ops.placeholder(dtypes.float32, shape=[])
        total = math_ops.add(in_a, in_b)
        product = math_ops.mul(total, in_c)

        handle = sess.partial_run_setup([total, product], [in_a, in_b, in_c])
        partial = sess.partial_run(handle, total, feed_dict={in_a: 1, in_b: 2})
        self.assertEqual(3, partial)
        # Feed a value derived from the first partial result.
        scaled = partial * 17
        self.assertEqual(
            153, sess.partial_run(handle, product, feed_dict={in_c: scaled}))
def testPartialRunIncomplete(self):
    """Setting up two fetches but only running the first must succeed."""
    with session.Session() as sess:
        in_a = array_ops.placeholder(dtypes.float32, shape=[])
        in_b = array_ops.placeholder(dtypes.float32, shape=[])
        in_c = array_ops.placeholder(dtypes.float32, shape=[])
        total = math_ops.add(in_a, in_b)
        product = math_ops.mul(total, in_c)

        # `product` is declared as a fetch but deliberately never fetched.
        handle = sess.partial_run_setup([total, product], [in_a, in_b, in_c])
        self.assertEqual(
            3, sess.partial_run(handle, total, feed_dict={in_a: 1, in_b: 2}))
def testConcurrentPartialRun(self):
    """Two partial-run handles over the same graph can be interleaved."""
    with session.Session() as sess:
        in_a = array_ops.placeholder(dtypes.float32, shape=[])
        in_b = array_ops.placeholder(dtypes.float32, shape=[])
        in_c = array_ops.placeholder(dtypes.float32, shape=[])
        total = math_ops.add(in_a, in_b)
        product = math_ops.mul(total, in_c)

        handle_1 = sess.partial_run_setup([total], [in_a, in_b, in_c])
        handle_2 = sess.partial_run_setup([total, product], [in_a, in_b, in_c])

        first = sess.partial_run(handle_1, total, feed_dict={in_a: 1, in_b: 2})
        self.assertEqual(3, first)
        # Feed handle_2 with a value computed from handle_1's result.
        second = sess.partial_run(
            handle_2, total, feed_dict={in_a: first * 19, in_b: 9})
        self.assertEqual(66, second)
        self.assertEqual(
            462, sess.partial_run(handle_2, product, feed_dict={in_c: 7}))
# Standard TensorFlow test entry point: runs all test methods in this file.
if __name__ == '__main__':
    googletest.main()
|
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.utils.extmath import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)

###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
    (redirects_url, redirects_filename),
    (page_links_url, page_links_filename),
]
for url, filename in resources:
    if not os.path.exists(filename):
        # Deferred import: urllib is only needed when a download is required.
        # NOTE(review): `urllib.urlopen` exists on Python 2 only (Python 3
        # moved it to `urllib.request`); the rest of this script (e.g.
        # `dict.iteritems` below) is Python 2 as well.
        import urllib
        print("Downloading data from '%s', please wait..." % url)
        opener = urllib.urlopen(url)
        try:
            # Bug fix: the original `open(filename, 'wb').write(...)` leaked
            # the file handle; a context manager guarantees flush + close
            # even if the download fails part way through.
            with open(filename, 'wb') as f:
                f.write(opener.read())
        finally:
            opener.close()
        print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution"""
    # Resolve the redirect (if any), then hand out the next dense integer id
    # on first sight of the canonical name.
    canonical = redirects.get(k, k)
    return index_map.setdefault(canonical, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)


def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix"""
    # Skip the leading '<' plus the dbpedia prefix, and drop the trailing '>'.
    return nt_uri[DBPEDIA_RESOURCE_PREFIX_LEN + 1:-1]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it"""
    # Maps source article name -> redirect target; built in two passes:
    # first the raw pairs from the .nt dump, then a transitive closure so
    # every key points at its ultimate (non-redirect) target.
    redirects = {}
    print("Parsing the NT redirect file")
    for l, line in enumerate(BZ2File(redirects_filename)):
        split = line.split()
        # An N-Triples line has exactly 4 tokens: subject predicate object '.'
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    # compute the transitive closure
    # NOTE(review): iterating `redirects.keys()` while reassigning *values*
    # is safe (the key set never changes); this code is Python 2 style.
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        # `seen` guards against redirect cycles in the dump.
        seen = set([source])
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                break
            seen.add(target)
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).

    limit: optional maximum number of link lines to parse (bounds memory
    usage); None parses the whole dump.
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)
    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        split = line.split()
        # An N-Triples line has exactly 4 tokens: subject predicate object '.'
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        # Resolve redirects while assigning dense integer article ids.
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
        if limit is not None and l >= limit - 1:
            break
    print("Computing the adjacency matrix")
    # LIL format is efficient for incremental element assignment...
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    # ...while CSR is efficient for the matrix products performed later.
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
    redirects_filename, page_links_filename, limit=5000000)
# reverse mapping: integer article index -> article name
# NOTE(review): `iteritems` is Python 2 only; on Python 3 this would be
# `index_map.items()`.
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Power iteration computation of the principal eigenvector

    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:

      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    n = X.shape[0]
    X = X.copy()
    incoming_counts = np.asarray(X.sum(axis=1)).ravel()

    print("Normalizing the graph")
    # Scale each non-empty row in place so it sums to one.
    for i in incoming_counts.nonzero()[0]:
        X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
    # Dangling nodes (empty rows) redistribute uniformly.
    dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()

    scores = np.ones(n, dtype=np.float32) / n  # initial guess
    for iteration in range(max_iter):
        print("power iteration #%d" % iteration)
        previous = scores
        scores = (alpha * (scores * X + np.dot(dangle, previous))
                  + (1 - alpha) * previous.sum() / n)
        # check convergence: normalized l_inf norm
        denom = np.abs(scores).max()
        if denom == 0.0:
            denom = 1.0
        err = np.abs(scores - previous).max() / denom
        print("error: %0.6f" % err)
        if err < n * tol:
            break
    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
# argsort is ascending, so the ten highest-scoring pages come last
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
|
|
from abc import ABCMeta, abstractmethod, abstractproperty
from operator import itemgetter
from .experiment import Experiment, DefaultExperiment
from .ops.random import Sample, RandomInteger
from .assignment import Assignment
# decorator for methods that assume assignments have been made
def requires_experiment(f):
    """Decorator: lazily assign the caller to an experiment before running f."""
    def wrapper(self, *args, **kwargs):
        # Memoized: only perform the segment lookup / assignment once.
        if not self._experiment:
            self._assign_experiment()
        return f(self, *args, **kwargs)
    return wrapper
def requires_default_experiment(f):
    """Decorator: lazily build the default experiment before running f."""
    def wrapper(self, *args, **kwargs):
        # Memoized: construct the default experiment at most once.
        if not self._default_experiment:
            self._assign_default_experiment()
        return f(self, *args, **kwargs)
    return wrapper
class Namespace(object):
    """Abstract interface for PlanOut namespaces.

    A namespace partitions a population into segments and maps segments to
    experiments, so each unit is exposed to at most one experiment at a time.
    """
    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    def __init__(self, **kwargs):
        pass

    @abstractmethod
    def add_experiment(self, name, exp_object, num_segments, **kwargs):
        # Allocate `num_segments` segments to a new experiment.
        pass

    @abstractmethod
    def remove_experiment(self, name):
        # Deallocate an experiment and free its segments.
        pass

    @abstractmethod
    def set_auto_exposure_logging(self, value):
        # Toggle automatic exposure logging on the underlying experiment.
        pass

    @abstractproperty
    def in_experiment(self):
        # Whether the current unit landed in a (non-default) experiment.
        pass

    @abstractmethod
    def get(self, name, default):
        # Fetch a parameter from the assigned experiment.
        pass

    @abstractmethod
    def log_exposure(self, extras=None):
        # Log that the unit was exposed to its treatment.
        pass

    @abstractmethod
    def log_event(self, event_type, extras=None):
        # Log an arbitrary event against the assigned experiment.
        pass
class SimpleNamespace(Namespace):
    """Base implementation of a PlanOut namespace.

    Hashes a primary unit into one of ``num_segments`` segments; segments are
    randomly allocated to experiments, so each unit sees at most one
    experiment from the namespace.  Subclasses implement :meth:`setup` and
    :meth:`setup_experiments`.
    """
    __metaclass__ = ABCMeta  # Python 2 style ABC declaration

    def __init__(self, **kwargs):
        self.name = self.__class__  # default name is the class name
        self.inputs = kwargs
        self.num_segments = None
        # dictionary mapping segments to experiment names
        self.segment_allocations = {}
        # dictionary mapping experiment names to experiment objects
        self.current_experiments = {}
        self._experiment = None  # memoized experiment object
        self._default_experiment = None  # memoized default experiment object
        self.default_experiment_class = DefaultExperiment
        self._in_experiment = False
        # setup name, primary key, number of segments, etc
        self.setup()
        self.available_segments = set(range(self.num_segments))
        # load namespace with experiments
        self.setup_experiments()

    @abstractmethod
    def setup(self):
        """Sets up experiment"""
        # Developers extending this class should set the following variables
        # self.name = 'sample namespace'
        # self.primary_unit = 'userid'
        # self.num_segments = 10000
        pass

    @abstractmethod
    def setup_experiments(self):
        """Register experiments via add_experiment().

        Bug fix: the abstract stub previously omitted ``self``, which made the
        declared interface inconsistent with how subclasses must define (and
        __init__ calls) the method.
        """
        # e.g.,
        # self.add_experiment('first experiment', Exp1, 100)
        pass

    @property
    def primary_unit(self):
        """The list of input keys used to hash units into segments."""
        return self._primary_unit

    @primary_unit.setter
    def primary_unit(self, value):
        # later on we require that the primary key is a list, so we use
        # a setter to convert strings to a single element list
        if type(value) is list:
            self._primary_unit = value
        else:
            self._primary_unit = [value]

    def add_experiment(self, name, exp_object, segments):
        """Randomly allocate `segments` free segments to a new experiment.

        Returns False on failure; returns None on success (kept for backward
        compatibility with existing callers).
        """
        num_avail = len(self.available_segments)
        if num_avail < segments:
            # print(...) with a single argument is valid Python 2 and 3
            print('error: %s segments requested, only %s available.' %
                  (segments, num_avail))
            return False
        if name in self.current_experiments:
            print('error: there is already an experiment called %s.' % name)
            return False
        # randomly select the given number of segments from all available
        # segments
        a = Assignment(self.name)
        a.sampled_segments = \
            Sample(choices=list(self.available_segments),
                   draws=segments, unit=name)
        # assign each segment to the experiment name
        for segment in a.sampled_segments:
            self.segment_allocations[segment] = name
            self.available_segments.remove(segment)
        # associate the experiment name with an object
        self.current_experiments[name] = exp_object

    def remove_experiment(self, name):
        """Deallocate an experiment and return its segments to the free pool."""
        if name not in self.current_experiments:
            print('error: there is no experiment called %s.' % name)
            return False
        # .items() works on both Python 2 and 3 (the original used the
        # Python-2-only .iteritems()).
        segments_to_free = \
            [s for s, n in self.segment_allocations.items() if n == name]
        for segment in segments_to_free:
            del self.segment_allocations[segment]
            self.available_segments.add(segment)
        del self.current_experiments[name]
        return True

    def get_segment(self):
        """Deterministically hash the primary unit to a segment number."""
        # randomly assign primary unit to a segment
        a = Assignment(self.name)
        a.segment = RandomInteger(min=0, max=self.num_segments - 1,
                                  unit=itemgetter(*self.primary_unit)(self.inputs))
        return a.segment

    def _assign_experiment(self):
        "assign primary unit to an experiment"
        segment = self.get_segment()
        # is the unit allocated to an experiment?
        if segment in self.segment_allocations:
            experiment_name = self.segment_allocations[segment]
            experiment = self.current_experiments[
                experiment_name](**self.inputs)
            experiment.name = '%s-%s' % (self.name, experiment_name)
            experiment.salt = '%s.%s' % (self.name, experiment_name)
            self._experiment = experiment
            self._in_experiment = experiment.in_experiment
        # if the unit does not belong to an experiment, or the unit has been
        # disqualified from being in the assigned experiment, fall back on the
        # default experiment
        if not self._in_experiment:
            self._assign_default_experiment()

    def _assign_default_experiment(self):
        """Construct the (parameter-default) fallback experiment."""
        self._default_experiment = self.default_experiment_class(**self.inputs)

    @requires_default_experiment
    def default_get(self, name, default=None):
        """Fetch a parameter from the default experiment."""
        return self._default_experiment.get(name, default)

    @property
    @requires_experiment
    def in_experiment(self):
        return self._in_experiment

    @in_experiment.setter
    def in_experiment(self, value):
        # in_experiment cannot be externally modified
        pass

    @requires_experiment
    def set_auto_exposure_logging(self, value):
        self._experiment.set_auto_exposure_logging(value)

    @requires_experiment
    def get(self, name, default=None):
        if self._experiment is None:
            return self.default_get(name, default)
        else:
            return self._experiment.get(name, self.default_get(name, default))

    @requires_experiment
    def log_exposure(self, extras=None):
        """Logs exposure to treatment"""
        if self._experiment is None:
            # Bug fix: this guard was `pass`, which fell through and raised
            # AttributeError on None below; exposure of the default
            # experiment is intentionally not logged.
            return
        self._experiment.log_exposure(extras)

    @requires_experiment
    def log_event(self, event_type, extras=None):
        """Log an arbitrary event"""
        if self._experiment is None:
            # Bug fix: same fall-through as log_exposure above.
            return
        self._experiment.log_event(event_type, extras)
|
|
# Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Protocol and access-type constants used throughout the HPE 3PAR driver tests.
CIFS = 'CIFS'
SMB_LOWER = 'smb'
NFS = 'NFS'
NFS_LOWER = 'nfs'
IP = 'ip'
USER = 'user'
USERNAME = 'USERNAME_0'
ADD_USERNAME = '+USERNAME_0:fullcontrol'
DROP_USERNAME = '-USERNAME_0:fullcontrol'
PASSWORD = 'PASSWORD_0'
READ_WRITE = 'rw'
READ_ONLY = 'ro'
# Fake backend connection settings.
SAN_LOGIN = 'testlogin4san'
SAN_PASSWORD = 'testpassword4san'
API_URL = 'https://1.2.3.4:8080/api/v1'
TIMEOUT = 60
PORT = 22
SHARE_TYPE_ID = 123456789
CIDR_PREFIX = '24'
# Constants to use with Mock and expect in results
EXPECTED_IP_10203040 = '10.20.30.40'
EXPECTED_IP_10203041 = '10.20.30.41'
EXPECTED_IP_1234 = '1.2.3.4'
EXPECTED_MY_IP = '9.8.7.6'
EXPECTED_IP_127 = '127.0.0.1'
EXPECTED_IP_127_2 = '127.0.0.2'
EXPECTED_ACCESS_LEVEL = 'foo_access'
EXPECTED_SUBNET = '255.255.255.0'  # based on CIDR_PREFIX above
EXPECTED_VLAN_TYPE = 'vlan'
EXPECTED_VXLAN_TYPE = 'vxlan'
EXPECTED_VLAN_TAG = '101'
EXPECTED_SERVER_ID = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e'
EXPECTED_PROJECT_ID = 'osf-nfs-project-id'
SHARE_ID = 'share-id'
EXPECTED_SHARE_ID = 'osf-share-id'
EXPECTED_SHARE_ID_RO = 'osf-ro-share-id'
EXPECTED_SHARE_NAME = 'share-name'
EXPECTED_NET_NAME = 'testnet'
EXPECTED_FPG = 'pool'
EXPECTED_HOST = 'hostname@backend#' + EXPECTED_FPG
UNEXPECTED_FPG = 'not_a_pool'
UNEXPECTED_HOST = 'hostname@backend#' + UNEXPECTED_FPG
HOST_WITHOUT_POOL_1 = 'hostname@backend'
HOST_WITHOUT_POOL_2 = 'hostname@backend#'
EXPECTED_SHARE_PATH = '/anyfpg/anyvfs/anyfstore'
EXPECTED_SIZE_1 = 1
EXPECTED_SIZE_2 = 2
EXPECTED_SNAP_NAME = 'osf-snap-name'
EXPECTED_SNAP_ID = 'osf-snap-id'
EXPECTED_STATS = {'test': 'stats'}
EXPECTED_FPG_CONF = [{EXPECTED_FPG: [EXPECTED_IP_10203040]}]
EXPECTED_FSTORE = EXPECTED_PROJECT_ID
EXPECTED_VFS = 'test_vfs'
# Canned VFS payloads: "CLIENT_*" mimic raw REST API responses,
# "MEDIATOR_*" mimic the mediator's normalized return values.
EXPECTED_GET_VFS = {'vfsname': EXPECTED_VFS,
                    'vfsip': {'address': [EXPECTED_IP_10203040]}}
EXPECTED_GET_VFS_MULTIPLES = {
    'vfsname': EXPECTED_VFS,
    'vfsip': {'address': [EXPECTED_IP_10203041, EXPECTED_IP_10203040]}}
EXPECTED_CLIENT_GET_VFS_MEMBERS_MULTI = {
    'fspname': EXPECTED_VFS,
    'vfsip': [
        {'networkName': EXPECTED_NET_NAME,
         'fspool': EXPECTED_VFS,
         'address': EXPECTED_IP_10203040,
         'prefixLen': EXPECTED_SUBNET,
         'vfs': EXPECTED_VFS,
         'vlanTag': EXPECTED_VLAN_TAG,
         },
        {'networkName': EXPECTED_NET_NAME,
         'fspool': EXPECTED_VFS,
         'address': EXPECTED_IP_10203041,
         'prefixLen': EXPECTED_SUBNET,
         'vfs': EXPECTED_VFS,
         'vlanTag': EXPECTED_VLAN_TAG,
         },
    ],
    'vfsname': EXPECTED_VFS,
}
EXPECTED_MEDIATOR_GET_VFS_RET_VAL_MULTI = {
    'fspname': EXPECTED_VFS,
    'vfsip': {
        'networkName': EXPECTED_NET_NAME,
        'fspool': EXPECTED_VFS,
        'address': [
            EXPECTED_IP_10203040,
            EXPECTED_IP_10203041,
        ],
        'prefixLen': EXPECTED_SUBNET,
        'vfs': EXPECTED_VFS,
        'vlanTag': EXPECTED_VLAN_TAG
    },
    'vfsname': EXPECTED_VFS,
}
EXPECTED_CLIENT_GET_VFS_MEMBERS = {
    'fspname': EXPECTED_VFS,
    'vfsip': {
        'networkName': EXPECTED_NET_NAME,
        'fspool': EXPECTED_VFS,
        'address': EXPECTED_IP_10203040,
        'prefixLen': EXPECTED_SUBNET,
        'vfs': EXPECTED_VFS,
        'vlanTag': EXPECTED_VLAN_TAG,
    },
    'vfsname': EXPECTED_VFS,
}
EXPECTED_MEDIATOR_GET_VFS_RET_VAL = {
    'fspname': EXPECTED_VFS,
    'vfsip': {
        'networkName': EXPECTED_NET_NAME,
        'fspool': EXPECTED_VFS,
        'address': [EXPECTED_IP_10203040],
        'prefixLen': EXPECTED_SUBNET,
        'vfs': EXPECTED_VFS,
        'vlanTag': EXPECTED_VLAN_TAG,
    },
    'vfsname': EXPECTED_VFS,
}
EXPECTED_CLIENT_GET_VFS_RETURN_VALUE = {
    'total': 1,
    'members': [EXPECTED_CLIENT_GET_VFS_MEMBERS],
}
EXPECTED_CLIENT_GET_VFS_RETURN_VALUE_MULTI = {
    'total': 1,
    'members': [EXPECTED_CLIENT_GET_VFS_MEMBERS_MULTI],
}
EXPECTED_FPG_MAP = {EXPECTED_FPG: {EXPECTED_VFS: [EXPECTED_IP_10203040]}}
EXPECTED_FPG_MAP_MULTI_VFS = {EXPECTED_FPG: {
    EXPECTED_VFS: [EXPECTED_IP_10203041, EXPECTED_IP_10203040]}}
EXPECTED_SHARE_IP = '10.50.3.8'
EXPECTED_HPE_DEBUG = True
EXPECTED_COMMENT = "OpenStack Manila - foo-comment"
EXPECTED_EXTRA_SPECS = {}
EXPECTED_LOCATION = ':'.join((EXPECTED_IP_1234, EXPECTED_SHARE_PATH))
EXPECTED_SUPER_SHARE = 'OPENSTACK_SUPER_SHARE'
EXPECTED_SUPER_SHARE_COMMENT = ('OpenStack super share used to delete nested '
                                'shares.')
EXPECTED_CIFS_DOMAIN = 'LOCAL_CLUSTER'
EXPECTED_MOUNT_PATH = '/mnt/'
SHARE_SERVER = {
    'backend_details': {
        'ip': EXPECTED_IP_10203040,
        'fpg': EXPECTED_FPG,
        'vfs': EXPECTED_VFS,
    },
}
# Access rules. Allow for overwrites.
ACCESS_RULE_NFS = {
    'access_type': IP,
    'access_to': EXPECTED_IP_1234,
    'access_level': READ_WRITE,
}
ACCESS_RULE_CIFS = {
    'access_type': USER,
    'access_to': USERNAME,
    'access_level': READ_WRITE,
}
ADD_RULE_BAD_TYPE = {
    'access_type': 'unsupported_other_type',
    'access_to': USERNAME,
    'access_level': READ_WRITE,
}
ADD_RULE_IP = {
    'access_type': IP,
    'access_to': EXPECTED_IP_1234,
    'access_level': READ_WRITE,
}
ADD_RULE_IP_RO = {
    'access_type': IP,
    'access_to': EXPECTED_IP_1234,
    'access_level': READ_ONLY,
}
ADD_RULE_USER = {
    'access_type': USER,
    'access_to': USERNAME,
    'access_level': READ_WRITE,
}
DELETE_RULE_IP = {
    'access_type': IP,
    'access_to': EXPECTED_IP_1234,
    'access_level': READ_WRITE,
}
DELETE_RULE_USER = {
    'access_type': USER,
    'access_to': USERNAME,
    'access_level': READ_WRITE,
}
DELETE_RULE_IP_RO = {
    'access_type': IP,
    'access_to': EXPECTED_IP_1234,
    'access_level': READ_ONLY,
}
GET_FSQUOTA = {'message': None,
               'total': 1,
               'members': [{'hardBlock': '1024', 'softBlock': '1024'}]}
EXPECTED_FSIP = {
    'fspool': EXPECTED_FPG,
    'vfs': EXPECTED_VFS,
    'address': EXPECTED_IP_1234,
    'prefixLen': EXPECTED_SUBNET,
    'vlanTag': EXPECTED_VLAN_TAG,
}
OTHER_FSIP = {
    'fspool': EXPECTED_FPG,
    'vfs': EXPECTED_VFS,
    'address': '9.9.9.9',
    'prefixLen': EXPECTED_SUBNET,
    'vlanTag': EXPECTED_VLAN_TAG,
}
NFS_SHARE_INFO = {
    'project_id': EXPECTED_PROJECT_ID,
    'id': EXPECTED_SHARE_ID,
    'share_proto': NFS,
    'export_location': EXPECTED_LOCATION,
    'size': 1234,
    'host': EXPECTED_HOST,
}
SNAPSHOT_INFO = {
    'name': EXPECTED_SNAP_NAME,
    'id': EXPECTED_SNAP_ID,
    'share': {
        'project_id': EXPECTED_PROJECT_ID,
        'id': EXPECTED_SHARE_ID,
        'share_proto': NFS,
        'export_location': EXPECTED_LOCATION,
        'host': EXPECTED_HOST,
    },
}
SNAPSHOT_INSTANCE = {
    'name': EXPECTED_SNAP_NAME,
    'id': EXPECTED_SNAP_ID,
    'share_id': EXPECTED_SHARE_ID,
    'share_proto': NFS,
}


class FakeException(Exception):
    # Marker exception raised by mocks to verify error-path handling.
    pass


FAKE_EXCEPTION = FakeException("Fake exception for testing.")
|
|
# (c) 2017, XYSec Labs
#
# This list based on the AdAway blocklist (https://github.com/AdAway/AdAway)
#
# AdAway default blocklist
# Blocking mobile ad providers and some analytics providers
#
# Contribute:
# Create an issue at https://github.com/AdAway/AdAway/issues
#
# Changelog:
# 2016-07-18 Now hosted on GitHub + Cloudflare
# 2014-05-18 Now with a valid SSL certificate available at
# https://adaway.org/hosts.txt
# 2013-03-29 Integrated some hosts from
# http://adblock.gjtech.net/?format=hostfile
# 2013-03-14 Back from the dead
#
# License:
# CC Attribution 3.0 (http://creativecommons.org/licenses/by/3.0/)
#
# Contributions by:
# Kicelo, Dominik Schuermann
#
# Hostnames of mobile ad / analytics providers to block, derived from the
# AdAway blocklist (see the header above).  Order is preserved as published.
blocklist = [
    "lb.usemaxserver.de",
    "tracking.klickthru.com",
    "gsmtop.net",
    "click.buzzcity.net",
    "ads.admoda.com",
    "stats.pflexads.com",
    "a.glcdn.co",
    "wwww.adleads.com",
    "ad.madvertise.de",
    "apps.buzzcity.net",
    "ads.mobgold.com",
    "android.bcfads.com",
    "show.buzzcity.net",
    "api.analytics.omgpop.com",
    "r.edge.inmobicdn.net",
    "www.mmnetwork.mobi",
    "img.ads.huntmad.com",
    "creative1cdn.mobfox.com",
    "admicro2.vcmedia.vn",
    "admicro1.vcmedia.vn",
    "s3.phluant.com",
    "c.vrvm.com",
    "go.vrvm.com",
    "static.estebull.com",
    "mobile.Banzai.it",
    "ads.xxxad.net",
    "img.ads.mojiva.com",
    "adcontent.saymedia.com",
    "ads.saymedia.com",
    "ftpcontent.worldnow.com",
    "s0.2mdn.net",
    "img.ads.mocean.mobi",
    "bigmobileads.com",
    "banners.bigmobileads.com",
    "ads.mopub.com",
    "images.mpression.net",
    "images.millennialmedia.com",
    "oasc04012.247realmedia.com",
    "assets.cntdy.mobi",
    "ad.leadboltapps.net",
    "api.airpush.com",
    "ad.where.com",
    "i.tapit.com",
    "cdn1.crispadvertising.com",
    "cdn2.crispadvertising.com",
    "medrx.sensis.com.au",
    "rs-staticart.ybcdn.net",
    "img.ads.taptapnetworks.com",
    "adserver.ubiyoo.com",
    "c753738.r38.cf2.rackcdn.com",
    "edge.reporo.net",
    "ads.n-ws.org",
    "adultmoda.com",
    "ads.smartdevicemedia.com",
    "b.scorecardresearch.com",
    "m.adsymptotic.com",
    "cdn.vdopia.com",
    "api.yp.com",
    "asotrack1.fluentmobile.com",
    "android-sdk31.transpera.com",
    "apps.mobilityware.com",
    "ads.mobilityware.com",
    "ads.admarvel.com",
    "netdna.reporo.net",
    "www.eltrafiko.com",
    "cdn.trafficforce.com",
    "gts-ads.twistbox.com",
    "static.cdn.gtsmobi.com",
    "ads.matomymobile.com",
    "ads.adiquity.com",
    "img.ads.mobilefuse.net",
    "as.adfonic.net",
    "media.mobpartner.mobi",
    "cdn.us.goldspotmedia.com",
    "ads2.mediaarmor.com",
    "cdn.nearbyad.com",
    "ads.ookla.com",
    "mobiledl.adobe.com",
    "ads.flurry.com",
    "gemini.yahoo.com",
    "d3anogn3pbtk4v.cloudfront.net",
    "d3oltyb66oj2v8.cloudfront.net",
    "d2bgg7rjywcwsy.cloudfront.net",
    "a.vserv.mobi",
    "admin.vserv.mobi",
    "c.vserv.mobi",
    "ads.vserv.mobi",
    "sf.vserv.mobi",
    "hybl9bazbc35.pflexads.com",
    "hhbekxxw5d9e.pflexads.com",
    "www.pflexads.com",
    "orencia.pflexads.com",
    "atti.velti.com",
    "ru.velti.com",
    "mwc.velti.com",
    "cdn.celtra.com",
    "ads.celtra.com",
    "cache-ssl.celtra.com",
    "cache.celtra.com",
    "track.celtra.com",
    "wv.inner-active.mobi",
    "cdn1.inner-active.mobi",
    "m2m1.inner-active.mobi",
    # jumptap load-balanced request hosts (bos-tapreq01..20)
    "bos-tapreq01.jumptap.com",
    "bos-tapreq02.jumptap.com",
    "bos-tapreq03.jumptap.com",
    "bos-tapreq04.jumptap.com",
    "bos-tapreq05.jumptap.com",
    "bos-tapreq06.jumptap.com",
    "bos-tapreq07.jumptap.com",
    "bos-tapreq08.jumptap.com",
    "bos-tapreq09.jumptap.com",
    "bos-tapreq10.jumptap.com",
    "bos-tapreq11.jumptap.com",
    "bos-tapreq12.jumptap.com",
    "bos-tapreq13.jumptap.com",
    "bos-tapreq14.jumptap.com",
    "bos-tapreq15.jumptap.com",
    "bos-tapreq16.jumptap.com",
    "bos-tapreq17.jumptap.com",
    "bos-tapreq18.jumptap.com",
    "bos-tapreq19.jumptap.com",
    "bos-tapreq20.jumptap.com",
    "web64.jumptap.com",
    "web63.jumptap.com",
    "web65.jumptap.com",
    "bo.jumptap.com",
    "i.jumptap.com",
    "a.applovin.com",
    "d.applovin.com",
    "pdn.applovin.com",
    "mobpartner.mobi",
    "go.mobpartner.mobi",
    "r.mobpartner.mobi",
    "uk-ad2.adinfuse.com",
    "adinfuse.com",
    "go.adinfuse.com",
    "ad1.adinfuse.com",
    "ad2.adinfuse.com",
    "sky.adinfuse.com",
    "orange-fr.adinfuse.com",
    "sky-connect.adinfuse.com",
    "uk-go.adinfuse.com",
    "orangeuk-mc.adinfuse.com",
    "intouch.adinfuse.com",
    "funnel0.adinfuse.com",
    "cvt.mydas.mobi",
    "lp.mydas.mobi",
    "golds.lp.mydas.mobi",
    "suo.lp.mydas.mobi",
    "aio.lp.mydas.mobi",
    "lp.mp.mydas.mobi",
    "media.mydas.mobi",
    "ads.mp.mydas.mobi",
    "neptune.appads.com",
    "neptune1.appads.com",
    "neptune2.appads.com",
    "neptune3.appads.com",
    "saturn.appads.com",
    "saturn1.appads.com",
    "saturn2.appads.com",
    "saturn3.appads.com",
    "jupiter.appads.com",
    "jupiter1.appads.com",
    "jupiter2.appads.com",
    "jupiter3.appads.com",
    "req.appads.com",
    "req1.appads.com",
    "req2.appads.com",
    "req3.appads.com",
    "mc.yandex.ru",
    "an.yandex.ru",
    "swappit.tapad.com",
    "campaign-tapad.s3.amazonaws.com",
    "adsrv1.tapad.com",
    "ads1.mojiva.com",
    "ads2.mojiva.com",
    "ads3.mojiva.com",
    "ads4.mojiva.com",
    "ads5.mojiva.com",
    "i.w.inmobi.com",
    "r.w.inmobi.com",
    "c.w.inmobi.com",
    "adtracker.inmobi.com",
    "china.inmobi.com",
    "japan.inmobi.com",
    "mdn1.phluantmobile.net",
    "mdn2.phluantmobile.net",
    "mdn3.phluantmobile.net",
    "mdn3origin.phluantmobile.net",
    # smaato CDN hosts (c01..c60 and f03..f60)
    "soma.smaato.net",
    "c29new.smaato.net",
    "c01.smaato.net",
    "c02.smaato.net",
    "c03.smaato.net",
    "c04.smaato.net",
    "c05.smaato.net",
    "c06.smaato.net",
    "c07.smaato.net",
    "c08.smaato.net",
    "c09.smaato.net",
    "c10.smaato.net",
    "c11.smaato.net",
    "c12.smaato.net",
    "c13.smaato.net",
    "c14.smaato.net",
    "c15.smaato.net",
    "c16.smaato.net",
    "c17.smaato.net",
    "c18.smaato.net",
    "c19.smaato.net",
    "c20.smaato.net",
    "c21.smaato.net",
    "c22.smaato.net",
    "c23.smaato.net",
    "c24.smaato.net",
    "c25.smaato.net",
    "c26.smaato.net",
    "c27.smaato.net",
    "c28.smaato.net",
    "c29.smaato.net",
    "c30.smaato.net",
    "c31.smaato.net",
    "c32.smaato.net",
    "c33.smaato.net",
    "c34.smaato.net",
    "c35.smaato.net",
    "c36.smaato.net",
    "c37.smaato.net",
    "c38.smaato.net",
    "c39.smaato.net",
    "c40.smaato.net",
    "c41.smaato.net",
    "c42.smaato.net",
    "c43.smaato.net",
    "c44.smaato.net",
    "c45.smaato.net",
    "c46.smaato.net",
    "c47.smaato.net",
    "c48.smaato.net",
    "c49.smaato.net",
    "c50.smaato.net",
    "c51.smaato.net",
    "c52.smaato.net",
    "c53.smaato.net",
    "c54.smaato.net",
    "c55.smaato.net",
    "c56.smaato.net",
    "c57.smaato.net",
    "c58.smaato.net",
    "c59.smaato.net",
    "c60.smaato.net",
    "f03.smaato.net",
    "f04.smaato.net",
    "f05.smaato.net",
    "f06.smaato.net",
    "f07.smaato.net",
    "f08.smaato.net",
    "f09.smaato.net",
    "f10.smaato.net",
    "f11.smaato.net",
    "f12.smaato.net",
    "f13.smaato.net",
    "f14.smaato.net",
    "f15.smaato.net",
    "f16.smaato.net",
    "f17.smaato.net",
    "f18.smaato.net",
    "f19.smaato.net",
    "f20.smaato.net",
    "f21.smaato.net",
    "f22.smaato.net",
    "f23.smaato.net",
    "f24.smaato.net",
    "f25.smaato.net",
    "f26.smaato.net",
    "f27.smaato.net",
    "f28.smaato.net",
    "f29.smaato.net",
    "f30.smaato.net",
    "f31.smaato.net",
    "f32.smaato.net",
    "f33.smaato.net",
    "f34.smaato.net",
    "f35.smaato.net",
    "f36.smaato.net",
    "f37.smaato.net",
    "f38.smaato.net",
    "f39.smaato.net",
    "f40.smaato.net",
    "f41.smaato.net",
    "f42.smaato.net",
    "f43.smaato.net",
    "f44.smaato.net",
    "f45.smaato.net",
    "f46.smaato.net",
    "f47.smaato.net",
    "f48.smaato.net",
    "f49.smaato.net",
    "f50.smaato.net",
    "f51.smaato.net",
    "f52.smaato.net",
    "f53.smaato.net",
    "f54.smaato.net",
    "f55.smaato.net",
    "f56.smaato.net",
    "f57.smaato.net",
    "f58.smaato.net",
    "f59.smaato.net",
    "f60.smaato.net",
    "img.ads1.mojiva.com",
    "img.ads2.mojiva.com",
    "img.ads3.mojiva.com",
    "img.ads4.mojiva.com",
    "img.ads1.mocean.mobi",
    "img.ads2.mocean.mobi",
    "img.ads3.mocean.mobi",
    "img.ads4.mocean.mobi",
    "akamai.smartadserver.com",
    "cdn1.smartadserver.com",
    "diff.smartadserver.com",
    "diff2.smartadserver.com",
    "diff3.smartadserver.com",
    "eqx.smartadserver.com",
    "im2.smartadserver.com",
    "itx5-publicidad.smartadserver.com",
    "itx5.smartadserver.com",
    "tcy.smartadserver.com",
    "ww129.smartadserver.com",
    "ww13.smartadserver.com",
    "ww14.smartadserver.com",
    "ww234.smartadserver.com",
    "ww251.smartadserver.com",
    "ww264.smartadserver.com",
    "ww302.smartadserver.com",
    "ww362.smartadserver.com",
    "ww370.smartadserver.com",
    "ww381.smartadserver.com",
    "ww392.smartadserver.com",
    "ww55.smartadserver.com",
    "ww57.smartadserver.com",
    "ww84.smartadserver.com",
    "www.smartadserver.com",
    "www2.smartadserver.com",
    "www3.smartadserver.com",
    "www4.smartadserver.com",
    "ads.mobclix.com",
    "data.mobclix.com",
    "s.mobclix.com",
    "ads.mdotm.com",
    "cdn.mdotm.com",
    "ads2.greystripe.com",
    "adsx.greystripe.com",
    "c.greystripe.com",
    "aax-us-east.amazon-adsystem.com",
    "aax-us-west.amazon-adsystem.com",
    "s.amazon-adsystem.com",
    "admarvel.s3.amazonaws.com",
    "html5adkit.plusmo.s3.amazonaws.com",
    "inneractive-assets.s3.amazonaws.com",
    "strikeadcdn.s3.amazonaws.com",
    "a.admob.com",
    "analytics.admob.com",
    "c.admob.com",
    "media.admob.com",
    "p.admob.com",
    "met.adwhirl.com",
    "mob.adwhirl.com",
    "ad-g.doubleclick.net",
    "ad.doubleclick.net",
    "ad.mo.doubleclick.net",
    "doubleclick.net",
    "googleads.g.doubleclick.net",
    "pagead.googlesyndication.com",
    "pagead1.googlesyndication.com",
    "pagead2.googlesyndication.com",
    "events.foreseeresults.com",
    "survey.foreseeresults.com",
    "m.quantserve.com",
    "ad.leadboltmobile.net",
    "mobileads.msn.com",
    "img.adecorp.co.kr",
    "us0.adlibr.com",
    "ad.parrot.mable-inc.com",
    "aos.wall.youmi.net",
    "au.youmi.net",
    "coconuts.boy.jp",
    "iacpromotion.s3.amazonaws.com",
    "plugin.2easydroid.com",
    "adimg3.search.naver.net",
    "st.a-link.co.kr",
    "cdn.ajillionmax.com",
    "dispatch.admixer.co.kr",
    "ifc.inmobi.com",
    "thinknear-hosted.thinknearhub.com",
    "ads.adadapted.com",
    "analytics.localytics.com",
    "a.medialytics.com",
    "c.medialytics.com",
    "cdn.creative.medialytics.com",
    "p.medialytics.com",
    "px.cdn.creative.medialytics.com",
    "t.medialytics.com",
    "google-analytics.com",
    "googlesyndication.com",
    "applift.com",
    # test/demo tracker domains
    "trackersimulator.org",
    "eviltracker.net",
    "do-not-tracker.org",
]
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
# Work around a bug which causes segfaults if uuid is imported after
# PyQt. See here for details :
#
# https://bugs.gentoo.org/show_bug.cgi?id=317557
# http://www.riverbankcomputing.com/pipermail/pyqt/2010-December/028773.html
#
# Using __import__ rather than import so that we don't pollute the GafferUI
# namespace.
__import__( "uuid" )  # imported for its side effect only; name deliberately not bound
##########################################################################
# Function to return the C++ address of a wrapped Qt object. This can
# be useful if needing to implement part of the UI in C++ and the rest
# in Python.
##########################################################################
def _qtAddress( o ) :

	"""Return the integer C++ address of the wrapped Qt object `o`,
	using whichever binding layer (sip for PyQt, shiboken for PySide)
	is active."""

	import Qt

	if "PyQt" in Qt.__binding__ :
		import sip
		return sip.unwrapinstance( o )

	return __shiboken().getCppPointer( o )[0]
##########################################################################
# Function to return a wrapped Qt object from the given C++ address.
# This can be useful if needing to implement part of the UI in C++ and
# the rest in Python.
##########################################################################
def _qtObject( address, type ) :

	"""Construct a wrapped Qt object of the given `type` from the C++
	pointer at `address`, using whichever binding layer is active."""

	import Qt

	if "PyQt" in Qt.__binding__ :
		import sip
		return sip.wrapinstance( address, type )

	return __shiboken().wrapInstance( address, type )
##########################################################################
# Determines if the wrapped Qt object is still valid
# Useful when having to deal with the consequences of C++/Python deletion
# order challenges, see:
# https://github.com/GafferHQ/gaffer/pull/3179
##########################################################################
def _qtObjectIsValid( o ) :

	"""Return True if the C++ half of the wrapped Qt object `o` still
	exists. Useful when dealing with C++/Python deletion order issues,
	see https://github.com/GafferHQ/gaffer/pull/3179"""

	import Qt

	if "PyQt" in Qt.__binding__ :
		import sip
		return not sip.isdeleted( o )

	return __shiboken().isValid( o )
##########################################################################
# Shiboken lives in a variety of places depending on which PySide it is.
##########################################################################
def __shiboken() :

	"""Return the shiboken module matching the active PySide binding.
	Shiboken lives in a variety of places depending on which PySide it
	is."""

	import Qt
	# Shiboken is PySide's wrapping layer; PyQt callers must use sip.
	assert( "PyQt" not in Qt.__binding__ )

	if Qt.__binding__ == "PySide2" :
		try :
			import PySide2.shiboken2 as shiboken
		except ImportError :
			import shiboken2 as shiboken
	else :
		try :
			# Fix : bind the submodule to a local name. A bare
			# `import PySide.shiboken` binds only `PySide`, leaving
			# `shiboken` undefined and making the return below raise
			# NameError whenever PySide.shiboken is importable.
			import PySide.shiboken as shiboken
		except ImportError :
			import shiboken

	return shiboken
##########################################################################
# now import our actual functionality
##########################################################################
# Import modules that must be imported before _GafferUI, using __import__
# to avoid polluting the GafferUI namespace.
__import__( "IECore" )
__import__( "Gaffer" )
from ._GafferUI import *
# general ui stuff first
from .Enums import *
from .Widget import Widget
from .LazyMethod import LazyMethod
from .Menu import Menu
from .ContainerWidget import ContainerWidget
from .Window import Window
from .SplitContainer import SplitContainer
from .ListContainer import ListContainer
from .GridContainer import GridContainer
from .MenuBar import MenuBar
from .EventLoop import EventLoop
from .TabbedContainer import TabbedContainer
from .TextWidget import TextWidget
from .NumericWidget import NumericWidget
from .Button import Button
from .MultiLineTextWidget import MultiLineTextWidget
from .Label import Label
from .GLWidget import GLWidget
from .ScrolledContainer import ScrolledContainer
from .PathWidget import PathWidget
from .PathListingWidget import PathListingWidget
from .PathChooserWidget import PathChooserWidget
from .Dialogue import Dialogue
from .PathChooserDialogue import PathChooserDialogue
from .TextInputDialogue import TextInputDialogue
from .Collapsible import Collapsible
from .ColorSwatch import ColorSwatch
from .Slider import Slider
from .ShowURL import showURL
from .Spacer import Spacer
from .BoolWidget import BoolWidget, CheckBox
from .Image import Image
from .ErrorDialogue import ErrorDialogue
from ._Variant import _Variant
from .VectorDataWidget import VectorDataWidget
from .PathVectorDataWidget import PathVectorDataWidget
from .ProgressBar import ProgressBar
from .SelectionMenu import SelectionMenu
from .PathFilterWidget import PathFilterWidget
from .CompoundPathFilterWidget import CompoundPathFilterWidget
from .InfoPathFilterWidget import InfoPathFilterWidget
from .MatchPatternPathFilterWidget import MatchPatternPathFilterWidget
from .FileSequencePathFilterWidget import FileSequencePathFilterWidget
from .BusyWidget import BusyWidget
from .NumericSlider import NumericSlider
from .ColorChooser import ColorChooser
from .ColorChooserDialogue import ColorChooserDialogue
from .MessageWidget import MessageWidget, MessageSummaryWidget
from .NotificationMessageHandler import NotificationMessageHandler
from .MenuButton import MenuButton
from .MultiSelectionMenu import MultiSelectionMenu
from .PopupWindow import PopupWindow
from .ConfirmationDialogue import ConfirmationDialogue
from .DisplayTransform import DisplayTransform
from .Divider import Divider
from . import _Pointer
from .SplineWidget import SplineWidget
from .Bookmarks import Bookmarks
from . import WidgetAlgo
# then all the PathPreviewWidgets. note that the order
# of import controls the order of display.
from .PathPreviewWidget import PathPreviewWidget
from .CompoundPathPreview import CompoundPathPreview
from .DeferredPathPreview import DeferredPathPreview
from .InfoPathPreview import InfoPathPreview
from .HeaderPathPreview import HeaderPathPreview
from .DataPathPreview import DataPathPreview
# then stuff specific to graph uis
from .BackgroundMethod import BackgroundMethod
from .PlugValueWidget import PlugValueWidget
from .StringPlugValueWidget import StringPlugValueWidget
from .NumericPlugValueWidget import NumericPlugValueWidget
from .BoolPlugValueWidget import BoolPlugValueWidget
from .PathPlugValueWidget import PathPlugValueWidget
from .FileSystemPathPlugValueWidget import FileSystemPathPlugValueWidget
from .VectorDataPlugValueWidget import VectorDataPlugValueWidget
from .PathVectorDataPlugValueWidget import PathVectorDataPlugValueWidget
from .FileSystemPathVectorDataPlugValueWidget import FileSystemPathVectorDataPlugValueWidget
from .PlugWidget import PlugWidget
from .PlugLayout import PlugLayout
from .Editor import Editor
from .PythonEditor import PythonEditor
from .GadgetWidget import GadgetWidget
from .GraphEditor import GraphEditor
from .ScriptWindow import ScriptWindow
from .CompoundEditor import CompoundEditor
from .NameWidget import NameWidget
from .NameLabel import NameLabel
from .NodeSetEditor import NodeSetEditor
from .NodeEditor import NodeEditor
from .Layouts import Layouts
from .NodeMenu import NodeMenu
from . import FileMenu
from . import LayoutMenu
from . import EditMenu
from . import UserPlugs
from .Frame import Frame
from .CompoundNumericPlugValueWidget import CompoundNumericPlugValueWidget
from .BoxPlugValueWidget import BoxPlugValueWidget
from .NodeUI import NodeUI
from .StandardNodeUI import StandardNodeUI
from .NodeToolbar import NodeToolbar
from .StandardNodeToolbar import StandardNodeToolbar
from .Viewer import Viewer
from .ColorSwatchPlugValueWidget import ColorSwatchPlugValueWidget
from .ColorPlugValueWidget import ColorPlugValueWidget
from .AboutWindow import AboutWindow
from . import ApplicationMenu
from .BrowserEditor import BrowserEditor
from .Timeline import Timeline
from .MultiLineStringPlugValueWidget import MultiLineStringPlugValueWidget
from .PresetsPlugValueWidget import PresetsPlugValueWidget
from .GraphComponentBrowserMode import GraphComponentBrowserMode
from .ToolPlugValueWidget import ToolPlugValueWidget
from .LabelPlugValueWidget import LabelPlugValueWidget
from .CompoundDataPlugValueWidget import CompoundDataPlugValueWidget
from .LayoutPlugValueWidget import LayoutPlugValueWidget
from . import ScriptNodeUI
from .RefreshPlugValueWidget import RefreshPlugValueWidget
from . import PreferencesUI
from .SplinePlugValueWidget import SplinePlugValueWidget
from .RampPlugValueWidget import RampPlugValueWidget
from .NodeFinderDialogue import NodeFinderDialogue
from .ConnectionPlugValueWidget import ConnectionPlugValueWidget
from .ButtonPlugValueWidget import ButtonPlugValueWidget
from . import ViewUI
from . import ToolUI
from .Playback import Playback
from . import MetadataWidget
from .UIEditor import UIEditor
from . import GraphBookmarksUI
from . import DocumentationAlgo
from . import _PlugAdder
from .Backups import Backups
from .AnimationEditor import AnimationEditor
from . import CompoundNumericNoduleUI
from . import Examples
from .NameValuePlugValueWidget import NameValuePlugValueWidget
from .ShufflePlugValueWidget import ShufflePlugValueWidget
from .ShufflePlugValueWidget import ShufflesPlugValueWidget
# and then specific node uis
from . import DependencyNodeUI
from . import ComputeNodeUI
from . import RandomUI
from . import SpreadsheetUI
from . import ExpressionUI
from . import BoxUI
from . import ReferenceUI
from . import BackdropUI
from . import DotUI
from . import SubGraphUI
from . import SwitchUI
from . import ContextProcessorUI
from . import ContextVariablesUI
from . import DeleteContextVariablesUI
from . import TimeWarpUI
from . import LoopUI
from . import AnimationUI
from . import BoxIOUI
from . import BoxInUI
from . import BoxOutUI
from . import NameSwitchUI
from . import EditScopeUI
# backwards compatibility
## \todo Remove me
# Alias Gaffer.Metadata into this namespace so old code referencing
# GafferUI.Metadata keeps working.
Metadata = __import__( "Gaffer" ).Metadata
# Run any user/application startup configs for GafferUI.
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferUI" )
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2016 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the main user-facing abstractions. The main entry
point is the `GraphDatabase` class which can be used to obtain `Driver`
instances that are in turn used for managing sessions.
"""
from __future__ import division
from collections import deque
from .bolt import connect, Response, RUN, PULL_ALL
from .compat import integer, string, urlparse
from .constants import DEFAULT_PORT, ENCRYPTED_DEFAULT, TRUST_DEFAULT, TRUST_SIGNED_CERTIFICATES
from .exceptions import CypherError, ProtocolError, ResultError
from .ssl_compat import SSL_AVAILABLE, SSLContext, PROTOCOL_SSLv23, OP_NO_SSLv2, CERT_REQUIRED
from .summary import ResultSummary
from .types import hydrated
# Upper bound on recycled sessions kept by a Driver (see Driver.recycle).
DEFAULT_MAX_POOL_SIZE = 50


class AuthToken(object):
    """ Value object bundling authentication details: the auth scheme,
    the principal (typically a user name) and the matching credentials
    (typically a password).
    """

    def __init__(self, scheme, principal, credentials):
        self.scheme = scheme
        self.principal = principal
        self.credentials = credentials
class GraphDatabase(object):
    """ Top-level accessor for graph database functionality. Its main
    job is constructing :class:`.Driver` instances via :meth:`.driver`.
    """

    @staticmethod
    def driver(url, **config):
        """ Build a :class:`.Driver` for the given URL, forwarding any
        keyword arguments as driver configuration:

        >>> from neo4j.v1 import GraphDatabase
        >>> driver = GraphDatabase.driver("bolt://localhost")
        """
        return Driver(url, **config)
class Driver(object):
    """ Accessor for a specific graph database resource.

    :param address: either a ``bolt://host[:port]`` URL or a bare
        ``host[:port]`` string
    :param config: driver configuration; recognised keys include
        ``max_pool_size``, ``encrypted`` and ``trust``
    """
    def __init__(self, address, **config):
        # Accept either a full bolt:// URL or a bare host[:port] address.
        if "://" in address:
            parsed = urlparse(address)
            if parsed.scheme == "bolt":
                host = parsed.hostname
                port = parsed.port or DEFAULT_PORT
            else:
                raise ProtocolError("Only the 'bolt' URI scheme is supported [%s]" % address)
        elif ":" in address:
            # NOTE(review): a bare IPv6 address ("::1") would split wrongly
            # here — presumably only hostnames/IPv4 are expected; verify.
            host, port = address.split(":")
            port = int(port)
        else:
            host = address
            port = DEFAULT_PORT
        self.address = (host, port)
        self.config = config
        self.max_pool_size = config.get("max_pool_size", DEFAULT_MAX_POOL_SIZE)
        # Pool of recycled Session objects (see recycle()).
        self.session_pool = deque()
        try:
            self.encrypted = encrypted = config["encrypted"]
        except KeyError:
            # No explicit choice: fall back to the default, warning once
            # when that default is insecure because TLS is unavailable.
            _warn_about_insecure_default()
            self.encrypted = encrypted = ENCRYPTED_DEFAULT
        self.trust = trust = config.get("trust", TRUST_DEFAULT)
        if encrypted:
            if not SSL_AVAILABLE:
                raise RuntimeError("Bolt over TLS is only available in Python 2.7.9+ and Python 3.3+")
            ssl_context = SSLContext(PROTOCOL_SSLv23)
            # SSLv2 is unconditionally disabled.
            ssl_context.options |= OP_NO_SSLv2
            if trust >= TRUST_SIGNED_CERTIFICATES:
                ssl_context.verify_mode = CERT_REQUIRED
            ssl_context.set_default_verify_paths()
            self.ssl_context = ssl_context
        else:
            self.ssl_context = None

    def session(self):
        """ Create a new session based on the graph database details
        specified within this driver:

        >>> from neo4j.v1 import GraphDatabase
        >>> driver = GraphDatabase.driver("bolt://localhost")
        >>> session = driver.session()
        """
        session = None
        connected = False
        # Prefer a healthy pooled session; unhealthy ones popped from the
        # pool are simply discarded and the loop tries again.
        while not connected:
            try:
                session = self.session_pool.pop()
            except IndexError:
                # Pool exhausted: open a brand new connection instead.
                connection = connect(self.address, self.ssl_context, **self.config)
                session = Session(self, connection)
                connected = True
            else:
                if session.healthy:
                    #session.connection.reset()
                    connected = session.healthy
        return session

    def recycle(self, session):
        """ Accept a session for recycling, if healthy.

        :param session: the session to hand back to the pool
        :return:
        """
        pool = self.session_pool
        # First purge any sessions that went bad while pooled.
        for s in list(pool): # freezing the pool into a list for iteration allows pool mutation inside the loop
            if not s.healthy:
                pool.remove(s)
        if session.healthy and len(pool) < self.max_pool_size and session not in pool:
            pool.appendleft(session)
class StatementResult(object):
    """ A handler for the result of Cypher statement execution.

    Records are streamed lazily from the connection; callbacks attached
    to the RUN / PULL_ALL responses populate the internal state as
    messages are fetched.
    """

    #: The statement text that was executed to produce this result.
    statement = None

    #: Dictionary of parameters passed with the statement.
    parameters = None

    def __init__(self, connection, run_response, pull_all_response):
        super(StatementResult, self).__init__()

        # The Connection instance behind this result.
        self.connection = connection

        # The keys for the records in the result stream. These are
        # lazily populated on request.
        self._keys = None

        # Buffer for incoming records to be queued before yielding. If
        # the result is used immediately, this buffer will be ignored.
        self._buffer = deque()

        # The result summary (populated after the records have been
        # fully consumed).
        self._summary = None

        # Flag to indicate whether the entire stream has been consumed
        # from the network (but not necessarily yielded).
        self._consumed = False

        def on_header(metadata):
            # Called on receipt of the result header.
            self._keys = metadata["fields"]

        def on_record(values):
            # Called on receipt of each result record.
            self._buffer.append(values)

        def on_footer(metadata):
            # Called on receipt of the result footer.
            self._summary = ResultSummary(self.statement, self.parameters, **metadata)
            self._consumed = True

        def on_failure(metadata):
            # Called on execution failure.
            self._consumed = True
            raise CypherError(metadata)

        run_response.on_success = on_header
        run_response.on_failure = on_failure
        pull_all_response.on_record = on_record
        pull_all_response.on_success = on_footer
        pull_all_response.on_failure = on_failure

    def __iter__(self):
        """ Yield each remaining record as a :class:`.Record`, fetching
        from the network as required.
        """
        # Drain anything already buffered first...
        while self._buffer:
            values = self._buffer.popleft()
            yield Record(self.keys(), tuple(map(hydrated, values)))
        # ...then pull the rest of the stream.
        while not self._consumed:
            self.connection.fetch()
            while self._buffer:
                values = self._buffer.popleft()
                yield Record(self.keys(), tuple(map(hydrated, values)))

    def keys(self):
        """ Return the keys for the records.
        """
        # Fetch messages until we have the header or a failure
        while self._keys is None and not self._consumed:
            self.connection.fetch()
        return tuple(self._keys)

    def buffer(self):
        """ Fetch the remainder of the stream into the internal buffer
        and detach this result from its connection.
        """
        if self.connection and not self.connection.closed:
            while not self._consumed:
                self.connection.fetch()
            self.connection = None

    def consume(self):
        """ Consume the remainder of this result and return the
        summary.
        """
        # Iterating self drains both the buffer and the network stream;
        # the yielded records are discarded.
        if self.connection and not self.connection.closed:
            list(self)
            self.connection = None
        return self._summary

    def single(self):
        """ Return the next record, failing if none or more than one remain.
        """
        records = list(self)
        num_records = len(records)
        if num_records == 0:
            raise ResultError("Cannot retrieve a single record, because this result is empty.")
        elif num_records != 1:
            raise ResultError("Expected a result with a single record, but this result contains at least one more.")
        else:
            return records[0]

    def peek(self):
        """ Return the next record without advancing the cursor. Fails
        if no records remain.
        """
        if self._buffer:
            values = self._buffer[0]
            return Record(self.keys(), tuple(map(hydrated, values)))
        while not self._buffer and not self._consumed:
            self.connection.fetch()
            if self._buffer:
                values = self._buffer[0]
                return Record(self.keys(), tuple(map(hydrated, values)))
        raise ResultError("End of stream")
class Session(object):
    """ Logical session carried out over an established TCP connection.
    Sessions should generally be constructed using the :meth:`.Driver.session`
    method.
    """

    def __init__(self, driver, connection):
        # The Driver that owns (and will recycle) this session.
        self.driver = driver
        # The underlying Bolt connection.
        self.connection = connection
        # The currently open Transaction, if any (at most one at a time).
        self.transaction = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    @property
    def healthy(self):
        """ Return ``True`` if this session is healthy, ``False`` if
        unhealthy and ``None`` if closed.
        """
        return self.connection.healthy

    def run(self, statement, parameters=None):
        """ Run a parameterised Cypher statement.

        :param statement: Cypher statement to execute
        :param parameters: dictionary of parameters
        :return: Cypher result
        :rtype: :class:`.StatementResult`
        :raises ProtocolError: if a transaction is currently open
        """
        if self.transaction:
            raise ProtocolError("Statements cannot be run directly on a session with an open transaction;"
                                " either run from within the transaction or use a different session.")
        return run(self.connection, statement, parameters)

    def close(self):
        """ Recycle this session through the driver it came from.
        """
        # Drain any outstanding messages before handing the connection back.
        if self.connection and not self.connection.closed:
            self.connection.fetch_all()
        if self.transaction:
            self.transaction.close()
        self.driver.recycle(self)

    def begin_transaction(self):
        """ Create a new :class:`.Transaction` within this session.

        :return: new :class:`.Transaction` instance.
        :raises ProtocolError: if a transaction is already open
        """
        if self.transaction:
            raise ProtocolError("You cannot begin a transaction on a session with an open transaction;"
                                " either run from within the transaction or use a different session.")

        def clear_transaction():
            # Callback used by Transaction.close() to free this slot.
            self.transaction = None

        self.transaction = Transaction(self.connection, on_close=clear_transaction)
        return self.transaction
class Transaction(object):
    """ A unit of work holding multiple Cypher statements, committed or
    rolled back as one. Obtain instances via
    :meth:`.Session.begin_transaction`, typically as a context manager::

        with session.begin_transaction() as tx:
            pass

    On close, the transaction is committed when :attr:`.success` is True
    and rolled back otherwise.
    """

    #: Marks the transaction for COMMIT (True) or ROLLBACK (False) at
    #: close time. May be reassigned any number of times before closing;
    #: only the final value matters.
    success = False

    #: True once the transaction has been committed or rolled back.
    closed = False

    def __init__(self, connection, on_close):
        self.connection = connection
        self.on_close = on_close
        # Open the transaction on the server straight away.
        run(self.connection, "BEGIN")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # An in-flight exception forces a rollback.
        if exc_value:
            self.success = False
        self.close()

    def run(self, statement, parameters=None):
        """ Run a Cypher statement within the context of this transaction.

        :param statement: Cypher statement
        :param parameters: dictionary of parameters
        :return: result object
        """
        assert not self.closed
        return run(self.connection, statement, parameters)

    def commit(self):
        """ Mark this transaction as successful and close it, triggering
        a COMMIT.
        """
        self.success = True
        self.close()

    def rollback(self):
        """ Mark this transaction as unsuccessful and close it,
        triggering a ROLLBACK.
        """
        self.success = False
        self.close()

    def close(self):
        """ Finish this transaction with a COMMIT or ROLLBACK and notify
        the owning session via the on_close callback.
        """
        assert not self.closed
        run(self.connection, "COMMIT" if self.success else "ROLLBACK")
        self.closed = True
        self.on_close()
class Record(object):
    """ An ordered, immutable collection of named fields.

    Values can be retrieved by position (``record[0]``) or by field name
    (``record["field"]``).
    """

    def __init__(self, keys, values):
        self._keys = tuple(keys)
        self._values = tuple(values)

    def keys(self):
        """ Return the field names of this record.
        """
        return self._keys

    def values(self):
        """ Return the field values of this record.
        """
        return self._values

    def items(self):
        """ Return (name, value) pairs for all fields.
        """
        return zip(self._keys, self._values)

    def index(self, key):
        """ Return the position of the named field, raising KeyError for
        an unknown name.
        """
        if key not in self._keys:
            raise KeyError(key)
        return self._keys.index(key)

    def __record__(self):
        return self

    def __contains__(self, key):
        return key in self._keys

    def __iter__(self):
        return iter(self._keys)

    def copy(self):
        """ Return a new Record with the same fields and values.
        """
        return Record(self._keys, self._values)

    def __getitem__(self, item):
        if isinstance(item, string):
            return self._values[self.index(item)]
        if isinstance(item, integer):
            return self._values[item]
        raise TypeError(item)

    def __len__(self):
        return len(self._keys)

    def __repr__(self):
        pairs = ("%s=%r" % (key, value) for key, value in zip(self._keys, self._values))
        return "<Record %s>" % " ".join(pairs)

    def __hash__(self):
        return hash(self._keys) ^ hash(self._values)

    def __eq__(self, other):
        try:
            return self._keys == tuple(other.keys()) and self._values == tuple(other.values())
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
def basic_auth(user, password):
    """ Build an :class:`.AuthToken` for "basic" (user/password)
    authentication.

    :param user: user name
    :param password: current password
    :return: auth token for use with :meth:`GraphDatabase.driver`
    """
    return AuthToken("basic", user, password)
def run(connection, statement, parameters=None):
    """ Run a Cypher statement on a given connection.

    :param connection: connection to carry the request and response
    :param statement: Cypher statement
    :param parameters: optional dictionary of parameters
    :return: statement result
    """
    def to_text(value):
        # Normalise any bytes value to unicode text.
        return value.decode("UTF-8") if isinstance(value, bytes) else value

    statement = to_text(statement)
    parameters = {to_text(key): to_text(value)
                  for key, value in (parameters or {}).items()}

    run_response = Response(connection)
    pull_all_response = Response(connection)
    result = StatementResult(connection, run_response, pull_all_response)
    result.statement = statement
    result.parameters = parameters

    # Queue RUN followed by PULL_ALL, then flush both to the server.
    connection.append(RUN, (statement, parameters), response=run_response)
    connection.append(PULL_ALL, response=pull_all_response)
    connection.send()
    return result
# One-shot flag so the warning below is emitted at most once per process.
_warned_about_insecure_default = False


def _warn_about_insecure_default():
    """ Warn, once, that connections default to insecure because TLS
    support is unavailable in this Python build.
    """
    global _warned_about_insecure_default
    if SSL_AVAILABLE or _warned_about_insecure_default:
        return
    from warnings import warn
    warn("Bolt over TLS is only available in Python 2.7.9+ and Python 3.3+ "
         "so communications are not secure")
    _warned_about_insecure_default = True
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''pip-faster is a thin wrapper around pip.
It only adds a --prune option to the `install` subcommand.
`pip-faster install --prune` will *uninstall* any installed packages that are
not required.
Otherwise, you should find that pip-faster gives the same results as pip, just
more quickly, especially in the case of pinned requirements (e.g.
package-x==1.2.3).
Version control at: https://github.com/yelp/venv-update
'''
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import os
import random
import re
import shutil
import sys
from contextlib import contextmanager
import pip as pipmodule
from pip._internal import logger
from pip._internal.commands.install import InstallCommand
from pip._internal.exceptions import DistributionNotFound
from pip._internal.exceptions import InstallationError
from pip._internal.index import BestVersionAlreadyInstalled
from pip._internal.index import HTMLPage
from pip._internal.index import Link
from pip._internal.index import PackageFinder
from pip._internal.req import InstallRequirement
from pip._internal.wheel import Wheel
from venv_update import colorize
from venv_update import raise_on_failure
from venv_update import timid_relpath
from venv_update import user_cache_dir
# Debian de-vendorizes the version of pip it ships
try: # :pragma:nocover: non-debian
from pip._vendor import pkg_resources
except ImportError: # :pragma:nocover: debian
import pkg_resources
try: # :pragma:nocover: pip>=18.1
from pip._internal.req.constructors import install_req_from_line
except ImportError: # :pragma:nocover: pip<18.1
install_req_from_line = InstallRequirement.from_line
# Thanks six!
# True on Python 2, where str is the bytes type.
PY2 = str is bytes
if PY2: # :pragma:nocover:
    # `raise tp, value, tb` is a syntax error on py3, so the py2 form is
    # hidden inside exec() to keep this module importable everywhere.
    _reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'
    exec(_reraise_src)
else: # :pragma:nocover:
    def reraise(tp, value, tb=None):
        # Re-raise `value` (an instance of `tp`), attaching traceback
        # `tb` when it differs from the one already on the exception.
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
class CACHE(object):
    # Namespace of cache locations; never instantiated.
    _cache_dir = user_cache_dir()
    # Where pip-faster stores wheels, bucketed per index url.
    wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')
    # pip's own wheel cache, which we harvest wheels from.
    pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')
def ignorecase_glob(glob):
    """Return a glob pattern that matches `glob` case-insensitively, by
    replacing each letter with a two-character class like ``[aA]``.

    NOTE: the parameter name shadows the module-level ``glob`` import;
    kept for interface compatibility.
    """
    pieces = []
    for char in glob:
        if char.isalpha():
            pieces.append('[{}{}]'.format(char.lower(), char.upper()))
        else:
            pieces.append(char)
    return ''.join(pieces)
def optimistic_wheel_search(req, index_urls):
    """Look in the local pip-faster wheelhouse for a cached wheel that
    satisfies `req`, checking each configured index bucket in turn.

    Returns a Link to the first suitable wheel, or None implicitly.
    """
    name_glob = ignorecase_glob(req.name.replace('-', '_').lower())
    for index_url in index_urls:
        pattern = os.path.join(
            CACHE.wheelhouse, index_url, name_glob + '-*.whl',
        )
        for path in glob.glob(pattern):
            link = Link('file:' + path)
            wheel = Wheel(link.filename)
            # The wheel must both satisfy the version spec and be
            # installable on this interpreter/platform.
            if req.specifier.contains(wheel.version) and wheel.supported():
                return link
def is_req_pinned(requirement):
    """Return True if `requirement` is pinned to an exact version by an
    ``==`` specifier (wildcards like ``==1.*`` do not count)."""
    if not requirement:
        # url-style requirement
        return False
    return any(
        spec.operator == '==' and not spec.version.endswith('.*')
        for spec in requirement.specifier
    )
class FasterPackageFinder(PackageFinder):
    """ A PackageFinder that avoids the network for pinned requirements
    by preferring already-installed packages and locally cached wheels.
    """

    def find_requirement(self, req, upgrade):
        if is_req_pinned(req.req):
            # if the version is pinned-down by a ==
            # first try to use any installed package that satisfies the req
            if req.satisfied_by:
                logger.info('Faster! pinned requirement already installed.')
                raise BestVersionAlreadyInstalled
            # then try an optimistic search for a .whl file:
            link = optimistic_wheel_search(req.req, self.index_urls)
            if link is None:
                # The wheel will be built during prepare_files
                logger.debug('No wheel found locally for pinned requirement %s', req)
            else:
                logger.info('Faster! Pinned wheel found, without hitting PyPI.')
                return link
        else:
            # unpinned requirements aren't very notable. only show with -v
            logger.info('slow: full search for unpinned requirement %s', req)
        # otherwise, do the full network search, per usual
        try:
            return super(FasterPackageFinder, self).find_requirement(req, upgrade)
        except DistributionNotFound:
            # Keep the original exception info so it can be re-raised
            # with its traceback if the fallback also fails.
            exc_info = sys.exc_info()
            # Best effort: try and install from suitable version on-disk
            link = optimistic_wheel_search(req.req, self.index_urls)
            if link:
                return link
            else:
                reraise(*exc_info)
def _can_be_cached(package):
    """Return True when `package` is a wheel that pip placed inside its
    own wheel cache (the only wheels we harvest into our wheelhouse)."""
    if not package.is_wheel:
        return False
    # An assertion that we're looking in the pip wheel dir
    return package.link.path.startswith(CACHE.pip_wheelhouse)
def mkdirp(pth):
    """Create directory `pth` (and any missing parents), tolerating its
    prior existence — like ``mkdir -p``."""
    try:
        os.makedirs(pth)
    except OSError as error:
        # "Already exists" is fine; anything else propagates.
        if error.errno == errno.EEXIST:
            return
        raise
def _store_wheel_in_cache(file_path, index_url):
    """Copy the wheel at `file_path` into the wheelhouse bucket for
    `index_url`, writing atomically (temp name, then rename)."""
    filename = os.path.basename(file_path)
    destination = os.path.join(CACHE.wheelhouse, index_url, filename)
    staging = '{}.{}'.format(destination, random.randint(0, sys.maxsize))
    mkdirp(os.path.dirname(destination))
    # Copy to a unique temporary name first so a concurrent reader never
    # sees a half-written wheel; the final rename is atomic.
    shutil.copy(file_path, staging)
    os.rename(staging, destination)
def cache_installed_wheels(index_url, installed_packages):
    """After installation, pip tells us what it installed and from where.

    We build a structure that looks like
        .cache/pip-faster/wheelhouse/$index_url/$wheel
    """
    for installed_package in installed_packages:
        if _can_be_cached(installed_package):
            _store_wheel_in_cache(installed_package.link.path, index_url)
def get_patched_download_http_url(orig_download_http_url, index_urls):
    """Wrap pip's download function so that any wheel downloaded from one
    of `index_urls` is also stored in the pip-faster wheelhouse.
    """
    def pipfaster_download_http_url(link, *args, **kwargs):
        file_path, content_type = orig_download_http_url(link, *args, **kwargs)
        if link.is_wheel:
            # Find which configured index served this wheel so it can be
            # cached under that index's bucket.
            for index_url in index_urls:
                if (
                        # pip <18.1: comes_from is the HTML index page object
                        isinstance(link.comes_from, HTMLPage) and
                        link.comes_from.url.startswith(index_url)
                ) or (
                        # pip >= 18.1: comes_from is a plain URL string
                        # (type('') also covers unicode under py2)
                        isinstance(link.comes_from, (str, type(''))) and
                        link.comes_from.startswith(index_url)
                ):
                    _store_wheel_in_cache(file_path, index_url)
                    break
        return file_path, content_type
    return pipfaster_download_http_url
def pip(args):
    """Run pip, in-process, echoing the (colorized) command line first."""
    from sys import stdout
    stdout.write(colorize(('pip',) + args) + '\n')
    stdout.flush()
    return pipmodule._internal.main(list(args))
def dist_to_req(dist):
    """Make a pip.FrozenRequirement from a pkg_resources distribution object.

    :param dist: a pkg_resources distribution
    :return: the corresponding FrozenRequirement
    """
    try:  # :pragma:nocover: (pip>=10)
        from pip._internal.operations.freeze import FrozenRequirement
    except ImportError:  # :pragma:nocover: (pip<10)
        from pip import FrozenRequirement

    # normalize the casing, dashes in the req name
    orig_name, dist.project_name = dist.project_name, dist.key
    try:
        return FrozenRequirement.from_dist(dist, [])
    finally:
        # Fix: put things back the way we found them even when from_dist
        # raises, so a failure no longer leaves the shared distribution
        # object permanently mutated.
        dist.project_name = orig_name
def pip_get_installed():
    """Code extracted from the middle of the pip freeze command.

    FIXME: does not list anything installed via -e
    """
    from pip._internal.utils.misc import dist_is_local
    return tuple(
        dist_to_req(dist)
        for dist in fresh_working_set()
        if dist_is_local(dist) and dist.key != 'python'  # See #220
    )
def normalize_name(name):
    """Normalize a python package name a la PEP 503: runs of ``-``, ``_``
    and ``.`` collapse to a single ``-`` and the result is lowercased.

    https://www.python.org/dev/peps/pep-0503/#normalized-names
    """
    return re.sub('[-_.]+', '-', name.lower())
def fresh_working_set():
    """return a pkg_resources "working set", representing the *currently* installed packages"""
    class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):

        def __init__(self, *args, **kwargs):
            # Maps PEP 503 normalized names to the keys actually
            # registered in this working set (see find_normalized).
            self._normalized_name_mapping = {}
            super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)

        def add_entry(self, entry):
            """Same as the original .add_entry, but sets only=False, so that egg-links are honored."""
            logger.debug('working-set entry: %r', entry)
            self.entry_keys.setdefault(entry, [])
            self.entries.append(entry)
            for dist in pkg_resources.find_distributions(entry, False):
                # eggs override anything that's installed normally
                # fun fact: pkg_resources.working_set's results depend on the
                # ordering of os.listdir since the order of os.listdir is
                # entirely arbitrary (an implementation detail of file system),
                # without calling site.main(), an .egg-link file may or may not
                # be honored, depending on the filesystem
                replace = (dist.precedence == pkg_resources.EGG_DIST)
                self._normalized_name_mapping[normalize_name(dist.key)] = dist.key
                self.add(dist, entry, False, replace=replace)

        def find_normalized(self, req):
            # Like .find, but tolerant of name-normalization differences
            # between the requirement and the registered distribution.
            req = _package_req_to_pkg_resources_req(str(req))
            req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)
            return self.find(req)

    return WorkingSetPlusEditableInstalls()
def req_cycle(req):
    """Return True when the requirement's ``comes_from`` chain revisits
    a name already seen — i.e. the dependency graph is cyclic."""
    cls = req.__class__
    seen = {req.name}
    node = req.comes_from
    # Walk ancestors for as long as they are requirement objects.
    while isinstance(node, cls):
        if node.name in seen:
            return True
        seen.add(node.name)
        node = node.comes_from
    return False
def pretty_req(req):
    """
    Return a shallow copy of a pip requirement that is a bit more
    readable (its noisy `link` and `satisfied_by` fields blanked out),
    at the expense of removing some of its data.
    """
    from copy import copy
    clone = copy(req)
    clone.link = None
    clone.satisfied_by = None
    return clone
def _package_req_to_pkg_resources_req(req):
    # Round-trip through str() so any requirement-like object (pip's or
    # packaging's) can be converted to a pkg_resources Requirement.
    return pkg_resources.Requirement.parse(str(req))
def trace_requirements(requirements):
    """given an iterable of pip InstallRequirements,
    return the set of required packages, given their transitive requirements.

    :raises InstallationError: if version conflicts are found between the
        installed distributions and the traced requirements
    """
    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()

    # breadth-first traversal:
    from collections import deque
    queue = deque(requirements)
    # Everything ever enqueued, so each requirement is visited once.
    queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
    errors = []
    result = []
    while queue:
        req = queue.popleft()
        logger.debug('tracing: %s', req)
        try:
            dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
        except pkg_resources.VersionConflict as conflict:
            # Record the conflict but keep tracing with the installed dist.
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(
                dist, timid_relpath(dist.location), req
            ))
        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))

        # TODO: pip does no validation of extras. should we?
        extras = [extra for extra in req.extras if extra in dist.extras]
        for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
            sub_req = InstallRequirement(sub_req, req)
            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
                continue
            elif sub_req.req in queued:
                logger.debug('already queued: %s', sub_req)
                continue
            else:
                logger.debug('adding sub-requirement %s', sub_req)
                queue.append(sub_req)
                queued.add(sub_req.req)

    if errors:
        raise InstallationError('\n'.join(errors))

    return result
def reqnames(reqs):
    """Return the set of ``name`` attributes of the given requirements."""
    names = set()
    for req in reqs:
        names.add(req.name)
    return names
class FasterInstallCommand(InstallCommand):
    """pip's install command, extended with a ``--prune`` option and
    wheel-caching of downloads."""

    def __init__(self, *args, **kw):
        super(FasterInstallCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '--prune',
            action='store_true',
            dest='prune',
            default=False,
            help='Uninstall any non-required packages.',
        )
        cmd_opts.add_option(
            '--no-prune',
            action='store_false',
            dest='prune',
            help='Do not uninstall any non-required packages.',
        )

    def run(self, options, args):
        """update install options with caching values"""
        # snapshot the pre-install state only when we may prune later
        if options.prune:
            previously_installed = pip_get_installed()
        index_urls = [options.index_url] + options.extra_index_urls
        with pipfaster_download_cacher(index_urls):
            requirement_set = super(FasterInstallCommand, self).run(
                options, args,
            )
        required = requirement_set.requirements.values()
        # With extra_index_urls we don't know where the wheel is from
        if not options.extra_index_urls:
            cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
        if not options.ignore_dependencies:
            # transitive requirements, previously installed, are also required
            # this has a side-effect of finding any missing / conflicting requirements
            required = trace_requirements(required)
        if not options.prune:
            return requirement_set
        # anything installed before but not required (and not bootstrap)
        # is considered extraneous and gets uninstalled
        extraneous = (
            reqnames(previously_installed) -
            reqnames(required) -
            # the stage1 bootstrap packages
            reqnames(trace_requirements([install_req_from_line('venv-update')])) -
            # See #186
            frozenset(('pkg-resources',))
        )
        if extraneous:
            extraneous = sorted(extraneous)
            pip(('uninstall', '--yes') + tuple(extraneous))
# TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.
# TODO: a pip_faster.patch module
def patch(attrs, updates):
    """Apply ``(attribute, value)`` pairs to an attribute dictionary.

    Mutates ``attrs`` in place and returns a mapping of each touched
    attribute to its previous value, suitable for undoing the patch.
    """
    previous = {}
    for name, new_value in updates:
        previous[name] = attrs[name]
        attrs[name] = new_value
    return previous
@contextmanager
def patched(attrs, updates):
    """A context in which some attributes temporarily have a modified value.

    On entry the updates are applied and the saved originals are yielded;
    on exit the originals are restored even if the body raised.
    """
    saved = patch(attrs, updates.items())
    try:
        yield saved
    finally:
        patch(attrs, saved.items())
# END: pip_faster.patch module
def pipfaster_install_prune_option():
    # Context: temporarily swap pip's registered install command for
    # FasterInstallCommand, keyed by the command's declared name.
    return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})
def pipfaster_packagefinder():
    """Provide a short-circuited search when the requirement is pinned and appears on disk.

    Suggested upstream at: https://github.com/pypa/pip/pull/2114
    """
    # A poor man's dependency injection: monkeypatch :(
    try:  # :pragma:nocover: pip>=18.1
        from pip._internal.cli import base_command
    except ImportError:  # :pragma:nocover: pip<18.1
        from pip._internal import basecommand as base_command
    # swap the module-level PackageFinder name for our faster subclass
    return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
def pipfaster_download_cacher(index_urls):
    """vanilla pip stores a cache of the http session in its cache and not the
    wheel files. We intercept the download and save those files into our
    cache
    """
    from pip._internal import download
    orig = download._download_http_url
    # wrap the original downloader so fetched files land in our cache
    patched_fn = get_patched_download_http_url(orig, index_urls)
    return patched(vars(download), {'_download_http_url': patched_fn})
def main():
    # Run pip with both pip-faster patches active: the prune-capable
    # install command and the short-circuiting package finder.
    with pipfaster_install_prune_option():
        with pipfaster_packagefinder():
            raise_on_failure(pipmodule._internal.main)
if __name__ == '__main__':
    exit(main())
|
|
#!/usr/bin/env python
# CREATED:2014-01-18 14:09:05 by Brian McFee <brm2132@columbia.edu>
# unit tests for util routines
# Disable cache
import os

# Disable the librosa function cache for this test run.  Using pop()
# with a default avoids the KeyError when the variable is unset --
# the original wrapped this in a bare ``except:``, which would also
# swallow unrelated errors such as KeyboardInterrupt.
os.environ.pop('LIBROSA_CACHE_DIR', None)
import matplotlib
matplotlib.use('Agg')
import numpy as np
np.set_printoptions(precision=3)
from nose.tools import raises
import six
import warnings
import librosa
def test_example_audio_file():
    # The bundled example audio file must exist on disk.
    assert os.path.exists(librosa.util.example_audio_file())
def test_frame():
    """Each column of librosa.util.frame must equal the corresponding
    hop-offset slice of the input signal."""
    # Generate a random time series
    def __test(P):
        frame, hop = P
        y = np.random.randn(8000)
        y_frame = librosa.util.frame(y, frame_length=frame, hop_length=hop)
        for i in range(y_frame.shape[1]):
            # column i starts at sample i * hop
            assert np.allclose(y_frame[:, i], y[i * hop:(i * hop + frame)])
    for frame in [256, 1024, 2048]:
        for hop_length in [64, 256, 512]:
            yield (__test, [frame, hop_length])
def test_pad_center():
    """pad_center must keep the original data centered in the output;
    a target length smaller than the input must raise ParameterError."""
    def __test(y, n, axis, mode):
        y_out = librosa.util.pad_center(y, n, axis=axis, mode=mode)
        n_len = y.shape[axis]
        n_pad = int((n - n_len) / 2)
        # slice out the centered region and compare to the input
        eq_slice = [slice(None)] * y.ndim
        eq_slice[axis] = slice(n_pad, n_pad + n_len)
        assert np.allclose(y, y_out[eq_slice])
    @raises(librosa.ParameterError)
    def __test_fail(y, n, axis, mode):
        librosa.util.pad_center(y, n, axis=axis, mode=mode)
    for shape in [(16,), (16, 16)]:
        y = np.ones(shape)
        for axis in [0, -1]:
            for mode in ['constant', 'edge', 'reflect']:
                # n >= current length: must succeed
                for n in [0, 10]:
                    yield __test, y, n + y.shape[axis], axis, mode
                # n < current length: must fail
                for n in [0, 10]:
                    yield __test_fail, y, n, axis, mode
def test_fix_length():
    """fix_length must pad or truncate along ``axis`` to exactly ``n``."""
    def __test(y, n, axis):
        y_out = librosa.util.fix_length(y, n, axis=axis)
        eq_slice = [slice(None)] * y.ndim
        eq_slice[axis] = slice(y.shape[axis])
        if n > y.shape[axis]:
            # padded: the original data must survive as a prefix
            assert np.allclose(y, y_out[eq_slice])
        else:
            # NOTE(review): this branch compares y against itself and is
            # vacuous; presumably it was meant to check y_out -- confirm.
            assert np.allclose(y[eq_slice], y)
    for shape in [(16,), (16, 16)]:
        y = np.ones(shape)
        for axis in [0, -1]:
            for n in [-5, 0, 5]:
                yield __test, y, n + y.shape[axis], axis
def test_fix_frames():
    """fix_frames must clip to [x_min, x_max] and, when padding, include
    the endpoints; negative frames must raise ParameterError."""
    @raises(librosa.ParameterError)
    def __test_fail(frames, x_min, x_max, pad):
        librosa.util.fix_frames(frames, x_min, x_max, pad)
    def __test_pass(frames, x_min, x_max, pad):
        f_fix = librosa.util.fix_frames(frames,
                                        x_min=x_min,
                                        x_max=x_max,
                                        pad=pad)
        if x_min is not None:
            if pad:
                # padding must insert the lower endpoint
                assert f_fix[0] == x_min
            assert np.all(f_fix >= x_min)
        if x_max is not None:
            if pad:
                # padding must insert the upper endpoint
                assert f_fix[-1] == x_max
            assert np.all(f_fix <= x_max)
    for low in [-20, 0, 20]:
        for high in [low + 20, low + 50, low + 100]:
            frames = np.random.randint(low, high=high, size=15)
            for x_min in [None, 0, 20]:
                for x_max in [None, 20, 100]:
                    for pad in [False, True]:
                        if np.any(frames < 0):
                            yield __test_fail, frames, x_min, x_max, pad
                        else:
                            yield __test_pass, frames, x_min, x_max, pad
def test_normalize():
    """After librosa.util.normalize, the requested norm along ``axis``
    must be 1; unsupported norms must raise ParameterError."""
    def __test_pass(X, norm, axis):
        X_norm = librosa.util.normalize(X, norm=norm, axis=axis)
        # norm=None is a no-op
        if norm is None:
            assert np.allclose(X, X_norm)
            return
        X_norm = np.abs(X_norm)
        if norm == np.inf:
            values = np.max(X_norm, axis=axis)
        elif norm == -np.inf:
            values = np.min(X_norm, axis=axis)
        elif norm == 0:
            # XXX: normalization here isn't quite right
            values = np.ones(1)
        else:
            # lp norm
            values = np.sum(X_norm**norm, axis=axis)**(1./norm)
        assert np.allclose(values, np.ones_like(values))
    @raises(librosa.ParameterError)
    def __test_fail(X, norm, axis):
        librosa.util.normalize(X, norm=norm, axis=axis)
    for ndims in [1, 2, 3]:
        X = np.random.randn(* ([16] * ndims))
        for axis in range(X.ndim):
            for norm in [np.inf, -np.inf, 0, 0.5, 1.0, 2.0, None]:
                yield __test_pass, X, norm, axis
            for norm in ['inf', -0.5, -2]:
                yield __test_fail, X, norm, axis
def test_axis_sort():
    """axis_sort must return data sorted along ``axis`` (and a matching
    index when requested); only 2-dimensional input is valid."""
    def __test_pass(data, axis, index, value):
        if index:
            Xsorted, idx = librosa.util.axis_sort(data,
                                                  axis=axis,
                                                  index=index,
                                                  value=value)
            # Compare against the ``data`` argument, not the enclosing
            # loop variable ``X``: closing over the loop variable is a
            # late-binding bug that only worked because nose runs each
            # yielded test before the loop advances.
            cmp_slice = [slice(None)] * data.ndim
            cmp_slice[axis] = idx
            assert np.allclose(data[cmp_slice], Xsorted)
        else:
            Xsorted = librosa.util.axis_sort(data,
                                             axis=axis,
                                             index=index,
                                             value=value)
        # values along the complementary axis must be non-decreasing
        compare_axis = np.mod(1 - axis, 2)
        if value is None:
            value = np.argmax
        sort_values = value(Xsorted, axis=compare_axis)
        assert np.allclose(sort_values, np.sort(sort_values))
    @raises(librosa.ParameterError)
    def __test_fail(data, axis, index, value):
        librosa.util.axis_sort(data, axis=axis, index=index, value=value)
    for ndim in [1, 2, 3]:
        X = np.random.randn(*([10] * ndim))
        for axis in [0, 1, -1]:
            for index in [False, True]:
                for value in [None, np.min, np.mean, np.max]:
                    if ndim == 2:
                        yield __test_pass, X, axis, index, value
                    else:
                        yield __test_fail, X, axis, index, value
def test_match_intervals():
    """Every matched interval must maximize overlap; empty interval sets
    must raise ParameterError."""
    def __make_intervals(n):
        # cumsum guarantees start <= end for each interval
        return np.cumsum(np.abs(np.random.randn(n, 2)), axis=1)
    def __compare(i1, i2):
        # overlap length between two intervals (0 if disjoint)
        return np.maximum(0, np.minimum(i1[-1], i2[-1])
                          - np.maximum(i1[0], i2[0]))
    def __is_best(y, ints1, ints2):
        for i in range(len(y)):
            values = np.asarray([__compare(ints1[i], i2) for i2 in ints2])
            if np.any(values > values[y[i]]):
                return False
        return True
    def __test(n, m):
        ints1 = __make_intervals(n)
        ints2 = __make_intervals(m)
        y_pred = librosa.util.match_intervals(ints1, ints2)
        assert __is_best(y_pred, ints1, ints2)
    @raises(librosa.ParameterError)
    def __test_fail(n, m):
        ints1 = __make_intervals(n)
        ints2 = __make_intervals(m)
        librosa.util.match_intervals(ints1, ints2)
    for n in [0, 1, 5, 20, 100]:
        for m in [0, 1, 5, 20, 100]:
            if n == 0 or m == 0:
                yield __test_fail, n, m
            else:
                yield __test, n, m
# TODO: 2015-01-20 17:04:55 by Brian McFee <brian.mcfee@nyu.edu>
# add coverage for shape errors
def test_match_events():
    """Every matched event must be a nearest neighbor; empty event sets
    must raise ParameterError."""
    def __make_events(n):
        return np.abs(np.random.randn(n))
    def __is_best(y, ev1, ev2):
        for i in range(len(y)):
            values = np.asarray([np.abs(ev1[i] - e2) for e2 in ev2])
            if np.any(values < values[y[i]]):
                return False
        return True
    def __test(n, m):
        ev1 = __make_events(n)
        ev2 = __make_events(m)
        y_pred = librosa.util.match_events(ev1, ev2)
        assert __is_best(y_pred, ev1, ev2)
    @raises(librosa.ParameterError)
    def __test_fail(n, m):
        ev1 = __make_events(n)
        ev2 = __make_events(m)
        librosa.util.match_events(ev1, ev2)
    for n in [0, 1, 5, 20, 100]:
        for m in [0, 1, 5, 20, 100]:
            if n == 0 or m == 0:
                yield __test_fail, n, m
            else:
                yield __test, n, m
def test_localmax():
    """Every hit from librosa.util.localmax must strictly exceed its
    predecessor and be >= its successor along ``axis``."""
    def __test(ndim, axis):
        data = np.random.randn(*([20] * ndim))
        lm = librosa.util.localmax(data, axis=axis)
        for hits in np.argwhere(lm):
            for offset in [-1, 1]:
                compare_idx = hits.copy()
                compare_idx[axis] += offset
                # skip neighbors that fall outside the array
                if compare_idx[axis] < 0:
                    continue
                if compare_idx[axis] >= data.shape[axis]:
                    continue
                if offset < 0:
                    # strict increase from the left neighbor
                    assert data[tuple(hits)] > data[tuple(compare_idx)]
                else:
                    # non-strict on the right (plateau-tolerant)
                    assert data[tuple(hits)] >= data[tuple(compare_idx)]
    for ndim in range(1, 5):
        for axis in range(ndim):
            yield __test, ndim, axis
def test_feature_extractor():
    """FeatureExtractor must reproduce direct calls to the wrapped
    function, for positional/keyword targets with iterate on and off."""
    y, sr = librosa.load('data/test1_22050.wav')

    def __test_positional_iterate(myfunc, args):
        output_raw = myfunc(y, **args)
        FP = librosa.util.FeatureExtractor(myfunc, **args)
        output = FP.transform([y])
        assert np.allclose(output, output_raw)
        # Ensure that fitting does nothing
        FP.fit()
        output = FP.transform([y])
        assert np.allclose(output, output_raw)

    def __test_positional(myfunc, args):
        output_raw = myfunc(y, **args)
        FP = librosa.util.FeatureExtractor(myfunc, iterate=False, **args)
        output = FP.transform(y)
        assert np.allclose(output, output_raw)
        # Ensure that fitting does nothing
        FP.fit()
        output = FP.transform(y)
        assert np.allclose(output, output_raw)

    def __test_keyword_iterate(myfunc, args):
        output_raw = myfunc(y=y, **args)
        FP = librosa.util.FeatureExtractor(myfunc, target='y', **args)
        output = FP.transform([y])
        assert np.allclose(output, output_raw)
        # Ensure that fitting does nothing
        FP.fit()
        output = FP.transform([y])
        assert np.allclose(output, output_raw)

    def __test_keyword(myfunc, args):
        output_raw = myfunc(y=y, **args)
        FP = librosa.util.FeatureExtractor(myfunc, target='y',
                                           iterate=False, **args)
        output = FP.transform(y)
        assert np.allclose(output, output_raw)
        # Ensure that fitting does nothing
        FP.fit()
        output = FP.transform(y)
        assert np.allclose(output, output_raw)

    func = librosa.feature.melspectrogram
    args = {'sr': sr}
    for n_fft in [1024, 2048]:
        for n_mels in [32, 64, 128]:
            args['n_fft'] = n_fft
            args['n_mels'] = n_mels
            # Yield a snapshot of ``args``: the dict is mutated on every
            # iteration, so sharing the live dict would hand any deferred
            # test the final (n_fft, n_mels) pair instead of its own.
            yield __test_positional_iterate, func, dict(args)
            yield __test_keyword_iterate, func, dict(args)
            yield __test_positional, func, dict(args)
            yield __test_keyword, func, dict(args)
def test_peak_pick():
    """Each picked peak must dominate its max-window, clear the mean
    window by ``delta``, and respect the ``wait`` separation; any
    non-positive window parameter must raise ParameterError."""
    def __test(n, pre_max, post_max, pre_avg, post_avg, delta, wait):
        # Generate a test signal
        x = np.random.randn(n)**2
        peaks = librosa.util.peak_pick(x,
                                       pre_max, post_max,
                                       pre_avg, post_avg,
                                       delta, wait)
        for i in peaks:
            # Test 1: is it a peak in this window?
            s = i - pre_max
            if s < 0:
                s = 0
            t = i + post_max
            diff = x[i] - np.max(x[s:t])
            assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
            # Test 2: is it a big enough peak to count?
            s = i - pre_avg
            if s < 0:
                s = 0
            t = i + post_avg
            diff = x[i] - (delta + np.mean(x[s:t]))
            assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
        # Test 3: peak separation
        assert not np.any(np.diff(peaks) <= wait)
    @raises(librosa.ParameterError)
    def __test_shape_fail():
        # multi-dimensional input is rejected
        x = np.eye(10)
        librosa.util.peak_pick(x, 1, 1, 1, 1, 0.5, 1)
    yield __test_shape_fail
    win_range = [-1, 0, 1, 10]
    for n in [1, 5, 10, 100]:
        for pre_max in win_range:
            for post_max in win_range:
                for pre_avg in win_range:
                    for post_avg in win_range:
                        for wait in win_range:
                            for delta in [-1, 0.05, 100.0]:
                                # any invalid parameter flips the test
                                # to expect ParameterError
                                tf = __test
                                if pre_max < 0:
                                    tf = raises(librosa.ParameterError)(__test)
                                if pre_avg < 0:
                                    tf = raises(librosa.ParameterError)(__test)
                                if delta < 0:
                                    tf = raises(librosa.ParameterError)(__test)
                                if wait < 0:
                                    tf = raises(librosa.ParameterError)(__test)
                                if post_max <= 0:
                                    tf = raises(librosa.ParameterError)(__test)
                                if post_avg <= 0:
                                    tf = raises(librosa.ParameterError)(__test)
                                yield (tf, n, pre_max, post_max,
                                       pre_avg, post_avg, delta, wait)
def test_sparsify_rows():
    """sparsify_rows must preserve shape, agree with the input on its
    retained entries, and keep at least (1 - q) of each row's energy."""
    def __test(n, d, q):
        X = np.random.randn(*([d] * n))**4
        X = np.asarray(X)
        xs = librosa.util.sparsify_rows(X, quantile=q)
        # Use the parameter ``n``, not the enclosing loop variable
        # ``ndim``: closing over the loop variable is a late-binding
        # bug that only worked because nose runs each yielded test
        # before the loop advances.
        if n == 1:
            X = X.reshape((1, -1))
        assert np.allclose(xs.shape, X.shape)
        # And make sure that xs matches X on nonzeros
        xsd = np.asarray(xs.todense())
        for i in range(xs.shape[0]):
            assert np.allclose(xsd[i, xs[i].indices], X[i, xs[i].indices])
        # Compute row-wise magnitude marginals
        v_in = np.sum(np.abs(X), axis=-1)
        v_out = np.sum(np.abs(xsd), axis=-1)
        # Ensure that v_out retains 1-q fraction of v_in
        assert np.all(v_out >= (1.0 - q) * v_in)
    for ndim in range(1, 4):
        for d in [1, 5, 10, 100]:
            for q in [-1, 0.0, 0.01, 0.25, 0.5, 0.99, 1.0, 2.0]:
                tf = __test
                if ndim not in [1, 2]:
                    tf = raises(librosa.ParameterError)(__test)
                if not 0.0 <= q < 1:
                    tf = raises(librosa.ParameterError)(__test)
                yield tf, ndim, d, q
def test_files():
    """find_files must locate exactly the three bundled test wavs under
    each combination of extension, recursion, case, limit and offset."""
    # Expected output
    output = [os.path.join(os.path.abspath(os.path.curdir), 'data', s)
              for s in ['test1_22050.wav',
                        'test1_44100.wav',
                        'test2_8000.wav']]
    def __test(searchdir, ext, recurse, case_sensitive, limit, offset):
        files = librosa.util.find_files(searchdir,
                                        ext=ext,
                                        recurse=recurse,
                                        case_sensitive=case_sensitive,
                                        limit=limit,
                                        offset=offset)
        # apply the same offset/limit windowing to the expectation
        s1 = slice(offset, None)
        s2 = slice(limit)
        assert set(files) == set(output[s1][s2])
    for searchdir in [os.path.curdir, os.path.join(os.path.curdir, 'data')]:
        for ext in [None, 'wav', 'WAV', ['wav'], ['WAV']]:
            for recurse in [False, True]:
                for case_sensitive in [False, True]:
                    for limit in [None, 1, 2]:
                        for offset in [0, 1, -1]:
                            tf = __test
                            # searching the repo root without recursion
                            # finds nothing, so the comparison must fail
                            if searchdir == os.path.curdir and not recurse:
                                tf = raises(AssertionError)(__test)
                            # case-sensitive 'WAV' matches none of the
                            # lowercase test files
                            if (ext is not None and
                                    case_sensitive and
                                    (ext == 'WAV' or set(ext) == set(['WAV']))):
                                tf = raises(AssertionError)(__test)
                            yield (tf, searchdir, ext, recurse,
                                   case_sensitive, limit, offset)
def test_valid_int():
    """valid_int must floor by default, apply ``cast`` when given, and
    raise ParameterError for a non-callable ``cast``."""
    def __test(x_in, cast):
        z = librosa.util.valid_int(x_in, cast)
        assert isinstance(z, int)
        if cast is None:
            assert z == int(np.floor(x_in))
        else:
            assert z == int(cast(x_in))
    __test_fail = raises(librosa.ParameterError)(__test)
    for x in np.linspace(-2, 2, num=6):
        for cast in [None, np.floor, np.ceil, 7]:
            # The builtin ``callable`` exists on every supported Python
            # (restored in 3.2), so six.callable is unnecessary here.
            if cast is None or callable(cast):
                yield __test, x, cast
            else:
                yield __test_fail, x, cast
def test_valid_intervals():
    """Only a non-empty 2-d array with two columns is a valid interval
    array; everything else must raise ParameterError."""
    def __test(intval):
        librosa.util.valid_intervals(intval)
    for d in range(1, 4):
        for n in range(1, 4):
            ivals = np.ones(d * [n])
            for m in range(1, 3):
                slices = [slice(m)] * d
                # shape (k, 2) with k >= 1 is the only accepted form
                if m == 2 and d == 2 and n > 1:
                    yield __test, ivals[slices]
                else:
                    yield raises(librosa.ParameterError)(__test), ivals[slices]
def test_warning_deprecated():
    """The @deprecated decorator must preserve the return value and emit
    a DeprecationWarning mentioning 'deprecated'."""
    @librosa.util.decorators.deprecated('old_version', 'new_version')
    def __dummy():
        return True
    warnings.resetwarnings()
    with warnings.catch_warnings(record=True) as out:
        x = __dummy()
        # Make sure we still get the right value
        assert x is True
        # And that the warning triggered
        assert len(out) > 0
        # And that the category is correct
        assert out[0].category is DeprecationWarning
        # And that it says the right thing (roughly)
        assert 'deprecated' in str(out[0].message).lower()
def test_warning_moved():
    """The @moved decorator must preserve the return value and emit a
    DeprecationWarning mentioning 'moved'."""
    @librosa.util.decorators.moved('from', 'old_version', 'new_version')
    def __dummy():
        return True
    warnings.resetwarnings()
    with warnings.catch_warnings(record=True) as out:
        x = __dummy()
        # Make sure we still get the right value
        assert x is True
        # And that the warning triggered
        assert len(out) > 0
        # And that the category is correct
        assert out[0].category is DeprecationWarning
        # And that it says the right thing (roughly)
        assert 'moved' in str(out[0].message).lower()
|
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch updates / deletes of storage buckets / blobs.
See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
"""
from email.encoders import encode_noop
from email.generator import Generator
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.parser import Parser
import io
import json
import requests
import six
from google.cloud import _helpers
from google.cloud import exceptions
from google.cloud.storage._http import Connection
class MIMEApplicationHTTP(MIMEApplication):
    """MIME type for ``application/http``.

    Constructs payload from headers and body

    :type method: str
    :param method: HTTP method

    :type uri: str
    :param uri: URI for HTTP request

    :type headers: dict
    :param headers: HTTP headers

    :type body: str
    :param body: (Optional) HTTP payload
    """

    def __init__(self, method, uri, headers, body):
        # dict bodies are serialized as JSON and get matching headers
        if isinstance(body, dict):
            body = json.dumps(body)
            headers["Content-Type"] = "application/json"
            # NOTE(review): len() counts characters, not encoded bytes;
            # only safe while the JSON stays ASCII -- confirm upstream.
            headers["Content-Length"] = len(body)
        if body is None:
            body = ""
        # assemble a raw HTTP/1.1 request: request line, sorted headers,
        # blank line, then the body
        lines = ["%s %s HTTP/1.1" % (method, uri)]
        lines.extend(
            ["%s: %s" % (key, value) for key, value in sorted(headers.items())]
        )
        lines.append("")
        lines.append(body)
        payload = "\r\n".join(lines)
        if six.PY2:
            # email.message.Message is an old-style class, so we
            # cannot use 'super()'.
            MIMEApplication.__init__(self, payload, "http", encode_noop)
        else:  # pragma: NO COVER Python3
            super_init = super(MIMEApplicationHTTP, self).__init__
            super_init(payload, "http", encode_noop)
class _FutureDict(object):
"""Class to hold a future value for a deferred request.
Used by for requests that get sent in a :class:`Batch`.
"""
@staticmethod
def get(key, default=None):
"""Stand-in for dict.get.
:type key: object
:param key: Hashable dictionary key.
:type default: object
:param default: Fallback value to dict.get.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot get(%r, default=%r) on a future" % (key, default))
def __getitem__(self, key):
"""Stand-in for dict[key].
:type key: object
:param key: Hashable dictionary key.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot get item %r from a future" % (key,))
def __setitem__(self, key, value):
"""Stand-in for dict[key] = value.
:type key: object
:param key: Hashable dictionary key.
:type value: object
:param value: Dictionary value.
:raises: :class:`KeyError` always since the future is intended to fail
as a dictionary.
"""
raise KeyError("Cannot set %r -> %r on a future" % (key, value))
class _FutureResponse(requests.Response):
    """Reponse that returns a placeholder dictionary for a batched requests."""

    def __init__(self, future_dict):
        super(_FutureResponse, self).__init__()
        # the placeholder that will be swapped for real data on finish()
        self._future_dict = future_dict
        # 204 No Content: nothing has actually been sent yet
        self.status_code = 204

    def json(self):
        # expose the placeholder through the usual Response accessors
        return self._future_dict

    @property
    def content(self):
        return self._future_dict
class Batch(Connection):
    """Proxy an underlying connection, batching up change operations.

    :type client: :class:`google.cloud.storage.client.Client`
    :param client: The client to use for making connections.
    """

    # hard server-side limit on requests per batch
    _MAX_BATCH_SIZE = 1000

    def __init__(self, client):
        super(Batch, self).__init__(client)
        # deferred (method, url, headers, data, timeout) tuples
        self._requests = []
        # objects awaiting their response payloads, parallel to _requests
        self._target_objects = []

    def _do_request(self, method, url, headers, data, target_object, timeout=None):
        """Override Connection:  defer actual HTTP request.

        Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred.

        :type method: str
        :param method: The HTTP method to use in the request.

        :type url: str
        :param url: The URL to send the request to.

        :type headers: dict
        :param headers: A dictionary of HTTP headers to send with the request.

        :type data: str
        :param data: The data to send as the body of the request.

        :type target_object: object
        :param target_object:
            (Optional) This allows us to enable custom behavior in our batch
            connection. Here we defer an HTTP request and complete
            initialization of the object at a later time.

        :type timeout: float or tuple
        :param timeout: (optional) The amount of time, in seconds, to wait
            for the server response.  By default, the method waits indefinitely.
            Can also be passed as a tuple (connect_timeout, read_timeout).
            See :meth:`requests.Session.request` documentation for details.

        :rtype: tuple of ``response`` (a dictionary of sorts)
                and ``content`` (a string).
        :returns: The HTTP response object and the content of the response.
        """
        if len(self._requests) >= self._MAX_BATCH_SIZE:
            raise ValueError(
                "Too many deferred requests (max %d)" % self._MAX_BATCH_SIZE
            )
        self._requests.append((method, url, headers, data, timeout))
        result = _FutureDict()
        self._target_objects.append(target_object)
        if target_object is not None:
            # the placeholder is swapped for real data in _finish_futures
            target_object._properties = result
        return _FutureResponse(result)

    def _prepare_batch_request(self):
        """Prepares headers and body for a batch request.

        :rtype: tuple (dict, str)
        :returns: The pair of headers and body of the batch request to be sent.
        :raises: :class:`ValueError` if no requests have been deferred.
        """
        if len(self._requests) == 0:
            raise ValueError("No deferred requests")
        multi = MIMEMultipart()
        # Use timeout of last request, default to None (indefinite)
        timeout = None
        for method, uri, headers, body, _timeout in self._requests:
            subrequest = MIMEApplicationHTTP(method, uri, headers, body)
            multi.attach(subrequest)
            timeout = _timeout
        # The `email` package expects to deal with "native" strings
        if six.PY3:  # pragma: NO COVER  Python3
            buf = io.StringIO()
        else:
            buf = io.BytesIO()
        generator = Generator(buf, False, 0)
        generator.flatten(multi)
        payload = buf.getvalue()
        # Strip off redundant header text
        _, body = payload.split("\n\n", 1)
        return dict(multi._headers), body, timeout

    def _finish_futures(self, responses):
        """Apply all the batch responses to the futures created.

        :type responses: list of (headers, payload) tuples.
        :param responses: List of headers and payloads from each response in
                          the batch.

        :raises: :class:`ValueError` if no requests have been deferred.
        """
        # If a bad status occurs, we track it, but don't raise an exception
        # until all futures have been populated.
        exception_args = None
        if len(self._target_objects) != len(responses):  # pragma: NO COVER
            raise ValueError("Expected a response for every request.")
        for target_object, subresponse in zip(self._target_objects, responses):
            if not 200 <= subresponse.status_code < 300:
                # remember only the first failing subresponse
                exception_args = exception_args or subresponse
            elif target_object is not None:
                try:
                    target_object._properties = subresponse.json()
                except ValueError:
                    # non-JSON payload: fall back to raw content
                    target_object._properties = subresponse.content
        if exception_args is not None:
            raise exceptions.from_http_response(exception_args)

    def finish(self):
        """Submit a single `multipart/mixed` request with deferred requests.

        :rtype: list of tuples
        :returns: one ``(headers, payload)`` tuple per deferred request.
        """
        headers, body, timeout = self._prepare_batch_request()
        url = "%s/batch/storage/v1" % self.API_BASE_URL
        # Use the private ``_base_connection`` rather than the property
        # ``_connection``, since the property may be this
        # current batch.
        response = self._client._base_connection._make_request(
            "POST", url, data=body, headers=headers, timeout=timeout
        )
        responses = list(_unpack_batch_response(response))
        self._finish_futures(responses)
        return responses

    def current(self):
        """Return the topmost batch, or None."""
        return self._client.current_batch

    def __enter__(self):
        self._client._push_batch(self)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            # only submit the batch when the body completed cleanly
            if exc_type is None:
                self.finish()
        finally:
            self._client._pop_batch()
def _generate_faux_mime_message(parser, response):
    """Convert response, content -> (multipart) email.message.

    Helper for _unpack_batch_response.
    """
    # We coerce to bytes to get consistent concat across
    # Py2 and Py3. Percent formatting is insufficient since
    # it includes the b in Py3.
    content_type = _helpers._to_bytes(response.headers.get("content-type", ""))
    # minimal MIME envelope so the email parser accepts the raw body
    faux_message = b"".join(
        [b"Content-Type: ", content_type, b"\nMIME-Version: 1.0\n\n", response.content]
    )
    if six.PY2:
        return parser.parsestr(faux_message)
    else:  # pragma: NO COVER  Python3
        return parser.parsestr(faux_message.decode("utf-8"))
def _unpack_batch_response(response):
    """Convert requests.Response -> [(headers, payload)].

    Creates a generator of tuples of emulating the responses to
    :meth:`requests.Session.request`.

    :type response: :class:`requests.Response`
    :param response: HTTP response / headers from a request.
    """
    parser = Parser()
    message = _generate_faux_mime_message(parser, response)
    if not isinstance(message._payload, list):  # pragma: NO COVER
        raise ValueError("Bad response: not multi-part")
    for subrequest in message._payload:
        # first line is the HTTP status line, e.g. 'HTTP/1.1 200 OK'
        status_line, rest = subrequest._payload.split("\n", 1)
        _, status, _ = status_line.split(" ", 2)
        sub_message = parser.parsestr(rest)
        payload = sub_message._payload
        msg_headers = dict(sub_message._headers)
        content_id = msg_headers.get("Content-ID")
        # synthesize a requests.Response mirroring the sub-request result
        subresponse = requests.Response()
        subresponse.request = requests.Request(
            method="BATCH", url="contentid://{}".format(content_id)
        ).prepare()
        subresponse.status_code = int(status)
        subresponse.headers.update(msg_headers)
        subresponse._content = payload.encode("utf-8")
        yield subresponse
|
|
#!/usr/bin/env python3
# Copyright (c) 2016 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_nealcoin_NativeSecp256k1.c',
'src/secp256k1/src/java/org_nealcoin_NativeSecp256k1.h',
'src/secp256k1/src/java/org_nealcoin_Secp256k1Context.c',
'src/secp256k1/src/java/org_nealcoin_Secp256k1Context.h',
# auto generated:
'src/univalue/lib/univalue_escapes.h',
'src/qt/nealcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'qa/rpc-tests/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
    # A file is in scope iff it matches INCLUDE and is not EXCLUDEd.
    return ((EXCLUDE_COMPILED.match(filename) is None) and
            (INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
    """Return the repository's tracked filenames via ``git ls-files``."""
    raw = subprocess.check_output(GIT_LS_CMD.split(' ')).decode("utf-8")
    return [name for name in raw.split('\n') if name != '']
def get_filenames_to_examine():
    """Return the sorted list of tracked files passing the INCLUDE/EXCLUDE
    filters."""
    candidates = call_git_ls()
    return sorted(name for name in candidates if applies_to_file(name))
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
# Regex fragments for recognizing copyright notices.  The '(c)' variant
# uses a raw string: '\(' is an invalid escape in a plain str literal and
# triggers a DeprecationWarning on Python 3.6+ (the compiled pattern is
# unchanged).
COPYRIGHT_WITH_C = r'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)

# years: a single year, a 'YYYY-YYYY' range, or a 'YYYY, YYYY, ...' list
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
                                                ANY_YEAR_STYLE))

ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
    """Compile a regex matching '<copyright style> <year style> <holder>'."""
    pattern = ' '.join((copyright_style, year_style, name))
    return re.compile(pattern)
# Entries are regex fragments (fed to compile_copyright_regex).  Those
# containing regex escapes (\., \*) are raw strings so Python does not
# flag them as invalid string escapes (DeprecationWarning on 3.6+).  In
# the raw entries '\n' is the regex newline escape; elsewhere it is a
# literal newline -- the compiled patterns match identically either way.
EXPECTED_HOLDER_NAMES = [
    "Satoshi Nakamoto\n",
    "The nealcoin Core developers\n",
    "The nealcoin Core developers \n",
    "nealcoin Core Developers\n",
    "the nealcoin Core developers\n",
    "The nealcoin developers\n",
    r"The LevelDB Authors\. All rights reserved\.\n",
    r"BitPay Inc\.\n",
    r"BitPay, Inc\.\n",
    r"University of Illinois at Urbana-Champaign\.\n",
    "MarcoFalke\n",
    "Pieter Wuille\n",
    r"Pieter Wuille +\*\n",
    r"Pieter Wuille, Gregory Maxwell +\*\n",
    r"Pieter Wuille, Andrew Poelstra +\*\n",
    r"Andrew Poelstra +\*\n",
    "Wladimir J. van der Laan\n",
    "Jeff Garzik\n",
    r"Diederik Huys, Pieter Wuille +\*\n",
    r"Thomas Daede, Cory Fields +\*\n",
    "Jan-Klaas Kollhof\n",
    "Sam Rushing\n",
    "ArtForz -- public domain half-a-node\n",
]
# Pre-compile one regex per expected holder for each of the three
# copyright notice styles observed in the tree.
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
    DOMINANT_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
    YEAR_LIST_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
    WITHOUT_C_STYLE_COMPILED[holder_name] = (
        compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
                                holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
    # Count every copyright-with-year occurrence, regardless of holder.
    return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
    # 'Copyright (c) YYYY[-YYYY] <holder>' form
    match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
    return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
    """True if `contents` carries a year-list-style copyright for the holder."""
    return YEAR_LIST_STYLE_COMPILED[holder_name].search(contents) is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
    """True if `contents` carries a no-"(c)" style copyright for the holder."""
    return WITHOUT_C_STYLE_COMPILED[holder_name].search(contents) is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
    """Return the full text of `filename`.

    Uses a context manager so the handle is closed deterministically; the
    original left the open file object for the garbage collector to reclaim.
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.read()
def gather_file_info(filename):
    """Collect per-file copyright statistics.

    Returns a dict with the file's contents, the total number of copyright
    notices of any style, per-holder booleans for each of the three styles,
    and a count of holders that matched at least one style.
    """
    contents = read_file(filename)
    info = {
        'filename': filename,
        'contents': contents,
        'all_copyrights': get_count_of_copyrights_of_any_style_any_holder(
            contents),
        'classified_copyrights': 0,
        'dominant_style': {},
        'year_list_style': {},
        'without_c_style': {},
    }
    for holder in EXPECTED_HOLDER_NAMES:
        dominant = file_has_dominant_style_copyright_for_holder(
            contents, holder)
        year_list = file_has_year_list_style_copyright_for_holder(
            contents, holder)
        without_c = file_has_without_c_style_copyright_for_holder(
            contents, holder)
        info['dominant_style'][holder] = dominant
        info['year_list_style'][holder] = year_list
        info['without_c_style'][holder] = without_c
        if dominant or year_list or without_c:
            info['classified_copyrights'] += 1
    return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
    """Print one tab-indented filename per line, but only in verbose mode."""
    if verbose:
        for name in filenames:
            print("\t%s" % name)
def _print_count_bucket(file_infos, label, predicate, verbose):
    """Print one '%4d with <label>' line plus (optionally) the file list."""
    matching = [i['filename'] for i in file_infos
                if predicate(i['all_copyrights'])]
    print("%4d with %s" % (len(matching), label))
    print_filenames(matching, verbose)


def _print_holder_section(file_infos, style_key, header, verbose):
    """Print the per-holder breakdown for one copyright style category."""
    print('')
    print(SEPARATOR)
    print(header)
    for holder_name in EXPECTED_HOLDER_NAMES:
        matching = [i['filename'] for i in file_infos if
                    i[style_key][holder_name]]
        if len(matching) > 0:
            print("%4d with '%s'" % (len(matching),
                                     holder_name.replace('\n', '\\n')))
            print_filenames(matching, verbose)


def print_report(file_infos, verbose):
    """Print the full copyright report for the gathered file infos.

    Output layout (byte-identical to the original monolithic version):
    examined-file summary, per-copyright-count buckets, one section per
    recognized style, then files with unclassified copyright holders.
    The triplicated section loops were factored into helpers.
    """
    print(SEPARATOR)
    examined = [i['filename'] for i in file_infos]
    print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
          len(examined))
    print_filenames(examined, verbose)
    print(SEPARATOR)
    print('')
    _print_count_bucket(file_infos, "zero copyrights",
                        lambda c: c == 0, verbose)
    _print_count_bucket(file_infos, "one copyright",
                        lambda c: c == 1, verbose)
    _print_count_bucket(file_infos, "two copyrights",
                        lambda c: c == 2, verbose)
    _print_count_bucket(file_infos, "three copyrights",
                        lambda c: c == 3, verbose)
    _print_count_bucket(file_infos, "four or more copyrights",
                        lambda c: c >= 4, verbose)
    _print_holder_section(
        file_infos, 'dominant_style',
        'Copyrights with dominant style:\ne.g. "Copyright (c)" and '
        '"<year>" or "<startYear>-<endYear>":\n', verbose)
    _print_holder_section(
        file_infos, 'year_list_style',
        'Copyrights with year list style:\ne.g. "Copyright (c)" and '
        '"<year1>, <year2>, ...":\n', verbose)
    _print_holder_section(
        file_infos, 'without_c_style',
        'Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
        '"<startYear>-<endYear>":\n', verbose)
    print('')
    print(SEPARATOR)
    unclassified_copyrights = [i['filename'] for i in file_infos if
                               i['classified_copyrights'] < i['all_copyrights']]
    print("%d with unexpected copyright holder names" %
          len(unclassified_copyrights))
    print_filenames(unclassified_copyrights, verbose)
    print(SEPARATOR)
def exec_report(base_directory, verbose):
    """Run the copyright report from within `base_directory`.

    The working directory is restored in a finally clause, so an exception
    while gathering or printing no longer leaves the process chdir'd into
    the repository (the original skipped the chdir-back on error).
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        filenames = get_filenames_to_examine()
        file_infos = [gather_file_info(f) for f in filenames]
        print_report(file_infos, verbose)
    finally:
        os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
# Usage text for the 'report' subcommand; printed verbatim via sys.exit().
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a nealcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
    """CLI entry for 'report': validate arguments and run the report.

    Expected argv layout: [script, 'report', <base_directory>, ('verbose')].
    Exits with a usage or error message on bad input.
    """
    if len(argv) == 2:
        sys.exit(REPORT_USAGE)
    base_directory = argv[2]
    if not os.path.exists(base_directory):
        sys.exit("*** bad <base_directory>: %s" % base_directory)
    if len(argv) == 3:
        verbose = False
    elif argv[3] == 'verbose':
        verbose = True
    else:
        # BUG FIX: the unrecognized argument is argv[3]; the old message
        # printed argv[2] (the base directory) instead.
        sys.exit("*** unknown argument: %s" % argv[3])
    exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
# Kept for backward compatibility with any external users of this constant;
# call_git_log now builds its argument list directly (see below).
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"


def call_git_log(filename):
    """Return `git log` author-date lines for `filename`, newest first.

    The command is passed to subprocess as an argument list instead of
    being split on spaces, so filenames containing whitespace no longer
    break the invocation.
    """
    out = subprocess.check_output(
        ['git', 'log', '--pretty=format:%ai', filename])
    return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
    """Return the year (as a string) of every git commit touching `filename`.

    Falls back to the current year when git reports no history.
    """
    log_lines = call_git_log(filename)
    if not log_lines:
        return [datetime.date.today().year]
    # Each line is an ISO 8601 timestamp, e.g. "2016-09-05 14:25:32 -0600";
    # keep only the leading year.
    return [entry.split(' ')[0].split('-')[0] for entry in log_lines]
def get_most_recent_git_change_year(filename):
    """Return the latest year in which git recorded a change to `filename`."""
    change_years = get_git_change_years(filename)
    return max(change_years)
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
    """Return the lines of `filename`, each retaining its newline.

    Uses a context manager so the handle is closed even if reading raises
    (the original's manual close was skipped on error).
    """
    with open(os.path.abspath(filename), 'r') as f:
        return f.readlines()
def write_file_lines(filename, file_lines):
    """Overwrite `filename` with the concatenation of `file_lines`.

    Uses a context manager so the handle is flushed and closed even if the
    write raises (the original's manual close was skipped on error).
    """
    with open(os.path.abspath(filename), 'w') as f:
        f.write(''.join(file_lines))
################################################################################
# update header years execution
################################################################################
# Patterns for the year-update pass. These deliberately shadow the
# report-section names above: the update pass only touches the dominant
# "Copyright (c) <years> <holder>" form for the core developers.
# BUG FIX: use a raw string — '\(' in a plain string is an invalid escape
# sequence (SyntaxWarning on modern Python); the resulting value is unchanged.
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The nealcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
    """Return (index, line) of the first updatable core copyright line.

    Returns (None, None) when no line matches.
    """
    for idx, line in enumerate(file_lines):
        if UPDATEABLE_LINE_COMPILED.search(line) is not None:
            return idx, line
    return None, None
def parse_year_range(year_range):
    """Split 'YYYY' or 'YYYY-YYYY' into a (start, end) tuple of strings.

    A single year yields the same value for both start and end.
    """
    parts = year_range.split('-')
    if len(parts) == 1:
        return parts[0], parts[0]
    return parts[0], parts[1]
def year_range_to_str(start_year, end_year):
    """Render a year range, collapsing identical start/end to a single year."""
    return (start_year if start_year == end_year
            else "%s-%s" % (start_year, end_year))
def create_updated_copyright_line(line, last_git_change_year):
    """Return `line` with its copyright years extended to the last change year.

    Returns the line unchanged when the end year already matches. Any text
    before the copyright marker (comment leaders etc.) is preserved.
    """
    splitter = 'Copyright (c) '
    parts = line.split(splitter)
    before_copyright, after_copyright = parts[0], parts[1]
    tokens = after_copyright.split(' ')
    start_year, end_year = parse_year_range(tokens[0])
    if end_year == last_git_change_year:
        return line
    updated_range = year_range_to_str(start_year, last_git_change_year)
    return (before_copyright + splitter + updated_range + ' ' +
            ' '.join(tokens[1:]))
def update_updatable_copyright(filename):
    """Bring the core copyright year in `filename` up to the last git change.

    Prints a one-line status message describing what (if anything) changed.
    """
    file_lines = read_file_lines(filename)
    index, line = get_updatable_copyright_line(file_lines)
    if not line:
        print_file_action_message(filename, "No updatable copyright.")
        return
    last_change_year = get_most_recent_git_change_year(filename)
    updated_line = create_updated_copyright_line(line, last_change_year)
    if updated_line == line:
        print_file_action_message(filename, "Copyright up-to-date.")
        return
    file_lines[index] = updated_line
    write_file_lines(filename, file_lines)
    print_file_action_message(
        filename, "Copyright updated! -> %s" % last_change_year)
def exec_update_header_year(base_directory):
    """Run the year-update pass over every examinable file in the repo.

    The working directory is restored in a finally clause, so an exception
    during an update no longer leaves the process chdir'd into the
    repository (the original skipped the chdir-back on error).
    """
    original_cwd = os.getcwd()
    os.chdir(base_directory)
    try:
        for filename in get_filenames_to_examine():
            update_updatable_copyright(filename)
    finally:
        os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
# Usage text for the 'update' subcommand; printed verbatim via sys.exit().
UPDATE_USAGE = """
Updates all the copyright headers of "The nealcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The nealcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The nealcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The nealcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The nealcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a nealcoin source code repository.
"""
def print_file_action_message(filename, action):
    """Print `filename` left-justified to 52 columns, then the action taken."""
    print(filename.ljust(52) + ' ' + action)
def update_cmd(argv):
    """CLI entry for 'update': validate arguments and run the year update.

    Expected argv layout: [script, 'update', <base_directory>].
    """
    if len(argv) != 3:
        sys.exit(UPDATE_USAGE)
    target_directory = argv[2]
    if not os.path.exists(target_directory):
        sys.exit("*** bad base_directory: %s" % target_directory)
    exec_update_header_year(target_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
    """Render a header template into newline-terminated lines.

    The template's leading/trailing blank lines are dropped and its first
    line's '%s' placeholder receives the rendered year range.
    """
    template_lines = header.split('\n')[1:-1]
    rendered = [template_lines[0] % year_range_to_str(start_year, end_year)]
    rendered.extend(template_lines[1:])
    return [line + '\n' for line in rendered]
# License header template inserted at the top of C++ sources; the '%s'
# placeholder receives the rendered year range.
CPP_HEADER = '''
// Copyright (c) %s The nealcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
    """C++ header lines in reverse order, for repeated insertion at index 0."""
    header_lines = get_header_lines(CPP_HEADER, start_year, end_year)
    return reversed(header_lines)
# License header template inserted at the top of Python sources; the '%s'
# placeholder receives the rendered year range.
PYTHON_HEADER = '''
# Copyright (c) %s The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
    """Python header lines in reverse order, for repeated insertion."""
    header_lines = get_header_lines(PYTHON_HEADER, start_year, end_year)
    return reversed(header_lines)
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
    """Return (earliest, latest) git change years for `filename`."""
    sorted_years = sorted(get_git_change_years(filename))
    return sorted_years[0], sorted_years[-1]
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
    """Return True if any line already carries the core copyright notice.

    Uses `is not None` (identity) rather than the original `!= None`
    equality comparison, per PEP 8.
    """
    index, _ = get_updatable_copyright_line(file_lines)
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
    """True when the file's first line is a '#!' interpreter directive."""
    if not file_lines:
        return False
    first_line = file_lines[0]
    # The line must contain more than just the two marker characters.
    return len(first_line) > 2 and first_line.startswith('#!')
def insert_python_header(filename, file_lines, start_year, end_year):
    """Insert the Python license header, below a '#!' line when present."""
    insert_idx = 1 if file_has_hashbang(file_lines) else 0
    # Header lines arrive reversed, so repeated insertion at a fixed index
    # yields the correct final order.
    for header_line in get_python_header_lines_to_insert(start_year, end_year):
        file_lines.insert(insert_idx, header_line)
    write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
    """Insert the C++ license header at the very top of the file."""
    # Header lines arrive reversed, so repeated insertion at index 0 yields
    # the correct final order.
    for header_line in get_cpp_header_lines_to_insert(start_year, end_year):
        file_lines.insert(0, header_line)
    write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
    """Insert a core copyright header into `filename` in the given style.

    Exits with an error when the file already carries a core copyright.
    """
    file_lines = read_file_lines(filename)
    if file_already_has_core_copyright(file_lines):
        sys.exit('*** %s already has a copyright by The nealcoin Core developers'
                 % (filename))
    start_year, end_year = get_git_change_year_range(filename)
    inserter = insert_python_header if style == 'python' else insert_cpp_header
    inserter(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
# Usage text for the 'insert' subcommand; printed verbatim via sys.exit().
INSERT_USAGE = """
Inserts a copyright header for "The nealcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The nealcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the nealcoin repository.
"""
def insert_cmd(argv):
    """CLI entry for 'insert': validate the file argument and dispatch.

    Expected argv layout: [script, 'insert', <file>]. The header style is
    chosen from the file extension.
    """
    if len(argv) != 3:
        sys.exit(INSERT_USAGE)
    filename = argv[2]
    if not os.path.isfile(filename):
        sys.exit("*** bad filename: %s" % filename)
    _, extension = os.path.splitext(filename)
    if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
        sys.exit("*** cannot insert for file extension %s" % extension)
    style = 'python' if extension == '.py' else 'cpp'
    exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
# Top-level usage text; printed when no (or an unknown) subcommand is given.
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The nealcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
# Valid first-argument subcommands; each maps to a *_cmd handler below.
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
    # Validate the subcommand, then dispatch to its handler; any unknown
    # or missing subcommand prints the top-level usage and exits.
    if len(sys.argv) == 1:
        sys.exit(USAGE)
    subcommand = sys.argv[1]
    if subcommand not in SUBCOMMANDS:
        sys.exit(USAGE)
    dispatch = {
        'report': report_cmd,
        'update': update_cmd,
        'insert': insert_cmd,
    }
    dispatch[subcommand](sys.argv)
|
|
"""
Executes a test suite consisting of two separate cases: short tests and long tests.
Before each case, an instance of Clipper is created. Tests
are then performed by invoking methods on this instance, often resulting
in the execution of docker commands.
"""
from __future__ import absolute_import, division, print_function
import unittest
import sys
import os
import json
import time
import requests
import tempfile
import shutil
from argparse import ArgumentParser
import logging
from test_utils import get_docker_client, create_docker_connection, fake_model_data
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath('%s/../clipper_admin' % cur_dir))
import clipper_admin as cl
from clipper_admin.deployers.python import create_endpoint as create_py_endpoint
from clipper_admin.deployers.python import deploy_python_closure
from clipper_admin import __version__ as clipper_version
# Configure root logging once at import time so test output carries
# timestamps and source locations.
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%y-%m-%d:%H:%M:%S',
    level=logging.INFO)
# Module-level logger, per the stdlib logging convention.
logger = logging.getLogger(__name__)
class ClipperManagerTestCaseShort(unittest.TestCase):
    """Short-running integration tests for the Clipper admin API.

    Every test gets a freshly restarted Clipper docker instance (see setUp),
    so no state leaks between tests; the final docker cleanup happens once
    in tearDownClass.
    """

    @classmethod
    def tearDownClass(cls):
        # Final cleanup: stop and remove all Clipper docker containers.
        # (Parameter renamed to the conventional `cls` for a classmethod.)
        cls.clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=False)

    def setUp(self):
        # Restart Clipper before every test so each test starts clean.
        self.clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=True)

    def test_register_model_correct(self):
        """Registering two versions of a model makes both retrievable."""
        input_type = "doubles"
        model_name = "m"
        self.clipper_conn.register_model(model_name, "v1", input_type)
        registered_model_info = self.clipper_conn.get_model_info(
            model_name, "v1")
        self.assertIsNotNone(registered_model_info)
        self.clipper_conn.register_model(model_name, "v2", input_type)
        registered_model_info = self.clipper_conn.get_model_info(
            model_name, "v2")
        self.assertIsNotNone(registered_model_info)

    def test_register_application_correct(self):
        """A registered application shows up in the app listing."""
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        app_name = "testapp"
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        registered_applications = self.clipper_conn.get_all_apps()
        self.assertGreaterEqual(len(registered_applications), 1)
        self.assertTrue(app_name in registered_applications)

    def test_link_not_registered_model_to_app_fails(self):
        """Linking an unregistered model raises a ClipperException."""
        not_deployed_model = "test_model"
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        with self.assertRaises(cl.ClipperException) as context:
            self.clipper_conn.link_model_to_app(app_name, not_deployed_model)
        self.assertTrue("No model with name" in str(context.exception))

    def test_delete_application_correct(self):
        """A deleted application disappears from the app listing."""
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        app_name = "testapp"
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        self.clipper_conn.delete_application(app_name)
        registered_applications = self.clipper_conn.get_all_apps()
        self.assertEqual(len(registered_applications), 0)
        self.assertTrue(app_name not in registered_applications)

    def test_get_model_links_when_none_exist_returns_empty_list(self):
        """An app with no linked models reports an empty link list."""
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        result = self.clipper_conn.get_linked_models(app_name)
        self.assertEqual([], result)

    def test_link_registered_model_to_app_succeeds(self):
        """Linking a registered model makes it appear in the app's links."""
        # Register app
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        # Register model
        model_name = "m"
        self.clipper_conn.register_model(model_name, "v1", input_type)
        self.clipper_conn.link_model_to_app(app_name, model_name)
        result = self.clipper_conn.get_linked_models(app_name)
        self.assertEqual([model_name], result)

    # BUG FIX: this method and the next one were missing the "test_" prefix,
    # so unittest discovery silently never ran them.
    def test_get_app_info_for_registered_app_returns_info_dictionary(self):
        """get_app_info returns a dict for a registered application."""
        # Register app
        app_name = "testapp"
        input_type = "doubles"
        default_output = "DEFAULT"
        slo_micros = 30000
        self.clipper_conn.register_application(app_name, input_type,
                                               default_output, slo_micros)
        result = self.clipper_conn.get_app_info(app_name)
        self.assertIsNotNone(result)
        self.assertEqual(type(result), dict)

    def test_get_app_info_for_nonexistent_app_returns_none(self):
        """get_app_info returns None for an unknown application."""
        result = self.clipper_conn.get_app_info("fake_app")
        self.assertIsNone(result)

    def test_set_num_replicas_for_external_model_fails(self):
        """Scaling a containerless (externally hosted) model must fail."""
        # Register model
        model_name = "m"
        input_type = "doubles"
        version = "v1"
        self.clipper_conn.register_model(model_name, version, input_type)
        with self.assertRaises(cl.ClipperException) as context:
            self.clipper_conn.set_num_replicas(model_name, 5, version)
        self.assertTrue("containerless model" in str(context.exception))

    def test_model_version_sets_correctly(self):
        """set_model_version marks the requested version as current."""
        model_name = "m"
        input_type = "doubles"
        v1 = "v1"
        self.clipper_conn.register_model(model_name, v1, input_type)
        v2 = "v2"
        self.clipper_conn.register_model(model_name, v2, input_type)
        self.clipper_conn.set_model_version(model_name, v1)
        all_models = self.clipper_conn.get_all_models(verbose=True)
        models_list_contains_correct_version = False
        for model_info in all_models:
            version = model_info["model_version"]
            if version == v1:
                models_list_contains_correct_version = True
                self.assertTrue(model_info["is_current_version"])
        self.assertTrue(models_list_contains_correct_version)

    def test_get_logs_creates_log_files(self):
        """get_clipper_logs materializes at least one readable log file."""
        if not os.path.exists(cl.CLIPPER_TEMP_DIR):
            os.makedirs(cl.CLIPPER_TEMP_DIR)
        tmp_log_dir = tempfile.mkdtemp(dir=cl.CLIPPER_TEMP_DIR)
        log_file_names = self.clipper_conn.get_clipper_logs(
            logging_dir=tmp_log_dir)
        self.assertIsNotNone(log_file_names)
        self.assertGreaterEqual(len(log_file_names), 1)
        for file_name in log_file_names:
            self.assertTrue(os.path.isfile(file_name))
        # Remove temp files
        shutil.rmtree(tmp_log_dir)

    def test_inspect_instance_returns_json_dict(self):
        """inspect_instance returns a non-empty metrics dictionary."""
        metrics = self.clipper_conn.inspect_instance()
        self.assertEqual(type(metrics), dict)
        self.assertGreaterEqual(len(metrics), 1)

    def test_model_deploys_successfully(self):
        """build_and_deploy_model registers the model and starts a container."""
        model_name = "m"
        version = "v1"
        container_name = "clipper/noop-container:{}".format(clipper_version)
        input_type = "doubles"
        self.clipper_conn.build_and_deploy_model(
            model_name, version, input_type, fake_model_data, container_name)
        model_info = self.clipper_conn.get_model_info(model_name, version)
        self.assertIsNotNone(model_info)
        self.assertEqual(type(model_info), dict)
        docker_client = get_docker_client()
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), 1)

    def test_set_num_replicas_for_deployed_model_succeeds(self):
        """Replica counts can be scaled up and back down for a deployed model."""
        model_name = "set-num-reps-model"
        input_type = "doubles"
        version = "v1"
        container_name = "clipper/noop-container:{}".format(clipper_version)
        self.clipper_conn.build_and_deploy_model(
            model_name, version, input_type, fake_model_data, container_name)
        # Version defaults to current version
        self.clipper_conn.set_num_replicas(model_name, 4)
        time.sleep(1)
        num_reps = self.clipper_conn.get_num_replicas(model_name, version)
        self.assertEqual(num_reps, 4)
        self.clipper_conn.set_num_replicas(model_name, 2, version)
        time.sleep(1)
        num_reps = self.clipper_conn.get_num_replicas(model_name, version)
        self.assertEqual(num_reps, 2)

    def test_remove_inactive_containers_succeeds(self):
        """Stopping inactive versions removes only the old version's replicas."""
        container_name = "clipper/noop-container:{}".format(clipper_version)
        input_type = "doubles"
        model_name = "remove-inactive-test-model"
        self.clipper_conn.build_and_deploy_model(
            model_name,
            1,
            input_type,
            fake_model_data,
            container_name,
            num_replicas=2)
        docker_client = get_docker_client()
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), 2)
        self.clipper_conn.build_and_deploy_model(
            model_name,
            2,
            input_type,
            fake_model_data,
            container_name,
            num_replicas=3)
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), 5)
        self.clipper_conn.stop_inactive_model_versions([model_name])
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), 3)

    def test_stop_models(self):
        """stop_models / stop_versioned_models / stop_all remove containers."""
        container_name = "clipper/noop-container:{}".format(clipper_version)
        input_type = "doubles"
        mnames = ["jimmypage", "robertplant", "jpj", "johnbohnam"]
        versions = ["i", "ii", "iii", "iv"]
        for model_name in mnames:
            for version in versions:
                self.clipper_conn.deploy_model(
                    model_name,
                    version,
                    input_type,
                    container_name,
                    num_replicas=1)
        docker_client = get_docker_client()
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), len(mnames) * len(versions))
        # stop all versions of models jimmypage, robertplant
        self.clipper_conn.stop_models(mnames[:2])
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), len(mnames[2:]) * len(versions))
        # After calling this method, the remaining models should be:
        # jpj:i, jpj:iii, johnbohnam:ii
        self.clipper_conn.stop_versioned_models({
            "jpj": ["ii", "iv"],
            "johnbohnam": ["i", "iv", "iii"],
        })
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), 3)
        self.clipper_conn.stop_all_model_containers()
        containers = docker_client.containers.list(filters={
            "ancestor": container_name
        })
        self.assertEqual(len(containers), 0)

    def test_python_closure_deploys_successfully(self):
        """Deploying a Python closure starts the right closure container."""
        model_name = "m2"
        model_version = 1

        def predict_func(inputs):
            return ["0" for x in inputs]

        input_type = "doubles"
        deploy_python_closure(self.clipper_conn, model_name, model_version,
                              input_type, predict_func)
        model_info = self.clipper_conn.get_model_info(model_name,
                                                      model_version)
        self.assertIsNotNone(model_info)
        docker_client = get_docker_client()
        py_minor_version = (sys.version_info.major, sys.version_info.minor)
        if py_minor_version < (3, 0):
            containers = docker_client.containers.list(
                filters={
                    "ancestor":
                    "clipper/python-closure-container:{}".format(
                        clipper_version)
                })
        elif py_minor_version == (3, 5):
            containers = docker_client.containers.list(
                filters={
                    "ancestor":
                    "clipper/python35-closure-container:{}".format(
                        clipper_version)
                })
        elif py_minor_version == (3, 6):
            containers = docker_client.containers.list(
                filters={
                    "ancestor":
                    "clipper/python36-closure-container:{}".format(
                        clipper_version)
                })
        else:
            msg = (
                "Python closure deployer only supports Python 2.7, 3.5, and 3.6. "
                "Detected {major}.{minor}").format(
                    major=sys.version_info.major, minor=sys.version_info.minor)
            logger.error(msg)
            # BUG FIX: previously this branch fell through to the assertion
            # below with `containers` undefined, producing a NameError
            # instead of a clean test failure on unsupported interpreters.
            self.fail(msg)
        self.assertGreaterEqual(len(containers), 1)

    def test_register_py_endpoint(self):
        """create_endpoint registers app + model, links them, starts a container."""
        name = "py-closure-test"
        expected_version = 1

        def predict_func(inputs):
            return ["0" for x in inputs]

        input_type = "doubles"
        create_py_endpoint(self.clipper_conn, name, input_type, predict_func)
        registered_applications = self.clipper_conn.get_all_apps()
        self.assertEqual(len(registered_applications), 1)
        self.assertTrue(name in registered_applications)
        registered_model_info = self.clipper_conn.get_model_info(
            name, expected_version)
        self.assertIsNotNone(registered_model_info)
        linked_models = self.clipper_conn.get_linked_models(name)
        self.assertIsNotNone(linked_models)
        docker_client = get_docker_client()
        py_minor_version = (sys.version_info.major, sys.version_info.minor)
        if py_minor_version < (3, 0):
            containers = docker_client.containers.list(
                filters={
                    "ancestor":
                    "clipper/python-closure-container:{}".format(
                        clipper_version)
                })
        elif py_minor_version == (3, 5):
            containers = docker_client.containers.list(
                filters={
                    "ancestor":
                    "clipper/python35-closure-container:{}".format(
                        clipper_version)
                })
        elif py_minor_version == (3, 6):
            containers = docker_client.containers.list(
                filters={
                    "ancestor":
                    "clipper/python36-closure-container:{}".format(
                        clipper_version)
                })
        else:
            msg = (
                "Python closure deployer only supports Python 2.7, 3.5, and 3.6. "
                "Detected {major}.{minor}").format(
                    major=sys.version_info.major, minor=sys.version_info.minor)
            logger.error(msg)
            # BUG FIX: fail cleanly instead of hitting the assertion below
            # with `containers` undefined (NameError).
            self.fail(msg)
        self.assertEqual(len(containers), 1)

    def test_test_predict_function(self):
        """test_predict_function matches live predictions for single and batch."""

        def predict_func(xs):
            return [sum(x) for x in xs]

        self.clipper_conn.register_application(
            name="hello-world",
            input_type="doubles",
            default_output="-1.0",
            slo_micros=100000)
        deploy_python_closure(
            self.clipper_conn,
            name="sum-model",
            version=1,
            input_type="doubles",
            func=predict_func)
        self.clipper_conn.link_model_to_app(
            app_name="hello-world", model_name="sum-model")
        time.sleep(60)
        addr = self.clipper_conn.get_query_addr()
        url = "http://{addr}/hello-world/predict".format(
            addr=addr, app='hello-world')
        headers = {"Content-type": "application/json"}
        test_input = [1.1, 2.2, 3.3]
        pred = requests.post(
            url, headers=headers, data=json.dumps({
                "input": test_input
            })).json()
        test_predict_result = self.clipper_conn.test_predict_function(
            query={"input": test_input},
            func=predict_func,
            input_type="doubles")
        self.assertEqual([pred['output']],
                         test_predict_result)  # tests single input
        test_batch_input = [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]]
        batch_pred = requests.post(
            url,
            headers=headers,
            data=json.dumps({
                "input_batch": test_batch_input
            })).json()
        test_batch_predict_result = self.clipper_conn.test_predict_function(
            query={"input_batch": test_batch_input},
            func=predict_func,
            input_type="doubles")
        batch_predictions = batch_pred['batch_predictions']
        batch_pred_outputs = [batch['output'] for batch in batch_predictions]
        self.assertEqual(batch_pred_outputs,
                         test_batch_predict_result)  # tests batch input

    def test_build_model_with_custom_packages(self):
        """build_model installs extra pip packages for each Python container."""
        self.clipper_conn.build_model(
            "buildmodeltest",
            "py2",
            fake_model_data,
            "clipper/python-closure-container:{}".format(clipper_version),
            None,
            pkgs_to_install=["sympy==1.1.*"])
        self.clipper_conn.build_model(
            "buildmodeltest",
            "py35",
            fake_model_data,
            "clipper/python35-closure-container:{}".format(clipper_version),
            None,
            pkgs_to_install=["sympy==1.1.*"])
        self.clipper_conn.build_model(
            "buildmodeltest",
            "py36",
            fake_model_data,
            # BUG FIX: was the python35 container image; the py36 build must
            # use the python36 closure container.
            "clipper/python36-closure-container:{}".format(clipper_version),
            None,
            pkgs_to_install=["sympy==1.1.*"])
class ClipperManagerTestCaseLong(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        """Start one shared Clipper instance and pre-register the five apps
        used by the long-running tests in this case.

        NOTE(review): unittest passes the class object here, so the
        conventional parameter name is ``cls``; ``self`` works but misleads.
        """
        self.clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=True)
        # Each test below uses its own app/model name pair (app3/m4,
        # app4/m5, ...) so the tests do not interfere with one another.
        self.app_name_1 = "app3"
        self.app_name_2 = "app4"
        self.app_name_3 = "app5"
        self.app_name_4 = "app6"
        self.app_name_5 = "app7"
        self.model_name_1 = "m4"
        self.model_name_2 = "m5"
        self.model_name_3 = "m6"
        self.model_name_4 = "m7"
        self.model_name_5 = "m8"
        self.input_type = "doubles"
        self.default_output = "DEFAULT"
        self.latency_slo_micros = 30000
        self.clipper_conn.register_application(
            self.app_name_1, self.input_type, self.default_output,
            self.latency_slo_micros)
        self.clipper_conn.register_application(
            self.app_name_2, self.input_type, self.default_output,
            self.latency_slo_micros)
        self.clipper_conn.register_application(
            self.app_name_3, self.input_type, self.default_output,
            self.latency_slo_micros)
        # app6 gets a much larger latency SLO (30 seconds instead of 30 ms).
        self.clipper_conn.register_application(
            self.app_name_4,
            self.input_type,
            self.default_output,
            slo_micros=30000000)
        self.clipper_conn.register_application(
            self.app_name_5, self.input_type, self.default_output,
            self.latency_slo_micros)
    @classmethod
    def tearDownClass(self):
        # Final cleanup: stop and remove all Clipper docker containers.
        # NOTE(review): the conventional name for a classmethod's first
        # parameter is ``cls``.
        self.clipper_conn = create_docker_connection(
            cleanup=True, start_clipper=False)
def test_unlinked_app_returns_default_predictions(self):
addr = self.clipper_conn.get_query_addr()
url = "http://{addr}/{app}/predict".format(
addr=addr, app=self.app_name_2)
test_input = [99.3, 18.9, 67.2, 34.2]
req_json = json.dumps({'input': test_input})
headers = {'Content-type': 'application/json'}
response = requests.post(url, headers=headers, data=req_json)
parsed_response = response.json()
logger.info(parsed_response)
self.assertEqual(parsed_response["output"], self.default_output)
self.assertTrue(parsed_response["default"])
def test_deployed_model_queried_successfully(self):
model_version = 1
container_name = "clipper/noop-container:{}".format(clipper_version)
self.clipper_conn.build_and_deploy_model(
self.model_name_2, model_version, self.input_type, fake_model_data,
container_name)
self.clipper_conn.link_model_to_app(self.app_name_2, self.model_name_2)
time.sleep(30)
addr = self.clipper_conn.get_query_addr()
url = "http://{addr}/{app}/predict".format(
addr=addr, app=self.app_name_2)
test_input = [99.3, 18.9, 67.2, 34.2]
req_json = json.dumps({'input': test_input})
headers = {'Content-type': 'application/json'}
response = requests.post(url, headers=headers, data=req_json)
parsed_response = response.json()
logger.info(parsed_response)
self.assertNotEqual(parsed_response["output"], self.default_output)
self.assertFalse(parsed_response["default"])
def test_batch_queries_returned_successfully(self):
    """A batch request must yield exactly one prediction per input."""
    container_name = "clipper/noop-container:{}".format(clipper_version)
    self.clipper_conn.build_and_deploy_model(
        self.model_name_3, 1, self.input_type, fake_model_data,
        container_name)
    self.clipper_conn.link_model_to_app(self.app_name_3, self.model_name_3)
    # Give the container time to register and come online.
    time.sleep(30)
    query_url = "http://{addr}/{app}/predict".format(
        addr=self.clipper_conn.get_query_addr(), app=self.app_name_3)
    batch = [[99.3, 18.9, 67.2, 34.2], [101.1, 45.6, 98.0, 99.1],
             [12.3, 6.7, 42.1, 12.6], [9.01, 87.6, 70.2, 19.6]]
    result = requests.post(
        query_url,
        headers={'Content-type': 'application/json'},
        data=json.dumps({'input_batch': batch})).json()
    logger.info(result)
    self.assertEqual(len(result["batch_predictions"]), len(batch))
def test_deployed_python_closure_queried_successfully(self):
    """Deploy a Python closure model and poll until it serves a real
    (non-default) prediction; the closure returns the input length."""
    model_version = 1

    def predict_func(inputs):
        # Echo back the number of values in each input vector.
        return [str(len(x)) for x in inputs]

    input_type = "doubles"
    deploy_python_closure(self.clipper_conn, self.model_name_1,
                          model_version, input_type, predict_func)
    self.clipper_conn.link_model_to_app(self.app_name_1, self.model_name_1)
    # Allow time for the closure container to build and register.
    time.sleep(60)
    received_non_default_prediction = False
    addr = self.clipper_conn.get_query_addr()
    url = "http://{addr}/{app}/predict".format(
        addr=addr, app=self.app_name_1)
    test_input = [101.1, 99.5, 107.2]
    req_json = json.dumps({'input': test_input})
    headers = {'Content-type': 'application/json'}
    # Retry for up to 40 attempts (sleeping between default responses)
    # until the model comes online and answers for real.
    for i in range(0, 40):
        response = requests.post(url, headers=headers, data=req_json)
        parsed_response = response.json()
        print(parsed_response)
        output = parsed_response["output"]
        if output == self.default_output:
            time.sleep(20)
        else:
            received_non_default_prediction = True
            # The closure returns str(len(input)) for each query.
            self.assertEqual(int(output), len(test_input))
            break
    self.assertTrue(received_non_default_prediction)
def test_fixed_batch_size_model_processes_specified_query_batch_size_when_saturated(
        self):
    """When saturated with queries, a model deployed with a fixed batch
    size must mostly receive batches of exactly that size; the closure
    reports the batch size it saw for each input."""
    model_version = 1

    def predict_func(inputs):
        # Slow down each batch so queries pile up and saturate the queue.
        time.sleep(.5)
        batch_size = len(inputs)
        return [str(batch_size) for _ in inputs]

    fixed_batch_size = 9
    total_num_queries = fixed_batch_size * 50
    deploy_python_closure(
        self.clipper_conn,
        self.model_name_4,
        model_version,
        self.input_type,
        predict_func,
        batch_size=fixed_batch_size)
    self.clipper_conn.link_model_to_app(self.app_name_4, self.model_name_4)
    # Allow time for the closure container to build and register.
    time.sleep(60)
    addr = self.clipper_conn.get_query_addr()
    url = "http://{addr}/{app}/predict".format(
        addr=addr, app=self.app_name_4)
    # Slightly perturb each query so every input vector is distinct.
    test_input = [[float(x) + (j * .001) for x in range(5)]
                  for j in range(total_num_queries)]
    req_json = json.dumps({'input_batch': test_input})
    headers = {'Content-type': 'application/json'}
    response = requests.post(url, headers=headers, data=req_json)
    parsed_response = response.json()
    # Count how many queries were processed in a full-size batch.
    num_max_batch_queries = 0
    for prediction in parsed_response["batch_predictions"]:
        batch_size = prediction["output"]
        if batch_size != self.default_output and int(
                batch_size) == fixed_batch_size:
            num_max_batch_queries += 1
    # Require at least 70% of queries to have seen a full batch.
    self.assertGreaterEqual(num_max_batch_queries,
                            int(total_num_queries * .7))
def test_remove_inactive_container(self):
    """Scaling a model's replica count down must remove the extra Docker
    containers while queries keep succeeding; scaling to zero must yield
    default responses explaining that no models are connected.

    (Removed the unused locals ``input_type`` and ``model_name`` from the
    original — the test actually uses ``self.input_type`` and
    ``self.model_name_5``.)
    """
    container_name = "clipper/noop-container:{}".format(clipper_version)
    self.clipper_conn.build_and_deploy_model(
        self.model_name_5,
        1,
        self.input_type,
        fake_model_data,
        container_name,
        num_replicas=2)
    docker_client = get_docker_client()
    containers = docker_client.containers.list(filters={
        "ancestor": container_name
    })
    self.assertEqual(len(containers), 2)
    self.clipper_conn.link_model_to_app(self.app_name_5, self.model_name_5)
    time.sleep(30)
    # We now have 2 replicas running, both with the same model name and
    # version. Send predictions and assert we get real responses.
    addr = self.clipper_conn.get_query_addr()
    test_input = [101.1, 99.5, 107.2]
    req_json = json.dumps({'input': test_input})
    headers = {'Content-type': 'application/json'}
    for i in range(2):
        response = requests.post(
            "http://%s/%s/predict" % (addr, self.app_name_5),
            headers=headers,
            data=req_json)
        result = response.json()
        self.assertEqual(response.status_code, requests.codes.ok)
        self.assertEqual(result["default"], False)
    # One of the containers should go inactive.
    self.clipper_conn.set_num_replicas(
        name=self.model_name_5, version=1, num_replicas=1)
    time.sleep(100)
    containers = docker_client.containers.list(filters={
        "ancestor": container_name
    })
    self.assertEqual(len(containers), 1)
    test_input = [101.1, 99.9]
    req_json = json.dumps({'input': test_input})
    # Predictions should still succeed with a single replica.
    for i in range(2):
        response = requests.post(
            "http://%s/%s/predict" % (addr, self.app_name_5),
            headers=headers,
            data=req_json)
        result = response.json()
        self.assertEqual(response.status_code, requests.codes.ok)
        self.assertEqual(result["default"], False)
    # The second container should go inactive as well.
    self.clipper_conn.set_num_replicas(
        name=self.model_name_5, version=1, num_replicas=0)
    time.sleep(100)
    containers = docker_client.containers.list(filters={
        "ancestor": container_name
    })
    self.assertEqual(len(containers), 0)
    test_input = [101.1]
    req_json = json.dumps({'input': test_input})
    # With no replicas left, queries fall back to the default response.
    for i in range(2):
        response = requests.post(
            "http://%s/%s/predict" % (addr, self.app_name_5),
            headers=headers,
            data=req_json)
        result = response.json()
        self.assertEqual(result["default"], True)
        self.assertEqual(result["default_explanation"],
                         "No connected models found for query")
# Explicit orderings: several cases depend on state set up by earlier ones.
SHORT_TEST_ORDERING = [
    'test_register_model_correct', 'test_register_application_correct',
    'test_link_not_registered_model_to_app_fails',
    'test_get_model_links_when_none_exist_returns_empty_list',
    'test_link_registered_model_to_app_succeeds',
    'get_app_info_for_registered_app_returns_info_dictionary',
    'get_app_info_for_nonexistent_app_returns_none',
    'test_set_num_replicas_for_external_model_fails',
    'test_model_version_sets_correctly', 'test_get_logs_creates_log_files',
    'test_inspect_instance_returns_json_dict',
    'test_model_deploys_successfully',
    'test_set_num_replicas_for_deployed_model_succeeds',
    'test_remove_inactive_containers_succeeds', 'test_stop_models',
    'test_python_closure_deploys_successfully', 'test_register_py_endpoint',
    'test_test_predict_function', 'test_build_model_with_custom_packages',
    'test_delete_application_correct'
]

LONG_TEST_ORDERING = [
    'test_remove_inactive_container',
    'test_unlinked_app_returns_default_predictions',
    'test_deployed_model_queried_successfully',
    'test_batch_queries_returned_successfully',
    'test_deployed_python_closure_queried_successfully',
    'test_fixed_batch_size_model_processes_specified_query_batch_size_when_saturated'
]

if __name__ == '__main__':
    parser = ArgumentParser(
        "Runs clipper manager tests. If no arguments are specified, all tests are "
        "executed.")
    parser.add_argument(
        "-s", "--short", action="store_true", dest="run_short",
        help="Run the short suite of test cases")
    parser.add_argument(
        "-l", "--long", action="store_true", dest="run_long",
        help="Run the long suite of test cases")
    parser.add_argument(
        "-a", "--all", action="store_true", dest="run_all",
        help="Run all test cases")
    args = parser.parse_args()
    # Running with no suite flags at all means "run everything".
    run_all = args.run_all or not (args.run_short or args.run_long)
    suite = unittest.TestSuite()
    if run_all or args.run_short:
        for test_name in SHORT_TEST_ORDERING:
            suite.addTest(ClipperManagerTestCaseShort(test_name))
    if run_all or args.run_long:
        for test_name in LONG_TEST_ORDERING:
            suite.addTest(ClipperManagerTestCaseLong(test_name))
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit non-zero when any test failed, so CI notices.
    sys.exit(not outcome.wasSuccessful())
|
|
"""Support for non-delivered packages recorded in AfterShip."""
from datetime import timedelta
import logging
from pyaftership.tracker import Tracking
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME, HTTP_OK
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)

# Attribution text and attribute keys exposed on the sensor.
ATTRIBUTION = "Information provided by AfterShip"
ATTR_TRACKINGS = "trackings"

# Base URL used to build a deep link for each tracked package.
BASE = "https://track.aftership.com/"

# Configuration / service-call field names.
CONF_SLUG = "slug"
CONF_TITLE = "title"
CONF_TRACKING_NUMBER = "tracking_number"

DEFAULT_NAME = "aftership"
# Dispatcher signal fired when trackings are added/removed.
UPDATE_TOPIC = f"{DOMAIN}_update"

ICON = "mdi:package-variant-closed"

# Throttle window for polling the AfterShip API.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)

SERVICE_ADD_TRACKING = "add_tracking"
SERVICE_REMOVE_TRACKING = "remove_tracking"

# Schema for the add_tracking service: number required, title/slug optional.
ADD_TRACKING_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_TRACKING_NUMBER): cv.string,
        vol.Optional(CONF_TITLE): cv.string,
        vol.Optional(CONF_SLUG): cv.string,
    }
)

# Schema for the remove_tracking service: both fields required.
REMOVE_TRACKING_SERVICE_SCHEMA = vol.Schema(
    {vol.Required(CONF_SLUG): cv.string, vol.Required(CONF_TRACKING_NUMBER): cv.string}
)

# Platform configuration: API key required, sensor name optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the AfterShip sensor platform."""
    session = async_get_clientsession(hass)
    aftership = Tracking(hass.loop, session, config[CONF_API_KEY])

    # Validate the API key up front with an initial fetch.
    await aftership.get_trackings()
    if not aftership.meta or aftership.meta["code"] != HTTP_OK:
        _LOGGER.error(
            "No tracking data found. Check API key is correct: %s", aftership.meta
        )
        return

    async_add_entities([AfterShipSensor(aftership, config[CONF_NAME])], True)

    async def handle_add_tracking(call):
        """Call when a user adds a new Aftership tracking from Home Assistant."""
        await aftership.add_package_tracking(
            call.data[CONF_TRACKING_NUMBER],
            call.data.get(CONF_TITLE),
            call.data.get(CONF_SLUG),
        )
        async_dispatcher_send(hass, UPDATE_TOPIC)

    async def handle_remove_tracking(call):
        """Call when a user removes an Aftership tracking from Home Assistant."""
        await aftership.remove_package_tracking(
            call.data[CONF_SLUG], call.data[CONF_TRACKING_NUMBER]
        )
        async_dispatcher_send(hass, UPDATE_TOPIC)

    hass.services.async_register(
        DOMAIN,
        SERVICE_ADD_TRACKING,
        handle_add_tracking,
        schema=ADD_TRACKING_SERVICE_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SERVICE_REMOVE_TRACKING,
        handle_remove_tracking,
        schema=REMOVE_TRACKING_SERVICE_SCHEMA,
    )
class AfterShipSensor(Entity):
    """Representation of a AfterShip sensor.

    State is the number of not-yet-delivered packages; the full tracking
    details are exposed as state attributes.
    """

    def __init__(self, aftership, name):
        """Initialize the sensor.

        aftership -- authenticated pyaftership ``Tracking`` client.
        name -- entity name taken from the platform configuration.
        """
        self._attributes = {}
        self._name = name
        self._state = None
        self.aftership = aftership

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return "packages"

    @property
    def device_state_attributes(self):
        """Return attributes for the sensor."""
        return self._attributes

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return ICON

    async def async_added_to_hass(self):
        """Register callbacks."""
        # Re-query immediately whenever the add/remove services fire the
        # UPDATE_TOPIC dispatcher signal.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                UPDATE_TOPIC, self._force_update
            )
        )

    async def _force_update(self):
        """Force update of data."""
        # no_throttle bypasses the Throttle decorator's 15-minute window.
        await self.async_update(no_throttle=True)
        self.async_write_ha_state()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self, **kwargs):
        """Get the latest data from the AfterShip API."""
        await self.aftership.get_trackings()
        if not self.aftership.meta:
            _LOGGER.error("Unknown errors when querying")
            return
        if self.aftership.meta["code"] != HTTP_OK:
            _LOGGER.error(
                "Errors when querying AfterShip. %s", str(self.aftership.meta)
            )
            return
        # Delivered packages are listed in attributes but not counted
        # towards the sensor state.
        status_to_ignore = {"delivered"}
        status_counts = {}
        trackings = []
        not_delivered_count = 0
        for track in self.aftership.trackings["trackings"]:
            status = track["tag"].lower()
            name = (
                track["tracking_number"] if track["title"] is None else track["title"]
            )
            # Fall back to a synthetic status string when the carrier has
            # not reported any checkpoint yet.
            last_checkpoint = (
                f"Shipment {track['tag'].lower()}"
                if not track["checkpoints"]
                else track["checkpoints"][-1]
            )
            status_counts[status] = status_counts.get(status, 0) + 1
            trackings.append(
                {
                    "name": name,
                    "tracking_number": track["tracking_number"],
                    "slug": track["slug"],
                    "link": f"{BASE}{track['slug']}/{track['tracking_number']}",
                    "last_update": track["updated_at"],
                    "expected_delivery": track["expected_delivery"],
                    "status": track["tag"],
                    "last_checkpoint": last_checkpoint,
                }
            )
            if status not in status_to_ignore:
                not_delivered_count += 1
            else:
                _LOGGER.debug("Ignoring %s as it has status: %s", name, status)
        self._attributes = {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            **status_counts,
            ATTR_TRACKINGS: trackings,
        }
        # Sensor state: packages still in transit.
        self._state = not_delivered_count
|
|
"""The WiderFace dataset.
"""
# standard imports
import os
import logging
# third party imports
import numpy as np
# toolbox imports
from dltb.base.data import Data
from dltb.base.image import BoundingBox, Region, Landmarks
from dltb.tool.classifier import ClassScheme
from dltb.datasource import Imagesource, Sectioned, DataDirectory
# logging
LOG = logging.getLogger(__name__)
class WiderfaceScheme(ClassScheme):
    """The WiderFace dataset divides its data into
    62 classes (actually just 61, as class 60 is missing).

    Class labels can be obtained from directory names in the
    data directories.
    """

    def __init__(self) -> None:
        """Initialization of the :py:class:`WiderfaceScheme`."""
        # The WIDER face dataset has 62 classes (but it seems
        # that only 61 are used - class '60' is missing).
        super().__init__(length=62, key='widerface')

    @property
    def prepared(self) -> bool:
        """Check if the :py:class:`WiderfaceScheme` has been initialized."""
        return 'text' in self._labels

    def prepare(self) -> None:
        """Prepare the labels for the Widerface dataset.
        The labels will be read in from the directory names
        in the WIDERFACE_DATA directory.
        """
        if self.prepared:
            return  # nothing to do ...
        # Fix: default to '.' when WIDERFACE_DATA is unset, consistent with
        # WiderFace.__init__; the old os.getenv(...) without a default made
        # os.path.join raise TypeError on None.
        widerface_data = os.getenv('WIDERFACE_DATA', '.')
        train_dir = os.path.join(widerface_data, 'WIDER_train', 'images')
        text = [''] * len(self)
        for dirname in os.listdir(train_dir):
            # Directory names look like '12--Group': '<number>--<label>'.
            number, label = dirname.split('--', maxsplit=1)
            text[int(number)] = label
        self.add_labels(text, 'text')


# Register the scheme so it can be looked up via ClassScheme['widerface'].
WiderfaceScheme()
class WiderFace(DataDirectory, Imagesource, Sectioned,
                sections={'train', 'val', 'test'}):
    # pylint: disable=too-many-ancestors
    """
    http://shuoyang1213.me/WIDERFACE/

    "Wider Face" is a face detection benchmark consisting of 32,203
    images with 393,703 labeled faces.
    The faces have wide variability in scale, pose, occlusion.
    Images are categorized in 61 event class.
    From each class train/validation/test datasets where split
    in relation 40%/10%/50%.

    Attributes
    ----------
    blur: Tuple[str]
    expression: Tuple[str]
    illumination: Tuple[str]
    occlusion: Tuple[str]
    invalid: Tuple[str]
    """

    # Human-readable names for the numeric per-face attribute codes.
    blur = ('clear', 'normal blur', 'heavy blur')
    expression = ('typical expression', 'exaggerate expression')
    illumination = ('normal illumination', 'extreme illumination')
    occlusion = ('no occlusion', 'partial occlusion', 'heavy occlusion')
    pose = ('typical pose', 'atypical pose')
    invalid = ('valid image', 'invalid image')

    def __init__(self, section: str = 'train',
                 key: str = None, **kwargs) -> None:
        """Initialize the WIDER Face Datasource.

        section -- one of 'train', 'val', 'test'.
        key -- optional datasource key; defaults to 'wider-faces-<section>'.
        """
        # Dataset root; '.' when the WIDERFACE_DATA env var is unset.
        self._widerface_data = os.getenv('WIDERFACE_DATA', '.')
        self._section = section
        scheme = ClassScheme['widerface']
        directory = os.path.join(self._widerface_data,
                                 'WIDER_' + self._section, 'images')
        super().__init__(key=key or f"wider-faces-{section}",
                         section=section, directory=directory, scheme=scheme,
                         description=f"WIDER Faces", **kwargs)
        # filename -> list of 10-tuples (bbox + attributes); filled in
        # _prepare_annotations.
        self._annotations = None

    def __str__(self):
        return f'WIDER Faces ({self._section})'

    #
    # Preparation
    #

    def _prepare(self, **kwargs) -> None:
        # pylint: disable=arguments-differ
        """Prepare the WIDER Face dataset. This will provide in a list of
        all images provided by the dataset, either by reading in a
        prepared file, or by traversing the directory.
        """
        LOG.info("Preparing WiderFace[%r]: %s",
                 self.preparable, self.directory)
        # Per-section cache of the file list to avoid re-walking the tree.
        cache = f"widerface_{self._section}_filelist.p"
        super()._prepare(filenames_cache=cache, **kwargs)
        self._scheme.prepare()
        self._prepare_annotations()

    def _unprepare(self):
        """Release the prepared state (drop the loaded annotations)."""
        self._annotations = None
        super()._unprepare()

    def _prepare_annotations(self):
        """Load the annotations for the training images.

        The annotations are stored in a single large text file
        ('wider_face_train_bbx_gt.txtX'), with a multi-line entry per file.
        An entry has the following structure: The first line contains
        the filename of the training image. The second line contains
        the number of faces in that image. Then follows one line for
        each face, consisting of a bounding box (x,y,w,h) and attributes
        (blur, expression, illumination, invalid, occlusion, pose)
        encoded numerically. In these lines, all numbers are separated
        by spaces. Example:

        0--Parade/0_Parade_marchingband_1_95.jpg
        5
        828 209 56 76 0 0 0 0 0 0
        661 258 49 65 0 0 0 0 0 0
        503 253 48 66 0 0 1 0 0 0
        366 181 51 74 0 0 1 0 0 0
        148 176 54 68 0 0 1 0 0 0
        """
        self._annotations = {}
        # check if annotations file exists
        filename = None
        if self._widerface_data is not None:
            filename = os.path.join(self._widerface_data, 'wider_face_split',
                                    'wider_face_train_bbx_gt.txt')
        if not os.path.isfile(filename):
            return  # file not found
        # load the annotations
        try:
            with open(filename, "r") as file:
                # NOTE: the loop variable deliberately rebinds 'filename'
                # to each image path listed in the annotation file.
                for filename in file:
                    filename = filename.rstrip()
                    lines = int(file.readline())
                    faces = []
                    for line_number in range(lines):
                        # x1, y1, w, h, blur, expression, illumination,
                        # invalid, occlusion, pose
                        attributes = tuple(int(a)
                                           for a in file.readline().split())
                        if len(attributes) == 10:
                            faces.append(attributes)
                        else:
                            LOG.warning("bad annotation for '%s', line %d/%d':"
                                        "got %d instead of 10 values",
                                        filename, line_number,
                                        lines, len(attributes))
                    if lines == 0:
                        # images with 0 faces nevertheless have one
                        # line with dummy attributes -> just ignore that line
                        file.readline()
                    # Store all faces for the current file
                    self._annotations[filename] = faces
        except FileNotFoundError:
            self._annotations = {}

    #
    # Data
    #

    def _get_meta(self, data: Data, **kwargs) -> None:
        # Declare the (batched) 'label' attribute before delegating.
        data.add_attribute('label', batch=True)
        super()._get_meta(data, **kwargs)

    def _get_data_from_file(self, data, filename: str) -> str:
        """Load an image and attach its face regions as ``data.label``.

        Arguments
        ---------
        filename: str
            The relative filename.
        """
        super()._get_data_from_file(data, filename)
        # Convert each stored 10-tuple into a Region with a BoundingBox
        # plus the numeric face attributes.
        regions = []
        for (pos_x, pos_y, width, height, blur, expression, illumination,
             invalid, occlusion, pose) in self._annotations[filename]:
            region = Region(BoundingBox(x=pos_x, y=pos_y,
                                        width=width, height=height),
                            blur=blur, expression=expression,
                            illumination=illumination,
                            invalid=invalid, occlusion=occlusion,
                            pose=pose)
            regions.append(region)
        data.label = regions
# FIXME[todo]
class W300(DataDirectory, Imagesource):
    """The 300 Faces In-the-Wild Challenge (300-W), from the ICCV 2013.
    The challenge targets facial landmark detection, using a 68 point
    annotation scheme.

    Besides 300-W, there are several other datasets annotated in the
    same scheme: AFW, FRGC, HELEN, IBUG, LPFW, and XM2VTS.

    For more information visit:
    https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @staticmethod
    def _load_annotation(filename: str) -> Landmarks:
        """Parse the landmark annotation file. Each image of the dataset is
        accompanied by a file with the same name and the suffix '.pts'
        providing the positions of the 68 points.

        The file has the following format:

            version: 1
            n_points: 68
            {
            403.167108 479.842932
            407.333804 542.927159
            ...
            625.877482 717.615332
            }
        """
        with open(filename) as file:
            _ = file.readline().split(':')[1]  # version
            n_points = int(file.readline().split(':')[1])
            points = np.ndarray((n_points, 2))
            _ = file.readline()  # '{'
            for i in range(n_points):
                # Bug fix: 'file.readline' was missing its call parentheses,
                # so .rstrip() was invoked on the bound method object and
                # raised AttributeError. Using split() without arguments
                # also tolerates the trailing newline and repeated spaces.
                pos_x, pos_y = file.readline().split()
                points[i] = float(pos_x), float(pos_y)
        return Landmarks(points)
|
|
# vim: expandtab:tabstop=4:shiftwidth=4
"""This module comprises Aws specific utility functions."""
import os
import re
# Buildbot does not have multi_inventory installed
#pylint: disable=no-name-in-module
from openshift_ansible import multi_inventory
class ArgumentError(Exception):
    """Raised when improper arguments are passed.

    The exact error text is kept on the ``message`` attribute so callers
    can inspect it directly.
    """

    def __init__(self, message):
        """Create an ArgumentError carrying *message*."""
        super(ArgumentError, self).__init__()
        self.message = message
class AwsUtil(object):
    """This class contains the AWS utility functions.

    NOTE(review): this module is Python 2 code (print statements,
    dict.iteritems, dict.has_key) — confirm the supported interpreter
    before modernizing.
    """

    def __init__(self, host_type_aliases=None):
        """Initialize the AWS utility class.

        Keyword arguments:
        host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
        """
        # alias -> canonical host-type, filled by setup_host_type_alias_lookup
        self.alias_lookup = {}
        host_type_aliases = host_type_aliases or {}
        self.host_type_aliases = host_type_aliases
        # directory containing this module (single-arg join is a no-op;
        # presumably kept for historical reasons)
        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
        self.setup_host_type_alias_lookup()

    def setup_host_type_alias_lookup(self):
        """Sets up the alias to host-type lookup table."""
        # Invert host_type_aliases: every alias maps back to its host-type.
        for key, values in self.host_type_aliases.iteritems():
            for value in values:
                self.alias_lookup[value] = key

    @staticmethod
    def get_inventory(args=None, cached=False):
        """Calls the inventory script and returns a dictionary containing the inventory.

        Keyword arguments:
        args -- optional arguments to pass to the inventory script
        cached -- when True, read the cached inventory instead of re-running it
        """
        minv = multi_inventory.MultiInventory(args)
        if cached:
            minv.get_inventory_from_cache()
        else:
            minv.run()
        return minv.result

    def get_clusters(self):
        """Searches for cluster tags in the inventory and returns all of the clusters found."""
        pattern = re.compile(r'^oo_clusterid_(.*)')
        clusters = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                clusters.append(matched.group(1))
        clusters.sort()
        return clusters

    def get_environments(self):
        """Searches for env tags in the inventory and returns all of the envs found."""
        pattern = re.compile(r'^oo_environment_(.*)')
        envs = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                envs.append(matched.group(1))
        envs.sort()
        return envs

    def get_host_types(self):
        """Searches for host-type tags in the inventory and returns all host-types found."""
        pattern = re.compile(r'^oo_hosttype_(.*)')
        host_types = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                host_types.append(matched.group(1))
        host_types.sort()
        return host_types

    def get_sub_host_types(self):
        """Searches for sub-host-type tags in the inventory and returns all sub-host-types found."""
        pattern = re.compile(r'^oo_subhosttype_(.*)')
        sub_host_types = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                sub_host_types.append(matched.group(1))
        sub_host_types.sort()
        return sub_host_types

    def get_security_groups(self):
        """Searches for security_groups in the inventory and returns all SGs found."""
        pattern = re.compile(r'^security_group_(.*)')
        groups = []
        inv = self.get_inventory()
        for key in inv.keys():
            matched = pattern.match(key)
            if matched:
                groups.append(matched.group(1))
        groups.sort()
        return groups

    def build_host_dict_by_env(self, args=None):
        """Searches the inventory for hosts in an env and returns their hostvars."""
        args = args or []
        inv = self.get_inventory(args)
        # env name -> {"<name>:<id>": hostvars}
        inst_by_env = {}
        for _, host in inv['_meta']['hostvars'].items():
            # If you don't have an environment tag, we're going to ignore you
            if 'oo_environment' not in host:
                continue
            if host['oo_environment'] not in inst_by_env:
                inst_by_env[host['oo_environment']] = {}
            host_id = "%s:%s" % (host['oo_name'], host['oo_id'])
            inst_by_env[host['oo_environment']][host_id] = host
        return inst_by_env

    def print_host_types(self):
        """Gets the list of host types and aliases and outputs them in columns."""
        host_types = self.get_host_types()
        ht_format_str = "%35s"
        alias_format_str = "%-20s"
        combined_format_str = ht_format_str + " " + alias_format_str
        print
        print combined_format_str % ('Host Types', 'Aliases')
        print combined_format_str % ('----------', '-------')
        for host_type in host_types:
            aliases = []
            if host_type in self.host_type_aliases:
                aliases = self.host_type_aliases[host_type]
                print combined_format_str % (host_type, ", ".join(aliases))
            else:
                print ht_format_str % host_type
        print

    def resolve_host_type(self, host_type):
        """Converts a host-type alias into a host-type.

        Keyword arguments:
        host_type -- The alias or host_type to look up.

        Example (depends on aliases defined in config file):
        host_type = ex-node
        returns: openshift-node
        """
        if self.alias_lookup.has_key(host_type):
            return self.alias_lookup[host_type]
        return host_type

    @staticmethod
    def gen_version_tag(ver):
        """Generate the version tag
        """
        return "oo_version_%s" % ver

    @staticmethod
    def gen_clusterid_tag(clu):
        """Generate the clusterid tag
        """
        return "oo_clusterid_%s" % clu

    @staticmethod
    def gen_env_tag(env):
        """Generate the environment tag
        """
        return "oo_environment_%s" % env

    def gen_host_type_tag(self, host_type, version):
        """Generate the host type tag
        """
        # Version 2 inventories use aliased host-types; resolve first.
        if version == '2':
            host_type = self.resolve_host_type(host_type)
        return "oo_hosttype_%s" % host_type

    @staticmethod
    def gen_sub_host_type_tag(sub_host_type):
        """Generate the host type tag
        """
        return "oo_subhosttype_%s" % sub_host_type

    # This function uses all of these params to perform a filters on our host inventory.
    # pylint: disable=too-many-arguments
    def get_host_list(self, clusters=None, host_type=None, sub_host_type=None, envs=None, version=None, cached=False):
        """Get the list of hosts from the inventory using host-type and environment
        """
        retval = set([])
        envs = envs or []
        inv = self.get_inventory(cached=cached)
        retval.update(inv.get('all_hosts', []))
        if clusters:
            cluster_hosts = set([])
            if len(clusters) > 1:
                for cluster in clusters:
                    clu_tag = AwsUtil.gen_clusterid_tag(cluster)
                    cluster_hosts.update(inv.get(clu_tag, []))
            else:
                cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), []))
            retval.intersection_update(cluster_hosts)
        if envs:
            env_hosts = set([])
            if len(envs) > 1:
                for env in envs:
                    env_tag = AwsUtil.gen_env_tag(env)
                    env_hosts.update(inv.get(env_tag, []))
            else:
                env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), []))
            retval.intersection_update(env_hosts)
        if host_type:
            retval.intersection_update(inv.get(self.gen_host_type_tag(host_type, version), []))
        if sub_host_type:
            retval.intersection_update(inv.get(self.gen_sub_host_type_tag(sub_host_type), []))
        # NOTE(review): when version is None this still intersects with the
        # 'oo_version_None' tag (usually absent, emptying the result) —
        # confirm callers always pass a version or 'all'.
        if version != 'all':
            retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), []))
        return list(retval)

    def convert_to_ip(self, hosts, cached=False):
        """convert a list of host names to ip addresses"""
        inv = self.get_inventory(cached=cached)
        ips = []
        for host in hosts:
            ips.append(inv['_meta']['hostvars'][host]['oo_public_ip'])
        return ips
|
|
# some methods should use 'put' instead of 'get'
# some seem to require 'delete' now?
# use the right (latest) version of this:
# http://s3.amazonaws.com/h2o-release/h2o-dev/master/1019/docs-website/REST/endpoints/markdown/toc.md
import os, sys, time, requests, zipfile, StringIO, re
import h2o_args
# from h2o_cmd import runInspect, infoFromSummary
import h2o_cmd, h2o_util, h2o_browse as h2b, h2o_sandbox
from h2o_objects import H2O
from h2o_test import verboseprint, dump_json, check_sandbox_for_errors, get_sandbox_name, log
import urllib
def poll_job2(self, firstResult, algo=None, timeoutSecs=60, noPoll=False, **kwargs):
if noPoll:
result = firstResult
elif ('validation_error_count' in firstResult) and (firstResult['validation_error_count'] > 0):
h2p.yellow_print("parameter error in %s" % algo)
result = firstResult
else:
job_result = result1['jobs'][0]
job_key = job_result['key']['name']
verboseprint("%s job_key: %s" % (algo, job_key))
job_result = self.poll_job(job_key, timeoutSecs=timeoutSecs)
verboseprint(job_result)
elapsed = time.time() - start
print algo, " end on ", training_frame, 'took', time.time() - start, 'seconds'
print "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
if job_result:
jobs = job_result['jobs'][0]
description = jobs['description']
dest = jobs['dest']
msec = jobs['msec']
status = jobs['status']
progress = jobs['progress']
if status=='FAILED':
print dump_json(job_result)
raise Exception("Taking exception on %s job status: %s %s %s %s" % \
(algo, status, progress, msec, description))
result = job_result
else:
raise Exception("build_model didn't get a job_result when it expected one")
verboseprint("result:", result)
h2o_sandbox.check_sandbox_for_errors()
return result
# This is done before import h2o_ray, which imports h2o_methods!
# ignoreNone is used if new = None shouldn't overwrite. Normally it does!
def check_params_update_kwargs(params_dict, kw, function, print_params, ignoreNone=False):
    """Overlay keyword arguments *kw* onto *params_dict* in place.

    Only keys that already exist in params_dict may be updated; any other
    key raises, so typos in test parameters fail loudly. *function* is the
    caller's name, used in error/print output.
    """
    # only update params_dict..don't add
    # throw away anything else as it should come from the model (propagating what RF used)
    for k, v in kw.iteritems():
        if k in params_dict:
            # NOTE(review): this tests truthiness, not 'v is not None', so
            # with ignoreNone=True a legitimate 0/False/'' value is also
            # skipped — confirm that is intended.
            if v or not ignoreNone:
                # what if a type conversion happens here?
                params_dict[k] = v
        else:
            raise Exception("illegal parameter '%s' with value '%s' in %s" % (k, v, function))
    if print_params:
        print "\n%s parameters:" % function, params_dict
        sys.stdout.flush()
def get_cloud(self, noExtraErrorCheck=False, timeoutSecs=10):
    """Fetch and summarize this node's cloud status via 3/Cloud.json."""
    # hardwire it to allow a 60 second timeout
    cloud = self.do_json_request(
        '3/Cloud.json', noExtraErrorCheck=noExtraErrorCheck, timeout=timeoutSecs)
    # verboseprint(cloud)
    version = cloud['version']
    # local builds have (unknown) if not version.startswith('0'):
    # local builds have (unknown)     raise Exception("h2o version at node[0] doesn't look like h2o-dev version. (start with 0) %s" % version)
    consensus = cloud['consensus']
    locked = cloud['locked']
    cloud_size = cloud['cloud_size']
    cloud_name = cloud['cloud_name']
    node_id = self.node_id
    verboseprint('%s%s %s%s %s%s %s%s %s%s' % (
        "\tnode_id: ", node_id,
        "\tcloud_size: ", cloud_size,
        "\tconsensus: ", consensus,
        "\tlocked: ", locked,
        "\tversion: ", version,
    ))
    return cloud
def h2o_log_msg(self, message=None, timeoutSecs=15):
    """Echo *message* into the h2o node's log via 3/LogAndEcho.json.

    Without a message, a banner identifying the running python test is
    logged instead.
    """
    if not message:
        message = "\n"
        message += "\n#***********************"
        message += "\npython_test_name: " + h2o_args.python_test_name
        message += "\n#***********************"
    self.do_json_request(
        '3/LogAndEcho.json', cmd='post',
        params={'message': message}, timeout=timeoutSecs)
def get_timeline(self):
    """Return this node's event timeline from the 3/Timeline.json endpoint."""
    timeline_endpoint = '3/Timeline.json'
    return self.do_json_request(timeline_endpoint)
# Shutdown url is like a reset button. Doesn't send a response before it kills stuff
# safer if random things are wedged, rather than requiring response
# so request library might retry and get exception. allow that.
def shutdown_all(self):
    """Ask this node to shut down the whole cloud (best effort).

    Shutdown acts like a reset button: h2o may kill itself before sending
    a response, so the request library can raise — that is expected and
    deliberately ignored. Always returns True.
    """
    try:
        self.do_json_request('3/Shutdown.json', cmd='post', noExtraErrorCheck=True)
    except:
        # Deliberately broad: the node may die mid-response.
        print "Got exception on Shutdown.json. Ignoring"
        pass
    # don't want delays between sending these to each node
    # if you care, wait after you send them to each node
    # Seems like it's not so good to just send to one node
    # time.sleep(1) # a little delay needed?
    return True
#*******************************************************************************
# examples from prithvi
# http://localhost:54321/Typeahead.json/files?src=?&limit=?
# http://localhost:54321/Typeahead.json/files?src=.%2Fsmalldata%2Fairlines%2F&limit=10
def typeahead(self, timeoutSecs=10, **kwargs):
    """Query the Typeahead endpoint for filesystem path completions.

    Accepted kwargs: src (path prefix) and limit (max results).
    """
    params_dict = {'src': None, 'limit': None}
    check_params_update_kwargs(params_dict, kwargs, 'typeahead', print_params=True)
    # odd ...needs /files
    result = self.do_json_request(
        '3/Typeahead.json/files', params=params_dict, timeout=timeoutSecs)
    verboseprint("\ntypeahead result:", dump_json(result))
    return result
#*******************************************************************************
def unlock(self, timeoutSecs=30, **kwargs):
    """Unlock all locked keys in the cloud via 3/UnlockKeys.json."""
    return self.do_json_request('3/UnlockKeys.json', params=None, timeout=timeoutSecs)
def remove_all_keys(self, timeoutSecs=120):
    """Delete every key in the cloud's DKV store."""
    response = self.do_json_request('3/DKV', cmd='delete', timeout=timeoutSecs)
    return response
# ignore errors on remove..key might already be gone due to h2o removing it now after parse
def remove_key(self, key, timeoutSecs=120):
    """Delete one key from DKV (H2O-side errors ignored), then unlock the rest."""
    response = self.do_json_request(
        '3/DKV.json',
        params={"key": key},
        ignoreH2oError=True,
        cmd='delete',
        timeout=timeoutSecs)
    self.unlock()
    return response
def jobs_admin(self, timeoutSecs=120, **kwargs):
    """List jobs via 3/Jobs.json; extra kwargs are passed straight through."""
    query = {}
    query.update(kwargs)
    verboseprint("\njobs_admin:", query)
    result = self.do_json_request('3/Jobs.json', timeout=timeoutSecs, params=query)
    verboseprint("\njobs_admin result:", dump_json(result))
    return result
#******************************************************************************************8
def put_file(self, f, key=None, timeoutSecs=60):
    """Upload local file f to the node via 3/PostFile.

    If key is None, the file's basename is used as the destination frame
    name. Returns the key (not the json response).
    """
    if key is None:
        key = os.path.basename(f)
        ### print "putfile specifying this key:", key
    # 'with' guarantees the handle is closed even if the request raises;
    # the original leaked the file object on any exception
    with open(f, 'rb') as fileObj:
        resp = self.do_json_request(
            # don't use .json suffix here...causes 404 (for now)
            '3/PostFile',
            cmd='post',
            timeout=timeoutSecs,
            params={"destination_frame": key},
            files={"file": fileObj},
            extraComment=str(f))
    verboseprint("\nput_file response: ", dump_json(resp))
    return key
def csv_download(self, key, csvPathname, timeoutSecs=60, **kwargs):
params = {'key': key}
paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
url = self.url('3/DownloadDataset.json')
log('Start ' + url + paramsStr, comment=csvPathname)
# do it (absorb in 1024 byte chunks)
r = requests.get(url, params=params, timeout=timeoutSecs)
print "csv_download r.headers:", r.headers
if r.status_code == 200:
f = open(csvPathname, 'wb')
for chunk in r.iter_content(1024):
f.write(chunk)
print csvPathname, "size:", h2o_util.file_size_formatted(csvPathname)
def log_view(self, timeoutSecs=10, **kwargs):
    """Fetch LogView.json from the node and return the parsed response."""
    result = self.do_json_request('LogView.json', timeout=timeoutSecs)
    verboseprint("\nlog_view result:", dump_json(result))
    return result
def log_download(self, logDir=None, timeoutSecs=30, **kwargs):
if logDir == None:
logDir = get_sandbox_name()
url = self.url('Logs/download')
log('Start ' + url);
print "\nDownloading h2o log(s) using:", url
r = requests.get(url, timeout=timeoutSecs, **kwargs)
if not r or not r.ok:
raise Exception("Maybe bad url? no r in log_download %s in %s:" % inspect.stack()[1][3])
z = zipfile.ZipFile(StringIO.StringIO(r.content))
print "z.namelist:", z.namelist()
print "z.printdir:", z.printdir()
nameList = z.namelist()
# the first is the h2ologs dir name.
h2oLogDir = logDir + "/" + nameList.pop(0)
print "h2oLogDir:", h2oLogDir
print "logDir:", logDir
# it's a zip of zipped files
# first unzip it
z = zipfile.ZipFile(StringIO.StringIO(r.content))
z.extractall(logDir)
# unzipped file should be in LOG_DIR now
# now unzip the files in that directory
for zname in nameList:
resultList = h2o_util.flat_unzip(logDir + "/" + zname, logDir)
print "\nlogDir:", logDir
for logfile in resultList:
numLines = sum(1 for line in open(logfile))
print logfile, "Lines:", numLines
print
return resultList
#******************************************************************************************8
def inspect(self, key, offset=None, view=None, max_column_display=1000, ignoreH2oError=False,
        timeoutSecs=30):
    """Inspect a key via 3/Inspect.json.

    NOTE: once attached, this module-level name shadows the stdlib 'inspect'
    module. 'view' and 'max_column_display' are accepted for old-test
    compatibility but are not sent to H2O.
    """
    query = {
        # "src_key": key,
        "key": key,
        "offset": offset,
        # view doesn't exist for 2. let it be passed here from old tests but not used
    }
    return self.do_json_request(
        '3/Inspect.json',
        params=query,
        ignoreH2oError=ignoreH2oError,
        timeout=timeoutSecs)
#******************************************************************************************8
def split_frame(self, timeoutSecs=120, noPoll=False, **kwargs):
params_dict = {
'dataset': None,
'ratios': None,
'destKeys': None, # ['bigger', 'smaller']
}
check_params_update_kwargs(params_dict, kwargs, 'split_frame', print_params=True)
firstResult = self.do_json_request('3/SplitFrame.json', cmd='post', timeout=timeoutSecs, params=params_dict)
print "firstResult:", dump_json(firstResult)
# FIX! what is ['dest']['name'] ..It's not there at the beginning?
job_key = firstResult['key']['name']
if noPoll:
h2o_sandbox.check_sandbox_for_errors()
return firstResult
# is it polllable while it's in the CREATED state? msec looks wrong. start_time is 0
time.sleep(2)
result = self.poll_job(job_key)
verboseprint("split_frame result:", dump_json(result))
for d in result["jobs"][0]["destination_frames"]:
print d["name"]
return result
#******************************************************************************************8
def create_frame(self, timeoutSecs=120, noPoll=False, **kwargs):
    """POST 3/CreateFrame.json, then poll the created job unless noPoll."""
    # FIX! have to add legal params
    allowed = {
    }
    check_params_update_kwargs(allowed, kwargs, 'create_frame', print_params=True)
    firstResult = self.do_json_request('3/CreateFrame.json', cmd='post', timeout=timeoutSecs, params=allowed)
    job_key = firstResult['dest']['name']
    if noPoll:
        h2o_sandbox.check_sandbox_for_errors()
        return firstResult
    polled = self.poll_job(job_key)
    verboseprint("create_frame result:", dump_json(polled))
    return polled
#******************************************************************************************8
def interaction(self, timeoutSecs=120, noPoll=False, **kwargs):
    """POST 3/Interaction.json, then poll the created job unless noPoll."""
    # FIX! have to add legal params
    allowed = {
    }
    check_params_update_kwargs(allowed, kwargs, 'interaction', print_params=True)
    firstResult = self.do_json_request('3/Interaction.json', cmd='post', timeout=timeoutSecs, params=allowed)
    job_key = firstResult['dest']['name']
    if noPoll:
        h2o_sandbox.check_sandbox_for_errors()
        return firstResult
    polled = self.poll_job(job_key)
    verboseprint("interaction result:", dump_json(polled))
    return polled
#******************************************************************************************8
def rapids(self, timeoutSecs=120, ignoreH2oError=False, **kwargs):
    """POST an ast/funs Rapids program to 99/Rapids.json and return the result."""
    # FIX! assume both of these are strings for now, not lists
    for field in ('ast', 'funs'):
        if field in kwargs and kwargs[field] is not None:
            assert isinstance(kwargs[field], basestring), "only string assumed? %s" % kwargs[field]
    # currently runExec only does one or the other
    allowed = {
        'ast': None,
        'funs': None,
    }
    check_params_update_kwargs(allowed, kwargs, 'rapids', True)
    result = self.do_json_request('99/Rapids.json', cmd='post', timeout=timeoutSecs, postData=allowed)
    verboseprint("rapids result:", dump_json(result))
    # FIX! maybe add something for ignoring conditionally?
    if not ignoreH2oError and 'exception' in result and result['exception']:
        exception = result['exception']
        raise Exception('rapids with kwargs:\n%s\ngot exception:\n"%s"\n' % (dump_json(kwargs), exception))
    h2o_sandbox.check_sandbox_for_errors()
    return result
#******************************************************************************************8
def rapids_iseval(self, timeoutSecs=120, ignoreH2oError=False, **kwargs):
    """GET 3/Rapids.json/isEval for an ast_key and return the result.

    Raises on an H2O-reported exception unless ignoreH2oError is set.
    """
    # FIX! assume this is a string for now, not a list
    if 'ast_key' in kwargs and kwargs['ast_key'] is not None:
        assert isinstance(kwargs['ast_key'], basestring), "only string assumed? %s" % kwargs['ast_key']
    params_dict = {
        'ast_key': None,
    }
    check_params_update_kwargs(params_dict, kwargs, 'rapids_iseval', True)
    # doesn't like 'put' here?
    # doesn't like empty key
    result = self.do_json_request('3/Rapids.json/isEval', cmd='get', timeout=timeoutSecs, params=params_dict)
    verboseprint("rapids_iseval result:", dump_json(result))
    # FIX! maybe add something for ignoring conditionally?
    if 'exception' in result and result['exception'] and not ignoreH2oError:
        exception = result['exception']
        # was "rapids with kwargs" - misleading when raised from rapids_iseval
        raise Exception('rapids_iseval with kwargs:\n%s\ngot exception:\n"%s"\n' % (dump_json(kwargs), exception))
    h2o_sandbox.check_sandbox_for_errors()
    return result
#******************************************************************************************8
def quantiles(self, timeoutSecs=300, print_params=True, **kwargs):
    """GET 3/Quantiles.json with the validated parameter set."""
    allowed = {
        'destination_key': None,
        'training_frame': None,
        'validation_frame': None,
        'ignored_columns': None,
        'score_each_iteration': None,
        'probs': None,
    }
    check_params_update_kwargs(allowed, kwargs, 'quantiles', print_params)
    result = self.do_json_request('3/Quantiles.json', timeout=timeoutSecs, params=allowed)
    verboseprint("\nquantiles result:", dump_json(result))
    h2o_sandbox.check_sandbox_for_errors()
    return result
#******************************************************************************************8
# attach methods to H2O object
# this happens before any H2O instances are created
# this file is imported into h2o
# ray has jobs below..is this old?
# Monkey-patch the module-level helpers defined above onto the H2O class so
# tests can call them as instance methods (e.g. node.inspect(...), node.rapids(...)).
H2O.jobs_admin = jobs_admin
H2O.get_cloud = get_cloud
H2O.shutdown_all = shutdown_all
H2O.h2o_log_msg = h2o_log_msg
H2O.inspect = inspect
H2O.quantiles = quantiles
H2O.rapids = rapids
H2O.rapids_iseval = rapids_iseval
H2O.unlock = unlock
H2O.typeahead = typeahead
H2O.get_timeline = get_timeline
H2O.split_frame = split_frame
H2O.create_frame = create_frame
H2O.interaction = interaction
H2O.log_view = log_view
H2O.log_download = log_download
H2O.csv_download = csv_download
H2O.put_file = put_file
H2O.remove_all_keys = remove_all_keys
H2O.remove_key = remove_key
# attach some methods from ray
# These come from the sibling h2o_ray module; the import is deferred to here
# deliberately (this file is itself imported by h2o).
import h2o_ray
H2O.jobs = h2o_ray.jobs
H2O.poll_job = h2o_ray.poll_job
H2O.import_files = h2o_ray.import_files
H2O.parse = h2o_ray.parse
H2O.frames = h2o_ray.frames
H2O.columns = h2o_ray.columns
H2O.column = h2o_ray.column
H2O.summary = h2o_ray.summary
H2O.delete_frame = h2o_ray.delete_frame
H2O.delete_frames = h2o_ray.delete_frames
H2O.model_builders = h2o_ray.model_builders
H2O.validate_model_parameters = h2o_ray.validate_model_parameters
H2O.build_model = h2o_ray.build_model
H2O.compute_model_metrics = h2o_ray.compute_model_metrics
H2O.predict = h2o_ray.predict
H2O.model_metrics = h2o_ray.model_metrics
H2O.models = h2o_ray.models
H2O.delete_model = h2o_ray.delete_model
H2O.delete_models = h2o_ray.delete_models
H2O.endpoints = h2o_ray.endpoints
H2O.endpoint_by_number = h2o_ray.endpoint_by_number
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
import argparse
import logging
import yaml
# Package version as a tuple, plus the dotted string derived from it.
__version__ = (0, 3, 1)
__version_string__ = ".".join(map(str, __version__))
from vdisk.actions.install import action as action_install
from vdisk.actions.create import action as action_create
from vdisk.actions.bootstrap import action as action_bootstrap
from vdisk.actions.enter import action as action_enter
from vdisk.actions.puppet import action as action_puppet
# Module-level logger for this command-line front end.
log = logging.getLogger(__name__)
class sizeunit(object):
    """A byte size parsed from a string like '7G', '500m' or '1024'.

    Lowercase suffixes are decimal (SI) multipliers, uppercase suffixes are
    binary (IEC) multipliers; a bare number is taken as bytes.
    """

    units = {
        't': 10 ** 12,  # was 10 ** 13 - a terabyte is 10^12 bytes
        'g': 10 ** 9,
        'm': 10 ** 6,
        'k': 10 ** 3,
        'T': 2 ** 40,
        'G': 2 ** 30,
        'M': 2 ** 20,
        'K': 2 ** 10,
        'b': 1,
    }

    # unit reported when the string carries no suffix
    DEFAULT_UNIT = 'B'

    def __init__(self, string):
        # size is the value in bytes; unit is the suffix character (or 'B')
        self.size, self.unit = self._parse_size(string)
        self.original = string

    @property
    def formatted(self):
        """Return the original string the size was parsed from."""
        return self.original

    def _parse_size(self, string):
        """Return (bytes, unit) for a size string, e.g. '7G' -> (7*2**30, 'G')."""
        unit = string[-1]
        conversion = self.units.get(unit)
        if conversion is not None:
            return int(string[:-1]) * conversion, unit
        # no recognized suffix: the whole string is a plain byte count
        return int(string), self.DEFAULT_UNIT
def read_config(path):
    """Load and return the yaml configuration at path.

    Raises Exception if the file does not exist.
    """
    if not os.path.isfile(path):
        raise Exception("Missing configuration: {0}".format(path))
    with open(path) as f:
        # safe_load: plain yaml.load with no Loader can execute arbitrary
        # python tags from the config file (and is deprecated in PyYAML 5+)
        return yaml.safe_load(f)
def setup_argument_parser():
    """Build and return the vdisk argument parser.

    Global options come first, then one subparser per action (create,
    bootstrap, install, enter, puppet); each subparser binds its action
    callable via set_defaults(action=...).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("-v", "--version", action="version",
                        version="vdisk " + __version_string__)

    parser.add_argument("--root",
                        metavar="<dir>",
                        # was "default: {default}" - the placeholder was never
                        # formatted, so users saw the literal "{default}"
                        help="Root directory of project, default: current working directory",
                        default=os.getcwd())

    parser.add_argument("--log-level", default=logging.INFO,
                        metavar="<level>",
                        help=("Set log level, valid values are: "
                              "DEBUG, INFO, ERROR. Default: INFO"),
                        type=lambda l: getattr(logging, l.upper(),
                                               logging.INFO))

    parser.add_argument("--ec2", action="store_true",
                        help=("Create an ec2-compatible image for pv-grub/hd00 AKI"),
                        default=False)

    parser.add_argument("-V", "--volume-group",
                        metavar="<name>",
                        help="Name of volume group, default: VolGroup00",
                        default="VolGroup00")

    parser.add_argument("--root-size",
                        metavar="<gb>",
                        help=("Size of root partition, must be smaller than "
                              "'--size'"),
                        default=sizeunit('7G'),
                        type=sizeunit)

    parser.add_argument("-m", "--mountpoint",
                        metavar="<dir>",
                        help="Mount point for disk images, default: tmp/mount",
                        default="tmp/mount")

    parser.add_argument("-S", "--shell",
                        metavar="<bin>",
                        help="Shell to use in chroot, default: /bin/sh",
                        default="/bin/sh")

    parser.add_argument("-A", "--apt-get", dest="apt_get",
                        metavar="<bin>",
                        help="Apt get to use in chroot, default: apt-get",
                        default="apt-get")

    parser.add_argument("-D", "--dpkg", dest="dpkg",
                        metavar="<bin>",
                        # was "default: apt-get" - copy/paste error; the
                        # actual default is dpkg
                        help="Dpkg to use in chroot, default: dpkg",
                        default="dpkg")

    parser.add_argument("-G", "--gem",
                        metavar="<bin>",
                        help="Gem to use in chroot, default: gem",
                        default="gem")

    parser.add_argument("-M", "--mirror",
                        metavar="<url>",
                        default="http://ftp.se.debian.org/debian",
                        help=("Installation mirror, default: "
                              "http://ftp.se.debian.org/debian"))

    parser.add_argument("-c", "--config",
                        metavar="<config>",
                        help="vdisk configuration",
                        default=None)

    parser.add_argument("image_path",
                        metavar="<image>",
                        help="Path to image")

    actions = parser.add_subparsers()

    create = actions.add_parser("create",
                                help="Create a new disk image")
    create.add_argument("-s", "--size",
                        help="Size of image, default: 8G",
                        metavar="<size>",
                        default=sizeunit("8G"),
                        type=sizeunit)
    create.add_argument("-f", "--force",
                        help="Force creation, even if file exists",
                        default=False,
                        action="store_true")
    create.set_defaults(action=action_create)

    bootstrap = actions.add_parser("bootstrap",
                                   help=("bootstrap a new disk image w/ "
                                         "debootstrap"))
    bootstrap.add_argument("-S", "--suite", default="squeeze",
                           help="Installation suite, default: squeeze")
    bootstrap.add_argument("-A", "--arch", default="amd64",
                           help="Installation architecture, default: amd64")
    bootstrap.set_defaults(action=action_bootstrap)

    install = actions.add_parser("install",
                                 help=("Install packages and selections into "
                                       "a disk image"))
    install.add_argument("selections",
                         metavar="<file>",
                         nargs='?',
                         help="List of selections",
                         default=None)
    install.add_argument("-d", "--download",
                         help="Only download the selections, don't install.",
                         default=False,
                         action="store_true")
    install.set_defaults(action=action_install)

    enter = actions.add_parser("enter",
                               help="Open a shell into a disk image")
    enter.set_defaults(action=action_enter)

    puppet = actions.add_parser("puppet",
                                help="Run puppet inside a disk image")
    puppet.add_argument("puppetpath",
                        metavar="<dir>",
                        help="Path to puppet modules")
    puppet.add_argument("-F", "--fact", dest="facts", action='append',
                        metavar="<name>=<value>",
                        help="Override puppet facts")
    puppet.add_argument("puppetargs",
                        metavar="<puppet-args...>",
                        help="Arguments passed into puppet",
                        nargs=argparse.REMAINDER)
    puppet.set_defaults(action=action_puppet)

    return parser
def main(args):
    """Parse args, load config, pick a preset and dispatch the chosen action.

    Returns the action's exit code, or -1 when not running as root.
    """
    from vdisk.preset.ec2_preset import EC2Preset
    from vdisk.preset.generic_preset import GenericPreset

    logging.basicConfig(level=logging.INFO)

    ns = setup_argument_parser().parse_args(args)
    logging.getLogger().setLevel(ns.log_level)

    # loopback mounting requires root privileges
    if os.getuid() != 0:
        log.error("vdisk uses loopback mounting, and needs to be run as root")
        return -1

    config_path = ns.config if ns.config is not None else os.path.join(ns.root, "vdisk.yaml")
    ns.config = read_config(config_path)

    ns.preset = EC2Preset(ns) if ns.ec2 else GenericPreset(ns)

    return ns.action(ns)
def entry():
    """Console-script entry point: run main() and exit with its return code."""
    raise SystemExit(main(sys.argv[1:]))
|
|
"""Support for the Unitymedia Horizon HD Recorder."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Defaults used when the yaml config omits name/port.
DEFAULT_NAME = "Horizon"
DEFAULT_PORT = 5900

# Throttle intervals for update(): at most one forced poll per second,
# one regular poll per 10 seconds.
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)

# Feature bitmask advertised via HorizonDevice.supported_features.
SUPPORT_HORIZON = (
    SUPPORT_NEXT_TRACK
    | SUPPORT_PAUSE
    | SUPPORT_PLAY
    | SUPPORT_PLAY_MEDIA
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
)

# Platform schema: host is required; name and port fall back to the defaults.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Horizon platform."""
    from horimote import Client, keys
    from horimote.exceptions import AuthenticationError

    device_name = config[CONF_NAME]
    address = config[CONF_HOST]
    tcp_port = config[CONF_PORT]

    try:
        remote = Client(address, port=tcp_port)
    except AuthenticationError as err:
        _LOGGER.error("Authentication to %s at %s failed: %s", device_name, address, err)
        return
    except OSError as err:
        # occurs if horizon box is offline
        _LOGGER.error("Connection to %s at %s failed: %s", device_name, address, err)
        raise PlatformNotReady

    _LOGGER.info("Connection to %s at %s established", device_name, address)

    add_entities([HorizonDevice(remote, device_name, keys)], True)
class HorizonDevice(MediaPlayerDevice):
    """Representation of a Horizon HD Recorder."""

    def __init__(self, client, name, keys):
        """Initialize the remote."""
        self._client = client  # horimote.Client connection to the box
        self._name = name
        self._state = None  # STATE_OFF / STATE_PLAYING / STATE_PAUSED, None until first update
        self._keys = keys  # horimote key-code table

    @property
    def name(self):
        """Return the name of the remote."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_HORIZON

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update(self):
        """Update State using the media server running on the Horizon."""
        try:
            if self._client.is_powered_on():
                self._state = STATE_PLAYING
            else:
                self._state = STATE_OFF
        except OSError:
            # box unreachable -> report it as off
            self._state = STATE_OFF

    def turn_on(self):
        """Turn the device on."""
        # ==/!= instead of is/is not: equality is the correct comparison for
        # the state string constants (identity happened to work, but is fragile)
        if self._state == STATE_OFF:
            self._send_key(self._keys.POWER)

    def turn_off(self):
        """Turn the device off."""
        if self._state != STATE_OFF:
            self._send_key(self._keys.POWER)

    def media_previous_track(self):
        """Channel down."""
        self._send_key(self._keys.CHAN_DOWN)
        self._state = STATE_PLAYING

    def media_next_track(self):
        """Channel up."""
        self._send_key(self._keys.CHAN_UP)
        self._state = STATE_PLAYING

    def media_play(self):
        """Send play command."""
        # the box has a single play/pause toggle key
        self._send_key(self._keys.PAUSE)
        self._state = STATE_PLAYING

    def media_pause(self):
        """Send pause command."""
        self._send_key(self._keys.PAUSE)
        self._state = STATE_PAUSED

    def media_play_pause(self):
        """Send play/pause command."""
        self._send_key(self._keys.PAUSE)
        if self._state == STATE_PAUSED:
            self._state = STATE_PLAYING
        else:
            self._state = STATE_PAUSED

    def play_media(self, media_type, media_id, **kwargs):
        """Play media / switch to channel."""
        if MEDIA_TYPE_CHANNEL == media_type:
            try:
                self._select_channel(int(media_id))
                self._state = STATE_PLAYING
            except ValueError:
                _LOGGER.error("Invalid channel: %s", media_id)
        else:
            _LOGGER.error(
                "Invalid media type %s. Supported type: %s",
                media_type,
                MEDIA_TYPE_CHANNEL,
            )

    def _select_channel(self, channel):
        """Select a channel (taken from einder library, thx)."""
        self._send(channel=channel)

    def _send_key(self, key):
        """Send a key to the Horizon device."""
        self._send(key=key)

    def _send(self, key=None, channel=None):
        """Send a key or channel change to the Horizon device.

        On a connection error, disconnect, reconnect/re-authorize and retry
        the same send once; give up with a logged error otherwise.
        """
        from horimote.exceptions import AuthenticationError

        try:
            if key:
                self._client.send_key(key)
            elif channel:
                self._client.select_channel(channel)
        except OSError as msg:
            _LOGGER.error(
                "%s disconnected: %s. Trying to reconnect...", self._name, msg
            )

            # for reconnect, first gracefully disconnect
            self._client.disconnect()

            try:
                self._client.connect()
                self._client.authorize()
            except AuthenticationError as msg:
                _LOGGER.error("Authentication to %s failed: %s", self._name, msg)
                return
            except OSError as msg:
                # occurs if horizon box is offline
                _LOGGER.error("Reconnect to %s failed: %s", self._name, msg)
                return

            self._send(key=key, channel=channel)
|
|
"""
pygments.lexers.igor
~~~~~~~~~~~~~~~~~~~~
Lexers for Igor Pro.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Keyword, Name, String
__all__ = ['IgorLexer']
class IgorLexer(RegexLexer):
    """
    Pygments Lexer for Igor Pro procedure files (.ipf).
    See http://www.wavemetrics.com/ and http://www.igorexchange.com/.
    .. versionadded:: 2.0
    """
    # Standard Pygments lexer metadata.
    name = 'Igor'
    aliases = ['igor', 'igorpro']
    filenames = ['*.ipf']
    mimetypes = ['text/ipf']
    # Igor Pro is case-insensitive; MULTILINE lets '$' anchor at line ends.
    flags = re.IGNORECASE | re.MULTILINE
    # Flow-control statements (highlighted as Keyword).
    flowControl = (
        'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch',
        'case', 'default', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry',
        'break', 'continue', 'return', 'AbortOnRTE', 'AbortOnValue'
    )
    # Built-in data types (highlighted as Keyword.Type).
    types = (
        'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE',
        'STRUCT', 'dfref', 'funcref', 'char', 'uchar', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'float', 'double'
    )
    # Declaration and structure keywords (highlighted as Keyword.Reserved).
    keywords = (
        'override', 'ThreadSafe', 'MultiThread', 'static', 'Proc',
        'Picture', 'Prompt', 'DoPrompt', 'macro', 'window', 'function', 'end',
        'Structure', 'EndStructure', 'EndMacro', 'Menu', 'SubMenu'
    )
    # Built-in command-style operations (highlighted as Name.Class).
    operations = (
        'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', 'AddMovieFrame',
        'AddWavesToBoxPlot', 'AddWavesToViolinPlot', 'AdoptFiles', 'APMath', 'Append',
        'AppendBoxPlot', 'AppendImage', 'AppendLayoutObject', 'AppendMatrixContour',
        'AppendText', 'AppendToGizmo', 'AppendToGraph', 'AppendToLayout', 'AppendToTable',
        'AppendViolinPlot', 'AppendXYZContour', 'AutoPositionWindow',
        'AxonTelegraphFindServers', 'BackgroundInfo', 'Beep', 'BoundingBall', 'BoxSmooth',
        'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', 'CheckDisplayed',
        'ChooseColor', 'Close', 'CloseHelp', 'CloseMovie', 'CloseProc', 'ColorScale',
        'ColorTab2Wave', 'Concatenate', 'ControlBar', 'ControlInfo', 'ControlUpdate',
        'ConvertGlobalStringTextEncoding', 'ConvexHull', 'Convolve', 'CopyDimLabels',
        'CopyFile', 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut',
        'CreateBrowser', 'Cross', 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground',
        'Cursor', 'CurveFit', 'CustomControl', 'CWT', 'DAQmx_AI_SetupReader',
        'DAQmx_AO_SetOutputs', 'DAQmx_CTR_CountEdges', 'DAQmx_CTR_OutputPulse',
        'DAQmx_CTR_Period', 'DAQmx_CTR_PulseWidth', 'DAQmx_DIO_Config',
        'DAQmx_DIO_WriteNewData', 'DAQmx_Scan', 'DAQmx_WaveformGen', 'Debugger',
        'DebuggerOptions', 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont',
        'DefaultTextEncoding', 'DefineGuide', 'DelayUpdate', 'DeleteAnnotations',
        'DeleteFile', 'DeleteFolder', 'DeletePoints', 'Differentiate', 'dir', 'Display',
        'DisplayHelpTopic', 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate',
        'DoWindow', 'DoXOPIdle', 'DPSS', 'DrawAction', 'DrawArc', 'DrawBezier',
        'DrawLine', 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect',
        'DrawText', 'DrawUserShape', 'DSPDetrend', 'DSPPeriodogram', 'Duplicate',
        'DuplicateDataFolder', 'DWT', 'EdgeStats', 'Edit', 'ErrorBars',
        'EstimatePeakSizes', 'Execute', 'ExecuteScriptText', 'ExperimentInfo',
        'ExperimentModified', 'ExportGizmo', 'Extract', 'FastGaussTransform', 'FastOp',
        'FBinRead', 'FBinWrite', 'FFT', 'FGetPos', 'FIFOStatus', 'FIFO2Wave', 'FilterFIR',
        'FilterIIR', 'FindAPeak', 'FindContour', 'FindDuplicates', 'FindLevel',
        'FindLevels', 'FindPeak', 'FindPointsInPoly', 'FindRoots', 'FindSequence',
        'FindValue', 'FMaxFlat', 'FPClustering', 'fprintf', 'FReadLine', 'FSetPos',
        'FStatus', 'FTPCreateDirectory', 'FTPDelete', 'FTPDownload', 'FTPUpload',
        'FuncFit', 'FuncFitMD', 'GBLoadWave', 'GetAxis', 'GetCamera', 'GetFileFolderInfo',
        'GetGizmo', 'GetLastUserMenuInfo', 'GetMarquee', 'GetMouse', 'GetSelection',
        'GetWindow', 'GISCreateVectorLayer', 'GISGetRasterInfo',
        'GISGetRegisteredFileInfo', 'GISGetVectorLayerInfo', 'GISLoadRasterData',
        'GISLoadVectorData', 'GISRasterizeVectorData', 'GISRegisterFile',
        'GISTransformCoords', 'GISUnRegisterFile', 'GISWriteFieldData',
        'GISWriteGeometryData', 'GISWriteRaster', 'GPIBReadBinaryWave2',
        'GPIBReadBinary2', 'GPIBReadWave2', 'GPIBRead2', 'GPIBWriteBinaryWave2',
        'GPIBWriteBinary2', 'GPIBWriteWave2', 'GPIBWrite2', 'GPIB2', 'GraphNormal',
        'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', 'Hanning', 'HDFInfo',
        'HDFReadImage', 'HDFReadSDS', 'HDFReadVset', 'HDF5CloseFile', 'HDF5CloseGroup',
        'HDF5ConvertColors', 'HDF5CreateFile', 'HDF5CreateGroup', 'HDF5CreateLink',
        'HDF5Dump', 'HDF5DumpErrors', 'HDF5DumpState', 'HDF5FlushFile',
        'HDF5ListAttributes', 'HDF5ListGroup', 'HDF5LoadData', 'HDF5LoadGroup',
        'HDF5LoadImage', 'HDF5OpenFile', 'HDF5OpenGroup', 'HDF5SaveData', 'HDF5SaveGroup',
        'HDF5SaveImage', 'HDF5TestOperation', 'HDF5UnlinkObject', 'HideIgorMenus',
        'HideInfo', 'HideProcedures', 'HideTools', 'HilbertTransform', 'Histogram', 'ICA',
        'IFFT', 'ImageAnalyzeParticles', 'ImageBlend', 'ImageBoundaryToMask',
        'ImageComposite', 'ImageEdgeDetection', 'ImageFileInfo', 'ImageFilter',
        'ImageFocus', 'ImageFromXYZ', 'ImageGenerateROIMask', 'ImageGLCM',
        'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', 'ImageLineProfile',
        'ImageLoad', 'ImageMorphology', 'ImageRegistration', 'ImageRemoveBackground',
        'ImageRestore', 'ImageRotate', 'ImageSave', 'ImageSeedFill', 'ImageSkeleton3d',
        'ImageSnake', 'ImageStats', 'ImageThreshold', 'ImageTransform',
        'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', 'InsertPoints', 'Integrate',
        'IntegrateODE', 'Integrate2D', 'Interpolate2', 'Interpolate3D', 'Interp3DPath',
        'ITCCloseAll2', 'ITCCloseDevice2', 'ITCConfigAllChannels2',
        'ITCConfigChannelReset2', 'ITCConfigChannelUpload2', 'ITCConfigChannel2',
        'ITCFIFOAvailableAll2', 'ITCFIFOAvailable2', 'ITCGetAllChannelsConfig2',
        'ITCGetChannelConfig2', 'ITCGetCurrentDevice2', 'ITCGetDeviceInfo2',
        'ITCGetDevices2', 'ITCGetErrorString2', 'ITCGetSerialNumber2', 'ITCGetState2',
        'ITCGetVersions2', 'ITCInitialize2', 'ITCOpenDevice2', 'ITCReadADC2',
        'ITCReadDigital2', 'ITCReadTimer2', 'ITCSelectDevice2', 'ITCSetDAC2',
        'ITCSetGlobals2', 'ITCSetModes2', 'ITCSetState2', 'ITCStartAcq2', 'ITCStopAcq2',
        'ITCUpdateFIFOPositionAll2', 'ITCUpdateFIFOPosition2', 'ITCWriteDigital2',
        'JCAMPLoadWave', 'JointHistogram', 'KillBackground', 'KillControl',
        'KillDataFolder', 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs',
        'KillStrings', 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label',
        'Layout', 'LayoutPageAction', 'LayoutSlideShow', 'Legend',
        'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', 'LoadPackagePreferences',
        'LoadPICT', 'LoadWave', 'Loess', 'LombPeriodogram', 'Make', 'MakeIndex',
        'MarkPerfTestTime', 'MatrixConvolve', 'MatrixCorr', 'MatrixEigenV',
        'MatrixFilter', 'MatrixGaussJ', 'MatrixGLM', 'MatrixInverse', 'MatrixLinearSolve',
        'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', 'MatrixLUDTD',
        'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', 'MatrixSVBkSub',
        'MatrixSVD', 'MatrixTranspose', 'MCC_FindServers', 'MeasureStyledText',
        'MFR_CheckForNewBricklets',
        'MFR_CloseResultFile', 'MFR_CreateOverviewTable', 'MFR_GetBrickletCount',
        'MFR_GetBrickletData', 'MFR_GetBrickletDeployData', 'MFR_GetBrickletMetaData',
        'MFR_GetBrickletRawData', 'MFR_GetReportTemplate', 'MFR_GetResultFileMetaData',
        'MFR_GetResultFileName', 'MFR_GetVernissageVersion', 'MFR_GetVersion',
        'MFR_GetXOPErrorMessage', 'MFR_OpenResultFile',
        'MLLoadWave', 'Modify', 'ModifyBoxPlot', 'ModifyBrowser', 'ModifyCamera',
        'ModifyContour', 'ModifyControl', 'ModifyControlList', 'ModifyFreeAxis',
        'ModifyGizmo', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', 'ModifyPanel',
        'ModifyTable', 'ModifyViolinPlot', 'ModifyWaterfall', 'MoveDataFolder',
        'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable',
        'MoveWave', 'MoveWindow', 'MultiTaperPSD', 'MultiThreadingControl',
        'NC_CloseFile', 'NC_DumpErrors', 'NC_Inquire', 'NC_ListAttributes',
        'NC_ListObjects', 'NC_LoadData', 'NC_OpenFile', 'NeuralNetworkRun',
        'NeuralNetworkTrain', 'NewCamera', 'NewDataFolder', 'NewFIFO', 'NewFIFOChan',
        'NewFreeAxis', 'NewGizmo', 'NewImage', 'NewLayout', 'NewMovie', 'NewNotebook',
        'NewPanel', 'NewPath', 'NewWaterfall', 'NILoadWave', 'NI4882', 'Note', 'Notebook',
        'NotebookAction', 'Open', 'OpenHelp', 'OpenNotebook', 'Optimize',
        'ParseOperationTemplate', 'PathInfo', 'PauseForUser', 'PauseUpdate', 'PCA',
        'PlayMovie', 'PlayMovieAction', 'PlaySound', 'PopupContextualMenu', 'PopupMenu',
        'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', 'PrintLayout',
        'PrintNotebook', 'PrintSettings', 'PrintTable', 'Project', 'PulseStats',
        'PutScrapText', 'pwd', 'Quit', 'RatioFromNumber', 'Redimension', 'Remez',
        'Remove', 'RemoveContour', 'RemoveFromGizmo', 'RemoveFromGraph',
        'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', 'RemoveLayoutObjects',
        'RemovePath', 'Rename', 'RenameDataFolder', 'RenamePath', 'RenamePICT',
        'RenameWindow', 'ReorderImages', 'ReorderTraces', 'ReplaceText', 'ReplaceWave',
        'Resample', 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData',
        'SaveExperiment', 'SaveGizmoCopy', 'SaveGraphCopy', 'SaveNotebook',
        'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', 'SetActiveSubwindow',
        'SetAxis', 'SetBackground', 'SetDashPattern', 'SetDataFolder', 'SetDimLabel',
        'SetDrawEnv', 'SetDrawLayer', 'SetFileFolderInfo', 'SetFormula', 'SetIdlePeriod',
        'SetIgorHook', 'SetIgorMenuMode', 'SetIgorOption', 'SetMarquee',
        'SetProcessSleep', 'SetRandomSeed', 'SetScale', 'SetVariable', 'SetWaveLock',
        'SetWaveTextEncoding', 'SetWindow', 'ShowIgorMenus', 'ShowInfo', 'ShowTools',
        'Silent', 'Sleep', 'Slider', 'Smooth', 'SmoothCustom', 'Sort', 'SortColumns',
        'SoundInRecord', 'SoundInSet', 'SoundInStartChart', 'SoundInStatus',
        'SoundInStopChart', 'SoundLoadWave', 'SoundSaveWave', 'SphericalInterpolate',
        'SphericalTriangulate', 'SplitString', 'SplitWave', 'sprintf', 'SQLHighLevelOp',
        'sscanf', 'Stack', 'StackWindows', 'StatsAngularDistanceTest', 'StatsANOVA1Test',
        'StatsANOVA2NRTest', 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest',
        'StatsCircularCorrelationTest', 'StatsCircularMeans', 'StatsCircularMoments',
        'StatsCircularTwoSampleTest', 'StatsCochranTest', 'StatsContingencyTable',
        'StatsDIPTest', 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest',
        'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKDE', 'StatsKendallTauTest',
        'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest',
        'StatsLinearRegression', 'StatsMultiCorrelationTest', 'StatsNPMCTest',
        'StatsNPNominalSRTest', 'StatsQuantiles', 'StatsRankCorrelationTest',
        'StatsResample', 'StatsSample', 'StatsScheffeTest', 'StatsShapiroWilkTest',
        'StatsSignTest', 'StatsSRTest', 'StatsTTest', 'StatsTukeyTest',
        'StatsVariancesTest', 'StatsWatsonUSquaredTest', 'StatsWatsonWilliamsTest',
        'StatsWheelerWatsonTest', 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest',
        'STFT', 'String', 'StructFill', 'StructGet', 'StructPut', 'SumDimension',
        'SumSeries', 'TabControl', 'Tag', 'TDMLoadData', 'TDMSaveData', 'TextBox',
        'ThreadGroupPutDF', 'ThreadStart', 'TickWavesFromAxis', 'Tile', 'TileWindows',
        'TitleBox', 'ToCommandLine', 'ToolsGrid', 'Triangulate3d', 'Unwrap', 'URLRequest',
        'ValDisplay', 'Variable', 'VDTClosePort2', 'VDTGetPortList2', 'VDTGetStatus2',
        'VDTOpenPort2', 'VDTOperationsPort2', 'VDTReadBinaryWave2', 'VDTReadBinary2',
        'VDTReadHexWave2', 'VDTReadHex2', 'VDTReadWave2', 'VDTRead2', 'VDTTerminalPort2',
        'VDTWriteBinaryWave2', 'VDTWriteBinary2', 'VDTWriteHexWave2', 'VDTWriteHex2',
        'VDTWriteWave2', 'VDTWrite2', 'VDT2', 'VISAControl', 'VISARead', 'VISAReadBinary',
        'VISAReadBinaryWave', 'VISAReadWave', 'VISAWrite', 'VISAWriteBinary',
        'VISAWriteBinaryWave', 'VISAWriteWave', 'WaveMeanStdv', 'WaveStats',
        'WaveTransform', 'wfprintf', 'WignerTransform', 'WindowFunction', 'XLLoadWave'
    )
    # Built-in functions (highlighted as Name.Function).
    functions = (
        'abs', 'acos', 'acosh', 'AddListItem', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD',
        'alog', 'AnnotationInfo', 'AnnotationList', 'area', 'areaXY', 'asin', 'asinh',
        'atan', 'atanh', 'atan2', 'AxisInfo', 'AxisList', 'AxisValFromPixel',
        'AxonTelegraphAGetDataNum', 'AxonTelegraphAGetDataString',
        'AxonTelegraphAGetDataStruct', 'AxonTelegraphGetDataNum',
        'AxonTelegraphGetDataString', 'AxonTelegraphGetDataStruct',
        'AxonTelegraphGetTimeoutMs', 'AxonTelegraphSetTimeoutMs', 'Base64Decode',
        'Base64Encode', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'beta', 'betai',
        'BinarySearch', 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise',
        'cabs', 'CaptureHistory', 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num',
        'chebyshev', 'chebyshevU', 'CheckName', 'ChildWindowList', 'CleanupName', 'cmplx',
        'cmpstr', 'conj', 'ContourInfo', 'ContourNameList', 'ContourNameToWaveRef',
        'ContourZ', 'ControlNameList', 'ConvertTextEncoding', 'cos', 'cosh',
        'cosIntegral', 'cot', 'coth', 'CountObjects', 'CountObjectsDFR', 'cpowi',
        'CreationDate', 'csc', 'csch', 'CsrInfo', 'CsrWave', 'CsrWaveRef', 'CsrXWave',
        'CsrXWaveRef', 'CTabList', 'DataFolderDir', 'DataFolderExists',
        'DataFolderRefsEqual', 'DataFolderRefStatus', 'date', 'datetime', 'DateToJulian',
        'date2secs', 'Dawson', 'defined', 'deltax', 'digamma', 'dilogarithm', 'DimDelta',
        'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', 'erfc', 'erfcw',
        'exists', 'exp', 'expInt', 'expIntegralE1', 'expNoise', 'factorial', 'Faddeeva',
        'fakedata', 'faverage', 'faverageXY', 'fDAQmx_AI_GetReader',
        'fDAQmx_AO_UpdateOutputs', 'fDAQmx_ConnectTerminals', 'fDAQmx_CTR_Finished',
        'fDAQmx_CTR_IsFinished', 'fDAQmx_CTR_IsPulseFinished', 'fDAQmx_CTR_ReadCounter',
        'fDAQmx_CTR_ReadWithOptions', 'fDAQmx_CTR_SetPulseFrequency', 'fDAQmx_CTR_Start',
        'fDAQmx_DeviceNames', 'fDAQmx_DIO_Finished', 'fDAQmx_DIO_PortWidth',
        'fDAQmx_DIO_Read', 'fDAQmx_DIO_Write', 'fDAQmx_DisconnectTerminals',
        'fDAQmx_ErrorString', 'fDAQmx_ExternalCalDate', 'fDAQmx_NumAnalogInputs',
        'fDAQmx_NumAnalogOutputs', 'fDAQmx_NumCounters', 'fDAQmx_NumDIOPorts',
        'fDAQmx_ReadChan', 'fDAQmx_ReadNamedChan', 'fDAQmx_ResetDevice',
        'fDAQmx_ScanGetAvailable', 'fDAQmx_ScanGetNextIndex', 'fDAQmx_ScanStart',
        'fDAQmx_ScanStop', 'fDAQmx_ScanWait', 'fDAQmx_ScanWaitWithTimeout',
        'fDAQmx_SelfCalDate', 'fDAQmx_SelfCalibration', 'fDAQmx_WaveformStart',
        'fDAQmx_WaveformStop', 'fDAQmx_WF_IsFinished', 'fDAQmx_WF_WaitUntilFinished',
        'fDAQmx_WriteChan', 'FetchURL', 'FindDimLabel', 'FindListItem', 'floor',
        'FontList', 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin',
        'FuncRefInfo', 'FunctionInfo', 'FunctionList', 'FunctionPath', 'gamma',
        'gammaEuler', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss',
        'Gauss1D', 'Gauss2D', 'gcd', 'GetBrowserLine', 'GetBrowserSelection',
        'GetDataFolder', 'GetDataFolderDFR', 'GetDefaultFont', 'GetDefaultFontSize',
        'GetDefaultFontStyle', 'GetDimLabel', 'GetEnvironmentVariable', 'GetErrMessage',
        'GetFormula', 'GetIndependentModuleName', 'GetIndexedObjName',
        'GetIndexedObjNameDFR', 'GetKeyState', 'GetRTErrMessage', 'GetRTError',
        'GetRTLocation', 'GetRTLocInfo', 'GetRTStackInfo', 'GetScrapText', 'GetUserData',
        'GetWavesDataFolder', 'GetWavesDataFolderDFR', 'GISGetAllFileFormats',
        'GISSRefsAreEqual', 'GizmoInfo', 'GizmoScale', 'gnoise', 'GrepList', 'GrepString',
        'GuideInfo', 'GuideNameList', 'Hash', 'hcsr', 'HDF5AttributeInfo',
        'HDF5DatasetInfo', 'HDF5LibraryInfo', 'HDF5TypeInfo', 'hermite', 'hermiteGauss',
        'HyperGNoise', 'HyperGPFQ', 'HyperG0F1', 'HyperG1F1', 'HyperG2F1', 'IgorInfo',
        'IgorVersion', 'imag', 'ImageInfo', 'ImageNameList', 'ImageNameToWaveRef',
        'IndependentModuleList', 'IndexedDir', 'IndexedFile', 'IndexToScale', 'Inf',
        'Integrate1D', 'interp', 'Interp2D', 'Interp3D', 'inverseERF', 'inverseERFC',
        'ItemsInList', 'JacobiCn', 'JacobiSn', 'JulianToDate', 'Laguerre', 'LaguerreA',
        'LaguerreGauss', 'LambertW', 'LayoutInfo', 'leftx', 'LegendreA', 'limit',
        'ListMatch', 'ListToTextWave', 'ListToWaveRefWave', 'ln', 'log', 'logNormalNoise',
        'lorentzianNoise', 'LowerStr', 'MacroList', 'magsqr', 'MandelbrotPoint',
        'MarcumQ', 'MatrixCondition', 'MatrixDet', 'MatrixDot', 'MatrixRank',
        'MatrixTrace', 'max', 'MCC_AutoBridgeBal', 'MCC_AutoFastComp',
        'MCC_AutoPipetteOffset', 'MCC_AutoSlowComp', 'MCC_AutoWholeCellComp',
        'MCC_GetBridgeBalEnable', 'MCC_GetBridgeBalResist', 'MCC_GetFastCompCap',
        'MCC_GetFastCompTau', 'MCC_GetHolding', 'MCC_GetHoldingEnable', 'MCC_GetMode',
        'MCC_GetNeutralizationCap', 'MCC_GetNeutralizationEnable',
        'MCC_GetOscKillerEnable', 'MCC_GetPipetteOffset', 'MCC_GetPrimarySignalGain',
        'MCC_GetPrimarySignalHPF', 'MCC_GetPrimarySignalLPF', 'MCC_GetRsCompBandwidth',
        'MCC_GetRsCompCorrection', 'MCC_GetRsCompEnable', 'MCC_GetRsCompPrediction',
        'MCC_GetSecondarySignalGain', 'MCC_GetSecondarySignalLPF', 'MCC_GetSlowCompCap',
        'MCC_GetSlowCompTau', 'MCC_GetSlowCompTauX20Enable',
        'MCC_GetSlowCurrentInjEnable', 'MCC_GetSlowCurrentInjLevel',
        'MCC_GetSlowCurrentInjSetlTime', 'MCC_GetWholeCellCompCap',
        'MCC_GetWholeCellCompEnable', 'MCC_GetWholeCellCompResist',
        'MCC_SelectMultiClamp700B', 'MCC_SetBridgeBalEnable', 'MCC_SetBridgeBalResist',
        'MCC_SetFastCompCap', 'MCC_SetFastCompTau', 'MCC_SetHolding',
        'MCC_SetHoldingEnable', 'MCC_SetMode', 'MCC_SetNeutralizationCap',
        'MCC_SetNeutralizationEnable', 'MCC_SetOscKillerEnable', 'MCC_SetPipetteOffset',
        'MCC_SetPrimarySignalGain', 'MCC_SetPrimarySignalHPF', 'MCC_SetPrimarySignalLPF',
        'MCC_SetRsCompBandwidth', 'MCC_SetRsCompCorrection', 'MCC_SetRsCompEnable',
        'MCC_SetRsCompPrediction', 'MCC_SetSecondarySignalGain',
        'MCC_SetSecondarySignalLPF', 'MCC_SetSlowCompCap', 'MCC_SetSlowCompTau',
        'MCC_SetSlowCompTauX20Enable', 'MCC_SetSlowCurrentInjEnable',
        'MCC_SetSlowCurrentInjLevel', 'MCC_SetSlowCurrentInjSetlTime', 'MCC_SetTimeoutMs',
        'MCC_SetWholeCellCompCap', 'MCC_SetWholeCellCompEnable',
        'MCC_SetWholeCellCompResist', 'mean', 'median', 'min', 'mod', 'ModDate',
        'MPFXEMGPeak', 'MPFXExpConvExpPeak', 'MPFXGaussPeak', 'MPFXLorenzianPeak',
        'MPFXVoigtPeak', 'NameOfWave', 'NaN', 'NewFreeDataFolder', 'NewFreeWave', 'norm',
        'NormalizeUnicode', 'note', 'NumberByKey', 'numpnts', 'numtype',
        'NumVarOrDefault', 'num2char', 'num2istr', 'num2str', 'NVAR_Exists',
        'OperationList', 'PadString', 'PanelResolution', 'ParamIsDefault',
        'ParseFilePath', 'PathList', 'pcsr', 'Pi', 'PICTInfo', 'PICTList',
        'PixelFromAxisVal', 'pnt2x', 'poissonNoise', 'poly', 'PolygonArea', 'poly2D',
        'PossiblyQuoteName', 'ProcedureText', 'p2rect', 'qcsr', 'real', 'RemoveByKey',
        'RemoveEnding', 'RemoveFromList', 'RemoveListItem', 'ReplaceNumberByKey',
        'ReplaceString', 'ReplaceStringByKey', 'rightx', 'round', 'r2polar', 'sawtooth',
        'scaleToIndex', 'ScreenResolution', 'sec', 'sech', 'Secs2Date', 'Secs2Time',
        'SelectNumber', 'SelectString', 'SetEnvironmentVariable', 'sign', 'sin', 'sinc',
        'sinh', 'sinIntegral', 'SortList', 'SpecialCharacterInfo', 'SpecialCharacterList',
        'SpecialDirPath', 'SphericalBessJ', 'SphericalBessJD', 'SphericalBessY',
        'SphericalBessYD', 'SphericalHarmonics', 'SQLAllocHandle', 'SQLAllocStmt',
        'SQLBinaryWavesToTextWave', 'SQLBindCol', 'SQLBindParameter', 'SQLBrowseConnect',
        'SQLBulkOperations', 'SQLCancel', 'SQLCloseCursor', 'SQLColAttributeNum',
        'SQLColAttributeStr', 'SQLColumnPrivileges', 'SQLColumns', 'SQLConnect',
        'SQLDataSources', 'SQLDescribeCol', 'SQLDescribeParam', 'SQLDisconnect',
        'SQLDriverConnect', 'SQLDrivers', 'SQLEndTran', 'SQLError', 'SQLExecDirect',
        'SQLExecute', 'SQLFetch', 'SQLFetchScroll', 'SQLForeignKeys', 'SQLFreeConnect',
        'SQLFreeEnv', 'SQLFreeHandle', 'SQLFreeStmt', 'SQLGetConnectAttrNum',
        'SQLGetConnectAttrStr', 'SQLGetCursorName', 'SQLGetDataNum', 'SQLGetDataStr',
        'SQLGetDescFieldNum', 'SQLGetDescFieldStr', 'SQLGetDescRec', 'SQLGetDiagFieldNum',
        'SQLGetDiagFieldStr', 'SQLGetDiagRec', 'SQLGetEnvAttrNum', 'SQLGetEnvAttrStr',
        'SQLGetFunctions', 'SQLGetInfoNum', 'SQLGetInfoStr', 'SQLGetStmtAttrNum',
        'SQLGetStmtAttrStr', 'SQLGetTypeInfo', 'SQLMoreResults', 'SQLNativeSql',
        'SQLNumParams', 'SQLNumResultCols', 'SQLNumResultRowsIfKnown',
        'SQLNumRowsFetched', 'SQLParamData', 'SQLPrepare', 'SQLPrimaryKeys',
        'SQLProcedureColumns', 'SQLProcedures', 'SQLPutData', 'SQLReinitialize',
        'SQLRowCount', 'SQLSetConnectAttrNum', 'SQLSetConnectAttrStr', 'SQLSetCursorName',
        'SQLSetDescFieldNum', 'SQLSetDescFieldStr', 'SQLSetDescRec', 'SQLSetEnvAttrNum',
        'SQLSetEnvAttrStr', 'SQLSetPos', 'SQLSetStmtAttrNum', 'SQLSetStmtAttrStr',
        'SQLSpecialColumns', 'SQLStatistics', 'SQLTablePrivileges', 'SQLTables',
        'SQLTextWaveToBinaryWaves', 'SQLTextWaveTo2DBinaryWave', 'SQLUpdateBoundValues',
        'SQLXOPCheckState', 'SQL2DBinaryWaveToTextWave', 'sqrt', 'StartMSTimer',
        'StatsBetaCDF', 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF',
        'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', 'StatsCMSSDCDF',
        'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', 'StatsErlangCDF',
        'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', 'StatsEValuePDF',
        'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', 'StatsFPDF', 'StatsFriedmanCDF',
        'StatsGammaCDF', 'StatsGammaPDF', 'StatsGeometricCDF', 'StatsGeometricPDF',
        'StatsGEVCDF', 'StatsGEVPDF', 'StatsHyperGCDF', 'StatsHyperGPDF',
        'StatsInvBetaCDF', 'StatsInvBinomialCDF', 'StatsInvCauchyCDF', 'StatsInvChiCDF',
        'StatsInvCMSSDCDF', 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF',
        'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', 'StatsInvGeometricCDF',
        'StatsInvKuiperCDF', 'StatsInvLogisticCDF', 'StatsInvLogNormalCDF',
        'StatsInvMaxwellCDF', 'StatsInvMooreCDF', 'StatsInvNBinomialCDF',
        'StatsInvNCChiCDF', 'StatsInvNCFCDF', 'StatsInvNormalCDF', 'StatsInvParetoCDF',
        'StatsInvPoissonCDF', 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF',
        'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF',
        'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF',
        'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF',
        'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', 'StatsLogNormalCDF',
        'StatsLogNormalPDF', 'StatsMaxwellCDF', 'StatsMaxwellPDF', 'StatsMedian',
        'StatsMooreCDF', 'StatsNBinomialCDF', 'StatsNBinomialPDF', 'StatsNCChiCDF',
        'StatsNCChiPDF', 'StatsNCFCDF', 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF',
        'StatsNormalCDF', 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF',
        'StatsPermute', 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF',
        'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', 'StatsRayleighCDF',
        'StatsRayleighPDF', 'StatsRectangularCDF', 'StatsRectangularPDF', 'StatsRunsCDF',
        'StatsSpearmanRhoCDF', 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF',
        'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean',
        'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', 'StatsVonMisesPDF',
        'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', 'StatsWeibullPDF',
        'StopMSTimer', 'StringByKey', 'stringCRC', 'StringFromList', 'StringList',
        'stringmatch', 'strlen', 'strsearch', 'StrVarOrDefault', 'str2num', 'StudentA',
        'StudentT', 'sum', 'SVAR_Exists', 'TableInfo', 'TagVal', 'TagWaveRef', 'tan',
        'tango_close_device', 'tango_command_inout', 'tango_compute_image_proj',
        'tango_get_dev_attr_list', 'tango_get_dev_black_box', 'tango_get_dev_cmd_list',
        'tango_get_dev_status', 'tango_get_dev_timeout', 'tango_get_error_stack',
        'tango_open_device', 'tango_ping_device', 'tango_read_attribute',
        'tango_read_attributes', 'tango_reload_dev_interface',
        'tango_resume_attr_monitor', 'tango_set_attr_monitor_period',
        'tango_set_dev_timeout', 'tango_start_attr_monitor', 'tango_stop_attr_monitor',
        'tango_suspend_attr_monitor', 'tango_write_attribute', 'tango_write_attributes',
        'tanh', 'TDMAddChannel', 'TDMAddGroup', 'TDMAppendDataValues',
        'TDMAppendDataValuesTime', 'TDMChannelPropertyExists', 'TDMCloseChannel',
        'TDMCloseFile', 'TDMCloseGroup', 'TDMCreateChannelProperty', 'TDMCreateFile',
        'TDMCreateFileProperty', 'TDMCreateGroupProperty', 'TDMFilePropertyExists',
        'TDMGetChannelPropertyNames', 'TDMGetChannelPropertyNum',
        'TDMGetChannelPropertyStr', 'TDMGetChannelPropertyTime',
        'TDMGetChannelPropertyType', 'TDMGetChannels', 'TDMGetChannelStringPropertyLen',
        'TDMGetDataType', 'TDMGetDataValues', 'TDMGetDataValuesTime',
        'TDMGetFilePropertyNames', 'TDMGetFilePropertyNum', 'TDMGetFilePropertyStr',
        'TDMGetFilePropertyTime', 'TDMGetFilePropertyType', 'TDMGetFileStringPropertyLen',
        'TDMGetGroupPropertyNames', 'TDMGetGroupPropertyNum', 'TDMGetGroupPropertyStr',
        'TDMGetGroupPropertyTime', 'TDMGetGroupPropertyType', 'TDMGetGroups',
        'TDMGetGroupStringPropertyLen', 'TDMGetLibraryErrorDescription',
        'TDMGetNumChannelProperties', 'TDMGetNumChannels', 'TDMGetNumDataValues',
        'TDMGetNumFileProperties', 'TDMGetNumGroupProperties', 'TDMGetNumGroups',
        'TDMGroupPropertyExists', 'TDMOpenFile', 'TDMOpenFileEx', 'TDMRemoveChannel',
        'TDMRemoveGroup', 'TDMReplaceDataValues', 'TDMReplaceDataValuesTime',
        'TDMSaveFile', 'TDMSetChannelPropertyNum', 'TDMSetChannelPropertyStr',
        'TDMSetChannelPropertyTime', 'TDMSetDataValues', 'TDMSetDataValuesTime',
        'TDMSetFilePropertyNum', 'TDMSetFilePropertyStr', 'TDMSetFilePropertyTime',
        'TDMSetGroupPropertyNum', 'TDMSetGroupPropertyStr', 'TDMSetGroupPropertyTime',
        'TextEncodingCode', 'TextEncodingName', 'TextFile', 'ThreadGroupCreate',
        'ThreadGroupGetDF', 'ThreadGroupGetDFR', 'ThreadGroupRelease', 'ThreadGroupWait',
        'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', 'time', 'TraceFromPixel',
        'TraceInfo', 'TraceNameList', 'TraceNameToWaveRef', 'TrimString', 'trunc',
        'UniqueName', 'UnPadString', 'UnsetEnvironmentVariable', 'UpperStr', 'URLDecode',
        'URLEncode', 'VariableList', 'Variance', 'vcsr', 'viAssertIntrSignal',
        'viAssertTrigger', 'viAssertUtilSignal', 'viClear', 'viClose', 'viDisableEvent',
        'viDiscardEvents', 'viEnableEvent', 'viFindNext', 'viFindRsrc', 'viGetAttribute',
        'viGetAttributeString', 'viGpibCommand', 'viGpibControlATN', 'viGpibControlREN',
        'viGpibPassControl', 'viGpibSendIFC', 'viIn8', 'viIn16', 'viIn32', 'viLock',
        'viMapAddress', 'viMapTrigger', 'viMemAlloc', 'viMemFree', 'viMoveIn8',
        'viMoveIn16', 'viMoveIn32', 'viMoveOut8', 'viMoveOut16', 'viMoveOut32', 'viOpen',
        'viOpenDefaultRM', 'viOut8', 'viOut16', 'viOut32', 'viPeek8', 'viPeek16',
        'viPeek32', 'viPoke8', 'viPoke16', 'viPoke32', 'viRead', 'viReadSTB',
        'viSetAttribute', 'viSetAttributeString', 'viStatusDesc', 'viTerminate',
        'viUnlock', 'viUnmapAddress', 'viUnmapTrigger', 'viUsbControlIn',
        'viUsbControlOut', 'viVxiCommandQuery', 'viWaitOnEvent', 'viWrite', 'VoigtFunc',
        'VoigtPeak', 'WaveCRC', 'WaveDims', 'WaveExists', 'WaveHash', 'WaveInfo',
        'WaveList', 'WaveMax', 'WaveMin', 'WaveName', 'WaveRefIndexed',
        'WaveRefIndexedDFR', 'WaveRefsEqual', 'WaveRefWaveToList', 'WaveTextEncoding',
        'WaveType', 'WaveUnits', 'WhichListItem', 'WinList', 'WinName', 'WinRecreation',
        'WinType', 'wnoise', 'xcsr', 'XWaveName', 'XWaveRefFromTrace', 'x2pnt', 'zcsr',
        'ZernikeR', 'zeromq_client_connect', 'zeromq_client_recv',
        'zeromq_client_send', 'zeromq_handler_start', 'zeromq_handler_stop',
        'zeromq_server_bind', 'zeromq_server_recv', 'zeromq_server_send', 'zeromq_set',
        'zeromq_stop', 'zeromq_test_callfunction', 'zeromq_test_serializeWave', 'zeta'
    )
    # The word lists above are compiled by words() into single alternation
    # regexes; matching is case-insensitive via `flags`.
    tokens = {
        'root': [
            (r'//.*$', Comment.Single),
            (r'"([^"\\]|\\.)*"', String),
            # Flow Control.
            (words(flowControl, prefix=r'\b', suffix=r'\b'), Keyword),
            # Types.
            (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),
            # Keywords.
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
            # Built-in operations.
            (words(operations, prefix=r'\b', suffix=r'\b'), Name.Class),
            # Built-in functions.
            (words(functions, prefix=r'\b', suffix=r'\b'), Name.Function),
            # Compiler directives.
            (r'^#(include|pragma|define|undef|ifdef|ifndef|if|elif|else|endif)',
             Name.Decorator),
            # Fast path: consume the rest of a line that cannot start any of
            # the patterns above (IGNORECASE makes [^a-z...] cover A-Z too).
            (r'[^a-z"/]+$', Text),
            (r'.', Text),
        ],
    }
|
|
#
# Copyright John Reid 2008, 2009
#
"""
Code to analyse sequences in preparation for the HDPM.
"""
from shared import *
import biopsy.analyse_remos.consolidate_hits as CH
import biopsy.analyse_remos.expectations as EX
from boost.graph import Graph
class LabelledGraph(Graph):
    """
    A boost.graph.Graph that indexes vertices by strings.
    """
    def __init__(self, label_property_name='label', label_property_type='string'):
        Graph.__init__(self)
        # Vertex property storing each vertex's string label.
        self.labels = Graph.add_vertex_property(self, name=label_property_name, type=label_property_type)
        # Maps label -> vertex descriptor for O(1) lookup by label.
        self.vertex_map = {}
    def add_labelled_vertex(self, label):
        """Add a new vertex carrying *label*; raise RuntimeError if the label is already used."""
        # Bug fix: the original tested `label in self.vertices`, but
        # self.vertices holds vertex descriptors, not strings, so the
        # duplicate guard could never fire. Check the label index instead.
        if label in self.vertex_map:
            raise RuntimeError('Vertex for "%s" already in graph' % label)
        v = self.add_vertex()
        self.labels[v] = label
        self.vertex_map[label] = v
        return v
    def get_vertex(self, label):
        """Return the vertex for *label*, creating it on first use."""
        if label in self.vertex_map:
            return self.vertex_map[label]
        else:
            return self.add_labelled_vertex(label)
def find_all_aligned_sequences_in_refs(remome, refs):
    "Return the set of aligned sequence sets whose sequence ids match a ref in *refs*."
    matched = set()
    for aligned in remome.get_aligned_sequences():
        for sequence_id in aligned.get_sequence_ids():
            gid = sequence_id.gene_id
            db_ref = biopsy.DbRef(biopsy.db.ensembl, gid.prefix, gid.num)
            if db_ref in refs:
                matched.add(aligned)
                break  # one matching id is enough for this set
    logging.info('Matched %d aligned sequence sets to genes', len(matched))
    return matched
def get_remo_sequences(remo, centre_id, masked=True):
    """
    @param remo: remo object providing get_sequence_for() and get_sequence_ids()
    @param centre_id: the sequence id of the centre sequence
    @param masked: whether to return masked sequences
    @return: a generator over the remo's sequences. The first sequence is the
        centre sequence, followed by every other sequence in the remo.
    """
    # Bug fix: the original built a local SequenceVec containing the centre
    # sequence but never yielded it, so the centre sequence was silently
    # dropped despite the documented contract that it comes first.
    yield remo.get_sequence_for(centre_id, masked)
    for seq_id in remo.get_sequence_ids():
        if seq_id != centre_id:
            yield remo.get_sequence_for(seq_id, masked)
def get_sequences_for_sequence_sets(remome, aligned_sequences, masked=True):
    """Map each centre gene to the list of remo sequence bundles found for it."""
    logging.info('Using %s sequences', 'masked' if masked else 'unmasked')
    by_gene = {}
    for aligned in aligned_sequences:
        centre = aligned.centre_sequence
        gid = centre.gene_id
        gene = biopsy.DbRef(biopsy.db.ensembl, gid.prefix, gid.num)
        # Every gene gets an entry, even when it has no remos.
        bundles = by_gene.setdefault(gene, [])
        for remo in remome.get_remos_for(aligned):
            seqs = biopsy.SequenceVec()
            seqs.extend(get_remo_sequences(remo, centre, masked=masked))
            bundles.append(seqs)
    return by_gene
@output_cached_method('pssm-ensembl-map')
def get_pssm_to_ensembl_map():
    """
    @return: A map from PSSMs to sets of ENSMUSGs.
    """
    import biopsy.transfac as T
    no_filter = T.PssmFilter()
    mapping = cookbook.DictOfSets()
    for accession in biopsy.get_transfac_pssm_accessions(no_filter):
        for factor in biopsy.get_factors_for_pssm(accession):
            factor = T.Factor(factor)
            if not factor.gene:
                continue
            # Record every Ensembl mouse gene cross-reference of the factor.
            for ref in factor.gene.entry.db_refs:
                if ref.db == biopsy.db.ensembl and ref.table == 'ENSMUSG':
                    mapping[accession].add(str(ref))
    logging.info('Found %d PSSMs that map to Ensembl mouse genes', len(mapping))
    return mapping
@global_cached_method('human-mouse-orthologs')
def get_human_mouse_orthologs():
    """Map human Ensembl gene ids to their mouse orthologs via BioMart."""
    from biopsy.identifiers.biomart import quick_query
    logging.info('Getting human mouse orthologs from Ensembl')
    pairs = quick_query(
        dataset='hsapiens_gene_ensembl',
        attributes=['ensembl_gene_id', 'mouse_ensembl_gene'],
    )
    orthologs = dict(pairs)
    logging.info('Mapped %d human genes to mouse', len(orthologs))
    return orthologs
@global_cached_method('rat-mouse-orthologs')
def get_rat_mouse_orthologs():
    """Map rat Ensembl gene ids to their mouse orthologs via BioMart."""
    from biopsy.identifiers.biomart import quick_query
    logging.info('Getting rat mouse orthologs from Ensembl')
    pairs = quick_query(
        dataset='rnorvegicus_gene_ensembl',
        attributes=['ensembl_gene_id', 'mouse_ensembl_gene'],
    )
    orthologs = dict(pairs)
    logging.info('Mapped %d rat genes to mouse', len(orthologs))
    return orthologs
@output_cached_method('pssm-ensembl-map-min-range')
def get_pssm_to_ensembl_map_min_range():
    """
    @return: A map from PSSMs to single ENSMUSGs.
    Uses a greedy heuristic to minimise the range of the map.
    """
    import biopsy.transfac as T
    human_mouse_orthologs = get_human_mouse_orthologs()
    rat_mouse_orthologs = get_rat_mouse_orthologs()
    # Build a bipartite graph of candidate PSSM -> ENSMUSG mappings.
    graph = LabelledGraph()
    pssms = set()
    genes = set()
    pssm_filter = T.PssmFilter()
    def add_mapping(acc, ref):
        # Record a candidate mapping as an edge between the PSSM and gene vertices.
        ref = str(ref)
        pssms.add(acc)
        genes.add(ref)
        graph.add_edge(graph.get_vertex(acc), graph.get_vertex(ref))
    def deal_with_pssm(acc, use_orthologs=False):
        # Collect all mouse genes this PSSM's factors cross-reference;
        # optionally translate rat/human references via orthologs.
        for f in imap(T.Factor, biopsy.get_factors_for_pssm(acc)):
            if f.gene:
                for ref in f.gene.entry.db_refs:
                    if ref.db == biopsy.db.ensembl:
                        if ref.table == 'ENSMUSG':
                            add_mapping(acc, ref)
                        elif use_orthologs:
                            if ref.table == 'ENSRNOG':
                                ortholog = rat_mouse_orthologs[str(ref)]
                                if ortholog:
                                    add_mapping(acc, ortholog)
                            elif ref.table == 'ENSG':
                                ortholog = human_mouse_orthologs[str(ref)]
                                if ortholog:
                                    add_mapping(acc, ortholog)
    for acc in biopsy.get_transfac_pssm_accessions(pssm_filter):
        deal_with_pssm(acc, use_orthologs=False)
        # If we didn't map the PSSM directly onto a mouse gene, try going through rat/human orthologs.
        if acc not in pssms:
            deal_with_pssm(acc, use_orthologs=True)
    logging.info('%d PSSMs map onto to %d Ensembl mouse genes', len(pssms), len(genes))
    # Greedy heuristic for a many-to-1 mapping: visit genes in decreasing
    # candidate-degree order so high-degree genes claim their PSSMs first.
    sorted_by_degree = [(graph.in_degree(graph.get_vertex(g)), g) for g in genes]
    sorted_by_degree.sort(reverse=True)
    pssm_map = dict()
    for degree, g in sorted_by_degree:
        for pssm_v in graph.adjacent_vertices(graph.vertex_map[g]):
            pssm = graph.labels[pssm_v]
            if pssm not in pssm_map: # if we don't already have a map for this pssm, then use this mapping
                pssm_map[pssm] = g
    # NOTE(review): 'M00158' is removed unconditionally — presumably a known
    # bad PSSM; this raises KeyError if it is absent from the map. Confirm.
    del pssm_map['M00158']
    # Calculate how many Ensembl genes are in range now
    logging.info('%d PSSMs map onto to %d Ensembl mouse genes (minimum range)', len(pssms), len(set(pssm_map.values())))
    return pssm_map
@cookbook.cache_decorator.cachedmethod
def get_remome(threshold=100):
    """
    Get the remome of the given threshold.

    Loads the filtered remome file for *threshold* from the biopsy data
    directory; the result is memoised by the cachedmethod decorator.
    """
    remome_file = os.path.join(biopsy.get_data_dir(), 'ReMos', '%d' % threshold, '%d.filtered' % threshold)
    logging.info('Loading remome %d from %s', threshold, remome_file)
    remome = biopsy.Remome.load(remome_file)
    logging.info('Have %d aligned sequence sets in remome', len(remome.get_aligned_sequences()))
    return remome
def get_aligned_sequences(remome_threshold=100):
    """Return the set of aligned sequence sets in the remome for *remome_threshold*."""
    # set() consumes the iterable directly; the pass-through generator
    # expression `set(a for a in ...)` added nothing.
    return set(get_remome(remome_threshold).get_aligned_sequences())
@output_cached_method('sequence-dict')
def get_sequence_dict(remome_threshold=100, masked=True):
    """
    Build the sequence dict for the remome (consumed by analyse_remome_sequences).

    @arg remome_threshold: Threshold the remome was built with.
    @arg masked: Whether to use repeat-masked sequence.
    """
    aligned_sequences = get_aligned_sequences(remome_threshold)
    return get_sequences_for_sequence_sets(get_remome(remome_threshold), aligned_sequences, masked)
@output_cached_method('remome-analysis')
def analyse_remome(remome_threshold=100, masked=True, threshold=0.01):
    """Analyse the whole remome's sequences (result cached by output_cached_method)."""
    return analyse_remome_sequences(get_sequence_dict(remome_threshold, masked), threshold=threshold)
def analyse_remome_sequences(sequence_dict, threshold=0.01):
    """
    Analyse the sequences.

    @arg sequence_dict: Maps gene ids to iterables of phylogenetic sequence sets.
    @arg threshold: Passed to biopsy as both the direct and phylo threshold.
    @return: A dict mapping gene ids to sequences of (hits, max_chain) tuples.
    """
    # get the pssm accessions we will use
    pssm_accs = biopsy.SequenceVec()
    pssm_accs.extend(get_pssm_to_ensembl_map_min_range().keys())
    result = cookbook.DictOfLists()
    for gene, sequences in sequence_dict.iteritems():
        logging.info('Analysing %d remos for %s', len(sequences), gene)
        # (removed an unused `hit_counts = None` local and a dead debugger line)
        for seqs in sequences:
            # unadjusted_hits is returned by biopsy but not needed here.
            hits, max_chain, unadjusted_hits = biopsy.score_pssms_on_phylo_sequences(
                pssm_accs,
                seqs,
                threshold = threshold,
                phylo_threshold = threshold
            )
            result[gene].append((hits, max_chain))
    return result
def consolidate_gene_analysis(gene_analysis):
    """
    Take the analysis for one gene and consolidate its hits. The analysis should be an iterable over
    (hits, max_chain) tuples. The hits are filtered by the hit_threshold. The maximal chain is left
    as is.

    @return: (hit_counts, max_chain_counts) per-binder counts; either may be
    None when nothing consolidated.
    """
    pssm_map = get_pssm_to_ensembl_map_min_range()
    def _accumulate(hits, counts):
        # Map PSSM hits onto genes, reduce to the maximal chain, and fold
        # the per-binder hit counts into the running accumulator.
        factor_mapped_hits = map_binders(hits, pssm_map)
        consolidated_hits = CH.maximal_chain_hits(factor_mapped_hits)
        if consolidated_hits:
            counts = EX.num_hits_per_binder(consolidated_hits, counts)
        return counts
    hit_counts = None
    max_chain_counts = None
    for hits, max_chain in gene_analysis:
        hit_counts = _accumulate(hits, hit_counts)
        # `is not None` (identity), not the Yoda-style `None != max_chain`.
        if max_chain is not None:
            max_chain_counts = _accumulate(max_chain, max_chain_counts)
    return hit_counts, max_chain_counts
@output_cached_method('consolidated-hits')
def consolidate_remome_analysis(
    remome_threshold=100,
    masked=True,
    use_max_chain=False,
    analysis_threshold=.01
):
    """
    Consolidates analysis for entire remome.

    @return: dict mapping gene -> (hit_counts, max_chain_counts) as produced
    by consolidate_gene_analysis.
    """
    # NOTE(review): use_max_chain is never read here; presumably kept so it
    # participates in the output_cached_method cache key - confirm.
    remome_analysis = analyse_remome(remome_threshold, masked, analysis_threshold)
    return dict(
        (gene, consolidate_gene_analysis(gene_analysis))
        for gene, gene_analysis in remome_analysis.iteritems()
    )
@output_cached_method('hit-counts')
def get_hit_counts_for_remome_analysis(
    remome_threshold=100,
    masked=True,
    use_max_chain=False,
    analysis_threshold=.01,
):
    """
    Get the consolidated hit counts for the remome analysis.
    @arg remome_threshold: The threshold used to build the remome.
    @arg masked: Were the sequences masked before BiFa analysis?
    @arg use_max_chain: Use the counts from the maximal chain?
    @arg analysis_threshold: Threshold used for the BiFa analysis.
    """
    consolidated_analysis = consolidate_remome_analysis(
        remome_threshold,
        masked,
        use_max_chain,
        analysis_threshold
    )
    # Each value is a (hit_counts, max_chain_counts) pair; select the
    # requested element once instead of duplicating the comprehension.
    index = 1 if use_max_chain else 0
    return dict(
        (gene, counts[index])
        for gene, counts in consolidated_analysis.iteritems()
        if counts[index]
    )
class AnalyseSequence(object):
    """Callable that scores a fixed PSSM set on a sequence and returns the maximal chain of hits."""
    def __init__(self, pssms, threshold=.01):
        # Copy the accessions into a biopsy.SequenceVec once so that each
        # subsequent call only pays for the scoring itself.
        logging.info('Creating sequence analyser with threshold of %f for %d pssms', threshold, len(pssms))
        self.pssms = biopsy.SequenceVec()
        self.pssms.extend(pssms)
        self.threshold = threshold
    def __call__(self, sequence):
        # NOTE(review): imported locally, presumably so instances can be
        # used in worker processes without module state - confirm.
        import biopsy
        import biopsy.analyse_remos.consolidate_hits as CH
        # Strip leading/trailing masked bases ('N'/'n') before scoring.
        hits = biopsy.score_pssms_on_sequence(self.pssms, sequence.strip('Nn'), self.threshold)
        return CH.maximal_chain_hits(hits)
def get_sequence_analyser():
    """Build an AnalyseSequence over the accessions of the min-range PSSM map."""
    # NOTE(review): relies on a module-level `options` object (not visible in
    # this chunk) for the threshold - confirm it is defined before use.
    pssm_to_ensembl_map = get_pssm_to_ensembl_map_min_range()
    return AnalyseSequence(pssm_to_ensembl_map.keys(), threshold=options.analysis_threshold)
def get_analysis_union(analysis):
    """
    Takes analysis and returns one sequence containing all the hits.

    @arg analysis: dict mapping gene ids to sequences of hit sequences.
    @return: A flat list of every hit across all genes.
    """
    result = list()
    # Iterate .values() rather than .iteritems(): the gene key was unused,
    # and .values() also works under both Python 2 and 3.
    for hit_seqs in analysis.values():
        for hits in hit_seqs:
            result.extend(hits)
    return result
def analyse_union(analysis):
    """
    Take an analysis and return various statistics.

    Returns the union of all hits sorted by descending binding probability,
    plus the single best (binder, p_binding) pair for each binder.
    """
    union = get_analysis_union(analysis)
    union.sort(key=lambda hit: hit.p_binding, reverse=True)
    # After the descending sort, the first occurrence of each binder carries
    # its best score.
    seen_binders = set()
    best_scores = []
    for hit in union:
        if hit.binder in seen_binders:
            continue
        seen_binders.add(hit.binder)
        best_scores.append((hit.binder, hit.p_binding))
    return union, best_scores
|
|
import pytest
from yelp_beans.logic.user import add_preferences
from yelp_beans.logic.user import create_new_employees_from_list
from yelp_beans.logic.user import hash_employee_data
from yelp_beans.logic.user import is_valid_user_subscription_preference
from yelp_beans.logic.user import mark_termed_employees
from yelp_beans.logic.user import remove_preferences
from yelp_beans.logic.user import sync_employees
from yelp_beans.logic.user import update_current_employees
from yelp_beans.logic.user import user_preference
from yelp_beans.logic.user import validate_employee_data
from yelp_beans.models import MeetingSpec
from yelp_beans.models import Rule
from yelp_beans.models import User
from yelp_beans.models import UserSubscriptionPreferences
def test_sync_creates_new_employee(database, data_source):
    """
    Data source contains a user that we don't have in our database,
    we must create a new user in our database.
    """
    sync_employees(data_source)
    # The synced employee must now exist exactly once (query .one()).
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert user.first_name == 'Sam'
def test_sync_marks_employee_as_terminated(database, data_source):
    """
    Data source contains a list of active users for beans. If the user
    is not present in the data source, but present in our database we
    mark the employee as terminated
    """
    sync_employees(data_source)
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert not user.terminated
    # Re-sync with an empty data source: the user disappeared upstream.
    sync_employees({})
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert user.terminated
def test_sync_updates_current_employee(database, data_source):
    """
    Data source contains a user that we are already tracking in our database,
    we must update the user in the database to reflect new information.
    Returns
    """
    sync_employees(data_source)
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert user.first_name == 'Sam'
    assert user.meta_data['department'] == 'Engineering'
    # Mutate the upstream record in place and re-sync.
    data = data_source[0]
    data['first_name'] = 'John'
    data['department'] = 'Design'
    data['metadata']['department'] = 'Design'
    sync_employees(data_source)
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert user.first_name == 'John'
    assert user.meta_data['department'] == 'Design'
def test_hash_employee_data(data_source, data_source_by_key):
    """
    Given a json object, return a dictionary by email of users.
    """
    # NOTE(review): the data_source_by_key fixture is requested but never
    # used in this test - confirm whether it is needed for setup.
    employees = hash_employee_data(data_source)
    assert len(list(employees.keys())) == 2
    assert set(employees.keys()) == {'samsmith@yelp.com', 'derrickjohnson@yelp.com'}
    assert employees['samsmith@yelp.com']['last_name'] == 'Smith'
def test_hash_employee_data_errors_with_no_email(data_source, data_source_by_key):
    """hash_employee_data raises KeyError when a record has no email key."""
    data = data_source
    del data[0]['email']
    with pytest.raises(KeyError):
        hash_employee_data(data)
def test_validate_employee_data(data_source):
    """validate_employee_data raises KeyError for each missing required field."""
    data = data_source
    validate_employee_data(data)
    # NOTE(review): `data = data_source` re-binds the SAME list each time, so
    # the deletions below accumulate across cases - confirm this is intended.
    # raises without email
    with pytest.raises(KeyError):
        data = data_source
        del data[0]['email']
        validate_employee_data(data)
    # raises without first name
    with pytest.raises(KeyError):
        data = data_source
        del data[0]['first_name']
        validate_employee_data(data)
    # raises without last name
    with pytest.raises(KeyError):
        data = data_source
        del data[0]['last_name']
        validate_employee_data(data)
def test_mark_terminated_employees(database, fake_user):
    """mark_termed_employees sets the terminated flag on the given users."""
    mark_termed_employees([fake_user])
    user = User.query.one()
    assert user.terminated
def test_create_new_employees_from_list(session, data_source):
    """New employees are created with their metadata copied verbatim."""
    create_new_employees_from_list(data_source)
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert user.email == 'samsmith@yelp.com'
    assert user.meta_data == {
        'department': 'Engineering',
        'title': 'Engineer',
        'floor': '10',
        'desk': '100',
        'manager': 'Bo Demillo'
    }
def test_update_current_employees(session, data_source):
    """update_current_employees overwrites local fields with the remote values."""
    create_new_employees_from_list(data_source)
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert user.photo_url == 'www.cdn.com/SamSmith.png'
    assert user.meta_data == {
        'department': 'Engineering',
        'title': 'Engineer',
        'floor': '10',
        'desk': '100',
        'manager': 'Bo Demillo'
    }
    # Simulate the remote source having changed photo and department.
    local_data_employee = {user.email: user for user in User.query.all()}
    remote_data_employee = hash_employee_data(data_source)
    remote_data_employee['samsmith@yelp.com']['photo_url'] = 'new'
    remote_data_employee['samsmith@yelp.com']['department'] = 'Sales'
    remote_data_employee['samsmith@yelp.com']['metadata']['department'] = 'Sales'
    update_current_employees(local_data_employee, remote_data_employee)
    user = User.query.filter(User.email == 'samsmith@yelp.com').one()
    assert user.photo_url == 'new'
    assert user.meta_data['department'] == 'Sales'
    assert user.meta_data == {
        'department': 'Sales',
        'title': 'Engineer',
        'floor': '10',
        'desk': '100',
        'manager': 'Bo Demillo'
    }
def test_user_preference(session, subscription):
    """user_preference returns the user's preference matching the meeting spec."""
    preference = subscription.datetime[0]
    user_pref = UserSubscriptionPreferences(
        preference=preference,
        subscription=subscription,
    )
    session.add(user_pref)
    user = User(email='a@yelp.com', meta_data={'department': 'dept'}, subscription_preferences=[user_pref])
    session.add(user)
    # Meeting spec at the same datetime as the user's preference.
    meeting_spec = MeetingSpec(meeting_subscription=subscription, datetime=subscription.datetime[0].datetime)
    session.add(meeting_spec)
    session.commit()
    assert user_pref == user_preference(user, meeting_spec)
def test_remove_preferences_removes_on_opt_out(session, subscription):
    """A preference flagged False (opt-out) is deleted from the user and the DB."""
    preference = subscription.datetime[0]
    user_pref = UserSubscriptionPreferences(
        preference=preference,
        subscription=subscription,
    )
    session.add(user_pref)
    user = User(email='a@yelp.com', meta_data={'department': 'dept'}, subscription_preferences=[user_pref])
    session.add(user)
    session.commit()
    assert user.subscription_preferences == [user_pref]
    # False == opted out: the preference should be removed entirely.
    updated_preferences = {preference.id: False}
    removed = remove_preferences(user, updated_preferences, subscription.id)
    assert removed == {user_pref.preference_id}
    user = User.query.filter(User.id == user.id).one()
    assert user.subscription_preferences == []
    assert UserSubscriptionPreferences.query.all() == []
def test_remove_preferences_does_not_remove_on_opt_in(session, subscription):
    """A preference flagged True (opt-in) must be left untouched."""
    preference = subscription.datetime[0]
    user_pref = UserSubscriptionPreferences(
        preference=preference,
        subscription=subscription,
    )
    session.add(user_pref)
    user = User(email='a@yelp.com', meta_data={'department': 'dept'}, subscription_preferences=[user_pref])
    session.add(user)
    session.commit()
    assert user.subscription_preferences == [user_pref]
    updated_preferences = {preference.id: True}
    # Consistency fix: pass the subscription id, as every sibling test does;
    # previously the subscription object itself was passed.
    removed = remove_preferences(user, updated_preferences, subscription.id)
    assert removed == set()
    user = User.query.filter(User.id == user.id).one()
    assert user.subscription_preferences == [user_pref]
    assert len(UserSubscriptionPreferences.query.all()) == 1
def test_remove_preferences_multiple_remove_on_opt_in(session, subscription):
    """Several opted-out preferences are all removed in one call."""
    preference_1 = subscription.datetime[0]
    preference_2 = subscription.datetime[1]
    user_pref_1 = UserSubscriptionPreferences(
        preference=preference_1,
        subscription=subscription,
    )
    user_pref_2 = UserSubscriptionPreferences(
        preference=preference_2,
        subscription=subscription,
    )
    session.add(user_pref_1)
    session.add(user_pref_2)
    user = User(
        email='a@yelp.com',
        meta_data={'department': 'dept'},
        subscription_preferences=[user_pref_1, user_pref_2]
    )
    session.add(user)
    session.commit()
    assert user.subscription_preferences == [user_pref_1, user_pref_2]
    updated_preferences = {preference_1.id: False, preference_2.id: False}
    removed = remove_preferences(user, updated_preferences, subscription.id)
    assert removed == {user_pref_1.preference_id, user_pref_2.preference_id}
    # (fixed an accidental `user = user = ...` double assignment)
    user = User.query.filter(User.id == user.id).one()
    assert user.subscription_preferences == []
    assert UserSubscriptionPreferences.query.all() == []
def test_add_preferences_adds_on_opt_in(session, subscription):
    """Opting in to a preference creates a UserSubscriptionPreferences row."""
    preference = subscription.datetime[0]
    user = User(email='a@yelp.com', meta_data={'department': 'dept'})
    session.add(user)
    session.commit()
    updated_preferences = {preference.id: True}
    assert len(user.subscription_preferences) == 0
    added = add_preferences(user, updated_preferences, subscription.id)
    assert added.pop() == preference.id
    user = User.query.filter(User.id == user.id).one()
    assert user.subscription_preferences[0].preference == preference
def test_add_preferences_adds_multiple_on_opt_in(session, subscription):
    """Opting in to several preferences adds all of them."""
    preference_1 = subscription.datetime[0]
    preference_2 = subscription.datetime[1]
    user = User(email='a@yelp.com', meta_data={'department': 'dept'})
    session.add(user)
    session.commit()
    updated_preferences = {preference_1.id: True, preference_2.id: True}
    assert len(user.subscription_preferences) == 0
    added = add_preferences(user, updated_preferences, subscription.id)
    assert preference_1.id in added
    assert preference_2.id in added
    assert len(user.subscription_preferences) == 2
    # Order of the created preferences is not guaranteed; check membership.
    user = User.query.filter(User.id == user.id).one()
    assert user.subscription_preferences[0].preference in (preference_1, preference_2)
    assert user.subscription_preferences[1].preference in (preference_1, preference_2)
def test_is_valid_user_subscription_preference_no_subscription(subscription):
    """A preference with no associated subscription is invalid."""
    preference = subscription.datetime[0]
    user = User(email='a@yelp.com', meta_data={'department': 'dept'})
    user_sub = UserSubscriptionPreferences(preference=preference, subscription_id=None, user=user)
    subscription.user_rules = [Rule(name='department', value='dept')]
    subscription.rule_logic = 'all'
    result = is_valid_user_subscription_preference(user_sub, None)
    assert not result
def test_is_valid_user_subscription_preference_user_terminated(subscription):
    """A terminated user's preference is invalid even if the rules match."""
    preference = subscription.datetime[0]
    user = User(email='a@yelp.com', meta_data={'department': 'dept'}, terminated=True)
    user_sub = UserSubscriptionPreferences(preference=preference, subscription_id=subscription.id, user=user)
    subscription.user_rules = [Rule(name='department', value='dept')]
    subscription.rule_logic = 'all'
    result = is_valid_user_subscription_preference(user_sub, subscription)
    assert not result
def test_is_valid_user_subscription_preference_fails_rules(subscription):
    """A preference is invalid when the user fails the subscription's rules."""
    preference = subscription.datetime[0]
    user = User(email='a@yelp.com', meta_data={'department': 'dept'})
    user_sub = UserSubscriptionPreferences(preference=preference, subscription_id=subscription.id, user=user)
    # Rule requires a different department than the user's.
    subscription.user_rules = [Rule(name='department', value='other dept')]
    subscription.rule_logic = 'all'
    result = is_valid_user_subscription_preference(user_sub, subscription)
    assert not result
def test_is_valid_user_subscription_preference_valid(subscription):
    """An active user matching the subscription rules yields a valid preference."""
    preference = subscription.datetime[0]
    user = User(email='a@yelp.com', meta_data={'department': 'dept'})
    user_sub = UserSubscriptionPreferences(preference=preference, subscription_id=subscription.id, user=user)
    subscription.user_rules = [Rule(name='department', value='dept')]
    subscription.rule_logic = 'all'
    result = is_valid_user_subscription_preference(user_sub, subscription)
    assert result
|
|
# vim: set expandtab ts=4 sw=4 filetype=python fileencoding=utf8:
import logging
import textwrap
import psycopg2.extras
log = logging.getLogger(__name__)
# Fixture data: fighter name -> list of (special move title, damage) pairs.
# (Dropped the redundant dict(...) wrapper around the literal and
# normalised the mixed quote styles.)
fighters_and_moves = {
    "Liu Kang": [
        ("Fire blast", 20),
        ("Flying kick", 30),
    ],
    "Scorpion": [
        ("Harpoon", 30),
        ("Teleport punch", 30),
    ],
    "Sub Zero": [
        ("Ice ball", 10),
        ("Foot slide", 20),
    ],
}
def create_tables(pgconn):
    """Create the fighters and special_moves tables (DDL only; caller commits)."""
    cursor = pgconn.cursor()
    cursor.execute(textwrap.dedent("""
        create table fighters
        (
            fighter_id serial primary key,
            title text not null unique
        )
        """))
    # special_moves rows reference their fighter; (fighter_id, title) is
    # unique so a fighter cannot have two moves with the same name.
    cursor.execute(textwrap.dedent("""
        create table special_moves
        (
            special_move_id serial primary key,
            fighter_id integer references fighters (fighter_id),
            title text not null,
            unique(fighter_id, title),
            damage integer not null
        )
        """))
def insert_data(pgconn):
    """Insert the module-level fighters_and_moves fixture into the two tables."""
    # (Removed a pointless `global fighters_and_moves` declaration: the
    # function only reads the module-level name, so `global` was a no-op.)
    cursor = pgconn.cursor()
    for fighter, special_moves in fighters_and_moves.items():
        # I discovered the "returning" keyword a few years ago and it
        # has vastly simplified a lot of my database code.
        cursor.execute(textwrap.dedent("""
            insert into fighters
            (title)
            values
            (%(fighter)s)
            returning fighter_id
            """), {
            'fighter': fighter,
        })
        # NOTE(review): attribute access on fetchone() assumes a namedtuple
        # cursor (e.g. NamedTupleCursor / registered composite) - confirm.
        fighter_id = cursor.fetchone().fighter_id
        log.info("Stored fighter {0} with fighter ID {1}.".format(
            fighter, fighter_id))
        for title, damage in special_moves:
            cursor.execute(textwrap.dedent("""
                insert into special_moves
                (fighter_id, title, damage)
                values
                (
                    %(fighter_id)s, %(title)s, %(damage)s
                )
                returning special_move_id
                """), {
                'fighter_id': fighter_id,
                'title': title,
                'damage': damage
            })
            special_move_id = cursor.fetchone().special_move_id
            log.info(
                "Stored special move {0} with special move "
                "ID {1}.".format(
                    title, special_move_id))
def do_ugly_report(pgconn):
    """Print one flat fighter/move/damage line per special move."""
    cursor = pgconn.cursor()
    cursor.execute(textwrap.dedent("""
        select f.title, sm.title as move_name, sm.damage
        from fighters f
        join special_moves sm
        on f.fighter_id = sm.fighter_id
        order by f.title, sm.title
        """))
    print("+" * 60)
    print("UGLY REPORT")
    print("+" * 60)
    print("")
    print("{0:20} {1:20} {2:20}".format(
        "Fighter",
        "Move",
        "Damage"))
    print("-"*20 + " " + "-"*20 + " " + "-"*18 + " ")
    for row in cursor:
        # the < symbol means "left-justify", which seems to be the
        # default for strings, but not for integers.
        print("{0:20} {1:20} {2:<20}".format(*row))
    print("")
def do_pretty_report_with_array_agg_1(pgconn):
    """
    Use array_agg to roll up the just the titles of the special moves.
    The titles will come back as a list of strings.
    """
    cursor = pgconn.cursor()
    cursor.execute(textwrap.dedent("""
        select f.title,
        array_agg(sm.title order by sm.title) as special_moves
        from fighters f
        join special_moves sm
        on f.fighter_id = sm.fighter_id
        group by f.fighter_id
        order by f.title
        """))
    print("+" * 60)
    print("PRETTY REPORT 1")
    print("+" * 60)
    print("")
    for row in cursor:
        print("{0}".format(row.title))
        # psycopg2 converts a text[] array to a Python list automatically.
        log.debug("In array_agg_1, type(row.special_moves) is {0}".format(
            type(row.special_moves)))
        for move in row.special_moves:
            print("    {0}".format(move))
        print("")
def do_pretty_report_with_array_agg_2(pgconn):
    """
    Try to use array_agg to roll up more than one columns from the
    special moves table.
    But beware! This doesn't work right! Because psycopg2 has no
    understanding of how to convert the thing made with the row(...)
    function, it just returns it as a string.
    """
    cursor = pgconn.cursor()
    cursor.execute(textwrap.dedent("""
        select f.title,
        array_agg(row(sm.title, sm.damage) order by sm.title) as special_moves
        from fighters f
        join special_moves sm
        on f.fighter_id = sm.fighter_id
        group by f.fighter_id
        order by f.title
        """))
    print("+" * 60)
    print("PRETTY REPORT 2")
    print("+" * 60)
    print("")
    for row in cursor:
        print("{0}".format(row.title))
        # Without a registered composite caster this is just a string.
        log.debug("In array_agg_2, type(row.special_moves) is {0}".format(
            type(row.special_moves)))
        print("    {0}".format(row.special_moves))
        print("")
def register_type(pgconn):
    """Have psycopg2 return special_moves composites as named tuples."""
    # Tell psycopg2 that when it gets an instance of a special moves
    # type, it should make a named tuple instance for the special moves
    # table.
    psycopg2.extras.register_composite('special_moves', pgconn)
def do_pretty_report_with_array_agg_3(pgconn):
    """
    Note this weird syntax in the SQL::
        row(
            sm.special_move_id,
            sm.fighter_id,
            sm.title,
            sm.damage)::special_moves
    That :: operator casts whatever is on the left to a different type.
    Every table in postgresql is also a user-defined type.
    In this case, I'm saying "treat these four values in this row as a
    special_moves type instance.
    Here are much simpler / less scary examples::
        >>> cursor.execute("select '2014-08-01' as x")
        >>> type(cursor.fetchone().x)
        <type 'str'>
        >>> cursor.execute("select '2014-08-01'::date as x")
        >>> type(cursor.fetchone().x)
        <type 'datetime.date'>
    """
    cursor = pgconn.cursor()
    cursor.execute(textwrap.dedent("""
        select f.title,
        array_agg(row(
            sm.special_move_id,
            sm.fighter_id,
            sm.title,
            sm.damage)::special_moves) as special_moves
        from fighters f
        join special_moves sm
        on f.fighter_id = sm.fighter_id
        group by f.fighter_id
        order by f.title
        """))
    print("+" * 60)
    print("PRETTY REPORT 3")
    print("+" * 60)
    print("")
    for row in cursor:
        print("{0}".format(row.title))
        # With register_type() done, each element is a named tuple.
        log.debug("In array_agg_3, type(row.special_moves) is {0}".format(
            type(row.special_moves)))
        for sm in row.special_moves:
            log.debug("type(sm) is {0}.".format(type(sm)))
            print("    {0:20} {1:<20}".format(sm.title, sm.damage))
        print("")
class Fighter(object):
    """Home-made domain object for a row of the fighters table."""
    def __init__(self, fighter_id, title):
        self.fighter_id = fighter_id
        self.title = title
    def __str__(self):
        # e.g. "Fighter: Scorpion (fighter ID: 2)"
        return "{0}: {1} (fighter ID: {2})".format(
            type(self).__name__, self.title, self.fighter_id)
class FighterFactory(psycopg2.extras.CompositeCaster):
    """Composite caster that builds a Fighter from a fighters-table row."""
    def make(self, values):
        # attnames holds the column names of the composite type in order.
        d = dict(zip(self.attnames, values))
        return Fighter(**d)
class SpecialMove(object):
    """Home-made domain object for a row of the special_moves table."""
    def __init__(self, special_move_id, fighter_id, title, damage):
        self.special_move_id = special_move_id
        self.fighter_id = fighter_id
        self.title = title
        self.damage = damage
    def __str__(self):
        # e.g. "SpecialMove: Harpoon (special move ID: 3)"
        return "{0}: {1} (special move ID: {2})".format(
            type(self).__name__, self.title, self.special_move_id)
class SpecialMoveFactory(psycopg2.extras.CompositeCaster):
    """Composite caster that builds a SpecialMove from a special_moves row."""
    def make(self, values):
        # attnames holds the column names of the composite type in order.
        d = dict(zip(self.attnames, values))
        return SpecialMove(**d)
def register_home_made_types(pgconn):
    """Register casters so both tables' composites become our own classes."""
    # Register our own home-made FighterFactory to instantiate instances
    # of our own Fighter class.
    psycopg2.extras.register_composite(
        'fighters',
        pgconn,
        factory=FighterFactory)
    # Same thing, but different tables.
    psycopg2.extras.register_composite(
        'special_moves',
        pgconn,
        factory=SpecialMoveFactory)
def cast_to_our_own_classes(pgconn):
    """
    In this one, I use this syntax::
        (f.*)::fighters
    instead of something like::
        row(f.fighter_id,
            f.title)::fighters
    They do the exact same thing.
    """
    cursor = pgconn.cursor()
    cursor.execute(textwrap.dedent("""
        select (f.*)::fighters as fighter
        from fighters f
        order by f.title
        """))
    log.debug("cast to our own Fighter class")
    for row in cursor:
        # Each row.fighter is a Fighter instance (via FighterFactory).
        log.debug(row.fighter)
def do_pretty_report_with_array_agg_4(pgconn):
    """
    Get a single row back for each fighter, with two columns in each
    row.
    The first column should hold an instance of our homemade Fighter
    class.
    The second column should hold an array of instances of our
    customized
    SpecialMove class.
    """
    cursor = pgconn.cursor()
    cursor.execute(textwrap.dedent("""
        select (f.*)::fighters as fighter,
        array_agg((sm.*)::special_moves order by sm.title) as special_moves
        from fighters f
        join special_moves sm
        on f.fighter_id = sm.fighter_id
        group by f.fighter_id
        order by f.title
        """))
    print("+" * 60)
    print("PRETTY REPORT 4")
    print("+" * 60)
    print("")
    for row in cursor:
        log.debug("type(row.fighter): {0}".format(type(row.fighter)))
        # This uses the Fighter.__str__ method
        print(row.fighter)
        log.debug("type(row.special_moves): {0}".format(type(row.special_moves)))
        for sm in row.special_moves:
            log.debug("type(sm): {0}".format(type(sm)))
            print(sm)
|
|
from __future__ import absolute_import
import os
import shlex
import signal
import subprocess
import time
from collections import namedtuple
from operator import attrgetter
from textwrap import dedent
from docker import errors
from .. import mock
from compose.cli.command import get_project
from compose.cli.docker_client import docker_client
from compose.container import Container
from tests.integration.testcases import DockerClientTestCase
from tests.integration.testcases import pull_busybox
# Decoded stdout/stderr text of a finished docker-compose invocation.
ProcessResult = namedtuple('ProcessResult', ['stdout', 'stderr'])
# Markers scraped from `docker-compose build` output.
BUILD_CACHE_TEXT = 'Using cache'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
def start_process(base_dir, options):
    """Spawn `docker-compose <options>` in *base_dir*, capturing stdout and stderr."""
    command = ['docker-compose'] + options
    proc = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=base_dir)
    print("Running process: %s" % proc.pid)
    return proc
def wait_on_process(proc, returncode=0):
    """Wait for *proc* to exit, assert its exit code, and return its decoded output."""
    stdout, stderr = proc.communicate()
    err_text = stderr.decode('utf-8')
    if proc.returncode != returncode:
        # Surface the process's stderr before the assertion fails the test.
        print(err_text)
    assert proc.returncode == returncode
    return ProcessResult(stdout.decode('utf-8'), err_text)
def wait_on_condition(condition, delay=0.1, timeout=20):
    """Poll *condition* every *delay* seconds until truthy; fail after *timeout* seconds."""
    deadline = time.time() + timeout
    while not condition():
        if time.time() > deadline:
            # str(condition) is expected to describe what was being awaited.
            raise AssertionError("Timeout: %s" % condition)
        time.sleep(delay)
def kill_service(service):
    """Kill every container currently belonging to *service*."""
    for target in service.containers():
        target.kill()
class ContainerCountCondition(object):
    """Callable condition: true when *project* has exactly *expected* containers."""
    def __init__(self, project, expected):
        self.project = project
        self.expected = expected
    def __call__(self):
        return len(self.project.containers()) == self.expected
    def __str__(self):
        # Shown in wait_on_condition's timeout message.
        # (fixed "counter count" typo -> "container count")
        return "waiting for container count == %s" % self.expected
class ContainerStateCondition(object):
    """Callable condition: true when the named container's State.Running equals *running*."""
    def __init__(self, client, name, running):
        self.client = client
        self.name = name
        self.running = running
    # State.Running == true
    def __call__(self):
        try:
            container = self.client.inspect_container(self.name)
            return container['State']['Running'] == self.running
        except errors.APIError:
            # Container may not exist yet; keep polling.
            return False
    def __str__(self):
        # BUG FIX: previously referenced the nonexistent `self.expected`,
        # which raised AttributeError whenever the timeout message was built.
        return "waiting for container %s to have state running == %s" % (
            self.name, self.running)
class CLITestCase(DockerClientTestCase):
    def setUp(self):
        """Point at the default simple-composefile fixture; tests may override base_dir."""
        super(CLITestCase, self).setUp()
        self.base_dir = 'tests/fixtures/simple-composefile'
    def tearDown(self):
        """Kill and remove any containers the test's project created."""
        # base_dir is set to None by tests that must not build a project.
        if self.base_dir:
            self.project.kill()
            self.project.remove_stopped()
            # One-off (`run`) containers are not covered by remove_stopped.
            for container in self.project.containers(stopped=True, one_off=True):
                container.remove(force=True)
        super(CLITestCase, self).tearDown()
    @property
    def project(self):
        """Lazily build and cache the Project for self.base_dir."""
        # Hack: allow project to be overridden
        if not hasattr(self, '_project'):
            self._project = get_project(self.base_dir)
        return self._project
    def dispatch(self, options, project_options=None, returncode=0):
        """Run docker-compose with the given args and assert its exit code."""
        project_options = project_options or []
        proc = start_process(self.base_dir, project_options + options)
        return wait_on_process(proc, returncode=returncode)
    def test_help(self):
        """`help up` prints usage to stderr and exits non-zero."""
        self.base_dir = 'tests/fixtures/no-composefile'
        result = self.dispatch(['help', 'up'], returncode=1)
        assert 'Usage: up [options] [SERVICE...]' in result.stderr
        # Prevent tearDown from trying to create a project
        self.base_dir = None
    def test_config_list_services(self):
        """`config --services` lists each service name on its own line."""
        result = self.dispatch(['config', '--services'])
        assert set(result.stdout.rstrip().split('\n')) == {'simple', 'another'}
    def test_config_quiet_with_error(self):
        """`config -q` still reports validation errors on stderr."""
        self.base_dir = None
        result = self.dispatch([
            '-f', 'tests/fixtures/invalid-composefile/invalid.yml',
            'config', '-q'
        ], returncode=1)
        assert "'notaservice' doesn't have any configuration" in result.stderr
    def test_config_quiet(self):
        """`config -q` on a valid file produces no stdout."""
        assert self.dispatch(['config', '-q']).stdout == ''
    def test_config_default(self):
        """`config` prints the resolved YAML for every service."""
        result = self.dispatch(['config'])
        assert dedent("""
            simple:
              command: top
              image: busybox:latest
        """).lstrip() in result.stdout
        assert dedent("""
            another:
              command: top
              image: busybox:latest
        """).lstrip() in result.stdout
    def test_ps(self):
        """`ps` lists an existing (created) container."""
        self.project.get_service('simple').create_container()
        result = self.dispatch(['ps'])
        assert 'simplecomposefile_simple_1' in result.stdout
    def test_ps_default_composefile(self):
        """`ps` only shows services from the default compose file."""
        self.base_dir = 'tests/fixtures/multiple-composefiles'
        self.dispatch(['up', '-d'])
        result = self.dispatch(['ps'])
        self.assertIn('multiplecomposefiles_simple_1', result.stdout)
        self.assertIn('multiplecomposefiles_another_1', result.stdout)
        # yetanother lives only in compose2.yml, which was not loaded.
        self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout)
    def test_ps_alternate_composefile(self):
        """`-f compose2.yml ps` shows only that file's services."""
        config_path = os.path.abspath(
            'tests/fixtures/multiple-composefiles/compose2.yml')
        # Pre-build the project from the alternate file so tearDown cleans it up.
        self._project = get_project(self.base_dir, [config_path])
        self.base_dir = 'tests/fixtures/multiple-composefiles'
        self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
        result = self.dispatch(['-f', 'compose2.yml', 'ps'])
        self.assertNotIn('multiplecomposefiles_simple_1', result.stdout)
        self.assertNotIn('multiplecomposefiles_another_1', result.stdout)
        self.assertIn('multiplecomposefiles_yetanother_1', result.stdout)
    def test_pull(self):
        """`pull` reports one "Pulling ..." line per service on stderr."""
        result = self.dispatch(['pull'])
        # sorted()[1:] drops the leading blank line from the split.
        assert sorted(result.stderr.split('\n'))[1:] == [
            'Pulling another (busybox:latest)...',
            'Pulling simple (busybox:latest)...',
        ]
    def test_pull_with_digest(self):
        """`pull` supports image references pinned by sha256 digest."""
        result = self.dispatch(['-f', 'digest.yml', 'pull'])
        assert 'Pulling simple (busybox:latest)...' in result.stderr
        assert ('Pulling digest (busybox@'
                'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
                '04ee8502d)...') in result.stderr
    def test_pull_with_ignore_pull_failures(self):
        """`pull --ignore-pull-failures` continues past a missing image."""
        result = self.dispatch([
            '-f', 'ignore-pull-failures.yml',
            'pull', '--ignore-pull-failures'])
        assert 'Pulling simple (busybox:latest)...' in result.stderr
        assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
        assert 'Error: image library/nonexisting-image:latest not found' in result.stderr
    def test_build_plain(self):
        """A repeat `build` uses the layer cache and does not pull."""
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'])
        result = self.dispatch(['build', 'simple'])
        assert BUILD_CACHE_TEXT in result.stdout
        assert BUILD_PULL_TEXT not in result.stdout
    def test_build_no_cache(self):
        """`build --no-cache` skips the layer cache and does not pull."""
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'])
        result = self.dispatch(['build', '--no-cache', 'simple'])
        assert BUILD_CACHE_TEXT not in result.stdout
        assert BUILD_PULL_TEXT not in result.stdout
    def test_build_pull(self):
        """`build --pull` re-pulls the base image but still uses the cache."""
        # Make sure we have the latest busybox already
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'], None)
        result = self.dispatch(['build', '--pull', 'simple'])
        assert BUILD_CACHE_TEXT in result.stdout
        assert BUILD_PULL_TEXT in result.stdout
    def test_build_no_cache_pull(self):
        """`build --no-cache --pull` re-pulls and ignores the layer cache."""
        # Make sure we have the latest busybox already
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'])
        result = self.dispatch(['build', '--no-cache', '--pull', 'simple'])
        assert BUILD_CACHE_TEXT not in result.stdout
        assert BUILD_PULL_TEXT in result.stdout
    def test_build_failed(self):
        """A failed build leaves its intermediate container behind by default."""
        self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
        self.dispatch(['build', 'simple'], returncode=1)
        # The failing Dockerfile labels its containers so we can find them.
        labels = ["com.docker.compose.test_failing_image=true"]
        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(
                all=True,
                filters={"label": labels})
        ]
        assert len(containers) == 1
    def test_build_failed_forcerm(self):
        """`build --force-rm` removes intermediate containers even on failure."""
        self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
        self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
        labels = ["com.docker.compose.test_failing_image=true"]
        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(
                all=True,
                filters={"label": labels})
        ]
        assert not containers
    def test_create(self):
        """`create` makes stopped containers for every service without starting them."""
        self.dispatch(['create'])
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        self.assertEqual(len(service.containers()), 0)
        self.assertEqual(len(another.containers()), 0)
        self.assertEqual(len(service.containers(stopped=True)), 1)
        self.assertEqual(len(another.containers(stopped=True)), 1)
def test_create_with_force_recreate(self):
    """`create --force-recreate` replaces existing containers with new ones."""
    self.dispatch(['create'], None)
    service = self.project.get_service('simple')
    self.assertEqual(len(service.containers()), 0)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    old_ids = [c.id for c in service.containers(stopped=True)]
    self.dispatch(['create', '--force-recreate'], None)
    self.assertEqual(len(service.containers()), 0)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    new_ids = [c.id for c in service.containers(stopped=True)]
    # Fresh container ids prove the container was actually recreated.
    self.assertNotEqual(old_ids, new_ids)
def test_create_with_no_recreate(self):
    """`create --no-recreate` leaves an existing container untouched."""
    self.dispatch(['create'], None)
    service = self.project.get_service('simple')
    self.assertEqual(len(service.containers()), 0)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    old_ids = [c.id for c in service.containers(stopped=True)]
    self.dispatch(['create', '--no-recreate'], None)
    self.assertEqual(len(service.containers()), 0)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    new_ids = [c.id for c in service.containers(stopped=True)]
    # Same ids: the original container was reused, not rebuilt.
    self.assertEqual(old_ids, new_ids)
def test_create_with_force_recreate_and_no_recreate(self):
    """The mutually exclusive flags --force-recreate/--no-recreate must error out."""
    self.dispatch(
        ['create', '--force-recreate', '--no-recreate'],
        returncode=1)
def test_up_detached(self):
    """`up -d` starts every service detached from stdio."""
    self.dispatch(['up', '-d'])
    service = self.project.get_service('simple')
    another = self.project.get_service('another')
    self.assertEqual(len(service.containers()), 1)
    self.assertEqual(len(another.containers()), 1)
    # Ensure containers don't have stdin and stdout connected in -d mode
    container, = service.containers()
    self.assertFalse(container.get('Config.AttachStderr'))
    self.assertFalse(container.get('Config.AttachStdout'))
    self.assertFalse(container.get('Config.AttachStdin'))
def test_up_attached(self):
    """Attached `up` streams each service's output, prefixed by container name."""
    self.base_dir = 'tests/fixtures/echo-services'
    result = self.dispatch(['up', '--no-color'])
    assert 'simple_1 | simple' in result.stdout
    assert 'another_1 | another' in result.stdout
def test_up_without_networking(self):
    """Without --x-networking, compose uses legacy links and no named network."""
    self.require_api_version('1.21')
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['up', '-d'], None)
    client = docker_client(version='1.21')
    networks = client.networks(names=[self.project.name])
    # No project network should have been created.
    self.assertEqual(len(networks), 0)
    for service in self.project.get_services():
        containers = service.containers()
        self.assertEqual(len(containers), 1)
        self.assertNotEqual(containers[0].get('Config.Hostname'), service.name)
    # Legacy container links are still in place.
    web_container = self.project.get_service('web').containers()[0]
    self.assertTrue(web_container.get('HostConfig.Links'))
def test_up_with_networking(self):
    """With --x-networking, a bridge network replaces legacy container links."""
    self.require_api_version('1.21')
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['--x-networking', 'up', '-d'], None)
    client = docker_client(version='1.21')
    services = self.project.get_services()
    networks = client.networks(names=[self.project.name])
    for n in networks:
        # Clean the network up even if assertions below fail.
        self.addCleanup(client.remove_network, n['Id'])
    self.assertEqual(len(networks), 1)
    self.assertEqual(networks[0]['Driver'], 'bridge')
    network = client.inspect_network(networks[0]['Id'])
    # Every service container must be attached to the project network.
    self.assertEqual(len(network['Containers']), len(services))
    for service in services:
        containers = service.containers()
        self.assertEqual(len(containers), 1)
        self.assertIn(containers[0].id, network['Containers'])
    web_container = self.project.get_service('web').containers()[0]
    # Networking mode: no legacy links expected.
    self.assertFalse(web_container.get('HostConfig.Links'))
def test_up_with_links(self):
    """`up web` also starts web's linked dependency (db) but not unrelated services."""
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['up', '-d', 'web'], None)
    web = self.project.get_service('web')
    db = self.project.get_service('db')
    console = self.project.get_service('console')
    self.assertEqual(len(web.containers()), 1)
    self.assertEqual(len(db.containers()), 1)
    self.assertEqual(len(console.containers()), 0)
def test_up_with_no_deps(self):
    """`up --no-deps web` starts only web, skipping its linked dependencies."""
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['up', '-d', '--no-deps', 'web'], None)
    web = self.project.get_service('web')
    db = self.project.get_service('db')
    console = self.project.get_service('console')
    self.assertEqual(len(web.containers()), 1)
    self.assertEqual(len(db.containers()), 0)
    self.assertEqual(len(console.containers()), 0)
def test_up_with_force_recreate(self):
    """`up --force-recreate` replaces running containers with fresh ones."""
    self.dispatch(['up', '-d'], None)
    service = self.project.get_service('simple')
    self.assertEqual(len(service.containers()), 1)
    old_ids = [c.id for c in service.containers()]
    self.dispatch(['up', '-d', '--force-recreate'], None)
    self.assertEqual(len(service.containers()), 1)
    new_ids = [c.id for c in service.containers()]
    # New ids prove a recreation happened.
    self.assertNotEqual(old_ids, new_ids)
def test_up_with_no_recreate(self):
    """`up --no-recreate` keeps the already-running container."""
    self.dispatch(['up', '-d'], None)
    service = self.project.get_service('simple')
    self.assertEqual(len(service.containers()), 1)
    old_ids = [c.id for c in service.containers()]
    self.dispatch(['up', '-d', '--no-recreate'], None)
    self.assertEqual(len(service.containers()), 1)
    new_ids = [c.id for c in service.containers()]
    # Same ids: container reused.
    self.assertEqual(old_ids, new_ids)
def test_up_with_force_recreate_and_no_recreate(self):
    """Passing both recreate flags to `up` is rejected with exit code 1."""
    self.dispatch(
        ['up', '-d', '--force-recreate', '--no-recreate'],
        returncode=1)
def test_up_with_timeout(self):
    """`up -d -t 1` starts services detached with a shutdown timeout."""
    self.dispatch(['up', '-d', '-t', '1'])
    service = self.project.get_service('simple')
    another = self.project.get_service('another')
    self.assertEqual(len(service.containers()), 1)
    self.assertEqual(len(another.containers()), 1)
    # Ensure containers don't have stdin and stdout connected in -d mode
    config = service.containers()[0].inspect()['Config']
    self.assertFalse(config['AttachStderr'])
    self.assertFalse(config['AttachStdout'])
    self.assertFalse(config['AttachStdin'])
def test_up_handles_sigint(self):
    """SIGINT (Ctrl-C) on an attached `up` stops all project containers."""
    proc = start_process(self.base_dir, ['up', '-t', '2'])
    wait_on_condition(ContainerCountCondition(self.project, 2))
    os.kill(proc.pid, signal.SIGINT)
    wait_on_condition(ContainerCountCondition(self.project, 0))
def test_up_handles_sigterm(self):
    """SIGTERM on an attached `up` stops all project containers."""
    proc = start_process(self.base_dir, ['up', '-t', '2'])
    wait_on_condition(ContainerCountCondition(self.project, 2))
    os.kill(proc.pid, signal.SIGTERM)
    wait_on_condition(ContainerCountCondition(self.project, 0))
def test_run_service_without_links(self):
    """`run` on an unlinked service attaches stdio and starts no other services."""
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['run', 'console', '/bin/true'])
    self.assertEqual(len(self.project.containers()), 0)
    # Ensure stdin/out was open
    container = self.project.containers(stopped=True, one_off=True)[0]
    config = container.inspect()['Config']
    self.assertTrue(config['AttachStderr'])
    self.assertTrue(config['AttachStdout'])
    self.assertTrue(config['AttachStdin'])
def test_run_service_with_links(self):
    """`run web` starts web's linked dependency (db) but not unrelated services."""
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['run', 'web', '/bin/true'], None)
    db = self.project.get_service('db')
    console = self.project.get_service('console')
    self.assertEqual(len(db.containers()), 1)
    self.assertEqual(len(console.containers()), 0)
def test_run_with_no_deps(self):
    """`run --no-deps` must not start linked dependencies."""
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
    db = self.project.get_service('db')
    self.assertEqual(len(db.containers()), 0)
def test_run_does_not_recreate_linked_containers(self):
    """`run` reuses already-running linked containers instead of recreating them."""
    self.base_dir = 'tests/fixtures/links-composefile'
    self.dispatch(['up', '-d', 'db'])
    db = self.project.get_service('db')
    self.assertEqual(len(db.containers()), 1)
    old_ids = [c.id for c in db.containers()]
    self.dispatch(['run', 'web', '/bin/true'], None)
    self.assertEqual(len(db.containers()), 1)
    new_ids = [c.id for c in db.containers()]
    # db kept its original container id.
    self.assertEqual(old_ids, new_ids)
def test_run_without_command(self):
    """`run` without a command falls back to the image/service default command."""
    self.base_dir = 'tests/fixtures/commands-composefile'
    self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
    # 'implicit' inherits the image's CMD.
    self.dispatch(['run', 'implicit'])
    service = self.project.get_service('implicit')
    containers = service.containers(stopped=True, one_off=True)
    self.assertEqual(
        [c.human_readable_command for c in containers],
        [u'/bin/sh -c echo "success"'],
    )
    # 'explicit' declares its own command in the compose file.
    self.dispatch(['run', 'explicit'])
    service = self.project.get_service('explicit')
    containers = service.containers(stopped=True, one_off=True)
    self.assertEqual(
        [c.human_readable_command for c in containers],
        [u'/bin/true'],
    )
def test_run_service_with_entrypoint_overridden(self):
    """`run --entrypoint` replaces the Dockerfile ENTRYPOINT for the one-off container."""
    self.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
    name = 'service'
    self.dispatch(['run', '--entrypoint', '/bin/echo', name, 'helloworld'])
    service = self.project.get_service(name)
    container = service.containers(stopped=True, one_off=True)[0]
    self.assertEqual(
        shlex.split(container.human_readable_command),
        [u'/bin/echo', u'helloworld'],
    )
def test_run_service_with_user_overridden(self):
    """`run --user=<name>` sets Config.User on the one-off container."""
    self.base_dir = 'tests/fixtures/user-composefile'
    name = 'service'
    user = 'sshd'
    # returncode=1: the container command itself fails, but the user is still set.
    self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
    service = self.project.get_service(name)
    container = service.containers(stopped=True, one_off=True)[0]
    self.assertEqual(user, container.get('Config.User'))
def test_run_service_with_user_overridden_short_form(self):
    """Short form `run -u <name>` behaves like --user=<name>."""
    self.base_dir = 'tests/fixtures/user-composefile'
    name = 'service'
    user = 'sshd'
    self.dispatch(['run', '-u', user, name], returncode=1)
    service = self.project.get_service(name)
    container = service.containers(stopped=True, one_off=True)[0]
    self.assertEqual(user, container.get('Config.User'))
def test_run_service_with_environement_overridden(self):
    """`run -e` overrides, extends and preserves environment variables.

    NOTE(review): the method name misspells 'environment'; kept as-is since
    renaming would change the test id.
    """
    name = 'service'
    self.base_dir = 'tests/fixtures/environment-composefile'
    self.dispatch([
        'run', '-e', 'foo=notbar',
        '-e', 'allo=moto=bobo',
        '-e', 'alpha=beta',
        name,
        '/bin/true',
    ])
    service = self.project.get_service(name)
    container = service.containers(stopped=True, one_off=True)[0]
    # env overridden from the command line
    self.assertEqual('notbar', container.environment['foo'])
    # environment from the yaml file is kept
    self.assertEqual('world', container.environment['hello'])
    # option added on the command line only
    self.assertEqual('beta', container.environment['alpha'])
    # a value containing '=' must not crash the parser
    self.assertEqual('moto=bobo', container.environment['allo'])
def test_run_service_without_map_ports(self):
    """By default `run` does not publish the service's ports on the host."""
    # create one off container
    self.base_dir = 'tests/fixtures/ports-composefile'
    self.dispatch(['run', '-d', 'simple'])
    container = self.project.get_service('simple').containers(one_off=True)[0]
    # get port information
    port_random = container.get_local_port(3000)
    port_assigned = container.get_local_port(3001)
    # close all one off containers we just created
    container.stop()
    # check the ports: none should be mapped
    self.assertEqual(port_random, None)
    self.assertEqual(port_assigned, None)
def test_run_service_with_map_ports(self):
    """`run --service-ports` publishes the ports declared in the compose file."""
    # create one off container
    self.base_dir = 'tests/fixtures/ports-composefile'
    self.dispatch(['run', '-d', '--service-ports', 'simple'])
    container = self.project.get_service('simple').containers(one_off=True)[0]
    # get port information
    port_random = container.get_local_port(3000)
    port_assigned = container.get_local_port(3001)
    port_range = container.get_local_port(3002), container.get_local_port(3003)
    # close all one off containers we just created
    container.stop()
    # check the ports: random mapping exists, fixed mappings match the fixture
    self.assertNotEqual(port_random, None)
    self.assertIn("0.0.0.0", port_random)
    self.assertEqual(port_assigned, "0.0.0.0:49152")
    self.assertEqual(port_range[0], "0.0.0.0:49153")
    self.assertEqual(port_range[1], "0.0.0.0:49154")
def test_run_service_with_explicitly_maped_ports(self):
    """`run -p`/`--publish` maps host ports exactly as requested."""
    # create one off container
    self.base_dir = 'tests/fixtures/ports-composefile'
    self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'])
    container = self.project.get_service('simple').containers(one_off=True)[0]
    # get port information
    port_short = container.get_local_port(3000)
    port_full = container.get_local_port(3001)
    # close all one off containers we just created
    container.stop()
    # check the ports
    self.assertEqual(port_short, "0.0.0.0:30000")
    self.assertEqual(port_full, "0.0.0.0:30001")
def test_run_service_with_explicitly_maped_ip_ports(self):
    """`run -p ip:host:container` binds the published port to the given IP."""
    # create one off container
    self.base_dir = 'tests/fixtures/ports-composefile'
    self.dispatch(['run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple'], None)
    container = self.project.get_service('simple').containers(one_off=True)[0]
    # get port information
    port_short = container.get_local_port(3000)
    port_full = container.get_local_port(3001)
    # close all one off containers we just created
    container.stop()
    # check the ports
    self.assertEqual(port_short, "127.0.0.1:30000")
    self.assertEqual(port_full, "127.0.0.1:30001")
def test_run_with_custom_name(self):
    """`run --name` names the one-off container exactly as given."""
    self.base_dir = 'tests/fixtures/environment-composefile'
    name = 'the-container-name'
    self.dispatch(['run', '--name', name, 'service', '/bin/true'])
    service = self.project.get_service('service')
    container, = service.containers(stopped=True, one_off=True)
    self.assertEqual(container.name, name)
def test_run_with_networking(self):
    """`--x-networking run` creates the project network for one-off containers."""
    self.require_api_version('1.21')
    client = docker_client(version='1.21')
    self.base_dir = 'tests/fixtures/simple-dockerfile'
    self.dispatch(['--x-networking', 'run', 'simple', 'true'], None)
    service = self.project.get_service('simple')
    container, = service.containers(stopped=True, one_off=True)
    networks = client.networks(names=[self.project.name])
    for n in networks:
        # Clean the network up even if assertions fail.
        self.addCleanup(client.remove_network, n['Id'])
    self.assertEqual(len(networks), 1)
    self.assertEqual(container.human_readable_command, u'true')
def test_run_handles_sigint(self):
    """SIGINT on an attached `run` stops the one-off container."""
    proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=True))
    os.kill(proc.pid, signal.SIGINT)
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=False))
def test_run_handles_sigterm(self):
    """SIGTERM on an attached `run` stops the one-off container."""
    proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=True))
    os.kill(proc.pid, signal.SIGTERM)
    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simplecomposefile_simple_run_1',
        running=False))
def test_rm(self):
    """Both `rm --force` and `rm -f` delete stopped containers without prompting."""
    service = self.project.get_service('simple')
    service.create_container()
    kill_service(service)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    self.dispatch(['rm', '--force'], None)
    self.assertEqual(len(service.containers(stopped=True)), 0)
    # Repeat with the short flag spelling.
    service = self.project.get_service('simple')
    service.create_container()
    kill_service(service)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    self.dispatch(['rm', '-f'], None)
    self.assertEqual(len(service.containers(stopped=True)), 0)
def test_stop(self):
    """`stop -t 1` halts running containers but keeps them around."""
    self.dispatch(['up', '-d'], None)
    service = self.project.get_service('simple')
    self.assertEqual(len(service.containers()), 1)
    self.assertTrue(service.containers()[0].is_running)
    self.dispatch(['stop', '-t', '1'], None)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_start_no_containers(self):
    """`start` with nothing to start fails and explains why on stderr."""
    result = self.dispatch(['start'], returncode=1)
    assert 'No containers to start' in result.stderr
def test_pause_unpause(self):
    """`pause`/`unpause` toggle the paused state of running containers."""
    self.dispatch(['up', '-d'], None)
    service = self.project.get_service('simple')
    self.assertFalse(service.containers()[0].is_paused)
    self.dispatch(['pause'], None)
    self.assertTrue(service.containers()[0].is_paused)
    self.dispatch(['unpause'], None)
    self.assertFalse(service.containers()[0].is_paused)
def test_pause_no_containers(self):
    """`pause` with nothing running fails and explains why on stderr."""
    result = self.dispatch(['pause'], returncode=1)
    assert 'No containers to pause' in result.stderr
def test_unpause_no_containers(self):
    """`unpause` with nothing paused fails and explains why on stderr."""
    result = self.dispatch(['unpause'], returncode=1)
    assert 'No containers to unpause' in result.stderr
def test_logs_invalid_service_name(self):
    """`logs` on an unknown service name exits with status 1."""
    self.dispatch(['logs', 'madeupname'], returncode=1)
def test_kill(self):
    """`kill` terminates running containers but keeps them around (stopped)."""
    self.dispatch(['up', '-d'], None)
    service = self.project.get_service('simple')
    self.assertEqual(len(service.containers()), 1)
    self.assertTrue(service.containers()[0].is_running)
    self.dispatch(['kill'], None)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_kill_signal_sigstop(self):
    """`kill -s SIGSTOP` pauses the process without terminating the container."""
    self.dispatch(['up', '-d'], None)
    service = self.project.get_service('simple')
    self.assertEqual(len(service.containers()), 1)
    self.assertTrue(service.containers()[0].is_running)
    self.dispatch(['kill', '-s', 'SIGSTOP'], None)
    self.assertEqual(len(service.containers()), 1)
    # The container is still running. It has only been paused
    self.assertTrue(service.containers()[0].is_running)
def test_kill_stopped_service(self):
    """`kill -s SIGKILL` terminates even a SIGSTOP-suspended container."""
    self.dispatch(['up', '-d'], None)
    service = self.project.get_service('simple')
    self.dispatch(['kill', '-s', 'SIGSTOP'], None)
    self.assertTrue(service.containers()[0].is_running)
    self.dispatch(['kill', '-s', 'SIGKILL'], None)
    self.assertEqual(len(service.containers(stopped=True)), 1)
    self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_restart(self):
    """`restart` stops and starts a running container (StartedAt changes)."""
    service = self.project.get_service('simple')
    container = service.create_container()
    container.start()
    started_at = container.dictionary['State']['StartedAt']
    self.dispatch(['restart', '-t', '1'], None)
    container.inspect()
    # FinishedAt is no longer the zero value: the container did stop once.
    self.assertNotEqual(
        container.dictionary['State']['FinishedAt'],
        '0001-01-01T00:00:00Z',
    )
    # And it was started again afterwards.
    self.assertNotEqual(
        container.dictionary['State']['StartedAt'],
        started_at,
    )
def test_restart_stopped_container(self):
    """`restart` also brings a stopped container back up."""
    service = self.project.get_service('simple')
    container = service.create_container()
    container.start()
    container.kill()
    self.assertEqual(len(service.containers(stopped=True)), 1)
    self.dispatch(['restart', '-t', '1'], None)
    self.assertEqual(len(service.containers(stopped=False)), 1)
def test_restart_no_containers(self):
    """`restart` with nothing to restart fails and explains why on stderr."""
    result = self.dispatch(['restart'], returncode=1)
    assert 'No containers to restart' in result.stderr
def test_scale(self):
    """`scale` grows and shrinks the number of containers per service."""
    project = self.project
    self.dispatch(['scale', 'simple=1'])
    self.assertEqual(len(project.get_service('simple').containers()), 1)
    # Scale up both services at once.
    self.dispatch(['scale', 'simple=3', 'another=2'])
    self.assertEqual(len(project.get_service('simple').containers()), 3)
    self.assertEqual(len(project.get_service('another').containers()), 2)
    # Scale back down.
    self.dispatch(['scale', 'simple=1', 'another=1'])
    self.assertEqual(len(project.get_service('simple').containers()), 1)
    self.assertEqual(len(project.get_service('another').containers()), 1)
    # Scaling to the current size is a no-op.
    self.dispatch(['scale', 'simple=1', 'another=1'])
    self.assertEqual(len(project.get_service('simple').containers()), 1)
    self.assertEqual(len(project.get_service('another').containers()), 1)
    # Scaling to zero removes all containers.
    self.dispatch(['scale', 'simple=0', 'another=0'])
    self.assertEqual(len(project.get_service('simple').containers()), 0)
    self.assertEqual(len(project.get_service('another').containers()), 0)
def test_port(self):
    """`port` prints the host mapping of a container port."""
    self.base_dir = 'tests/fixtures/ports-composefile'
    self.dispatch(['up', '-d'], None)
    container = self.project.get_service('simple').get_container()

    def get_port(number):
        # Helper: run `port simple <number>` and return the trimmed stdout.
        result = self.dispatch(['port', 'simple', str(number)])
        return result.stdout.rstrip()

    self.assertEqual(get_port(3000), container.get_local_port(3000))
    self.assertEqual(get_port(3001), "0.0.0.0:49152")
    self.assertEqual(get_port(3002), "0.0.0.0:49153")
def test_port_with_scale(self):
    """`port --index` selects which replica's mapping is reported."""
    self.base_dir = 'tests/fixtures/ports-composefile-scale'
    self.dispatch(['scale', 'simple=2'], None)
    # Sort by name so index 1 corresponds to containers[0].
    containers = sorted(
        self.project.containers(service_names=['simple']),
        key=attrgetter('name'))

    def get_port(number, index=None):
        # Helper: run `port [--index=N] simple <number>`, return trimmed stdout.
        if index is None:
            result = self.dispatch(['port', 'simple', str(number)])
        else:
            result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
        return result.stdout.rstrip()

    self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
    self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
    self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
    # An unmapped port prints nothing.
    self.assertEqual(get_port(3002), "")
def test_env_file_relative_to_compose_file(self):
    """env_file paths resolve relative to the compose file, not the cwd."""
    config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
    self.dispatch(['-f', config_path, 'up', '-d'], None)
    self._project = get_project(self.base_dir, [config_path])
    containers = self.project.containers(stopped=True)
    self.assertEqual(len(containers), 1)
    self.assertIn("FOO=1", containers[0].get('Config.Env'))
@mock.patch.dict(os.environ)  # restore os.environ after the test
def test_home_and_env_var_in_volume_path(self):
    """$HOME and env vars are interpolated inside volume host paths."""
    os.environ['VOLUME_NAME'] = 'my-volume'
    os.environ['HOME'] = '/tmp/home-dir'
    self.base_dir = 'tests/fixtures/volume-path-interpolation'
    self.dispatch(['up', '-d'], None)
    container = self.project.containers(stopped=True)[0]
    actual_host_path = container.get('Volumes')['/container-path']
    components = actual_host_path.split('/')
    # Host path ends with .../home-dir/my-volume after interpolation.
    assert components[-2:] == ['home-dir', 'my-volume']
def test_up_with_default_override_file(self):
    """docker-compose.override.yml is applied automatically on `up`."""
    self.base_dir = 'tests/fixtures/override-files'
    self.dispatch(['up', '-d'], None)
    containers = self.project.containers()
    self.assertEqual(len(containers), 2)
    web, db = containers
    # Both services run 'top' as declared in the override file.
    self.assertEqual(web.human_readable_command, 'top')
    self.assertEqual(db.human_readable_command, 'top')
def test_up_with_multiple_files(self):
    """Multiple -f files are merged in order before `up`."""
    self.base_dir = 'tests/fixtures/override-files'
    config_paths = [
        'docker-compose.yml',
        'docker-compose.override.yml',
        'extra.yml',
    ]
    self._project = get_project(self.base_dir, config_paths)
    self.dispatch(
        [
            '-f', config_paths[0],
            '-f', config_paths[1],
            '-f', config_paths[2],
            'up', '-d',
        ],
        None)
    containers = self.project.containers()
    self.assertEqual(len(containers), 3)
    web, other, db = containers
    self.assertEqual(web.human_readable_command, 'top')
    # extra.yml adds the 'other' service and links web to it.
    self.assertTrue({'db', 'other'} <= set(web.links()))
    self.assertEqual(db.human_readable_command, 'top')
    self.assertEqual(other.human_readable_command, 'top')
def test_up_with_extends(self):
    """Services using `extends` inherit links and merge environment variables."""
    self.base_dir = 'tests/fixtures/extends'
    self.dispatch(['up', '-d'], None)
    self.assertEqual(
        set([s.name for s in self.project.services]),
        set(['mydb', 'myweb']),
    )
    # Sort by name so we get [db, web]
    containers = sorted(
        self.project.containers(stopped=True),
        key=lambda c: c.name,
    )
    self.assertEqual(len(containers), 2)
    web = containers[1]
    self.assertEqual(set(web.links()), set(['db', 'mydb_1', 'extends_mydb_1']))
    # BAZ=2 comes from the extending file overriding the base.
    expected_env = set([
        "FOO=1",
        "BAR=2",
        "BAZ=2",
    ])
    self.assertTrue(expected_env <= set(web.get('Config.Env')))
|
|
"""Extra magics for terminal use."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from logging import error
import os
import sys
from IPython.core.error import TryNext, UsageError
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.lib.clipboard import ClipboardEmpty
from IPython.utils.text import SList, strip_email_quotes
from IPython.utils import py3compat
def get_pasted_lines(sentinel, l_input=py3compat.input, quiet=False):
    """Yield lines read via *l_input* until *sentinel* (or EOF) is entered.

    When *quiet* is false, a banner is printed and each line is prompted
    with ':'; when true, input is read silently with an empty prompt.
    """
    prompt = ""
    if not quiet:
        print("Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
            % sentinel)
        prompt = ":"
    while True:
        try:
            line = l_input(prompt)
        except EOFError:
            # Ctrl-D terminates the paste just like the sentinel does.
            print('<EOF>')
            return
        if line == sentinel:
            return
        yield line
@magics_class
class TerminalMagics(Magics):
    """Magics that only make sense in a terminal frontend (%paste, %cpaste, ...)."""

    def __init__(self, shell):
        super(TerminalMagics, self).__init__(shell)

    def store_or_execute(self, block, name):
        """ Execute a block, or store it in a variable, per the user's request.
        """
        if name:
            # If storing it for further editing
            self.shell.user_ns[name] = SList(block.splitlines())
            print("Block assigned to '%s'" % name)
        else:
            # Clean the block (leading blanks, e-mail quoting) and run it.
            b = self.preclean_input(block)
            self.shell.user_ns['pasted_block'] = b
            # Flag lets the shell know input came from a paste magic.
            self.shell.using_paste_magics = True
            try:
                self.shell.run_cell(b)
            finally:
                self.shell.using_paste_magics = False

    def preclean_input(self, block):
        """Strip leading blank lines and e-mail quote markers from *block*."""
        lines = block.splitlines()
        while lines and not lines[0].strip():
            lines = lines[1:]
        return strip_email_quotes('\n'.join(lines))

    def rerun_pasted(self, name='pasted_block'):
        """ Rerun a previously pasted command.
        """
        b = self.shell.user_ns.get(name)

        # Sanity checks
        if b is None:
            raise UsageError('No previous pasted block available')
        if not isinstance(b, str):
            raise UsageError(
                "Variable 'pasted_block' is not a string, can't execute")

        print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b)))
        self.shell.run_cell(b)

    @line_magic
    def autoindent(self, parameter_s = ''):
        """Toggle autoindent on/off (deprecated)"""
        self.shell.set_autoindent()
        print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent])

    @line_magic
    def cpaste(self, parameter_s=''):
        """Paste & execute a pre-formatted code block from clipboard.

        You must terminate the block with '--' (two minus-signs) or Ctrl-D
        alone on the line. You can also provide your own sentinel with '%paste
        -s %%' ('%%' is the new sentinel for this operation).

        The block is dedented prior to execution to enable execution of method
        definitions. '>' and '+' characters at the beginning of a line are
        ignored, to allow pasting directly from e-mails, diff files and
        doctests (the '...' continuation prompt is also stripped). The
        executed block is also assigned to variable named 'pasted_block' for
        later editing with '%edit pasted_block'.

        You can also pass a variable name as an argument, e.g. '%cpaste foo'.
        This assigns the pasted block to variable 'foo' as string, without
        dedenting or executing it (preceding >>> and + is still stripped)

        '%cpaste -r' re-executes the block previously entered by cpaste.
        '%cpaste -q' suppresses any additional output messages.

        Do not be alarmed by garbled output on Windows (it's a readline bug).
        Just press enter and type -- (and press enter again) and the block
        will be what was just pasted.

        IPython statements (magics, shell escapes) are not supported (yet).

        See also
        --------
        paste: automatically pull code from clipboard.

        Examples
        --------
        ::

          In [8]: %cpaste
          Pasting code; enter '--' alone on the line to stop.
          :>>> a = ["world!", "Hello"]
          :>>> print " ".join(sorted(a))
          :--
          Hello world!
        """
        opts, name = self.parse_options(parameter_s, 'rqs:', mode='string')
        if 'r' in opts:
            self.rerun_pasted()
            return
        quiet = ('q' in opts)

        sentinel = opts.get('s', u'--')
        block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
        self.store_or_execute(block, name)

    @line_magic
    def paste(self, parameter_s=''):
        """Paste & execute a pre-formatted code block from clipboard.

        The text is pulled directly from the clipboard without user
        intervention and printed back on the screen before execution (unless
        the -q flag is given to force quiet mode).

        The block is dedented prior to execution to enable execution of method
        definitions. '>' and '+' characters at the beginning of a line are
        ignored, to allow pasting directly from e-mails, diff files and
        doctests (the '...' continuation prompt is also stripped). The
        executed block is also assigned to variable named 'pasted_block' for
        later editing with '%edit pasted_block'.

        You can also pass a variable name as an argument, e.g. '%paste foo'.
        This assigns the pasted block to variable 'foo' as string, without
        executing it (preceding >>> and + is still stripped).

        Options:

          -r: re-executes the block previously entered by cpaste.

          -q: quiet mode: do not echo the pasted text back to the terminal.

        IPython statements (magics, shell escapes) are not supported (yet).

        See also
        --------
        cpaste: manually paste code into terminal until you mark its end.
        """
        opts, name = self.parse_options(parameter_s, 'rq', mode='string')
        if 'r' in opts:
            self.rerun_pasted()
            return
        try:
            block = self.shell.hooks.clipboard_get()
        except TryNext as clipboard_exc:
            # A hook raised TryNext: report its message if it carried one.
            message = getattr(clipboard_exc, 'args')
            if message:
                error(message[0])
            else:
                error('Could not get text from the clipboard.')
            return
        except ClipboardEmpty:
            raise UsageError("The clipboard appears to be empty")

        # By default, echo back to terminal unless quiet mode is requested
        if 'q' not in opts:
            write = self.shell.write
            write(self.shell.pycolorize(block))
            if not block.endswith('\n'):
                write('\n')
            write("## -- End pasted text --\n")

        self.store_or_execute(block, name)

    # Class-level: add a '%cls' magic only on Windows
    if sys.platform == 'win32':
        @line_magic
        def cls(self, s):
            """Clear screen.
            """
            os.system("cls")
|
|
import numpy as np
import nltk
from tqdm import tqdm
import pickle
import math
import os
import random
from cornell_data import CornellData
from batch import Batch
# Backport: provide math.isclose (PEP 485) on interpreters older than 3.5.
if not hasattr(math, 'isclose'):
    def _isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        # True when |a - b| is within the larger of the two tolerances.
        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
    math.isclose = _isclose
# Dataset class (Warning: No vocab limit currently)
class TextData:
# Load all conversations
def __init__(self, args):
    """Load (or build) the Cornell conversation dataset.

    Args:
        args: parsed program options; must provide root_dir, dataset_tag,
            max_length and play_dataset (plus fields used by other methods).
    """
    # Model parameters
    self.args = args

    # Path variables
    self.corpus_dir = os.path.join(self.args.root_dir, 'data/converse/cornell/')
    self.samples_dir = os.path.join(self.args.root_dir, 'data/converse/samples/')
    self.samples_name = self._construct_name()

    # Special-token ids; -1 until the vocabulary is built or loaded.
    self.pad_token = -1  # Padding
    self.go_token = -1  # Start of sequence
    self.eos_token = -1  # End of sequence
    self.unknown_token = -1  # Word dropped from vocabulary

    self.training_samples = []  # 2d array containing each question and his answer [[input,target]]

    self.word2id = {}
    self.id2word = {}  # For a rapid conversion

    # Builds the pickle on first run, loads it afterwards.
    self.load_corpus(self.samples_dir)

    # Plot some stats:
    print('Loaded: {} words, {} QA'.format(len(self.word2id), len(self.training_samples)))

    if self.args.play_dataset:
        self.play_dataset()
# Return the name of the dataset that the program should use with the current parameters.
# Compute this from the base name, the given tag (self.args.dataset_tag) and the sentence length.
def _construct_name(self):
base_name = 'dataset'
if self.args.dataset_tag:
base_name += '-' + self.args.dataset_tag
return base_name + '-' + str(self.args.max_length) + '.pkl'
# Only keep a small fraction of the dataset, given by the ratio
def make_lighter(self, ratio_dataset):
if not math.isclose(ratio_dataset, 1.0):
self.shuffle()
print('WARNING: Ratio feature not implemented.')
pass
# Shuffle the training samples
def shuffle(self):
print("Shuffling the dataset...")
random.shuffle(self.training_samples)
# Create a single batch from the sample list. The batch size is automatically defined by the number of
# samples given. The inputs should already be inverted, and the target should already have <go> and <eos>.
# Warning: This function should not make direct calls to args.batch_size.
# Args:
# samples (list<Obj>): a list of samples, each sample being of the form: [input, target]
# Return:
# Batch: a batch object
def _create_batch(self, samples):
    """Build one Batch from a list of [input, target] samples.

    The batch size is the number of samples given. Encoder inputs are
    reversed and left-padded; decoder/target sequences get <go>/<eos>
    and right-padding; weights are 1.0 over real target tokens, 0.0 over
    padding. Finally all tensors are transposed to time-major layout
    (sequence length x batch) as expected by the seq2seq model.

    Warning: this function should not make direct calls to args.batch_size.
    """
    batch = Batch()
    batch_size = len(samples)

    # Create the batch tensor
    for i in range(batch_size):
        # Unpack the sample
        sample = samples[i]
        # Watson mode: invert question and answer
        if not self.args.test and self.args.watson_mode:
            sample = list(reversed(sample))
        # Reverse inputs (and not outputs) -- a trick defined in the original seq2seq paper.
        batch.encoder_seqs.append(list(reversed(sample[0])))
        # Add the <go> and <eos> tokens.
        batch.decoder_seqs.append([self.go_token] + sample[1] + [self.eos_token])
        # Same as decoder, but shifted to the left (ignore the <go>).
        batch.target_seqs.append(batch.decoder_seqs[-1][1:])

        # Long sentences should have been filtered during the dataset creation.
        assert len(batch.encoder_seqs[i]) <= self.args.max_length_enco
        assert len(batch.decoder_seqs[i]) <= self.args.max_length_deco

        # Add padding & define weight
        batch.encoder_seqs[i] = [self.pad_token] * \
            (self.args.max_length_enco - len(batch.encoder_seqs[i])) + \
            batch.encoder_seqs[i]  # Left padding for the input
        batch.weights.append([1.0] * len(batch.target_seqs[i]) + [0.0] * (self.args.max_length_deco - len(batch.target_seqs[i])))
        batch.decoder_seqs[i] = batch.decoder_seqs[i] + [self.pad_token] * (self.args.max_length_deco - len(batch.decoder_seqs[i]))
        batch.target_seqs[i] = batch.target_seqs[i] + [self.pad_token] * (self.args.max_length_deco - len(batch.target_seqs[i]))

    # Simple hack to reshape the batch: transpose every field from
    # batch-major [batch][time] to time-major [time][batch].
    encoder_seqs_t = []  # Corrected orientation
    for i in range(self.args.max_length_enco):
        encoder_seq_t = []
        for j in range(batch_size):
            encoder_seq_t.append(batch.encoder_seqs[j][i])
        encoder_seqs_t.append(encoder_seq_t)
    batch.encoder_seqs = encoder_seqs_t

    decoder_seqs_t = []
    target_seqs_t = []
    weights_t = []
    for i in range(self.args.max_length_deco):
        decoder_seq_t = []
        target_seq_t = []
        weight_t = []
        for j in range(batch_size):
            decoder_seq_t.append(batch.decoder_seqs[j][i])
            target_seq_t.append(batch.target_seqs[j][i])
            weight_t.append(batch.weights[j][i])
        decoder_seqs_t.append(decoder_seq_t)
        target_seqs_t.append(target_seq_t)
        weights_t.append(weight_t)
    batch.decoder_seqs = decoder_seqs_t
    batch.target_seqs = target_seqs_t
    batch.weights = weights_t

    return batch
def get_batches(self):
    """Prepare the mini-batches for the current epoch.

    Returns:
        list<Batch>: one batch per slice of the (shuffled) training set.
    """
    self.shuffle()
    total = self.get_sample_size()
    step = self.args.batch_size
    # Slicing past the end of the list is safe in Python, so no explicit
    # min() clamp is required for the final (possibly shorter) batch.
    return [self._create_batch(self.training_samples[start:start + step])
            for start in range(0, total, step)]
def get_sample_size(self):
    """Return the number of (question, answer) pairs in the dataset."""
    samples = self.training_samples
    return len(samples)
def get_vocabulary_size(self):
    """Return the number of distinct words known to the dataset."""
    vocabulary = self.word2id
    return len(vocabulary)
def load_corpus(self, dir_name):
    """Load the preprocessed dataset from *dir_name*, creating (and
    caching) it from the raw Cornell corpus on first use."""
    dataset_path = os.path.join(dir_name, self.samples_name)
    if os.path.exists(dataset_path):
        print('Loading dataset from {}...'.format(dir_name))
        self.load_dataset(dir_name)
    else:
        print('Training samples not found. Creating dataset...')
        # Build the vocabulary and training samples from the raw corpus.
        cornell_dataset = CornellData(self.corpus_dir)
        self.create_corpus(cornell_dataset.get_conversations())
        # Cache the result so the next run skips preprocessing.
        print('Saving dataset...')
        self.save_dataset(dir_name)
    # <pad> must have been registered first -- id 0 is assumed elsewhere.
    assert self.pad_token == 0
def save_dataset(self, dir_name):
    """Pickle the vocabulary mappings and training samples to *dir_name*."""
    # Warning: if adding a field here, update load_dataset() as well.
    payload = {
        "word2id": self.word2id,
        "id2word": self.id2word,
        "training_samples": self.training_samples
    }
    with open(os.path.join(dir_name, self.samples_name), 'wb') as handle:
        pickle.dump(payload, handle, -1)  # -1 = highest pickle protocol
def load_dataset(self, dir_name):
    """Restore the vocabulary and samples pickled by save_dataset()."""
    with open(os.path.join(dir_name, self.samples_name), 'rb') as handle:
        # Warning: keep the keys in sync with save_dataset().
        data = pickle.load(handle)
    self.word2id = data["word2id"]
    self.id2word = data["id2word"]
    self.training_samples = data["training_samples"]
    # Restore the ids of the special tokens from the vocabulary.
    self.pad_token = self.word2id["<pad>"]
    self.go_token = self.word2id["<go>"]
    self.eos_token = self.word2id["<eos>"]
    self.unknown_token = self.word2id["<unknown>"]
def create_corpus(self, conversations):
    """Build the vocabulary and training samples from raw conversations."""
    # Register the special tokens first: <pad> MUST receive id 0,
    # as asserted by load_corpus().
    self.pad_token = self.get_word_id("<pad>")          # padding
    self.go_token = self.get_word_id("<go>")            # start of sequence
    self.eos_token = self.get_word_id("<eos>")          # end of sequence
    self.unknown_token = self.get_word_id("<unknown>")  # out-of-vocabulary
    # The dataset preserves the extraction order of the corpus.
    for conversation in tqdm(conversations, desc="Extract conversations"):
        self.extract_conversation(conversation)
def extract_conversation(self, conversation):
    """Turn each consecutive pair of lines of *conversation* into one
    (question, answer) training sample.

    Args:
        conversation (Obj): a conversation object containing the lines.
    """
    lines = conversation["lines"]
    # The last line has no answer following it, so pair each line with
    # its successor (stops one short automatically).
    for input_line, target_line in zip(lines[:-1], lines[1:]):
        input_words = self.extract_text(input_line["text"])
        target_words = self.extract_text(target_line["text"], True)
        # Drop samples where either side came back empty.
        if input_words and target_words:
            self.training_samples.append([input_words, target_words])
def extract_text(self, line, is_target=False):
    """Convert a raw text line into a list of word ids, truncated to
    args.max_length at sentence granularity.

    Args:
        line (str): the text to tokenize.
        is_target (bool): True for the answer side of a sample.
    Returns:
        list<int>: the word ids of the kept sentences.
    """
    words = []
    sentences = nltk.sent_tokenize(line)
    # Questions keep the LAST sentences, answers keep the FIRST ones, so
    # iterate in the corresponding order and grow from the matching end.
    ordered = sentences if is_target else reversed(sentences)
    for sentence in ordered:
        tokens = nltk.word_tokenize(sentence)
        if len(words) + len(tokens) > self.args.max_length:
            break  # adding this sentence would exceed the limit
        ids = [self.get_word_id(token) for token in tokens]
        words = words + ids if is_target else ids + words
    return words
def get_word_id(self, word, create=True):
    """Return the id of *word*, optionally registering it.

    Args:
        word (str): the word to look up (matched case-insensitively).
        create (bool): if True, unknown words are added to the
            vocabulary; otherwise unknown_token is returned for them.
    Returns:
        int: the word's id (or unknown_token).
    """
    word = word.lower()  # the vocabulary is case-insensitive
    if word in self.word2id:
        return self.word2id[word]
    if not create:
        return self.unknown_token
    # Register a brand-new word with the next free id.
    new_id = len(self.word2id)
    self.word2id[word] = new_id
    self.id2word[new_id] = word
    return new_id
def print_batch(self, batch):
    """Dump every sample of *batch* to stdout (debugging helper).

    The batch tensors are time-major, so the batch size is the length
    of the first timestep of the encoder input.
    """
    print('----- Print batch -----')
    batch_size = len(batch.encoder_seqs[0])
    for sample in range(batch_size):
        weights = ' '.join(str(step[sample]) for step in batch.weights)
        print('Encoder: {}'.format(self.batch_seq2str(batch.encoder_seqs, seq_id=sample)))
        print('Decoder: {}'.format(self.batch_seq2str(batch.decoder_seqs, seq_id=sample)))
        print('Targets: {}'.format(self.batch_seq2str(batch.target_seqs, seq_id=sample)))
        print('Weights: {}'.format(weights))
def sequence2str(self, sequence, clean=False, reverse=False):
    """Convert a list of word ids into a human readable string.

    Args:
        sequence (list<int>): the sentence to render.
        clean (bool): if set, drop the <go>, <pad> and <eos> tokens.
        reverse (bool): for encoder inputs, restore the natural order.
    Returns:
        str: the rendered sentence.
    """
    if not sequence:
        return ''
    if not clean:
        return ' '.join(self.id2word[word_id] for word_id in sequence)
    sentence = []
    for word_id in sequence:
        if word_id == self.eos_token:
            break  # end of the generated sentence
        if word_id not in (self.pad_token, self.go_token):
            sentence.append(self.id2word[word_id])
    # Inputs were fed reversed; there is no <eos> on that side, so the
    # early stop above cannot have fired spuriously.
    if reverse:
        sentence.reverse()
    return self.prettify(sentence)
def batch_seq2str(self, batch_seq, seq_id=0, **kwargs):
    """Render one sequence of a time-major batch as a string.

    Unlike sequence2str(), the values here are organized per timestep
    (batch-major axis second), so the sample must be gathered first.

    Args:
        batch_seq (list<list<int>>): time-major token ids.
        seq_id (int): position of the sample inside the batch.
        kwargs: formatting options, forwarded to sequence2str().
    Returns:
        str: the sentence.
    """
    sequence = [timestep[seq_id] for timestep in batch_seq]
    return self.sequence2str(sequence, **kwargs)
def sentence2enco(self, sentence):
    """Encode a raw sentence into a single-sample batch for the model.

    Returns None for an empty sentence or one longer than
    args.max_length tokens.
    """
    if sentence == '':
        return None
    # 1) Split the sentence into tokens.
    tokens = nltk.word_tokenize(sentence)
    if len(tokens) > self.args.max_length:
        return None
    # 2) Map tokens to word ids (unknown words are NOT added).
    word_ids = [self.get_word_id(token, create=False) for token in tokens]
    # 3) Build a mono batch with no target output (padding/reversal
    #    happen inside _create_batch).
    return self._create_batch([[word_ids, []]])
def deco2sentence(self, decoder_outputs):
    """Decode the decoder's output into a raw sequence of word ids.

    Args:
        decoder_outputs (list<np.array>): per-timestep score vectors.
    Returns:
        list: the argmax word id of each timestep; cleaning is left
        to the caller.
    """
    return [np.argmax(out) for out in decoder_outputs]
@staticmethod
def prettify(words):
    """Format a token list into a display sentence.

    Capitalizes the first word and the pronoun 'i', and omits the
    space before punctuation, apostrophe clitics ('s, 're, ...) and
    the "n't" contraction.
    """
    no_space_before = {',', '?', '!', ';', ':', '.', '...'}
    pieces = []
    for index, word in enumerate(words):
        if index == 0 and not word.isupper():
            word = word.title()  # capitalize the sentence start
        elif word == 'i':
            word = 'I'  # always capitalize the pronoun
        glue = (index == 0
                or word in no_space_before
                or word.startswith("'")
                or word.startswith("n't"))
        pieces.append(word if glue else ' ' + word)
    return ''.join(pieces)
def play_dataset(self):
    """Print args.play_dataset randomly chosen Q/A samples from the
    dataset (sanity-check helper)."""
    print('Randomly playing samples:')
    for i in range(self.args.play_dataset):
        # BUGFIX: the previous randint(0, len(samples)) is INCLUSIVE of
        # the upper bound and could index one past the end of the list;
        # randrange excludes it.
        id_sample = random.randrange(len(self.training_samples))
        print("\n".join([
            'Q: {}'.format(self.sequence2str(self.training_samples[id_sample][0])),
            'A: {}'.format(self.sequence2str(self.training_samples[id_sample][1]))
        ]) + "\n")
|
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from acq4.pyqtgraph.Qt import QtCore, QtGui
from CanvasItem import CanvasItem
import numpy as np
import scipy.ndimage as ndimage
import acq4.pyqtgraph as pg
import acq4.pyqtgraph.flowchart
import acq4.util.DataManager as DataManager
import acq4.util.debug as debug
class ImageCanvasItem(CanvasItem):
    """Canvas item displaying a 2D/3D/4D image, with a filter flowchart,
    histogram/levels control, composition-mode selector, and a time
    slider for image stacks."""

    def __init__(self, image=None, **opts):
        """
        CanvasItem displaying an image.
        The image may be 2 or 3-dimensional.
        Options:
            image: May be a fileHandle, ndarray, or GraphicsItem.
            handle: May optionally be specified in place of image
        """
        ## If no image was specified, check for a file handle..
        if image is None:
            image = opts.get('handle', None)
        item = None
        # Raw pixel data (None when a ready-made GraphicsItem was given).
        self.data = None
        if isinstance(image, QtGui.QGraphicsItem):
            # Already a graphics item -- display it as-is.
            item = image
        elif isinstance(image, np.ndarray):
            self.data = image
        elif isinstance(image, DataManager.FileHandle):
            # Read pixel data and display metadata from the file handle.
            opts['handle'] = image
            self.handle = image
            self.data = self.handle.read()
            if 'name' not in opts:
                opts['name'] = self.handle.shortName()
            try:
                if 'transform' in self.handle.info():
                    # Current format: a full 3D transform is stored.
                    tr = pg.SRTTransform3D(self.handle.info()['transform'])
                    tr = pg.SRTTransform(tr)  ## convert to 2D
                    opts['pos'] = tr.getTranslation()
                    opts['scale'] = tr.getScale()
                    opts['angle'] = tr.getRotation()
                else:  ## check for older info formats
                    if 'imagePosition' in self.handle.info():
                        opts['scale'] = self.handle.info()['pixelSize']
                        opts['pos'] = self.handle.info()['imagePosition']
                    elif 'Downsample' in self.handle.info():
                        ### Needed to support an older format stored by 2p imager
                        if 'pixelSize' in self.handle.info():
                            opts['scale'] = self.handle.info()['pixelSize']
                        if 'microscope' in self.handle.info():
                            m = self.handle.info()['microscope']
                            opts['pos'] = m['position'][0:2]
                        else:
                            # NOTE(review): assumes self.data exposes a
                            # MetaArray-like ._info list -- confirm.
                            info = self.data._info[-1]
                            opts['pos'] = info.get('imagePosition', None)
                    elif hasattr(self.data, '_info'):
                        info = self.data._info[-1]
                        opts['scale'] = info.get('pixelSize', None)
                        opts['pos'] = info.get('imagePosition', None)
                    else:
                        # No usable metadata; fall back to an arbitrary scale.
                        opts['defaultUserTransform'] = {'scale': (1e-5, 1e-5)}
                        opts['scalable'] = True
            except:
                # Best-effort: a bad transform must not prevent display.
                debug.printExc('Error reading transformation for image file %s:' % image.name())
        if item is None:
            item = pg.ImageItem()
        CanvasItem.__init__(self, item, **opts)

        # --- UI: filter panel + histogram stacked in a vertical splitter ---
        self.splitter = QtGui.QSplitter()
        self.splitter.setOrientation(QtCore.Qt.Vertical)
        self.layout.addWidget(self.splitter, self.layout.rowCount(), 0, 1, 2)
        self.filterGroup = pg.GroupBox("Image Filter")
        fgl = QtGui.QGridLayout()
        fgl.setContentsMargins(3, 3, 3, 3)
        fgl.setSpacing(1)
        self.filterGroup.setLayout(fgl)
        self.filter = ImageFilterWidget()
        self.filter.sigStateChanged.connect(self.filterStateChanged)
        fgl.addWidget(self.filter)
        self.splitter.addWidget(self.filterGroup)
        self.histogram = pg.HistogramLUTWidget()
        self.histogram.setImageItem(self.graphicsItem())
        # addWidget arguments: row, column, rowspan, colspan
        self.splitter.addWidget(self.histogram)
        # Composition-mode selector (how the image blends with the canvas).
        self.imgModeCombo = QtGui.QComboBox()
        self.imgModeCombo.addItems(['SourceOver', 'Overlay', 'Plus', 'Multiply'])
        self.layout.addWidget(self.imgModeCombo, self.layout.rowCount(), 0, 1, 1)
        self.imgModeCombo.currentIndexChanged.connect(self.imgModeChanged)
        # Toggle for automatic level scaling on each update.
        self.autoBtn = QtGui.QPushButton("Auto")
        self.autoBtn.setCheckable(True)
        self.autoBtn.setChecked(True)
        self.layout.addWidget(self.autoBtn, self.layout.rowCount()-1, 1, 1, 1)
        # Frame selector; only visible for image stacks (see updateImage).
        self.timeSlider = QtGui.QSlider(QtCore.Qt.Horizontal)
        self.layout.addWidget(self.timeSlider, self.layout.rowCount(), 0, 1, 2)
        self.timeSlider.valueChanged.connect(self.timeChanged)
        # ## controls that only appear if there is a time axis
        self.timeControls = [self.timeSlider]
        if self.data is not None:
            if isinstance(self.data, pg.metaarray.MetaArray):
                self.filter.setInput(self.data.asarray())
            else:
                self.filter.setInput(self.data)
            self.updateImage()
            # Needed to ensure selection box wraps the image properly
            tr = self.saveTransform()
            self.resetUserTransform()
            self.restoreTransform(tr)
            # Why doesn't this work?
            #self.selectBoxFromUser() ## move select box to match new bounds

    @classmethod
    def checkFile(cls, fh):
        """Return a priority score describing how well this class can
        display the file *fh* (0 = cannot display)."""
        if not fh.isFile():
            return 0
        ext = fh.ext().lower()
        if ext == '.ma':
            return 10
        elif ext in ['.ma', '.png', '.jpg', '.tif']:
            # NOTE(review): '.ma' here is unreachable (matched above) --
            # possibly one of the two branches has the wrong score.
            return 100
        return 0

    def timeChanged(self, t):
        # Slider moved: re-render with the newly selected frame.
        self.updateImage()

    def imgModeChanged(self):
        # Map the combo text onto the matching QPainter composition mode.
        mode = str(self.imgModeCombo.currentText())
        self.graphicsItem().setCompositionMode(getattr(QtGui.QPainter, 'CompositionMode_' + mode))

    def filterStateChanged(self):
        # Filter flowchart changed: re-render with its new output.
        self.updateImage()

    def updateImage(self):
        """Push the (possibly filtered) data into the ImageItem and
        show/hide the time controls based on dimensionality."""
        img = self.graphicsItem()  # NOTE(review): unused local
        # Try running data through flowchart filter
        data = self.filter.output()
        if data is None:
            data = self.data
        # Decide whether the first axis is time (a stack of frames).
        if data.ndim == 4:
            showTime = True
        elif data.ndim == 3:
            if data.shape[2] <= 4:  ## assume last axis is color
                showTime = False
            else:
                showTime = True
        else:
            showTime = False
        if showTime:
            self.timeSlider.setMinimum(0)
            self.timeSlider.setMaximum(data.shape[0]-1)
            self.graphicsItem().setImage(data[self.timeSlider.value()], autoLevels=self.autoBtn.isChecked())
        else:
            self.graphicsItem().setImage(data, autoLevels=self.autoBtn.isChecked())
        for widget in self.timeControls:
            widget.setVisible(showTime)

    def saveState(self, **kwds):
        """Serialize the base canvas-item state plus histogram levels,
        filter flowchart, and composition mode."""
        state = CanvasItem.saveState(self, **kwds)
        state['imagestate'] = self.histogram.saveState()
        state['filter'] = self.filter.saveState()
        state['composition'] = self.imgModeCombo.currentText()
        return state

    def restoreState(self, state):
        """Inverse of saveState()."""
        CanvasItem.restoreState(self, state)
        self.filter.restoreState(state['filter'])
        self.imgModeCombo.setCurrentIndex(self.imgModeCombo.findText(state['composition']))
        self.histogram.restoreState(state['imagestate'])
class ImageFilterWidget(QtGui.QWidget):
    """Widget offering one-click image filters (mean/max projections,
    smoothed max projections, edge detection) implemented as a pyqtgraph
    flowchart, plus a collapsible panel exposing the flowchart itself.

    Emits sigStateChanged whenever the flowchart configuration changes.

    FIX: the original used Python-2-only ``print x`` statements, which
    make the module fail to import under Python 3; they are converted to
    the single-argument ``print(x)`` form, which behaves identically
    under both Python 2 and 3.
    """
    sigStateChanged = QtCore.Signal()

    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.layout = QtGui.QGridLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self.layout)
        # Set up filter buttons in a two-column grid.
        self.btns = OrderedDict()
        row, col = 0, 0
        for name in ['Mean', 'Max', 'Max w/Gaussian', 'Max w/Median', 'Edge']:
            btn = QtGui.QPushButton(name)
            self.btns[name] = btn
            btn.setCheckable(True)
            self.layout.addWidget(btn, row, col)
            btn.clicked.connect(self.filterBtnClicked)
            col += 1
            if col > 1:
                col = 0
                row += 1
        # show flowchart control panel inside a collapsible group box
        self.fcGroup = pg.GroupBox('Filter Settings')
        fgl = QtGui.QVBoxLayout()
        self.fcGroup.setLayout(fgl)
        fgl.setContentsMargins(0, 0, 0, 0)
        self.layout.addWidget(self.fcGroup, row+1, 0, 1, 2)
        self.fc = pg.flowchart.Flowchart(terminals={'dataIn': {'io':'in'}, 'dataOut': {'io':'out'}})
        fgl.addWidget(self.fc.widget())
        self.fcGroup.setCollapsed(True)
        self.fc.sigStateChanged.connect(self.sigStateChanged)

    def filterBtnClicked(self, checked):
        """Rebuild the flowchart for the clicked filter button,
        preserving the configuration of any existing Slice node."""
        # remember slice before clearing fc
        snode = self.fc.nodes().get('Slice', None)
        if snode is not None:
            snstate = snode.saveState()
        else:
            snstate = None
        print(snstate)
        self.fc.clear()
        if not checked:
            return
        btn = self.sender()
        # Filter buttons are mutually exclusive; uncheck all the others.
        for b in self.btns.values():
            if b is not btn:
                b.setChecked(False)
        name = btn.text()
        if name == 'Mean':
            s = self.fc.createNode('Slice', name="Slice")
            m = self.fc.createNode('Mean', name="Mean", pos=[150, 0])
            self.fc.connectTerminals(self.fc['dataIn'], s['In'])
            self.fc.connectTerminals(s['Out'], m['In'])
            self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
        elif name == 'Max':
            s = self.fc.createNode('Slice', name="Slice")
            m = self.fc.createNode('Max', name="Max", pos=[150, 0])
            self.fc.connectTerminals(self.fc['dataIn'], s['In'])
            self.fc.connectTerminals(s['Out'], m['In'])
            self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
        elif name == 'Max w/Gaussian':
            s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
            f = self.fc.createNode('GaussianFilter', name="GaussianFilter", pos=[70, 0])
            m = self.fc.createNode('Max', name="Max", pos=[180, 0])
            self.fc.connectTerminals(self.fc['dataIn'], s['In'])
            self.fc.connectTerminals(s['Out'], f['In'])
            self.fc.connectTerminals(f['Out'], m['In'])
            self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
        elif name == 'Max w/Median':
            s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
            f = self.fc.createNode('MedianFilter', name="MedianFilter", pos=[70, 0])
            m = self.fc.createNode('Max', name="Max", pos=[180, 0])
            self.fc.connectTerminals(self.fc['dataIn'], s['In'])
            self.fc.connectTerminals(s['Out'], f['In'])
            self.fc.connectTerminals(f['Out'], m['In'])
            self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
        elif name == 'Edge':
            s = self.fc.createNode('Slice', name="Slice", pos=[-40, 0])
            f1 = self.fc.createNode('PythonEval', name='GaussDiff', pos=[70, 0])
            # Difference-of-Gaussians edge enhancement, evaluated inside
            # the flowchart's PythonEval node.
            f1.setCode("""
from scipy.ndimage import gaussian_filter
img = args['input'].astype(float)
edge = gaussian_filter(img, (0, 2, 2)) - gaussian_filter(img, (0, 1, 1))
return {'output': edge}
""")
            m = self.fc.createNode('Max', name="Max", pos=[180, 0])
            self.fc.connectTerminals(self.fc['dataIn'], s['In'])
            self.fc.connectTerminals(s['Out'], f1['input'])
            self.fc.connectTerminals(f1['output'], m['In'])
            self.fc.connectTerminals(m['Out'], self.fc['dataOut'])
        # restore slice if possible
        if snstate is not None:
            snode = self.fc.nodes().get('Slice', None)
            if snode is not None:
                print("restore!")
                snode.restoreState(snstate)

    def setInput(self, img):
        """Feed *img* into the filter flowchart."""
        self.fc.setInput(dataIn=img)

    def output(self):
        """Return the flowchart's current output value."""
        return self.fc.output()['dataOut']

    def process(self, img):
        """Run *img* through the flowchart synchronously and return the result."""
        return self.fc.process(dataIn=img)['dataOut']

    def saveState(self):
        """Serialize the filter configuration."""
        return {'flowchart': self.fc.saveState()}

    def restoreState(self, state):
        """Inverse of saveState()."""
        self.fc.restoreState(state['flowchart'])
|
|
#!/usr/bin/env python
from future.standard_library import install_aliases
install_aliases()
from builtins import range
from argparse import ArgumentParser
from multiprocessing.queues import Empty
from multiprocessing import Process, Queue, Value, cpu_count
import requests
import shutil
import sys
import signal
import random
import os
import logging
from urllib.parse import urljoin, quote
def get_num_pages(api_url, url, page_size=None):
    """Ask the CDX server how many result pages exist for *url*.

    Uses the ``showNumPages`` query, optionally with a custom page size.
    """
    params = {'url': url,
              'showNumPages': True}
    if page_size:
        params['pageSize'] = page_size
    response = requests.Session().get(api_url, params=params)
    pages_info = response.json()
    # Newer servers answer {'pages': N}; older ones return a bare int.
    if isinstance(pages_info, dict):
        return pages_info['pages']
    if isinstance(pages_info, int):
        return pages_info
    raise Exception('Num pages query returned invalid data: ' + response.text)
def fetch_result_page(job_params):
    """Query the CDX API for one result page and write it to the
    output file for that page.

    job_params keys: api_url, url, page, num_pages, output_prefix,
    timeout, gzipped, headers, dir, plus optional json/fl/page_size.
    """
    api_url = job_params['api_url']
    url = job_params['url']
    page = job_params['page']
    num_pages = job_params['num_pages']
    output_prefix = job_params['output_prefix']
    timeout = job_params['timeout']
    gzipped = job_params['gzipped']
    headers = job_params['headers']
    dir_ = job_params['dir']
    query = {'url': url,
             'page': page}
    if job_params.get('json'):
        query['output'] = 'json'
    if job_params.get('fl'):
        query['fl'] = job_params['fl']
    if job_params.get('page_size'):
        query['pageSize'] = job_params['page_size']
    # Zero-pad the page number to the width of the page count so the
    # output files sort lexicographically.
    nd = len(str(num_pages))
    format_ = '%0' + str(nd) + 'd'
    page_str = format_ % page
    filename = output_prefix + page_str
    logging.debug('Fetching page {0} ({2} of {1})'.format(
        page_str, num_pages, page + 1))
    # Add any custom headers that may have been specified
    req_headers = {}
    if headers:
        for h in headers:
            n, v = h.split(':', 1)
            req_headers[n.strip()] = v.strip()
    # Get the result
    session = requests.Session()
    r = session.get(api_url, params=query, headers=req_headers,
                    stream=True, timeout=timeout)
    if r.status_code == 404:
        # FIX: message typo ("for for").
        logging.error('No results for this query')
        r.close()
        return
    if r.status_code != 200:
        r.raise_for_status()
        r.close()
        return
    # use dir, if provided
    if dir_:
        if not os.path.isdir(dir_):
            os.makedirs(dir_)
        filename = os.path.join(dir_, filename)
    if not gzipped or r.headers.get('content-encoding') != 'gzip':
        # Plain output. This branch is also the fallback when gzipped
        # output was requested but the server did not gzip the response
        # -- previously the page was silently dropped in that case.
        with open(filename, 'w+b') as fh:
            for chunk in r.iter_content(1024):
                fh.write(chunk)
    else:
        # Copy the still-compressed raw stream straight to disk.
        filename += '.gz'
        with open(filename, 'w+b') as fh:
            shutil.copyfileobj(r.raw, fh)
    logging.debug('Done with "{0}"'.format(filename))
def do_work(job_queue, counter=None):
    """Worker-process loop: pull fetch-page jobs from *job_queue* until
    it is empty, retrying failed jobs up to job['max_retries'] times.

    counter: shared multiprocessing Value counting finished pages.
    """
    # Ignore Ctrl-C here; the parent process terminates the workers.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        # FIX: reset each iteration so the generic except below never
        # sees an unbound name (NameError) or a stale job from a
        # previous iteration when get_nowait() itself raises.
        job = None
        try:
            job = job_queue.get_nowait()
            fetch_result_page(job)
            num_done = 0
            with counter.get_lock():
                counter.value += 1
                num_done = counter.value
            logging.info('{0} page(s) of {1} finished'.format(num_done,
                                                              job['num_pages']))
        except Empty:
            pass
        except KeyboardInterrupt:
            break
        except Exception:
            if not job:
                raise
            # Re-queue the failed job until its retry budget runs out.
            retries = job.get('retries', 0)
            if retries < job['max_retries']:
                logging.error('Retrying Page {0}'.format(job['page']))
                job['retries'] = retries + 1
                job_queue.put_nowait(job)
            else:
                logging.error('Max retries exceeded for page {0}'.
                              format(job['page']))
def run_workers(num_workers, jobs, shuffle):
    """Queue all *jobs* and run *num_workers* worker processes over them.

    KeyboardInterrupt is caught so all workers can be terminated
    together. Pool is avoided to handle Ctrl-C gracefully; adapted from:
    http://bryceboe.com/2012/02/14/python-multiprocessing-pool-and-keyboardinterrupt-revisited/
    """
    job_queue = Queue()
    counter = Value('i', 0)
    # Optionally randomize the order the pages are fetched in.
    if shuffle:
        jobs = list(jobs)
        random.shuffle(jobs)
    for job in jobs:
        job_queue.put(job)
    workers = []
    for _ in range(num_workers):
        worker = Process(target=do_work,
                         args=(job_queue, counter))
        worker.start()
        workers.append(worker)
    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        logging.info('Received Ctrl-C, interrupting all workers')
        for worker in workers:
            worker.terminate()
            worker.join()
        raise
def get_args():
    """Define and parse the command-line interface, configure logging,
    and return the parsed argument namespace."""
    url_help = """
url to query in the index:
For prefix, use:
http://example.com/*
For domain query, use:
*.example.com
"""
    field_list_help = """
select fields to include: eg, --fl url,timestamp
"""
    parser = ArgumentParser('CDX Index API Client')
    parser.add_argument('url',
                        help=url_help)
    parser.add_argument('-n', '--show-num-pages', action='store_true',
                        help='Show Number of Pages only and exit')
    parser.add_argument('-p', '--processes', type=int,
                        help='Number of worker processes to use')
    parser.add_argument('--fl',
                        help=field_list_help)
    parser.add_argument('-j', '--json', action='store_true',
                        help='Use json output instead of cdx(j)')
    # FIX: help-text typo "Storge" -> "Store".
    parser.add_argument('-z', '--gzipped', action='store_true',
                        help='Store gzipped results, with .gz extensions')
    parser.add_argument('-o', '--output-prefix',
                        help='Custom output prefix, append with -NN for each page')
    parser.add_argument('-d', '--directory',
                        help='Specify custom output directory')
    parser.add_argument('--page-size', type=int,
                        help='size of each page in blocks, >=1')
    # --coll and --cdx-server-url are alternative ways to pick an endpoint.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--coll',
                       help=('The index collection to use or ' +
                             '"all" to use all available indexes. ' +
                             'The default value is the most recent available index'))
    CDX_SERVER_URL = 'http://index.commoncrawl.org/'
    group.add_argument('--cdx-server-url', default=CDX_SERVER_URL,
                       help='Set endpoint for CDX Server API ' +
                            'default to %s' % CDX_SERVER_URL)
    parser.add_argument('--timeout', default=30, type=int,
                        help='HTTP read timeout before retry')
    parser.add_argument('--max-retries', default=5, type=int,
                        help='Number of retry attempts')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Verbose logging of debug msgs')
    parser.add_argument('--pages', type=int, nargs='*',
                        help=('Get only the specified result page(s) instead ' +
                              'of all results'))
    parser.add_argument('--header', nargs='*',
                        help='Add custom header to request')
    parser.add_argument('--in-order', action='store_true',
                        help='Fetch pages in order (default is to shuffle page list)')
    r = parser.parse_args()
    # Logging
    level = logging.DEBUG if r.verbose else logging.INFO
    logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
                        level=level)
    # Keep the requests library's own logging quiet below WARNING.
    logging.getLogger("requests").setLevel(logging.WARNING)
    return r
def read_index(r, api_url, prefix=None):
    """Fetch every result page of the index at *api_url* for the query
    described by *r* (parsed CLI args), writing one file per page.

    prefix: optional extra string appended to the output filename
    prefix (used to separate results from different collections).
    """
    logging.info('Getting Index From ' + api_url)
    logging.debug('Getting Num Pages...')
    num_pages = get_num_pages(api_url, r.url, r.page_size)
    # Num Pages Only Query
    if r.show_num_pages:
        print(num_pages)
        return
    if num_pages == 0:
        print('No results found for: ' + r.url)
        # FIX: previously fell through and still spawned worker
        # processes for an empty page list.
        return
    # Derive the output filename prefix from the query if not given.
    if not r.output_prefix:
        if r.url.startswith('*'):
            output_prefix = 'domain-' + r.url.strip('*.')
        elif r.url.endswith('*'):
            output_prefix = 'prefix-' + r.url.strip('*')
        elif r.url.startswith(('http://', 'https://', '//')):
            output_prefix = r.url.split('//', 1)[-1]
        else:
            output_prefix = r.url
        output_prefix = output_prefix.strip('/')
        output_prefix = output_prefix.replace('/', '-')
        output_prefix = quote(output_prefix) + '-'
    else:
        output_prefix = r.output_prefix
    if prefix:
        output_prefix += prefix

    def get_page_job(page):
        """Assemble the job dict consumed by fetch_result_page()."""
        job = {}
        job['api_url'] = api_url
        job['url'] = r.url
        job['page'] = page
        job['num_pages'] = num_pages
        job['output_prefix'] = output_prefix
        job['fl'] = r.fl
        job['json'] = r.json
        job['page_size'] = r.page_size
        job['timeout'] = r.timeout
        job['max_retries'] = r.max_retries
        job['gzipped'] = r.gzipped
        job['headers'] = r.header
        job['dir'] = r.directory
        return job

    if r.pages:
        page_list = r.pages
        logging.info('Fetching pages {0} of {1}'.format(r.pages, r.url))
        num_pages = len(page_list)
    else:
        page_list = range(0, num_pages)
        logging.info('Fetching {0} pages of {1}'.format(num_pages, r.url))
    # A single page needs no worker processes at all.
    if num_pages == 1:
        fetch_result_page(get_page_job(page_list[0]))
        return
    # Set the number of workers based on --processes (default: 2x CPUs).
    if not r.processes:
        try:
            num_workers = cpu_count() * 2
        except NotImplementedError:
            num_workers = 4
    else:
        num_workers = r.processes
    num_workers = min(num_workers, num_pages)
    # generate page jobs
    job_list = map(get_page_job, page_list)
    run_workers(num_workers, job_list, not r.in_order)
def main():
    """Entry point: resolve the collection list from the CDX server and
    run read_index() over each selected collection."""
    args = get_args()
    collinfo = requests.get(urljoin(args.cdx_server_url, 'collinfo.json')).json()
    if not args.coll:
        # Default: the most recent (first listed) index only.
        selected = [collinfo[0]]
    elif args.coll != 'all':
        # A specific collection id was requested.
        selected = [c for c in collinfo if c['id'] == args.coll]
    else:
        selected = collinfo
    for info in selected:
        read_index(args, info['cdx-api'], info['id'])


if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.